repo_name (string, 5-92 chars) | path (string, 4-232 chars) | copies (string, 19 classes) | size (string, 4-7 chars) | content (string, 721-1.04M chars) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
dgjnpr/py-junos-eznc | lib/jnpr/junos/factory/viewfields.py | 1 | 1923 | class ViewFields(object):
"""
Used to dynamically create a field dictionary used with the
RunstatView class
"""
def __init__(self):
self._fields = dict()
def _prockvargs(self, field, name, **kvargs):
if not len(kvargs):
return
field[name].update(kvargs)
@property
def end(self):
return self._fields
def str(self, name, xpath=None, **kvargs):
""" field is a string """
if xpath is None:
xpath = name
field = {name: {'xpath': xpath}}
self._prockvargs(field, name, **kvargs)
self._fields.update(field)
return self
def astype(self, name, xpath=None, astype=int, **kvargs):
"""
        field string value will be passed to the function :astype:
        This is typically used to do simple type conversions,
        but it also works well if you set :astype: to
        a function that does a basic conversion, e.g. one that looks
        at the value and maps it to True/False. For
        example:
        astype=lambda x: True if x == 'enabled' else False
"""
if xpath is None:
xpath = name
field = {
name: {'xpath': xpath, 'astype': astype}
}
self._prockvargs(field, name, **kvargs)
self._fields.update(field)
return self
def int(self, name, xpath=None, **kvargs):
""" field is an integer """
return self.astype(name, xpath, int, **kvargs)
def flag(self, name, xpath=None, **kvargs):
"""
field is a flag, results in True/False if the xpath element exists or
not. Model this as a boolean type <bool>
"""
return self.astype(name, xpath, bool, **kvargs)
def table(self, name, table):
""" field is a RunstatTable """
self._fields.update({
name: {'table': table}
})
return self
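# A minimal usage sketch (hypothetical field names), assuming only the
# chainable API defined above: each call registers one field and the
# ``end`` property returns the accumulated dictionary.
#
#     fields = ViewFields().str('serial', 'serial-number').int('count').end
#     # -> {'serial': {'xpath': 'serial-number'},
#     #     'count': {'xpath': 'count', 'astype': <type 'int'>}}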
| apache-2.0 | 5,510,952,686,028,726,000 | 28.136364 | 77 | 0.558502 | false |
missionpinball/mpf | setup.py | 1 | 3976 | """Mission Pinball Framework (mpf) setup.py."""
import re
from setuptools import setup
# http://stackoverflow.com/questions/458550/standard-way-to-embed-version-into-python-package
VERSIONFILE = "mpf/_version.py"
VERSION_STRING_LONG = open(VERSIONFILE, "rt").read()
VSRE = r"^__version__ = ['\"]([^'\"]*)['\"]"
_MO = re.search(VSRE, VERSION_STRING_LONG, re.M)
if _MO:
VERSION_STRING = _MO.group(1)
else:
raise RuntimeError("Unable to find version string in %s." % (VERSIONFILE,))
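# For illustration (hypothetical version number): if mpf/_version.py contains
# the line __version__ = '0.50.0', the regex above captures '0.50.0' into
# VERSION_STRING.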
pin2dmd_requires = [
'pyusb==1.1.0'
]
linux_i2c_requires = [
'smbus2_asyncio==0.0.5'
]
rpi_requires = [
'apigpio-mpf==0.0.3'
]
cli_requires = [
'prompt_toolkit==3.0.8',
'asciimatics==1.12.0',
'terminaltables==3.1.0',
]
osc_requires = [
'python-osc==1.7.4'
]
irc_requires = [
'irc==19.0.1'
]
vpe_requires = [
'grpcio_tools==1.34.0',
'grpcio==1.34.0',
'protobuf==3.14.0',
]
crash_reporter_requires = [
'requests==2.22.0'
]
all_requires = (pin2dmd_requires + cli_requires + linux_i2c_requires + rpi_requires + osc_requires + irc_requires +
vpe_requires + crash_reporter_requires)
setup(
name='mpf',
version=VERSION_STRING,
description='Mission Pinball Framework',
long_description='''Let's build a pinball machine!
The Mission Pinball Framework (MPF) is an open source, cross-platform,
Python-based software framework for powering real pinball machines.
MPF is written in Python. It can run on Windows, OS X, and Linux
with the same code and configurations.
MPF interacts with real, physical pinball machines via modern pinball
controller hardware such as a Multimorphic P-ROC or P3-ROC, a FAST Pinball
controller, or Open Pinball Project hardware controllers. You can use MPF to
power your own custom-built machine or to update the software in existing
Williams, Bally, Stern, or Data East machines.
MPF is a work-in-progress that is not yet complete, though we're actively
developing it and checking in several commits a week. It's MIT licensed,
actively developed by fun people, and supported by a vibrant, pinball-loving
community.''',
url='https://missionpinball.org',
author='The Mission Pinball Framework Team',
author_email='[email protected]',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Topic :: Artistic Software',
'Topic :: Games/Entertainment :: Arcade'
],
keywords='pinball',
include_package_data=True,
package_data={'': ['*.yaml', '*.png', '*.so', '*.pyd', '*.ogg', '*.wav']},
# MANIFEST.in picks up the rest
packages=['mpf'],
zip_safe=False,
install_requires=['ruamel.yaml==0.15.100',
'pyserial==3.5',
'pyserial-asyncio==0.4;platform_system=="Windows"',
'pyserial-asyncio==0.5;platform_system!="Windows"',
'sortedcontainers==2.3.0',
'psutil==5.7.3',
],
extras_require={
'all': all_requires,
'pin2dmd': pin2dmd_requires,
'linux_i2c': linux_i2c_requires,
'rpi': rpi_requires,
'cli': cli_requires,
'osc': osc_requires,
'irc': irc_requires,
'vpe': vpe_requires,
'crash_reporter': crash_reporter_requires,
},
tests_require=[],
test_suite="mpf.tests",
entry_points={
'console_scripts': [
'mpf = mpf.commands:run_from_command_line',
]
}
)
| mit | 4,193,858,183,606,862,000 | 27.604317 | 115 | 0.617203 | false |
jddeal/python-cmr | tests/test_collection.py | 1 | 1877 | import unittest
from cmr.queries import CollectionQuery
class TestCollectionClass(unittest.TestCase):
def test_archive_center(self):
query = CollectionQuery()
query.archive_center("LP DAAC")
self.assertIn("archive_center", query.params)
self.assertEqual(query.params["archive_center"], "LP DAAC")
def test_keyword(self):
query = CollectionQuery()
query.keyword("AST_*")
self.assertIn("keyword", query.params)
self.assertEqual(query.params["keyword"], "AST_*")
def test_valid_formats(self):
query = CollectionQuery()
formats = [
"json", "xml", "echo10", "iso", "iso19115",
"csv", "atom", "kml", "native", "dif", "dif10",
"opendata", "umm_json", "umm_json_v1_1" "umm_json_v1_9"]
for _format in formats:
query.format(_format)
self.assertEqual(query._format, _format)
def test_invalid_format(self):
query = CollectionQuery()
with self.assertRaises(ValueError):
query.format("invalid")
query.format("jsonn")
query.format("iso19116")
def test_valid_concept_id(self):
query = CollectionQuery()
query.concept_id("C1299783579-LPDAAC_ECS")
self.assertEqual(query.params["concept_id"], ["C1299783579-LPDAAC_ECS"])
query.concept_id(["C1299783579-LPDAAC_ECS", "C1441380236-PODAAC"])
self.assertEqual(query.params["concept_id"], ["C1299783579-LPDAAC_ECS", "C1441380236-PODAAC"])
def test_invalid_concept_id(self):
query = CollectionQuery()
with self.assertRaises(ValueError):
query.concept_id("G1327299284-LPDAAC_ECS")
with self.assertRaises(ValueError):
query.concept_id(["C1299783579-LPDAAC_ECS", "G1327299284-LPDAAC_ECS"])
| mit | 3,611,592,017,431,405,600 | 31.929825 | 102 | 0.605221 | false |
genialis/resolwe | resolwe/flow/tests/test_manager.py | 1 | 8154 | # pylint: disable=missing-docstring
import os
from asgiref.sync import async_to_sync
from guardian.shortcuts import assign_perm
from resolwe.flow.managers import manager
from resolwe.flow.managers.utils import disable_auto_calls
from resolwe.flow.models import (
Collection,
Data,
DataDependency,
DescriptorSchema,
Process,
)
from resolwe.test import ProcessTestCase, TransactionTestCase
PROCESSES_DIR = os.path.join(os.path.dirname(__file__), "processes")
class TestManager(ProcessTestCase):
def setUp(self):
super().setUp()
self.collection = Collection.objects.create(contributor=self.contributor)
self._register_schemas(processes_paths=[PROCESSES_DIR])
def test_create_data(self):
"""Test that manager is run when new object is created."""
process = Process.objects.filter(slug="test-min").latest()
data = Data.objects.create(
name="Test data",
contributor=self.contributor,
process=process,
)
data.refresh_from_db()
self.assertEqual(data.status, Data.STATUS_DONE)
def test_spawned_process(self):
"""Test that manager is run for spawned processes and permissions are copied."""
DescriptorSchema.objects.create(
name="Test schema", slug="test-schema", contributor=self.contributor
)
spawned_process = Process.objects.filter(slug="test-save-file").latest()
        # Patch the process to create Entity, so its behaviour can be tested.
spawned_process.entity_type = "test-schema"
spawned_process.entity_descriptor_schema = "test-schema"
spawned_process.save()
assign_perm("view_collection", self.user, self.collection)
Data.objects.create(
name="Test data",
contributor=self.contributor,
process=Process.objects.filter(slug="test-spawn-new").latest(),
collection=self.collection,
)
# Created and spawned objects should be done.
self.assertEqual(Data.objects.filter(status=Data.STATUS_DONE).count(), 2)
# Check that permissions are inherited.
child = Data.objects.last()
self.assertTrue(self.user.has_perm("view_data", child))
self.assertEqual(child.collection.pk, self.collection.pk)
self.assertEqual(child.entity.collection.pk, self.collection.pk)
def test_workflow(self):
"""Test that manager is run for workflows."""
workflow = Process.objects.filter(slug="test-workflow-1").latest()
data1 = Data.objects.create(
name="Test data 1",
contributor=self.contributor,
process=workflow,
input={"param1": "world"},
)
data2 = Data.objects.create(
name="Test data 2",
contributor=self.contributor,
process=workflow,
input={"param1": "foobar"},
)
# Created and spawned objects should be done.
self.assertEqual(Data.objects.filter(status=Data.STATUS_DONE).count(), 6)
# Check correct dependency type is created.
self.assertEqual(
{d.kind for d in data1.children_dependency.all()},
{DataDependency.KIND_SUBPROCESS},
)
self.assertEqual(
{d.kind for d in data2.children_dependency.all()},
{DataDependency.KIND_SUBPROCESS},
)
def test_dependencies(self):
"""Test that manager handles dependencies correctly."""
process_parent = Process.objects.filter(slug="test-dependency-parent").latest()
process_child = Process.objects.filter(slug="test-dependency-child").latest()
data_parent = Data.objects.create(
name="Test parent", contributor=self.contributor, process=process_parent
)
data_child1 = Data.objects.create(
name="Test child",
contributor=self.contributor,
process=process_child,
input={},
)
data_child2 = Data.objects.create(
name="Test child",
contributor=self.contributor,
process=process_child,
input={"parent": data_parent.pk},
)
data_child3 = Data.objects.create(
name="Test child",
contributor=self.contributor,
process=process_child,
input={"parent": None},
)
data_parent.refresh_from_db()
data_child1.refresh_from_db()
data_child2.refresh_from_db()
data_child3.refresh_from_db()
# Check locks are created in manager.
self.assertFalse(data_parent.access_logs.exists())
self.assertFalse(data_child1.access_logs.exists())
self.assertTrue(data_child2.access_logs.exists())
self.assertFalse(data_child3.access_logs.exists())
# Check that the data_parent location was locked.
access_log = data_child2.access_logs.get()
self.assertEqual(
access_log.storage_location.file_storage.data.get().id, data_parent.id
)
# Check that the log is released.
self.assertIsNotNone(access_log.started)
self.assertIsNotNone(access_log.finished)
# Check status.
self.assertEqual(data_parent.status, Data.STATUS_DONE)
self.assertEqual(data_child1.status, Data.STATUS_DONE)
self.assertEqual(data_child2.status, Data.STATUS_DONE)
self.assertEqual(data_child3.status, Data.STATUS_DONE)
def test_process_notifications(self):
process = Process.objects.filter(slug="test-process-notifications").latest()
data = Data.objects.create(
name="Test data",
contributor=self.contributor,
process=process,
)
data.refresh_from_db()
self.assertEqual(len(data.process_info), 3)
self.assertEqual(data.process_info[0], "abc")
self.assertEqual(data.process_info[1][-5:], "xx...")
self.assertEqual(data.process_info[2][:8], "Response")
self.assertEqual(len(data.process_warning), 1)
self.assertEqual(data.process_warning[0][-5:], "yy...")
self.assertEqual(len(data.process_error), 1)
self.assertEqual(data.process_error[0][-5:], "zz...")
class TransactionTestManager(TransactionTestCase):
@disable_auto_calls()
def test_communicate(self):
process = Process.objects.create(
name="Input process",
contributor=self.contributor,
type="data:test:",
input_schema=[
{
"name": "input_data",
"type": "data:test:",
"required": False,
},
],
)
data_1 = Data.objects.create(contributor=self.contributor, process=process)
data_2 = Data.objects.create(
contributor=self.contributor,
process=process,
input={"input_data": data_1.id},
)
Data.objects.create(contributor=self.contributor, process=process)
Data.objects.create(contributor=self.contributor, process=process)
self.assertEqual(Data.objects.filter(status=Data.STATUS_RESOLVING).count(), 4)
# Allow unfinished data objects to exist when checking for execution
# barrier condition in the dispatcher.
async_to_sync(manager.communicate)(data_id=data_1.pk, run_sync=True)
data_1.refresh_from_db()
self.assertEqual(data_1.status, Data.STATUS_WAITING)
self.assertEqual(Data.objects.filter(status=Data.STATUS_RESOLVING).count(), 3)
data_1.status = Data.STATUS_DONE
data_1.save()
# Process object's children.
async_to_sync(manager.communicate)(data_id=data_1.pk, run_sync=True)
data_2.refresh_from_db()
self.assertEqual(data_2.status, Data.STATUS_WAITING)
self.assertEqual(Data.objects.filter(status=Data.STATUS_RESOLVING).count(), 2)
# Process all objects.
async_to_sync(manager.communicate)(run_sync=True)
self.assertEqual(Data.objects.filter(status=Data.STATUS_RESOLVING).count(), 0)
| apache-2.0 | 1,628,991,398,672,439,000 | 35.895928 | 88 | 0.625215 | false |
jreback/pandas | pandas/plotting/_matplotlib/hist.py | 1 | 11983 | from typing import TYPE_CHECKING
import numpy as np
from pandas.core.dtypes.common import is_integer, is_list_like
from pandas.core.dtypes.generic import ABCDataFrame, ABCIndex
from pandas.core.dtypes.missing import isna, remove_na_arraylike
from pandas.io.formats.printing import pprint_thing
from pandas.plotting._matplotlib.core import LinePlot, MPLPlot
from pandas.plotting._matplotlib.tools import (
create_subplots,
flatten_axes,
set_ticks_props,
)
if TYPE_CHECKING:
from matplotlib.axes import Axes
class HistPlot(LinePlot):
_kind = "hist"
def __init__(self, data, bins=10, bottom=0, **kwargs):
self.bins = bins # use mpl default
self.bottom = bottom
# Do not call LinePlot.__init__ which may fill nan
MPLPlot.__init__(self, data, **kwargs)
def _args_adjust(self):
if is_integer(self.bins):
# create common bin edge
values = self.data._convert(datetime=True)._get_numeric_data()
values = np.ravel(values)
values = values[~isna(values)]
_, self.bins = np.histogram(
values, bins=self.bins, range=self.kwds.get("range", None)
)
if is_list_like(self.bottom):
self.bottom = np.array(self.bottom)
@classmethod
def _plot(
cls,
ax,
y,
style=None,
bins=None,
bottom=0,
column_num=0,
stacking_id=None,
**kwds,
):
if column_num == 0:
cls._initialize_stacker(ax, stacking_id, len(bins) - 1)
y = y[~isna(y)]
base = np.zeros(len(bins) - 1)
bottom = bottom + cls._get_stacked_values(ax, stacking_id, base, kwds["label"])
# ignore style
n, bins, patches = ax.hist(y, bins=bins, bottom=bottom, **kwds)
cls._update_stacker(ax, stacking_id, n)
return patches
def _make_plot(self):
colors = self._get_colors()
stacking_id = self._get_stacking_id()
for i, (label, y) in enumerate(self._iter_data()):
ax = self._get_ax(i)
kwds = self.kwds.copy()
label = pprint_thing(label)
kwds["label"] = label
style, kwds = self._apply_style_colors(colors, kwds, i, label)
if style is not None:
kwds["style"] = style
kwds = self._make_plot_keywords(kwds, y)
# We allow weights to be a multi-dimensional array, e.g. a (10, 2) array,
# and each sub-array (10,) will be called in each iteration. If users only
# provide 1D array, we assume the same weights is used for all iterations
weights = kwds.get("weights", None)
if weights is not None and np.ndim(weights) != 1:
kwds["weights"] = weights[:, i]
artists = self._plot(ax, y, column_num=i, stacking_id=stacking_id, **kwds)
self._add_legend_handle(artists[0], label, index=i)
def _make_plot_keywords(self, kwds, y):
"""merge BoxPlot/KdePlot properties to passed kwds"""
# y is required for KdePlot
kwds["bottom"] = self.bottom
kwds["bins"] = self.bins
return kwds
def _post_plot_logic(self, ax: "Axes", data):
if self.orientation == "horizontal":
ax.set_xlabel("Frequency")
else:
ax.set_ylabel("Frequency")
@property
def orientation(self):
if self.kwds.get("orientation", None) == "horizontal":
return "horizontal"
else:
return "vertical"
class KdePlot(HistPlot):
_kind = "kde"
orientation = "vertical"
def __init__(self, data, bw_method=None, ind=None, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
self.bw_method = bw_method
self.ind = ind
def _args_adjust(self):
pass
def _get_ind(self, y):
if self.ind is None:
# np.nanmax() and np.nanmin() ignores the missing values
sample_range = np.nanmax(y) - np.nanmin(y)
ind = np.linspace(
np.nanmin(y) - 0.5 * sample_range,
np.nanmax(y) + 0.5 * sample_range,
1000,
)
elif is_integer(self.ind):
sample_range = np.nanmax(y) - np.nanmin(y)
ind = np.linspace(
np.nanmin(y) - 0.5 * sample_range,
np.nanmax(y) + 0.5 * sample_range,
self.ind,
)
else:
ind = self.ind
return ind
@classmethod
def _plot(
cls,
ax,
y,
style=None,
bw_method=None,
ind=None,
column_num=None,
stacking_id=None,
**kwds,
):
from scipy.stats import gaussian_kde
y = remove_na_arraylike(y)
gkde = gaussian_kde(y, bw_method=bw_method)
y = gkde.evaluate(ind)
lines = MPLPlot._plot(ax, ind, y, style=style, **kwds)
return lines
def _make_plot_keywords(self, kwds, y):
kwds["bw_method"] = self.bw_method
kwds["ind"] = self._get_ind(y)
return kwds
def _post_plot_logic(self, ax, data):
ax.set_ylabel("Density")
def _grouped_plot(
plotf,
data,
column=None,
by=None,
numeric_only=True,
figsize=None,
sharex=True,
sharey=True,
layout=None,
rot=0,
ax=None,
**kwargs,
):
if figsize == "default":
# allowed to specify mpl default with 'default'
raise ValueError(
"figsize='default' is no longer supported. "
"Specify figure size by tuple instead"
)
grouped = data.groupby(by)
if column is not None:
grouped = grouped[column]
naxes = len(grouped)
fig, axes = create_subplots(
naxes=naxes, figsize=figsize, sharex=sharex, sharey=sharey, ax=ax, layout=layout
)
_axes = flatten_axes(axes)
for i, (key, group) in enumerate(grouped):
ax = _axes[i]
if numeric_only and isinstance(group, ABCDataFrame):
group = group._get_numeric_data()
plotf(group, ax, **kwargs)
ax.set_title(pprint_thing(key))
return fig, axes
def _grouped_hist(
data,
column=None,
by=None,
ax=None,
bins=50,
figsize=None,
layout=None,
sharex=False,
sharey=False,
rot=90,
grid=True,
xlabelsize=None,
xrot=None,
ylabelsize=None,
yrot=None,
legend=False,
**kwargs,
):
"""
Grouped histogram
Parameters
----------
data : Series/DataFrame
column : object, optional
by : object, optional
ax : axes, optional
bins : int, default 50
figsize : tuple, optional
layout : optional
sharex : bool, default False
sharey : bool, default False
rot : int, default 90
grid : bool, default True
    legend : bool, default False
kwargs : dict, keyword arguments passed to matplotlib.Axes.hist
Returns
-------
collection of Matplotlib Axes
"""
if legend:
assert "label" not in kwargs
if data.ndim == 1:
kwargs["label"] = data.name
elif column is None:
kwargs["label"] = data.columns
else:
kwargs["label"] = column
def plot_group(group, ax):
ax.hist(group.dropna().values, bins=bins, **kwargs)
if legend:
ax.legend()
if xrot is None:
xrot = rot
fig, axes = _grouped_plot(
plot_group,
data,
column=column,
by=by,
sharex=sharex,
sharey=sharey,
ax=ax,
figsize=figsize,
layout=layout,
rot=rot,
)
set_ticks_props(
axes, xlabelsize=xlabelsize, xrot=xrot, ylabelsize=ylabelsize, yrot=yrot
)
fig.subplots_adjust(
bottom=0.15, top=0.9, left=0.1, right=0.9, hspace=0.5, wspace=0.3
)
return axes
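# Illustrative sketch (hypothetical data): this helper backs the ``by=`` path
# of the public hist API, drawing one histogram panel per group, e.g.
#
#     df = pd.DataFrame({"length": [1.5, 0.5, 1.2], "species": ["a", "b", "a"]})
#     df.hist(column="length", by="species", bins=5)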
def hist_series(
self,
by=None,
ax=None,
grid=True,
xlabelsize=None,
xrot=None,
ylabelsize=None,
yrot=None,
figsize=None,
bins=10,
legend: bool = False,
**kwds,
):
import matplotlib.pyplot as plt
if legend and "label" in kwds:
raise ValueError("Cannot use both legend and label")
if by is None:
if kwds.get("layout", None) is not None:
raise ValueError("The 'layout' keyword is not supported when 'by' is None")
# hack until the plotting interface is a bit more unified
fig = kwds.pop(
"figure", plt.gcf() if plt.get_fignums() else plt.figure(figsize=figsize)
)
if figsize is not None and tuple(figsize) != tuple(fig.get_size_inches()):
fig.set_size_inches(*figsize, forward=True)
if ax is None:
ax = fig.gca()
elif ax.get_figure() != fig:
raise AssertionError("passed axis not bound to passed figure")
values = self.dropna().values
if legend:
kwds["label"] = self.name
ax.hist(values, bins=bins, **kwds)
if legend:
ax.legend()
ax.grid(grid)
axes = np.array([ax])
set_ticks_props(
axes, xlabelsize=xlabelsize, xrot=xrot, ylabelsize=ylabelsize, yrot=yrot
)
else:
if "figure" in kwds:
raise ValueError(
"Cannot pass 'figure' when using the "
"'by' argument, since a new 'Figure' instance will be created"
)
axes = _grouped_hist(
self,
by=by,
ax=ax,
grid=grid,
figsize=figsize,
bins=bins,
xlabelsize=xlabelsize,
xrot=xrot,
ylabelsize=ylabelsize,
yrot=yrot,
legend=legend,
**kwds,
)
if hasattr(axes, "ndim"):
if axes.ndim == 1 and len(axes) == 1:
return axes[0]
return axes
def hist_frame(
data,
column=None,
by=None,
grid=True,
xlabelsize=None,
xrot=None,
ylabelsize=None,
yrot=None,
ax=None,
sharex=False,
sharey=False,
figsize=None,
layout=None,
bins=10,
legend: bool = False,
**kwds,
):
if legend and "label" in kwds:
raise ValueError("Cannot use both legend and label")
if by is not None:
axes = _grouped_hist(
data,
column=column,
by=by,
ax=ax,
grid=grid,
figsize=figsize,
sharex=sharex,
sharey=sharey,
layout=layout,
bins=bins,
xlabelsize=xlabelsize,
xrot=xrot,
ylabelsize=ylabelsize,
yrot=yrot,
legend=legend,
**kwds,
)
return axes
if column is not None:
if not isinstance(column, (list, np.ndarray, ABCIndex)):
column = [column]
data = data[column]
# GH32590
data = data.select_dtypes(
include=(np.number, "datetime64", "datetimetz"), exclude="timedelta"
)
naxes = len(data.columns)
if naxes == 0:
raise ValueError(
"hist method requires numerical or datetime columns, nothing to plot."
)
fig, axes = create_subplots(
naxes=naxes,
ax=ax,
squeeze=False,
sharex=sharex,
sharey=sharey,
figsize=figsize,
layout=layout,
)
_axes = flatten_axes(axes)
can_set_label = "label" not in kwds
for i, col in enumerate(data.columns):
ax = _axes[i]
if legend and can_set_label:
kwds["label"] = col
ax.hist(data[col].dropna().values, bins=bins, **kwds)
ax.set_title(col)
ax.grid(grid)
if legend:
ax.legend()
set_ticks_props(
axes, xlabelsize=xlabelsize, xrot=xrot, ylabelsize=ylabelsize, yrot=yrot
)
fig.subplots_adjust(wspace=0.3, hspace=0.3)
return axes
| bsd-3-clause | 6,666,785,963,615,121,000 | 25.106754 | 88 | 0.548026 | false |
vedmathai/dateCorroborator | corroboratorPOC.py | 1 | 6019 | from subprocess import Popen, PIPE
import re
import treetaggerwrapper
import sparqlQuerypy
from bs4 import BeautifulSoup
CONSTANTKEYVERBS="die, died, death, born, birth, sworn in" #Set of words that if present in the sentence, then don't discard the sentence, we are interested.
tagger = treetaggerwrapper.TreeTagger(TAGLANG = 'en', TAGDIR = '/home/vedu29/python/Gsoc/treetagger')
def jarWrapper(*args): # The helper function to use the jar file.
process = Popen(['java', '-jar']+list(args), stdout=PIPE, stderr=PIPE)
ret=[]
while process.poll() is None:
line = process.stdout.readline()
if line != '' and line.endswith('\n'):
ret.append(line[:-1])
stdout, stderr = process.communicate()
ret += stdout.split('\n')
if stderr != '':
ret += stderr.split('\n')
ret.remove('')
return ret
def returnProperty(word): #helper function to map the verb to a property. This will be small considering the number of date properties in DBpedia.
if word in ['death', 'die']: return 'http://dbpedia.org/ontology/deathDate'
if word in ['birth', 'born', 'bear']: return 'http://dbpedia.org/ontology/birthDate'
def normalizeAnnotations(sentence): # helper function to remove the references annotation, that appear as square brackets at the end of the sentence.
return re.sub(r'\[[0-9]*\]', ' ', sentence)
def sentenceSplitter(sentence): # helper regular function to correctly find end of sentences.
return re.split(r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\?)\s', sentence)
def normaliseResult(result):
normRes=[]
for sentence in result:
sent=normalizeAnnotations(sentence)
normRes += sentenceSplitter(sent)
return normRes
def findAndGenericAnnotateTime(sentence): #Replacing heidelTime tagged Timex tags to a generic 'TIME' so that treeTagger can work its magic without hiccups.
return re.sub('<TIMEX3((?!<TIMEX3).)*</TIMEX3>', 'TIME', sentence)
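# For illustration (hypothetical sentence): the substitution above turns
# 'He died <TIMEX3 tid="t1" type="DATE" value="1999">in 1999</TIMEX3>.'
# into 'He died TIME.', so TreeTagger sees one generic token instead of markup.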
def treetag(sentence, encoding = None): # TreeTagger helper function.
if encoding != None:
return treetaggerwrapper.make_tags(tagger.tag_text(unicode(sentence, "utf-8")))
else:
return treetaggerwrapper.make_tags(tagger.tag_text(sentence))
def returnKeyverbs(): #formats the key verbs above.
return '|'.join(verb for verb in CONSTANTKEYVERBS.split(', '))
def findSubVerbsTime(tagsentence): # The main helper function that figures out the subject in the sentence and finds the correct core verbs marked by an '*'
pos=[]
pos2=[]
seenSubject=False
seenVerb=False
lastfew=0
for i, tags in enumerate(tagsentence):
if tags.pos=='NP' or tags.pos=='PP':
pos += [tags.word]
seenSubject=True
lastfew+=1
if re.match(u'V..|V.', tags.pos) != None and seenSubject:
if not seenVerb:
subject = pos[-lastfew:]
pos2 += [[subject]]
if re.match(u'VB.', tags.pos) != None:
pos2[-1] += [tags.word]
else:
pos2[-1] += [tags.word+'*']
seenVerb=True
if re.match(u'V..|V.', tags.pos) == None and seenVerb:
seenVerb=False
seenSubject=False
lastfew=0
return pos2
def lemmatizeMainVerb(item):
for verb in item[1:]:
if '*' in verb:
return treetag(verb)[0].lemma
def listTimes(sentence): # uses beautiful soup to get the date information.
soup = BeautifulSoup(sentence, 'html.parser')
return soup.find_all('timex3')
def main(args):
result = jarWrapper(*args)
for sentence in normaliseResult(result):
sent=findAndGenericAnnotateTime(sentence)
m = re.match(r"(?P<first_part>.*) (?P<predicate>%s) (?P<second_part>.*)"%(returnKeyverbs()), sent) #scans the sentences for this pattern.
if m!=None:
left=treetag(m.group('first_part'), "utf-8")
middle=treetag(m.group('predicate'), "utf-8")
right=treetag(m.group('second_part'), "utf-8")
tagsentence = left + middle + right
if 'TIME' in m.group('first_part') or 'TIME' in m.group('second_part'): #Skip sentence if not date details.
subVerbTime = findSubVerbsTime(tagsentence)
for item in subVerbTime:
subject=" ".join(thing for thing in item[0])
if subject.lower() in ['he','she', 'it']:
subject=previousSubject
annotate = sparqlQuerypy.findAnnotation(subject)
annotatedSubject = annotate[0]['s']['value']
previousSubject = subject #heuristic that subject of this pronoun is in deed the previous subject, (not well thought through!)
verbLemma=lemmatizeMainVerb(item)
if verbLemma != None: prop=returnProperty(verbLemma)
timexList = listTimes(sentence)
i=0
                    while timexList[i]['type'] not in ["DATE", "TIME"]:
i+=1
time= timexList[i]['value']
date= sparqlQuerypy.findDate(annotatedSubject, prop)
if len(date) != 0:
date= date[0]['z']['value']
print '- - - - - - - - - - - - - - - - \n \n'
print sentence
print ' '
print 'The subject is:', subject
print 'The annotated subject is:', annotatedSubject
print 'The property is:', prop
print 'Date according to dbpedia:', date
print 'Date mined from the text:', time
print '\n \n'
if __name__=='__main__':
args = ['de.unihd.dbs.heideltime.standalone.jar', 'input']
result = jarWrapper(*args)
tagger = treetaggerwrapper.TreeTagger(TAGLANG = 'en', TAGDIR = '/home/vedu29/python/Gsoc/treetagger')
main(args)
| gpl-3.0 | -6,094,858,273,225,851,000 | 39.668919 | 157 | 0.587805 | false |
rsjohnco/rez | src/rez/build_process_.py | 1 | 12128 | from rez.packages_ import get_developer_package, iter_packages
from rez.exceptions import BuildProcessError, BuildContextResolveError, \
ReleaseHookCancellingError, RezError, ReleaseError, BuildError
from rez.resolved_context import ResolvedContext
from rez.release_hook import create_release_hooks
from rez.resolver import ResolverStatus
from rez.config import config
from rez.vendor.enum import Enum
import getpass
import os.path
def get_build_process_types():
"""Returns the available build process implementations."""
from rez.plugin_managers import plugin_manager
return plugin_manager.get_plugins('build_process')
def create_build_process(process_type, working_dir, build_system, vcs=None,
ensure_latest=True, verbose=False):
"""Create a `BuildProcess` instance."""
from rez.plugin_managers import plugin_manager
process_types = get_build_process_types()
    if process_type not in process_types:
raise BuildProcessError("Unknown build process: %r" % process_type)
cls = plugin_manager.get_plugin_class('build_process', process_type)
return cls(working_dir,
build_system=build_system,
vcs=vcs,
ensure_latest=ensure_latest,
verbose=verbose)
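# Illustrative use (hypothetical paths and objects): build a package locally
# with an existing build system instance, without releasing it.
#
#     process = create_build_process('local', '/src/mypkg',
#                                    build_system=build_sys, verbose=True)
#     process.build(install=True)  # installs under config.local_packages_path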
class BuildType(Enum):
""" Enum to represent the type of build."""
local = 0
central = 1
class BuildProcess(object):
"""A BuildProcess builds and possibly releases a package.
A build process iterates over the variants of a package, creates the
correct build environment for each variant, builds that variant using a
build system (or possibly creates a script so the user can do that
independently), and then possibly releases the package with the nominated
VCS. This is an abstract base class, you should use a BuildProcess
subclass.
"""
@classmethod
def name(cls):
raise NotImplementedError
def __init__(self, working_dir, build_system, vcs=None, ensure_latest=True,
verbose=False):
"""Create a BuildProcess.
Args:
working_dir (str): Directory containing the package to build.
build_system (`BuildSystem`): Build system used to build the package.
vcs (`ReleaseVCS`): Version control system to use for the release
process. If None, the package will only be built, not released.
ensure_latest: If True, do not allow the release process to occur
if an newer versioned package is already released.
"""
self.verbose = verbose
self.working_dir = working_dir
self.build_system = build_system
self.vcs = vcs
self.ensure_latest = ensure_latest
if vcs and vcs.path != working_dir:
raise BuildProcessError(
"Build process was instantiated with a mismatched VCS instance")
self.debug_print = config.debug_printer("package_release")
self.package = get_developer_package(working_dir)
hook_names = self.package.config.release_hooks or []
self.hooks = create_release_hooks(hook_names, working_dir)
self.build_path = os.path.join(self.working_dir,
self.package.config.build_directory)
def build(self, install_path=None, clean=False, install=False, variants=None):
"""Perform the build process.
Iterates over the package's variants, resolves the environment for
each, and runs the build system within each resolved environment.
Args:
install_path (str): The package repository path to install the
package to, if installing. If None, defaults to
`config.local_packages_path`.
clean (bool): If True, clear any previous build first. Otherwise,
rebuild over the top of a previous build.
install (bool): If True, install the build.
variants (list of int): Indexes of variants to build, all if None.
Raises:
`BuildError`: If the build failed.
Returns:
int: Number of variants successfully built.
"""
raise NotImplementedError
def release(self, release_message=None, variants=None):
"""Perform the release process.
Iterates over the package's variants, building and installing each into
the release path determined by `config.release_packages_path`.
Args:
release_message (str): Message to associate with the release.
variants (list of int): Indexes of variants to release, all if None.
Raises:
`ReleaseError`: If the release failed.
Returns:
int: Number of variants successfully released.
"""
raise NotImplementedError
class BuildProcessHelper(BuildProcess):
"""A BuildProcess base class with some useful functionality.
"""
def visit_variants(self, func, variants=None, **kwargs):
"""Iterate over variants and call a function on each."""
if variants:
present_variants = range(self.package.num_variants)
invalid_variants = set(variants) - set(present_variants)
if invalid_variants:
raise BuildError(
"The package does not contain the variants: %s"
% ", ".join(str(x) for x in sorted(invalid_variants)))
# iterate over variants
results = []
num_visited = 0
for variant in self.package.iter_variants():
if variants and variant.index not in variants:
self._print_header("Skipping %s..." % self._n_of_m(variant))
continue
result = func(variant, **kwargs)
results.append(result)
num_visited += 1
return num_visited, results
def get_package_install_path(self, path):
"""Return the installation path for a package (where its payload goes).
"""
path_ = os.path.join(path, self.package.name)
if self.package.version:
path_ = os.path.join(path_, str(self.package.version))
return path_
def create_build_context(self, variant, build_type, build_path):
"""Create a context to build the variant within."""
request = variant.get_requires(build_requires=True,
private_build_requires=True)
requests_str = ' '.join(map(str, request))
self._print("Resolving build environment: %s", requests_str)
if build_type == BuildType.local:
packages_path = self.package.config.packages_path
else:
packages_path = self.package.config.nonlocal_packages_path
context = ResolvedContext(request,
package_paths=packages_path,
building=True)
if self.verbose:
context.print_info()
# save context before possible fail, so user can debug
rxt_filepath = os.path.join(build_path, "build.rxt")
context.save(rxt_filepath)
if context.status != ResolverStatus.solved:
raise BuildContextResolveError(context)
return context, rxt_filepath
def pre_release(self):
# test that the release path exists
release_path = self.package.config.release_packages_path
if not os.path.exists(release_path):
raise ReleaseError("Release path does not exist: %r" % release_path)
# test that the repo is in a state to release
assert self.vcs
self._print("Checking state of repository...")
self.vcs.validate_repostate()
it = iter_packages(self.package.name, paths=[release_path])
packages = sorted(it, key=lambda x: x.version, reverse=True)
# check UUID. This stops unrelated packages that happen to have the same
# name, being released as though they are the same package
if self.package.uuid and packages:
latest_package = packages[0]
if latest_package.uuid and latest_package.uuid != self.package.uuid:
raise ReleaseError(
"Cannot release - the packages are not the same (UUID mismatch)")
# test that a newer package version hasn't already been released
if self.ensure_latest:
for package in packages:
if package.version > self.package.version:
raise ReleaseError(
"Cannot release - a newer package version already "
"exists (%s)" % package.uri)
else:
break
def post_release(self, release_message=None):
# format tag
release_settings = self.package.config.plugins.release_vcs
try:
tag_name = self.package.format(release_settings.tag_name)
except Exception as e:
raise ReleaseError("Error formatting release tag name: %s" % str(e))
if not tag_name:
tag_name = "unversioned"
# write a tag for the new release into the vcs
assert self.vcs
self.vcs.create_release_tag(tag_name=tag_name, message=release_message)
def run_hooks(self, hook_event, **kwargs):
for hook in self.hooks:
self.debug_print("Running %s hook '%s'...",
hook_event.label, hook.name())
try:
func = getattr(hook, hook_event.func_name)
func(user=getpass.getuser(), **kwargs)
except ReleaseHookCancellingError as e:
raise ReleaseError(
"%s cancelled by %s hook '%s': %s:\n%s"
% (hook_event.noun, hook_event.label, hook.name(),
e.__class__.__name__, str(e)))
            except RezError as e:
self.debug_print(
"Error in %s hook '%s': %s:\n%s"
% (hook_event.label, hook.name(),
e.__class__.__name__, str(e)))
def get_previous_release(self):
release_path = self.package.config.release_packages_path
it = iter_packages(self.package.name, paths=[release_path])
packages = sorted(it, key=lambda x: x.version, reverse=True)
for package in packages:
if package.version < self.package.version:
return package
return None
def get_release_data(self):
"""Get release data for this release.
Returns:
dict.
"""
previous_package = self.get_previous_release()
if previous_package:
previous_version = previous_package.version
previous_revision = previous_package.revision
else:
previous_version = None
previous_revision = None
assert self.vcs
revision = self.vcs.get_current_revision()
changelog = self.vcs.get_changelog(previous_revision)
# truncate changelog - very large changelogs can cause package load
# times to be very high, we don't want that
maxlen = config.max_package_changelog_chars
if maxlen and changelog and len(changelog) > maxlen + 3:
changelog = changelog[:maxlen] + "..."
return dict(vcs=self.vcs.name(),
revision=revision,
changelog=changelog,
previous_version=previous_version,
previous_revision=previous_revision)
def _print(self, txt, *nargs):
if self.verbose:
if nargs:
txt = txt % nargs
print txt
def _print_header(self, txt, n=1):
self._print('')
if n <= 1:
self._print('-' * 80)
self._print(txt)
self._print('-' * 80)
else:
self._print(txt)
self._print('-' * len(txt))
def _n_of_m(self, variant):
num_variants = max(self.package.num_variants, 1)
index = (variant.index or 0) + 1
return "%d/%d" % (index, num_variants)
| gpl-3.0 | 1,182,070,438,105,260,300 | 37.996785 | 85 | 0.601995 | false |
fluggo/Canvas | fluggo/editor/model/connectors.py | 1 | 9949 | # This file is part of the Fluggo Media Library for high-quality
# video and audio processing.
#
# Copyright 2012 Brian J. Crowell <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from fluggo import logging
from fluggo.editor import plugins
from fluggo.editor.model import sources
_log = logging.getLogger(__name__)
class VideoSourceRefConnector(plugins.VideoStream):
'''Resolves a reference into a video stream.
This class publishes alerts for any error that happens when finding the
stream.'''
def __init__(self, asset_list, ref, model_obj=None):
plugins.VideoStream.__init__(self)
self.asset_list = asset_list
self.ref = ref
self.model_obj = model_obj
self.asset = None
self.source = None
self.stream = None
self._error = None
self.connect()
# TODO: Handle sources appearing, disappearing, and going online/offline
# TODO: Also potentially handle transforms
def set_ref(self, ref):
self.ref = ref
self.connect()
def _clear(self):
self.set_base_filter(None, new_range=(None, None))
self.set_format(None)
def connect(self):
try:
if self.asset:
self.asset = None
if self.source:
self.unfollow_alerts(self.source)
self.source = None
if self.stream:
self.unfollow_alerts(self.stream)
self.stream = None
if self._error:
self.hide_alert(self._error)
self._error = None
if not self.ref:
self._clear()
return
# TODO: Handle ad-hoc sources
if not isinstance(self.ref, sources.AssetStreamRef):
self._clear()
return
# Handle missing sources, failure to bring online, and missing streams
try:
self.asset = self.asset_list[self.ref.asset_path]
except KeyError:
self._clear()
self._error = plugins.Alert('Reference refers to asset "' + self.ref.asset_path + '", which doesn\'t exist.',
model_obj=self.model_obj, icon=plugins.AlertIcon.Error)
self.show_alert(self._error)
return
if not self.asset.is_source:
self._clear()
self._error = plugins.Alert('Reference refers to asset "' + self.ref.asset_path + '" which is not a video source.',
model_obj=self.model_obj, icon=plugins.AlertIcon.Error)
                self.show_alert(self._error)
                return
try:
self.source = self.asset.get_source()
except:
self._clear()
self._error = plugins.Alert('Error while getting source from asset',
model_obj=self.model_obj, icon=plugins.AlertIcon.Error)
self.show_alert(self._error)
return
self.follow_alerts(self.source)
if self.source.offline:
try:
self.source.bring_online()
except:
self._clear()
self._error = plugins.Alert('Error while bringing source online',
model_obj=self.model_obj, icon=plugins.AlertIcon.Error, exc_info=True)
self.show_alert(self._error)
return
if self.source.offline:
self._clear()
if not len(self.source.alerts):
self._error = plugins.Alert('Unable to bring source "' + self.ref.asset_path + '" online.',
model_obj=self.model_obj, icon=plugins.AlertIcon.Error)
self.show_alert(self._error)
return
try:
self.stream = self.source.get_stream(self.ref.stream)
except KeyError:
self._clear()
self._error = plugins.Alert('Can\'t find stream "' + self.ref.stream + '" in source "' + self.ref.asset_path + '".',
model_obj=self.model_obj, icon=plugins.AlertIcon.Error)
self.show_alert(self._error)
return
self.follow_alerts(self.stream)
self.set_format(None)
self.set_base_filter(self.stream, new_range=self.stream.defined_range)
self.set_format(self.stream.format)
except:
_log.debug('Error while resolving reference', exc_info=True)
self._clear()
self._error = plugins.Alert('Error while resolving reference', model_obj=self.model_obj, icon=plugins.AlertIcon.Error, exc_info=True)
self.show_alert(self._error)
class AudioSourceRefConnector(plugins.AudioStream):
# Really, this has almost the exact same behavior as the above; maybe
# combine the two
'''Resolves a reference into an audio stream.
This class publishes alerts for any error that happens when finding the
stream.'''
def __init__(self, asset_list, ref, model_obj=None):
plugins.AudioStream.__init__(self)
self.asset_list = asset_list
self.ref = ref
self.model_obj = model_obj
        self.asset = None
        self.source = None
        self.stream = None
self._error = None
self.connect()
# TODO: Handle sources appearing, disappearing, and going online/offline
# TODO: Also potentially handle transforms
def set_ref(self, ref):
self.ref = ref
self.connect()
def _clear(self):
self.set_base_filter(None, new_range=(None, None))
self.set_format(None)
def connect(self):
try:
if self.asset:
self.asset = None
if self.source:
self.unfollow_alerts(self.source)
self.source = None
if self.stream:
self.unfollow_alerts(self.stream)
self.stream = None
if self._error:
self.hide_alert(self._error)
self._error = None
if not self.ref:
self._clear()
return
# TODO: Handle ad-hoc sources
if not isinstance(self.ref, sources.AssetStreamRef):
self._clear()
return
# Handle missing sources, failure to bring online, and missing streams
try:
self.asset = self.asset_list[self.ref.asset_path]
except KeyError:
self._clear()
self._error = plugins.Alert('Reference refers to asset "' + self.ref.asset_path + '", which doesn\'t exist.',
model_obj=self.model_obj, icon=plugins.AlertIcon.Error)
self.show_alert(self._error)
return
if not self.asset.is_source:
self._clear()
self._error = plugins.Alert('Reference refers to asset "' + self.ref.asset_path + '" which is not an audio source.',
model_obj=self.model_obj, icon=plugins.AlertIcon.Error)
                self.show_alert(self._error)
                return
try:
self.source = self.asset.get_source()
except:
self._clear()
self._error = plugins.Alert('Error while getting source from asset',
model_obj=self.model_obj, icon=plugins.AlertIcon.Error)
self.show_alert(self._error)
return
self.follow_alerts(self.source)
if self.source.offline:
try:
self.source.bring_online()
except:
self._clear()
self._error = plugins.Alert('Error while bringing source online',
model_obj=self.model_obj, icon=plugins.AlertIcon.Error, exc_info=True)
self.show_alert(self._error)
return
if self.source.offline:
self._clear()
if not len(self.source.alerts):
self._error = plugins.Alert('Unable to bring source "' + self.ref.asset_path + '" online.',
model_obj=self.model_obj, icon=plugins.AlertIcon.Error)
self.show_alert(self._error)
return
try:
self.stream = self.source.get_stream(self.ref.stream)
except KeyError:
self._clear()
self._error = plugins.Alert('Can\'t find stream "' + self.ref.stream + '" in source "' + self.ref.asset_path + '".',
model_obj=self.model_obj, icon=plugins.AlertIcon.Error)
self.show_alert(self._error)
return
self.follow_alerts(self.stream)
self.set_format(None)
self.set_base_filter(self.stream, new_range=self.stream.defined_range)
self.set_format(self.stream.format)
except:
_log.debug('Error while resolving reference', exc_info=True)
self._clear()
self._error = plugins.Alert('Error while resolving reference', model_obj=self.model_obj, icon=plugins.AlertIcon.Error, exc_info=True)
self.show_alert(self._error)
| gpl-3.0 | -7,344,997,437,159,359,000 | 35.577206 | 145 | 0.555232 | false |
diego-d5000/MisValesMd | env/lib/python2.7/site-packages/django/utils/version.py | 1 | 2539 | from __future__ import unicode_literals
import datetime
import os
import subprocess
from django.utils.lru_cache import lru_cache
def get_version(version=None):
"Returns a PEP 386-compliant version number from VERSION."
version = get_complete_version(version)
# Now build the two parts of the version number:
# major = X.Y[.Z]
# sub = .devN - for pre-alpha releases
# | {a|b|c}N - for alpha, beta and rc releases
major = get_major_version(version)
sub = ''
if version[3] == 'alpha' and version[4] == 0:
git_changeset = get_git_changeset()
if git_changeset:
sub = '.dev%s' % git_changeset
elif version[3] != 'final':
mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'c'}
sub = mapping[version[3]] + str(version[4])
return str(major + sub)
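# For illustration, with assumed VERSION tuples:
#   (1, 8, 0, 'final', 0) -> '1.8'
#   (1, 8, 1, 'final', 0) -> '1.8.1'
#   (1, 9, 0, 'beta', 2)  -> '1.9b2'
#   (1, 9, 0, 'alpha', 0) -> '1.9.dev20150605130756' (in a git checkout)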
def get_major_version(version=None):
"Returns major version from VERSION."
version = get_complete_version(version)
parts = 2 if version[2] == 0 else 3
major = '.'.join(str(x) for x in version[:parts])
return major
def get_complete_version(version=None):
"""Returns a tuple of the django version. If version argument is non-empty,
then checks for correctness of the tuple provided.
"""
if version is None:
from django import VERSION as version
else:
assert len(version) == 5
assert version[3] in ('alpha', 'beta', 'rc', 'final')
return version
def get_docs_version(version=None):
version = get_complete_version(version)
if version[3] != 'final':
return 'dev'
else:
return '%d.%d' % version[:2]
@lru_cache()
def get_git_changeset():
"""Returns a numeric identifier of the latest git changeset.
The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS format.
This value isn't guaranteed to be unique, but collisions are very unlikely,
so it's sufficient for generating the development version numbers.
"""
repo_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
git_log = subprocess.Popen('git log --pretty=format:%ct --quiet -1 HEAD',
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=True, cwd=repo_dir, universal_newlines=True)
timestamp = git_log.communicate()[0]
try:
timestamp = datetime.datetime.utcfromtimestamp(int(timestamp))
except ValueError:
return None
return timestamp.strftime('%Y%m%d%H%M%S')
| mit | -5,298,214,203,842,086,000 | 29.7375 | 79 | 0.624655 | false |
jurcicek/extended-hidden-vector-state-parser | semantics-4/batch/cued_tune_scale.py | 1 | 2845 | # Script name : tune_scale.py
# Semantics version: semantics-4
# Description      : This script enables tuning of the scaling factors
#                    SCALE_CONCEPT12 and SCALE_PUSHPOP. Default values are
#                    taken from the settings file; the default range is +-0.6
#                    with a step of 0.2. To run a test, pass the string "test"
#                    on the command line; to progressively refine the grid, add
#                    one "refine" per refinement; if the 'all' procedure need
#                    not be run, pass "noall". The script writes the criterion
#                    values for the given run to 'tune_cued_scale.csv'.
import os
from svc.utils import linrange, linspace
if not 'noall' in argv and 'test' not in argv:
all(moveResults=False)
def tune_scale(**env):
eps = 1e-6
if env['SCALE_CONCEPT12'] < 1.0-eps or env['SCALE_PUSHPOP'] < 1.0-eps:
logger.info("Scaling factor is less than 1.0")
return 0
if 'test' not in argv:
scale(env=env)
res = decodeHldt()
# return res['cAcc'], res['uCorr']
return res['sActAcc'], res['iF']
else:
        # When testing, the loss function is maximal (equal to 1) at the
        # points 1.83, 1.97
global SCALE_CONCEPT12, SCALE_PUSHPOP
return 1 - (env['SCALE_CONCEPT12']-1.83)**2 \
- (env['SCALE_PUSHPOP']-1.97)**2
n_iters = argv.count('refine')+1
SCALE_PUSHPOP = float(env['SCALE_PUSHPOP'])
SCALE_PUSHPOP_RANGE = +-0.6
SCALE_PUSHPOP_STEP = 0.2
SCALE_CONCEPT12 = float(env['SCALE_CONCEPT12'])
SCALE_CONCEPT12_RANGE = +-0.6
SCALE_CONCEPT12_STEP = 0.2
for i in range(n_iters):
logger.info("_" * 80)
logger.info('')
logger.info("Setting tuning steps:")
logger.info("=====================")
logger.info(" SCALE_CONCEPT12_STEP: %.2f" % SCALE_CONCEPT12_STEP)
logger.info(" SCALE_PUSHPOP_STEP : %.2f" % SCALE_PUSHPOP_STEP)
logger.info("_" * 80)
logger.info('')
logger.info('')
params = {
'SCALE_PUSHPOP': linrange(SCALE_PUSHPOP, SCALE_PUSHPOP_RANGE, SCALE_PUSHPOP_STEP),
'SCALE_CONCEPT12': linrange(SCALE_CONCEPT12, SCALE_CONCEPT12_RANGE, SCALE_CONCEPT12_STEP),
}
params = Grid.cartezianGrid(params)
value, tuned_params = params.tune(tune_scale, logger=logger)
if i == 0:
fn = 'tune_cued_scale.csv'
else:
fn = 'tune_cued_scale%d.csv' % (i+1, )
params.writeCSV(os.path.join(env['BUILD_DIR'], fn))
SCALE_CONCEPT12 = tuned_params['SCALE_CONCEPT12']
SCALE_CONCEPT12_RANGE = +-SCALE_CONCEPT12_STEP
SCALE_CONCEPT12_STEP /= 2
SCALE_PUSHPOP = tuned_params['SCALE_PUSHPOP']
SCALE_PUSHPOP_RANGE = +-SCALE_PUSHPOP_STEP
SCALE_PUSHPOP_STEP /= 2
env.update(tuned_params)
if 'test' not in argv:
scale()
decodeHldt()
decodeTst()
moveResults()
| gpl-2.0 | -8,246,472,734,814,509,000 | 32.470588 | 98 | 0.611599 | false |
dorianpula/learn-django | charleston/models.py | 1 | 5628 | from datetime import datetime
from django.core.urlresolvers import reverse, reverse_lazy
from django.db import models
from django.contrib.auth.models import User
from django.conf import settings
from django.utils.encoding import smart_str
from tagging.fields import TagField
from markdown import markdown
class Category(models.Model):
"""Categories of stories."""
title = models.CharField(max_length=250, help_text="Maximum 250 characters.")
slug = models.SlugField(unique=True,
help_text="Suggested value automatically generated from title. Must be unique.")
description = models.TextField()
class Meta:
ordering = ['title']
verbose_name_plural = "Categories"
def __unicode__(self):
return self.title
def get_absolute_url(self):
return "/categories/{}/".format(self.slug)
def live_entry_set(self):
return self.entry_set.filter(status=Entry.LIVE_STATUS)
class LiveEntryManager(models.Manager):
"""Gets only the entries that have a live status."""
def get_queryset(self):
return super(LiveEntryManager, self).get_queryset().filter(status=self.model.LIVE_STATUS)
class Entry(models.Model):
"""Entry or blog post model."""
title = models.CharField(max_length=250)
excerpt = models.TextField(blank=True)
body = models.TextField()
pub_date = models.DateTimeField(default=datetime.now)
slug = models.SlugField(unique_for_date='pub_date')
# Authors, comments and the like.
author = models.ForeignKey(User)
enable_comments = models.BooleanField(default=True)
featured = models.BooleanField(default=False)
# Status to enable different types of entries
LIVE_STATUS = 1
DRAFT_STATUS = 2
HIDDEN_STATUS = 3
STATUS_CHOICES = (
(LIVE_STATUS, 'Live'),
(DRAFT_STATUS, 'Draft'),
(HIDDEN_STATUS, 'Hidden'),
)
status = models.IntegerField(choices=STATUS_CHOICES, default=LIVE_STATUS)
# Now for the categories and tags
categories = models.ManyToManyField(Category)
tags = TagField()
# Separate HTML rendered entries to allow for fast loading. (Space vs. processor tradeoff)
excerpt_html = models.TextField(editable=False, blank=True)
body_html = models.TextField(editable=False, blank=True)
# Hook in the nice manager we've written above.
live = LiveEntryManager()
objects = models.Manager()
def save(self, force_insert=False, force_update=False, using=None,
update_fields=None):
"""Recreate the HTML from Markdown before saving the entry."""
self.body_html = markdown(self.body)
if self.excerpt:
self.excerpt_html = markdown(self.excerpt)
super(Entry, self).save(force_insert, force_update, using, update_fields)
def __unicode__(self):
return self.title
def get_absolute_url(self):
"""Gets the absolute URL for an entry."""
return reverse("charleston_entry_detail",
kwargs={"year": self.pub_date.strftime("%Y"),
"month": self.pub_date.strftime("%b").lower(),
"day": self.pub_date.strftime("%d"),
"slug": self.slug})
class Meta:
verbose_name_plural = "Entries"
ordering = ["-pub_date"]
class Link(models.Model):
"""Links model hyperlinks to various URLs both external and internal."""
title = models.CharField(max_length=250)
description = models.TextField(blank=True)
description_html = models.TextField(blank=True)
url = models.URLField(unique=True)
posted_by = models.ForeignKey(User)
pub_date = models.DateTimeField(default=datetime.now)
slug = models.SlugField(unique_for_date='pub_date')
tags = TagField()
# Allow for commenting and posting to external sites
enable_comments = models.BooleanField(default=True)
post_elsewhere = models.BooleanField(default=True)
# Extra link metadata
via_name = models.CharField('Via', max_length=250, blank=True,
help_text='The name of the person whose site you spotted the link on. Optional.')
via_url = models.URLField('Via URL', blank=True,
help_text='The URL of the site where you spotted the link. Optional.')
class Meta:
ordering = ['-pub_date']
def __unicode__(self):
return self.title
def save(self, force_insert=False, force_update=False, using=None, update_fields=None):
"""
Saves a link. Updates the rendered description HTML and make sure the link gets posted elsewhere.
"""
if self.description:
self.description_html = markdown(self.description)
# Update delicious
if not self.id and self.post_elsewhere:
import pydelicious
pydelicious.add(settings.DELICIOUS_USER, settings.DELICIOUS_PASSWORD,
smart_str(self.url), smart_str(self.title), smart_str(self.tags))
super(Link, self).save(force_insert=force_insert, force_update=force_update, using=using,
update_fields=update_fields)
def get_absolute_url(self):
"""Gets the absolute URL of the link."""
return reverse("charleston_link_detail",
kwargs={"year": self.pub_date.strftime("%Y"),
"month": self.pub_date.strftime("%b").lower(),
"day": self.pub_date.strftime("%d"),
"slug": self.slug})
| bsd-2-clause | 5,318,329,303,083,099,000 | 33.740741 | 114 | 0.63344 | false |
The-Compiler/qutebrowser | qutebrowser/completion/models/listcategory.py | 1 | 3679 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2017-2020 Ryan Roden-Corrent (rcorre) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Completion category that uses a list of tuples as a data source."""
import re
from typing import Iterable, Tuple
from PyQt5.QtCore import Qt, QSortFilterProxyModel, QRegExp
from PyQt5.QtGui import QStandardItem, QStandardItemModel
from PyQt5.QtWidgets import QWidget
from qutebrowser.completion.models import util
from qutebrowser.utils import qtutils, log
class ListCategory(QSortFilterProxyModel):
"""Expose a list of items as a category for the CompletionModel."""
def __init__(self,
name: str,
items: Iterable[Tuple[str, ...]],
sort: bool = True,
delete_func: util.DeleteFuncType = None,
parent: QWidget = None):
super().__init__(parent)
self.name = name
self.srcmodel = QStandardItemModel(parent=self)
self._pattern = ''
# ListCategory filters all columns
self.columns_to_filter = [0, 1, 2]
self.setFilterKeyColumn(-1)
for item in items:
self.srcmodel.appendRow([QStandardItem(x) for x in item])
self.setSourceModel(self.srcmodel)
self.delete_func = delete_func
self._sort = sort
def set_pattern(self, val):
"""Setter for pattern.
Args:
val: The value to set.
"""
self._pattern = val
val = re.sub(r' +', r' ', val) # See #1919
val = re.escape(val)
val = val.replace(r'\ ', '.*')
rx = QRegExp(val, Qt.CaseInsensitive)
self.setFilterRegExp(rx)
self.invalidate()
sortcol = 0
self.sort(sortcol)
def lessThan(self, lindex, rindex):
"""Custom sorting implementation.
Prefers all items which start with self._pattern. Other than that, uses
normal Python string sorting.
Args:
lindex: The QModelIndex of the left item (*left* < right)
rindex: The QModelIndex of the right item (left < *right*)
Return:
True if left < right, else False
"""
qtutils.ensure_valid(lindex)
qtutils.ensure_valid(rindex)
left = self.srcmodel.data(lindex)
right = self.srcmodel.data(rindex)
if left is None or right is None: # pragma: no cover
log.completion.warning("Got unexpected None value, "
"left={!r} right={!r} "
"lindex={!r} rindex={!r}"
.format(left, right, lindex, rindex))
return False
leftstart = left.startswith(self._pattern)
rightstart = right.startswith(self._pattern)
if leftstart and not rightstart:
return True
elif rightstart and not leftstart:
return False
elif self._sort:
return left < right
else:
return False
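# Usage sketch (illustrative, not part of the original module; a running Qt
# application is assumed and the example rows are made up):
#
#   cat = ListCategory('Bookmarks',
#                      [('https://qutebrowser.org', 'qutebrowser'),
#                       ('https://python.org', 'Python docs')])
#   cat.set_pattern('py')   # filters all columns and re-sorts the rows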
| gpl-3.0 | 2,326,568,685,677,522,000 | 33.064815 | 79 | 0.613754 | false |
abhipec/pec | emailApp/emailApp/migrations/0001_squashed_0009_dashboard.py | 1 | 2395 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
replaces = [(b'emailApp', '0001_initial'), (b'emailApp', '0002_email_textcleaned'), (b'emailApp', '0003_email_removedcontent'), (b'emailApp', '0004_auto_20150329_0757'), (b'emailApp', '0005_auto_20150329_1216'), (b'emailApp', '0006_auto_20150329_1251'), (b'emailApp', '0007_auto_20150329_1252'), (b'emailApp', '0008_auto_20150403_1346'), (b'emailApp', '0009_dashboard')]
dependencies = [
]
operations = [
migrations.CreateModel(
name='Email',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('messageId', models.SlugField(unique=True, max_length=100)),
('sender', models.EmailField(max_length=254)),
('timeStamp', models.DateTimeField()),
('subject', models.CharField(max_length=998, null=True)),
('textPlain', models.TextField(null=True, blank=True)),
('textHtml', models.TextField(null=True, blank=True)),
('removedContentHtml', models.TextField(null=True, blank=True)),
('removedContentPlain', models.TextField(null=True, blank=True)),
('textCleanHtml', models.TextField(null=True, blank=True)),
('textCleanPlain', models.TextField(null=True, blank=True)),
('category', models.CharField(default=b'', max_length=15, choices=[(b'NULL', b'Not categorized'), (b'promotional', b'Promotional'), (b'spam', b'Spam'), (b'human', b'Human'), (b'notification', b'Notification'), (b'others', b'Others')])),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Dashboard',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('data', models.TextField(null=True, blank=True)),
('timeStamp', models.DateTimeField()),
('validTill', models.DateTimeField()),
('source', models.OneToOneField(to='emailApp.Email')),
],
options={
},
bases=(models.Model,),
),
]
| mit | 4,397,749,009,530,095,600 | 48.895833 | 374 | 0.56952 | false |
mwalercz/virus-total-helper | server/dispatcher.py | 1 | 1180 | import inspect
from server.customhttp import HTTPResponse
class NoSuchUrl(Exception):
def __init__(self, url):
self.url = url
class Dispatcher:
def __init__(self, urls, scheduler, deque):
self.deque = deque
self.urls = urls
self.scheduler = scheduler
def dispatch(self, request):
fun = self._pick_handler_function(request.command, request.path)
return self._execute_handler_function(request, fun)
def _pick_handler_function(self, command, path):
key = command + path
if key in self.urls:
return self.urls[key]
else:
raise NoSuchUrl(key)
def _execute_handler_function(self, request, fun):
parameter_number = len(inspect.signature(fun).parameters)
if parameter_number == 2:
request.scheduler = self.scheduler
request.deque = self.deque
return fun(request, HTTPResponse())
else:
raise ArgumentLookupError(fun)
class ArgumentLookupError(Exception):
def __init__(self, fun):
self.fun = fun
def __str__(self):
        return repr("can't find proper params in " + str(self.fun))
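if __name__ == '__main__':
    # Minimal usage sketch, added for illustration only. Handlers must take
    # exactly (request, response), and url keys are command + path, as
    # dispatch() composes them; the dummy request and the `body` attribute
    # on HTTPResponse are assumptions.
    class _FakeRequest(object):
        command = 'GET'
        path = '/ping'

    def ping(request, response):
        response.body = 'pong'
        return response

    d = Dispatcher({'GET/ping': ping}, scheduler=None, deque=None)
    print(d.dispatch(_FakeRequest()))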
| bsd-3-clause | 8,067,938,445,552,639,000 | 26.44186 | 72 | 0.616949 | false |
blitzmann/Pyfa | gui/builtinContextMenus/spoolUp.py | 1 | 3085 | # noinspection PyPackageRequirements
import wx
import eos.config
import gui.mainFrame
from eos.utils.spoolSupport import SpoolType, SpoolOptions
from gui import globalEvents as GE
from gui.contextMenu import ContextMenu
from service.settings import ContextMenuSettings
from service.fit import Fit
class SpoolUp(ContextMenu):
def __init__(self):
self.mainFrame = gui.mainFrame.MainFrame.getInstance()
self.settings = ContextMenuSettings.getInstance()
self.cycleMap = {}
self.resetId = None
def display(self, srcContext, selection):
if not self.settings.get('spoolup'):
return False
        if srcContext not in ("fittingModule",) or self.mainFrame.getActiveFit() is None:
return False
self.mod = selection[0]
return self.mod.item.group.name in ("Precursor Weapon", "Mutadaptive Remote Armor Repairer")
def getText(self, itmContext, selection):
return "Spoolup Cycles"
def getSubMenu(self, context, selection, rootMenu, i, pitem):
m = wx.Menu()
if "wxMSW" in wx.PlatformInfo:
bindmenu = rootMenu
else:
bindmenu = m
isNotDefault = self.mod.spoolType is not None and self.mod.spoolAmount is not None
cycleDefault = self.mod.getSpoolData(spoolOptions=SpoolOptions(SpoolType.SCALE, eos.config.settings['globalDefaultSpoolupPercentage'], True))[0]
cycleCurrent = self.mod.getSpoolData(spoolOptions=SpoolOptions(SpoolType.SCALE, eos.config.settings['globalDefaultSpoolupPercentage'], False))[0]
cycleMin = self.mod.getSpoolData(spoolOptions=SpoolOptions(SpoolType.SCALE, 0, True))[0]
cycleMax = self.mod.getSpoolData(spoolOptions=SpoolOptions(SpoolType.SCALE, 1, True))[0]
for cycle in range(cycleMin, cycleMax + 1):
menuId = ContextMenu.nextID()
# Show default only for current value and when not overriden
if not isNotDefault and cycle == cycleDefault:
text = "{} (default)".format(cycle)
else:
text = "{}".format(cycle)
item = wx.MenuItem(m, menuId, text, kind=wx.ITEM_CHECK)
bindmenu.Bind(wx.EVT_MENU, self.handleSpoolChange, item)
m.Append(item)
item.Check(isNotDefault and cycle == cycleCurrent)
self.cycleMap[menuId] = cycle
self.resetId = ContextMenu.nextID()
item = wx.MenuItem(m, self.resetId, "Reset")
bindmenu.Bind(wx.EVT_MENU, self.handleSpoolChange, item)
m.Append(item)
return m
def handleSpoolChange(self, event):
if event.Id == self.resetId:
self.mod.spoolType = None
self.mod.spoolAmount = None
elif event.Id in self.cycleMap:
cycles = self.cycleMap[event.Id]
self.mod.spoolType = SpoolType.CYCLES
self.mod.spoolAmount = cycles
fitID = self.mainFrame.getActiveFit()
Fit.getInstance().recalc(fitID)
wx.PostEvent(self.mainFrame, GE.FitChanged(fitID=fitID))
SpoolUp.register()
| gpl-3.0 | -3,779,389,863,283,970,600 | 36.621951 | 153 | 0.65705 | false |
SpectoLabs/hoverpy | hoverpy/decorators.py | 1 | 1513 | from .hp import HoverPy
class capture(object):
def __init__(self, dbpath="requests.db", capture=True, **kwargs):
self.dbpath = dbpath
self.capture = capture
self.kwargs = kwargs
def __call__(self, f):
def wrapped_f(*args):
with HoverPy(capture=self.capture, dbpath=self.dbpath, **self.kwargs):
return f(*args)
return wrapped_f
class simulate(object):
def __init__(self, dbpath="requests.db", capture=False, **kwargs):
self.dbpath = dbpath
self.capture = capture
self.kwargs = kwargs
def __call__(self, f):
def wrapped_f(*args):
with HoverPy(capture=self.capture, dbpath=self.dbpath, **self.kwargs):
return f(*args)
return wrapped_f
class spy(object):
def __init__(self, dbpath="requests.db", capture=False, **kwargs):
self.dbpath = dbpath
self.capture = capture
self.kwargs = kwargs
def __call__(self, f):
def wrapped_f(*args):
with HoverPy(spy=True, capture=self.capture, dbpath=self.dbpath, **self.kwargs):
return f(*args)
return wrapped_f
class modify(object):
def __init__(self, middleware, **kwargs):
self.middleware = middleware
self.kwargs = kwargs
def __call__(self, f):
def wrapped_f(*args):
with HoverPy(modify=True, middleware=self.middleware, **self.kwargs):
return f(*args)
return wrapped_f
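# Usage sketch (illustrative, not part of the original module): each decorator
# runs the wrapped function inside a HoverPy session, so HTTP calls made in it
# are recorded to, or replayed from, the given dbpath.
#
#   @capture(dbpath="requests.db")
#   def record():
#       requests.get("http://example.com")   # traffic captured to requests.db
#
#   @simulate(dbpath="requests.db")
#   def replay():
#       requests.get("http://example.com")   # served back from requests.db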
| apache-2.0 | 4,016,808,002,218,339,300 | 27.54717 | 92 | 0.576999 | false |
christ2go/pycal | src/tree/treeprint.py | 1 | 1219 | def write(x):
print(x,end="")
class treeprinter:
"""
Prints an abstract syntax tree
as a XML-Document
tree has to be instaneceof node
(tree is the AST to be printed)
"""
def __init__(self,tree):
self.tree = tree
def printt(self):
if not self.tree:
            raise Exception("Tree Exception - Tree not initialized")
self.recprint(self.tree)
def writeattr(self,node):
for key in node.attr:
write(" "+key+"='"+str(node.attr[key])+"'")
def recprint(self,node,ident=0):
if node != None:
delim = "\t"
write(delim*ident)
write("<")
write(node.name.replace(" ","_"))
write("")
self.writeattr(node)
if len(node.children) != 0:
write(">")
write("\n")
for item in node.children:
self.recprint(item,ident+1)
# write("\n")
write(delim*ident)
write("</"+node.name.replace(" ","_")+">")
write("\n")
else:
write(" />"+"\n")
| apache-2.0 | -6,523,374,612,603,958,000 | 28.731707 | 58 | 0.437244 | false |
aksampath123/Python_Development_Projects | Dungeon_game/character.py | 1 | 1226 | import random
from combat import Combat
class Character(Combat):
attack_limit = 10
experience = 0
base_hit_points = 10
def attack(self):
roll = random.randint(1, self.attack_limit)
if self.weapon == 'sword':
roll += 1
elif self.weapon == 'axe':
roll += 2
elif self.weapon == 'bow':
roll += 0
return roll > 4
def get_weapon(self):
weapon_choice = input("Enter weapon of choice, [S]word, [A]xe or [B]ow: ").lower()
if weapon_choice in 'sab':
if weapon_choice == 's':
return 'sword'
elif weapon_choice == 'a':
return 'axe'
else:
return 'bow'
else:
return self.get_weapon()
def __init__(self, **kwargs):
self.name = input("Enter name: ")
self.weapon = self.get_weapon()
self.hit_points = self.base_hit_points
for key, value in kwargs.items():
setattr(self, key, value)
def __str__(self):
return "Character Name: {}, Weapon: {}, HP: {}, XP: {}".format(self.name, self.weapon, self.hit_points, self.experience)
def rest(self):
if self.hit_points < self.base_hit_points:
            self.hit_points += 1
def level_up(self):
return self.experience >= 5
| unlicense | -2,503,536,181,508,784,600 | 23.039216 | 124 | 0.579119 | false |
hn0/hzzapp | lib/config.py | 1 | 2968 | #_**_ coding: utf-8 _*_
"""
Module for parsing configuration files
Application will use a hard-coded configuration file for its behaviour
hzzapp.conf in the root directory should be written in RFC 822 style
Created 10. Dec 2014
Author: Hrvoje Novosel<[email protected]>
"""
import ConfigParser
#classes for exceptions
class SectionNotFound(Exception):
def __init__(self, sectionName):
self.value = "Section "+sectionName+" cannot be found in configuration file"
def __str__(self):
return repr(self.value)
#exception raised when the specific keyword for a given configuration value was not found
class KeyNotFound(Exception):
def __init__(self, sectionName, keyName):
self.value = "Key "+ keyName +" (under section "+ sectionName +") cannot be found in configuration file"
def __str__(self):
return repr(self.value)
#exception raised when a mismatch between the requested type and the type of the configuration value occurs, not implemented yet
class KeyIncorrectType(Exception):
def __init__(self, key, expectedType, realType):
self.value = "Key "+ key + " has type "+ realType +" but "+ expectedType +" was expected"
def __str__(self):
return repr(self.value)
class config:
def __init__(self, configfile):
self.cfg = ConfigParser.RawConfigParser()
self.cfg.read(configfile)
def Read(self, section, **keys):
#special case debug, for simplicity define here, maybe it should go to configuration file
if section.upper() in ['DEBUG', 'DEVELOPMENT']:
return False
elif section.upper() == 'SENDMAIL':
return True
#wrap retrive methods, return default value if passed, otherwise raise exception
try:
if len(keys) == 1:
item = keys.popitem()
return self.getValue(section, item[0], type(item[1]))
else:
for k in keys:
keys[k] = self.getValue(section, k, type(keys[k]))
return keys
except (KeyNotFound):
if len(keys) == 0 and item[1] != None:
return item[1]
            elif len(keys) > 0 and not any(keys[x] is None for x in keys):
return keys
#by default rerise exception
raise
def getValue(self, section, name, type=type(" ")):
if not self.cfg.has_section(section):
raise SectionNotFound(section)
if not self.cfg.has_option(section, name):
raise KeyNotFound(section, name)
#return typed response
if type is bool:
return self.cfg.getboolean(section, name)
elif type is int:
return self.cfg.getint(section, name)
elif type is float:
return self.cfg.getfloat(section, name)
else:
return self.cfg.get(section, name)
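# Usage sketch (illustrative; the section, keys and file name are made up).
# A single keyword returns one value, several keywords return a dict; each
# keyword's default doubles as a fallback value and a type hint:
#
#   cfg = config('hzzapp.conf')
#   host = cfg.Read('database', host='localhost')
#   params = cfg.Read('database', port=5432, timeout=30.0)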
| gpl-2.0 | 2,611,523,763,958,312,400 | 32.738636 | 113 | 0.600404 | false |
mablae/weblate | weblate/trans/south_migrations/0017_auto__add_field_change_action.py | 1 | 14562 | # -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2015 Michal Čihař <[email protected]>
#
# This file is part of Weblate <http://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from south.db import db
from south.v2 import SchemaMigration
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Change.action'
db.add_column('trans_change', 'action',
self.gf('django.db.models.fields.IntegerField')(default=2),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Change.action'
db.delete_column('trans_change', 'action')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'lang.language': {
'Meta': {'ordering': "['name']", 'object_name': 'Language'},
'code': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'nplurals': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'pluralequation': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'trans.change': {
'Meta': {'ordering': "['-timestamp']", 'object_name': 'Change'},
'action': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['trans.Unit']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'trans.check': {
'Meta': {'object_name': 'Check'},
'check': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignore': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lang.Language']", 'null': 'True', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['trans.Project']"})
},
'trans.comment': {
'Meta': {'ordering': "['timestamp']", 'object_name': 'Comment'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lang.Language']", 'null': 'True', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['trans.Project']"}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'trans.dictionary': {
'Meta': {'ordering': "['source']", 'object_name': 'Dictionary'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lang.Language']"}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['trans.Project']"}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'target': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'trans.indexupdate': {
'Meta': {'object_name': 'IndexUpdate'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'source': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['trans.Unit']"})
},
'trans.project': {
'Meta': {'ordering': "['name']", 'object_name': 'Project'},
'commit_message': ('django.db.models.fields.CharField', [], {'default': "'Translated using Weblate.'", 'max_length': '200'}),
'committer_email': ('django.db.models.fields.EmailField', [], {'default': "'[email protected]'", 'max_length': '75'}),
'committer_name': ('django.db.models.fields.CharField', [], {'default': "'Weblate'", 'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instructions': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'mail': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'merge_style': ('django.db.models.fields.CharField', [], {'default': "'merge'", 'max_length': '10'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'new_lang': ('django.db.models.fields.CharField', [], {'default': "'contact'", 'max_length': '10'}),
'push_on_commit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'set_translation_team': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'web': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'trans.subproject': {
'Meta': {'ordering': "['project__name', 'name']", 'object_name': 'SubProject'},
'branch': ('django.db.models.fields.CharField', [], {'default': "'master'", 'max_length': '50'}),
'file_format': ('django.db.models.fields.CharField', [], {'default': "'auto'", 'max_length': '50'}),
'filemask': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['trans.Project']"}),
'push': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'repo': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'report_source_bugs': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'repoweb': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'})
},
'trans.suggestion': {
'Meta': {'object_name': 'Suggestion'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lang.Language']"}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['trans.Project']"}),
'target': ('django.db.models.fields.TextField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'trans.translation': {
'Meta': {'ordering': "['language__name']", 'object_name': 'Translation'},
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'fuzzy': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lang.Language']"}),
'language_code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '20'}),
'lock_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'lock_user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'revision': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'subproject': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['trans.SubProject']"}),
'total': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'}),
'translated': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'})
},
'trans.unit': {
'Meta': {'ordering': "['position']", 'object_name': 'Unit'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'context': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'flags': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'fuzzy': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'source': ('django.db.models.fields.TextField', [], {}),
'target': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'translated': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'translation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['trans.Translation']"})
}
}
complete_apps = ['trans']
| gpl-3.0 | -8,901,180,927,229,123,000 | 72.903553 | 182 | 0.548595 | false |
OSGConnect/bandwidth-monitors | submit/testnetwork.py | 1 | 2452 | #!/usr/bin/env python
import urllib
import urllib2
import time
import getopt
import sys
import os
import timeit
import platform
import subprocess
import re
REFERENCE_URL = 'http://stash.osgconnect.net/+sthapa/100MB_ref'
WSGI_URL = 'http://web-dev.ci-connect.net/~sthapa/record_network_test.wsgi'
def download_file():
"""
    Download the reference file and read it into memory
"""
webref = urllib2.urlopen(REFERENCE_URL)
foo = webref.read()
def get_host_info():
"""
    Get host information
"""
host_info = {}
if 'OSG_SITE_NAME' in os.environ:
host_info['site'] = os.environ['OSG_SITE_NAME']
    elif 'GLIDEIN_RESOURCE_NAME' in os.environ:
        host_info['site'] = os.environ['GLIDEIN_RESOURCE_NAME']
host_info['hostname'] = platform.node()
return host_info
def send_record(test_record = None):
"""
Send record to wsgi
"""
if test_record is None:
return
try:
temp = test_record.copy()
if 'latency' in temp:
del temp['latency']
bandwidth_req = WSGI_URL + '?' + urllib.urlencode(temp)
req = urllib2.urlopen(bandwidth_req)
temp = test_record.copy()
if 'bandwidth' in temp:
del temp['bandwidth']
latency_req = WSGI_URL + '?' + urllib.urlencode(temp)
req = urllib2.urlopen(latency_req)
except Exception, e:
pass
def get_latency():
"""
Test ping time latency to stash
"""
try:
ping_output = subprocess.check_output(['/bin/ping', '-c', '10', 'stash.osgconnect.net'])
except AttributeError:
process = subprocess.Popen(['/bin/ping', '-c', '10', 'stash.osgconnect.net'], stdout=subprocess.PIPE)
ping_output = process.communicate()[0]
ping_regex = re.compile(r'rtt.*=\s+[\d.]+/([\d.]+)')
match = ping_regex.search(ping_output)
if match:
return float(match.group(1))
return 0.0
def main():
test_record = get_host_info()
test_record['date'] = time.time()
download_times = timeit.Timer('download_file()', "from __main__ import download_file").repeat(repeat = 5, number = 1)
avg_time = 0.0
records = 0
for x in download_times:
if x < 0.005:
continue
avg_time += x
records += 1
    if records == 0:
        # all samples fell below the 5 ms threshold; fall back to the raw data
        avg_time = sum(download_times)
        records = len(download_times)
    test_record['bandwidth'] = float(100 * 2**20) / (avg_time / float(records))
test_record['latency'] = get_latency()
send_record(test_record)
if __name__ == "__main__":
main()
| apache-2.0 | -1,105,823,344,387,599,200 | 27.511628 | 121 | 0.597879 | false |
miltonsarria/dsp-python | images/5_tf_deepNet_text.py | 1 | 5124 | import os
import nltk
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
#suppress TensorFlow warnings
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import tensorflow as tf
import numpy as np
import pickle
import random
from collections import Counter
lemmatizer = WordNetLemmatizer()
hm_lines = 10000000
#files
neg_file='/home/sarria/data/lexicon/neg.txt'
pos_file='/home/sarria/data/lexicon/pos.txt'
def create_lexicon(pos,neg):
lexicon = []
for fi in [pos,neg]:
with open(fi,'r') as fh:
contents = fh.readlines()
for l in contents[:hm_lines]:
all_words=word_tokenize(l.lower())
lexicon +=list(all_words)
lexicon=[lemmatizer.lemmatize(i) for i in lexicon]
w_counts = Counter(lexicon)
l2=[]
for w in w_counts:
if 1000>w_counts[w]>50:
l2.append(w)
return l2
def sample_handling(sample,lexicon,classification):
featureset=[]
with open(sample,'r') as fh:
contents = fh.readlines()
for l in contents[:hm_lines]:
current_words = word_tokenize(l.lower())
current_words = [lemmatizer.lemmatize(i) for i in current_words]
features = np.zeros(len(lexicon))
for word in current_words:
if word.lower() in lexicon:
index_value = lexicon.index(word.lower())
features[index_value]+=1
featureset.append([features,classification])
return featureset
def create_feature_sets_labels(pos_file,neg_file,test_size=0.1):
lexicon = create_lexicon(pos_file,neg_file)
print('Lexicon: ', len(lexicon))
features=[]
features+=sample_handling(pos_file,lexicon,[1,0])
features+=sample_handling(neg_file,lexicon,[0,1])
random.shuffle(features)
features = np.array(features)
testing_size=int(test_size*len(features))
train_x=list(features[:,0][:-testing_size])
train_y=list(features[:,1][:-testing_size])
test_x=list(features[:,0][-testing_size:])
test_y=list(features[:,1][-testing_size:])
return train_x,train_y,test_x,test_y
#####################################################################################
#####################################################################################
#####################################################################################
print('Preparing data...')
train_x,train_y,test_x,test_y=create_feature_sets_labels(pos_file,neg_file)
#with open ('/home/sarria/data/lexicon/sentimen_set.pickle','wb') as fh:
# pickle.dump([train_x,train_y,test_x,test_y],fh)
print('Done!!')
n_nodes_hl1 = 500
n_nodes_hl2 = 500
n_nodes_hl3 = 500
n_clases = 2
batch_size = 100
#input dimension = lexicon size (len(train_x[0]))
x = tf.placeholder('float',[None,len(train_x[0])])
y = tf.placeholder('float')
def nn_model(data):
#entrada*weights + biases
hidden_1_layer={'weights':tf.Variable(tf.random_normal([len(train_x[0]), n_nodes_hl1])),
'biases':tf.Variable(tf.random_normal([n_nodes_hl1]))}
hidden_2_layer={'weights':tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2])),
'biases':tf.Variable(tf.random_normal([n_nodes_hl2]))}
hidden_3_layer={'weights':tf.Variable(tf.random_normal([n_nodes_hl2, n_nodes_hl3])),
'biases':tf.Variable(tf.random_normal([n_nodes_hl3]))}
output_layer={'weights':tf.Variable(tf.random_normal([n_nodes_hl3, n_clases])),
'biases':tf.Variable(tf.random_normal([n_clases]))}
l1 = tf.add(tf.matmul(data,hidden_1_layer['weights']),hidden_1_layer['biases'])
l1 = tf.nn.relu(l1)
l2 = tf.add(tf.matmul(l1,hidden_2_layer['weights']),hidden_2_layer['biases'])
l2 = tf.nn.relu(l2)
l3 = tf.add(tf.matmul(l2,hidden_3_layer['weights']),hidden_3_layer['biases'])
l3 = tf.nn.relu(l3)
output = tf.add(tf.matmul(l3,output_layer['weights']),output_layer['biases'])
return output
def train_nn(x):
out = nn_model(x)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=out,labels=y))
optimizer = tf.train.AdamOptimizer().minimize(cost)
epochs=10
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for epoch in range(epochs):
epoch_loss = 0
i=0
while i<len(train_x):
start =i
end = i+batch_size
e_x = np.array(train_x[start:end])
e_y = np.array(train_y[start:end])
_,c=sess.run([optimizer,cost],feed_dict={x: e_x, y: e_y})
epoch_loss +=c
i+=batch_size
            print('Epoch', epoch, ' complete, loss: ', epoch_loss)
correct=tf.equal(tf.argmax(out,1),tf.argmax(y,1))
acc=tf.reduce_mean(tf.cast(correct,'float'))
print('Accuracy: ', acc.eval({x:test_x,y:test_y}))
train_nn(x)
| mit | 5,958,327,614,851,862,000 | 31.846154 | 92 | 0.556206 | false |
EvilCult/moviecatcher | View/ResultView.py | 1 | 4486 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import tkinter
import tkinter.messagebox
from Bl import Play
from Lib import Tools
class GUI :
def __init__ (self, master) :
self.master = master
self.Tools = Tools.Tools()
self.listRst = ''
self.resRst = ''
self.getDetail = ''
def showList (self, searchKey) :
rstWindow = tkinter.Toplevel()
		rstWindow.title('Resource List')
rstWindow.resizable(width = 'false', height = 'false')
if self.Tools.isWin() :
rstWindow.iconbitmap(self.Tools.getRes('biticon.ico'))
titleFrame = tkinter.Frame(rstWindow, bd = 0, bg="#444")
titleFrame.pack(expand = True, fill = 'both')
		titleLabel = tkinter.Label(titleFrame, text = 'Search results for keyword: "' + searchKey + '"', fg = '#ddd', bg="#444", font = ("Helvetica", "12"))
titleLabel.grid(row = 1, column = 1, pady = 10)
titleFrame.grid_columnconfigure(0, weight=1)
titleFrame.grid_columnconfigure(2, weight=1)
self.frame = tkinter.Frame(rstWindow, bd = 0, bg="#222")
self.frame.pack(expand = True, fill = 'both')
self.window = tkinter.Listbox(self.frame, height = 14, width = 40, bd = 0, bg="#222", fg = '#ddd', selectbackground = '#116cd6', highlightthickness = 0)
self.window.grid(row = 0, column = 0, padx = 10, pady = 10)
self.window.bind('<Double-Button-1>', self.__getMovDetails)
try :
self.window.delete(0, 100)
except :
pass
def updateList (self) :
if self.listRst != '' :
idx = 0
for x in self.listRst :
self.window.insert(idx, x['title'])
idx += 1
else :
self.timer = self.frame.after(50, self.updateList)
def showRes (self) :
self.resWindow = tkinter.Toplevel()
self.resWindow.title(self.target['title'])
self.resWindow.resizable(width = 'false', height = 'false')
if self.Tools.isWin() :
self.resWindow.iconbitmap(self.Tools.getRes('biticon.ico'))
self.resWindow.config(background='#444')
self.resFrame = tkinter.Frame(self.resWindow, bd = 0, bg="#444")
self.resFrame.grid(row = 0, column = 0, sticky = '')
btnZone = tkinter.Frame(self.resWindow, bd = 10, bg="#444")
btnZone.grid(row = 1, column = 0, sticky = '')
self.resList = tkinter.Listbox(self.resFrame, height = 8, width = 50, bd = 0, bg="#222", fg = '#ddd',selectbackground = '#116cd6', highlightthickness = 0)
self.resList.grid(row = 0, sticky = '')
		viewBtn = tkinter.Button(btnZone, text = 'View Link', width = 10, fg = '#222', highlightbackground = '#444', command = self.__taskShow)
viewBtn.grid(row = 0, column = 0, padx = 5)
		watchBtn = tkinter.Button(btnZone, text = 'Watch Online', width = 10, fg = '#222', highlightbackground = '#444', command = self.__taskWatch)
watchBtn.grid(row = 0, column = 1, padx = 5)
		dlBtn = tkinter.Button(btnZone, text = 'Offline Download', width = 10, fg = '#222', highlightbackground = '#444', command = self.__taskDownload)
dlBtn.grid(row = 0, column = 2, padx = 5)
def updateRes (self) :
if self.resRst != '' :
if len(self.resRst) > 0:
idx = 0
for x in self.resRst :
self.resList.insert(idx, x[0])
idx += 1
else :
				self.resList.insert(0, 'This resource has been taken down and cannot be played for now.')
else :
self.timer = self.resFrame.after(50, self.updateRes)
def __getMovDetails (self, event) :
idx = int(self.window.curselection()[0])
self.target = self.listRst[idx]
self.getDetail(self.target)
def __getChoose (self) :
if self.resList.curselection() == () :
			tkinter.messagebox.showinfo('Notice', 'Please select a file to operate on!')
else :
idx = int(self.resList.curselection()[0])
target = self.resRst[idx]
def __taskWatch (self) :
if self.resList.curselection() == () :
			tkinter.messagebox.showinfo('Notice', 'Please select a file to operate on!')
else :
idx = int(self.resList.curselection()[0])
target = self.resRst[idx]
Player = Play.Play(self.master)
Player.watchLink(target)
def __taskShow (self) :
if self.resList.curselection() == () :
			tkinter.messagebox.showinfo('Notice', 'Please select a file to operate on!')
else :
idx = int(self.resList.curselection()[0])
target = self.resRst[idx]
Player = Play.Play(self.master)
Player.showLink(target)
def __taskDownload (self) :
if self.resList.curselection() == () :
			tkinter.messagebox.showinfo('Notice', 'Please select a file to operate on!')
else :
idx = int(self.resList.curselection()[0])
target = self.resRst[idx]
Player = Play.Play(self.master)
Player.dlLink(target)
| mit | -3,049,180,664,325,927,000 | 29.906475 | 156 | 0.649674 | false |
ramineni/myironic | ironic/tests/drivers/ilo/test_power.py | 1 | 9414 | # Copyright 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test class for IloPower module."""
import mock
from oslo.utils import importutils
from oslo_config import cfg
from ironic.common import boot_devices
from ironic.common import exception
from ironic.common import states
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.drivers.modules.ilo import common as ilo_common
from ironic.drivers.modules.ilo import deploy as ilo_deploy
from ironic.drivers.modules.ilo import power as ilo_power
from ironic.tests.conductor import utils as mgr_utils
from ironic.tests.db import base as db_base
from ironic.tests.db import utils as db_utils
from ironic.tests.objects import utils as obj_utils
ilo_client = importutils.try_import('proliantutils.ilo.ribcl')
INFO_DICT = db_utils.get_test_ilo_info()
CONF = cfg.CONF
@mock.patch.object(ilo_common, 'get_ilo_object')
class IloPowerInternalMethodsTestCase(db_base.DbTestCase):
def setUp(self):
super(IloPowerInternalMethodsTestCase, self).setUp()
driver_info = INFO_DICT
mgr_utils.mock_the_extension_manager(driver="fake_ilo")
self.node = db_utils.create_test_node(
driver='fake_ilo',
driver_info=driver_info,
instance_uuid='instance_uuid_123')
CONF.set_override('power_retry', 2, 'ilo')
CONF.set_override('power_wait', 0, 'ilo')
def test__get_power_state(self, get_ilo_object_mock):
ilo_mock_object = get_ilo_object_mock.return_value
ilo_mock_object.get_host_power_status.return_value = 'ON'
self.assertEqual(
states.POWER_ON, ilo_power._get_power_state(self.node))
ilo_mock_object.get_host_power_status.return_value = 'OFF'
self.assertEqual(
states.POWER_OFF, ilo_power._get_power_state(self.node))
ilo_mock_object.get_host_power_status.return_value = 'ERROR'
self.assertEqual(states.ERROR, ilo_power._get_power_state(self.node))
def test__get_power_state_fail(self, get_ilo_object_mock):
ilo_mock_object = get_ilo_object_mock.return_value
exc = ilo_client.IloError('error')
ilo_mock_object.get_host_power_status.side_effect = exc
self.assertRaises(exception.IloOperationError,
ilo_power._get_power_state,
self.node)
ilo_mock_object.get_host_power_status.assert_called_once_with()
def test__set_power_state_invalid_state(self, get_ilo_object_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(exception.InvalidParameterValue,
ilo_power._set_power_state,
task,
states.ERROR)
def test__set_power_state_reboot_fail(self, get_ilo_object_mock):
ilo_mock_object = get_ilo_object_mock.return_value
exc = ilo_client.IloError('error')
ilo_mock_object.reset_server.side_effect = exc
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(exception.IloOperationError,
ilo_power._set_power_state,
task,
states.REBOOT)
ilo_mock_object.reset_server.assert_called_once_with()
def test__set_power_state_reboot_ok(self, get_ilo_object_mock):
ilo_mock_object = get_ilo_object_mock.return_value
ilo_mock_object.get_host_power_status.side_effect = ['ON', 'OFF', 'ON']
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
ilo_power._set_power_state(task, states.REBOOT)
ilo_mock_object.reset_server.assert_called_once_with()
def test__set_power_state_off_fail(self, get_ilo_object_mock):
ilo_mock_object = get_ilo_object_mock.return_value
ilo_mock_object.get_host_power_status.return_value = 'ON'
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(exception.PowerStateFailure,
ilo_power._set_power_state,
task,
states.POWER_OFF)
ilo_mock_object.get_host_power_status.assert_called_with()
ilo_mock_object.hold_pwr_btn.assert_called_once_with()
def test__set_power_state_on_ok(self, get_ilo_object_mock):
ilo_mock_object = get_ilo_object_mock.return_value
ilo_mock_object.get_host_power_status.side_effect = ['OFF', 'ON']
target_state = states.POWER_ON
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
ilo_power._set_power_state(task, target_state)
ilo_mock_object.get_host_power_status.assert_called_with()
ilo_mock_object.set_host_power.assert_called_once_with('ON')
@mock.patch.object(manager_utils, 'node_set_boot_device')
@mock.patch.object(ilo_common, 'setup_vmedia_for_boot')
def test__attach_boot_iso(self, setup_vmedia_mock, set_boot_device_mock,
get_ilo_object_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.node.instance_info['ilo_boot_iso'] = 'boot-iso'
ilo_power._attach_boot_iso(task)
setup_vmedia_mock.assert_called_once_with(task, 'boot-iso')
set_boot_device_mock.assert_called_once_with(task,
boot_devices.CDROM)
class IloPowerTestCase(db_base.DbTestCase):
def setUp(self):
super(IloPowerTestCase, self).setUp()
driver_info = INFO_DICT
mgr_utils.mock_the_extension_manager(driver="fake_ilo")
self.node = obj_utils.create_test_node(self.context,
driver='fake_ilo',
driver_info=driver_info)
def test_get_properties(self):
expected = ilo_common.COMMON_PROPERTIES
expected.update(ilo_deploy.COMMON_PROPERTIES)
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertEqual(expected, task.driver.get_properties())
@mock.patch.object(ilo_common, 'parse_driver_info')
def test_validate(self, mock_drvinfo):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.driver.power.validate(task)
mock_drvinfo.assert_called_once_with(task.node)
@mock.patch.object(ilo_common, 'parse_driver_info')
def test_validate_fail(self, mock_drvinfo):
side_effect = exception.InvalidParameterValue("Invalid Input")
mock_drvinfo.side_effect = side_effect
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(exception.InvalidParameterValue,
task.driver.power.validate,
task)
@mock.patch.object(ilo_power, '_get_power_state')
def test_get_power_state(self, mock_get_power):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
mock_get_power.return_value = states.POWER_ON
self.assertEqual(states.POWER_ON,
task.driver.power.get_power_state(task))
mock_get_power.assert_called_once_with(task.node)
@mock.patch.object(ilo_power, '_set_power_state')
def test_set_power_state(self, mock_set_power):
mock_set_power.return_value = states.POWER_ON
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.power.set_power_state(task, states.POWER_ON)
mock_set_power.assert_called_once_with(task, states.POWER_ON)
@mock.patch.object(ilo_power, '_set_power_state')
@mock.patch.object(ilo_power, '_get_power_state')
def test_reboot(self, mock_get_power, mock_set_power):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
mock_get_power.return_value = states.POWER_ON
mock_set_power.return_value = states.POWER_ON
task.driver.power.reboot(task)
mock_get_power.assert_called_once_with(task.node)
mock_set_power.assert_called_once_with(task, states.REBOOT)
| apache-2.0 | -3,160,510,754,953,371,000 | 44.259615 | 79 | 0.621521 | false |
matt77hias/Clipping | src/surfacearea.py | 1 | 2427 | import numpy as np
###############################################################################
## Surface Area
## ---------------------------------
## Planar polygon
###############################################################################
# Green's Theorem
#------------------------------------------------------------------------------
# integral_contour(L dx + M dy) = integral_area((dM/dx - dL/dy) dx dy)
# contour = oriented, piecewise smooth, simple closed curve in a plane
# area = region bounded by perimeter
# L, M = functions of (x,y) defined on an open region containing area with continuous partial derivatives
#
# Application:
# Planimeter
# integral_contour(-y dx + x dy) = integral_area((dx/dx - -dy/dy) dx dy) = 2 area
def area(p_vs, n=None):
if (len(p_vs) < 3):
return 0.0
dim = p_vs[0].shape[0]
if dim == 2:
return _area2D(p_vs)
elif dim == 3:
return _area3D(p_vs, n=n)
def _area2D(p_vs):
area = 0.0
nb_p_vs = len(p_vs)
#for j in range(nb_p_vs):
# p_v1 = p_vs[(j+nb_p_vs-1) % nb_p_vs]
# p_v2 = p_vs[j]
# area += + p_v1[0]*p_v2[1] - p_v2[0]*p_v1[1]
for j in range(nb_p_vs):
p_v1 = p_vs[(j+nb_p_vs-1) % nb_p_vs]
p_v2 = p_vs[j]
p_v3 = p_vs[(j+nb_p_vs+1) % nb_p_vs]
area += p_v2[0] * (p_v3[1] - p_v1[1])
return 0.5 * abs(area)
def _area3D(p_vs, n):
area = 0.0
nb_p_vs = len(p_vs)
ax = abs(n[0])
ay = abs(n[1])
az = abs(n[2])
if (ax > ay and ax > az): lca = 0
elif (ay > az): lca = 1
else: lca = 2
an = np.sqrt(ax*ax + ay*ay + az*az)
if lca == 0:
for j in range(nb_p_vs):
p_v1 = p_vs[(j+nb_p_vs-1) % nb_p_vs]
p_v2 = p_vs[j]
p_v3 = p_vs[(j+nb_p_vs+1) % nb_p_vs]
area += p_v2[1] * (p_v3[2] - p_v1[2])
area *= (an / n[0])
elif lca == 1:
for j in range(nb_p_vs):
p_v1 = p_vs[(j+nb_p_vs-1) % nb_p_vs]
p_v2 = p_vs[j]
p_v3 = p_vs[(j+nb_p_vs+1) % nb_p_vs]
area += p_v2[2] * (p_v3[0] - p_v1[0])
area *= (an / n[1])
else:
for j in range(nb_p_vs):
p_v1 = p_vs[(j+nb_p_vs-1) % nb_p_vs]
p_v2 = p_vs[j]
p_v3 = p_vs[(j+nb_p_vs+1) % nb_p_vs]
area += p_v2[0] * (p_v3[1] - p_v1[1])
area *= (an / n[2])
return 0.5 * abs(area)
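if __name__ == '__main__':
    # Quick check (illustrative): a unit square has area 1.0, both as a 2D
    # polygon and embedded in 3D with normal n = (0, 0, 1).
    square2d = [np.array(p) for p in [(0, 0), (1, 0), (1, 1), (0, 1)]]
    print(area(square2d))  # 1.0
    square3d = [np.array(p) for p in [(0, 0, 0), (1, 0, 0), (1, 1, 0), (0, 1, 0)]]
    print(area(square3d, n=np.array([0.0, 0.0, 1.0])))  # 1.0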
| gpl-3.0 | -9,147,105,581,252,938,000 | 28.962963 | 113 | 0.409971 | false |
kmee/pySigepWeb | pysigepweb/pysigep_exception.py | 1 | 1805 | # -*- coding: utf-8 -*-
# #############################################################################
#
# Brazillian Carrier Correios Sigep WEB
# Copyright (C) 2015 KMEE (http://www.kmee.com.br)
# @author: Michell Stuttgart <[email protected]>
#
# Sponsored by Europestar www.europestar.com.br
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
class SigepWEBBaseException(Exception):
def __init__(self, msg, *args):
        self.message = msg.format(*args)
def __str__(self):
return repr(self.message)
class ErroSemConexaoComInternet(SigepWEBBaseException):
def __init__(self, msg, *args):
        self.message = 'No Internet connection.'
def __str__(self):
return repr(self.message)
class ErroConexaoComServidor(SigepWEBBaseException):
def __str__(self):
return repr(self.message)
class ErroTamanhoParamentroIncorreto(SigepWEBBaseException):
def __str__(self):
return repr(self.message)
class ErroValidacaoXML(SigepWEBBaseException):
def __str__(self):
return repr(self.message)
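# Usage sketch (illustrative; the message template and arguments are made up):
# the base class fills the template with the positional args via str.format.
#
#   raise ErroTamanhoParamentroIncorreto(
#       'Parameter {0} must be {1} characters long', 'cep', 8)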
| agpl-3.0 | -7,700,560,116,779,816,000 | 30.666667 | 79 | 0.633795 | false |
CCBatIIT/AlGDock | Pipeline/prep_complex_for_AlGDock.py | 1 | 3734 | # Prepares a complex for AlGDock
try:
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('ligand_mol2', default=None,
help='Input mol2 of the ligand with sybyl atom types')
parser.add_argument('receptor_pdb', default=None,
help='Input PDB of the receptor with AMBER atom types')
parser.add_argument('complex_tarball', default=None,
help='Prefix for the complex prmtop and inpcrd files')
parser.add_argument('--debug', action='store_true')
args = parser.parse_args()
except:
import sys
class args:
ligand_mol2 = sys.argv[1]
receptor_pdb = sys.argv[2]
import os
for FN in [args.ligand_mol2, args.receptor_pdb]:
if not os.path.isfile(FN):
raise Exception('Input file %s not found!'%FN)
args.ligand_mol2 = os.path.abspath(args.ligand_mol2)
args.receptor_pdb = os.path.abspath(args.receptor_pdb)
args.complex_tarball = os.path.abspath(args.complex_tarball)
import os, inspect
dirs = {'script':os.path.dirname(os.path.abspath(\
inspect.getfile(inspect.currentframe())))}
execfile(os.path.join(dirs['script'],'_external_paths.py'))
command_paths = findPaths(['sander'])
dirs['amber'] = os.path.abspath(os.path.dirname(command_paths['sander'])[:-4])
dirs['temp'] = args.complex_tarball + '.tmp'
if not os.path.isdir(dirs['temp']):
os.system('mkdir -p '+dirs['temp'])
os.chdir(dirs['temp'])
ligand_prefix = '.'.join(os.path.dirname(args.ligand_mol2).split('/')[-1].split('.')[:-1]) \
+ '.' + os.path.basename(args.ligand_mol2)[:-5]
# The receptor file name ends with '.pdb2pqr_amber.pqr',
# which is 18 characters long
receptor_prefix = os.path.basename(args.receptor_pdb)[:-18]
complex_prefix = ligand_prefix + '-' + receptor_prefix
if not os.path.isfile(ligand_prefix+'.mol2'):
print '\n*** Writing mol2 file with amber atom types ***'
command = dirs['amber']+'/bin/antechamber' + \
' -i {0} -fi mol2 -o {1}.mol2 -fo mol2 -rn LIG'.format(\
args.ligand_mol2,ligand_prefix)
os.system(command)
if not os.path.isfile(ligand_prefix+'.mol2'):
print command
raise Exception('Could not write mol2 file')
if not os.path.isfile(ligand_prefix+'.frcmod'):
print '\n*** Generating frcmod file ***'
command = dirs['amber']+'/bin/parmchk' +\
' -i {0}.mol2 -f mol2 -o {0}.frcmod -a Y -w Y'.format(ligand_prefix)
os.system(command)
if not (os.path.isfile(os.path.join(complex_prefix+'.prmtop')) and \
os.path.isfile(os.path.join(complex_prefix+'.inpcrd')) and \
os.path.isfile(os.path.join(complex_prefix+'.pdb'))):
print '\n*** Generating prmtop and inpcrd and pdb files ***'
tleap_F = open(complex_prefix+'.tleap','w')
tleap_F.write("""
source leaprc.ff14SB
set default PBRadii mbondi2
# Receptor
receptor = loadpdb {0}
# Ligand
source leaprc.gaff2
loadamberparams {1}.frcmod
ligand = loadmol2 {1}.mol2
saveoff ligand {1}.lib
loadoff {1}.lib
# Complex
complex = combine {{receptor, ligand}}
saveamberparm complex {2}.prmtop {2}.inpcrd
savepdb complex {2}.pdb
quit
""".format(args.receptor_pdb, ligand_prefix, complex_prefix))
tleap_F.close()
command = dirs['amber']+'/bin/tleap -f {0}.tleap'.format(complex_prefix)
os.system(command)
if os.path.isfile(os.path.join(complex_prefix+'.pdb')):
print '\n*** Setting fixed atoms in pdb file ***'
command = 'python {0}/label_fixed_atoms.py {1}'
command = command.format(dirs['script'], os.path.join(complex_prefix+'.pdb'))
os.system(command)
# Compresses the complex files in a tarball
import tarfile
tarF = tarfile.open(args.complex_tarball,'w:gz')
tarF_contents = [complex_prefix+'.'+ext for ext in ['prmtop', 'inpcrd', 'pdb']]
for FN in tarF_contents:
tarF.add(FN)
tarF.close()
os.chdir('..')
if not args.debug:
os.system('rm -rf '+dirs['temp'])
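# Example invocation (illustrative; file names are hypothetical). Note that the
# receptor file name is expected to end in '.pdb2pqr_amber.pqr', since the
# prefix is derived by stripping those 18 characters:
#   python prep_complex_for_AlGDock.py ligand.mol2 \
#       receptor.pdb2pqr_amber.pqr complex.tar.gz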
| mit | 8,426,347,556,647,662,000 | 32.63964 | 92 | 0.686931 | false |
lmazuel/azure-sdk-for-python | azure-mgmt-monitor/azure/mgmt/monitor/models/metric_trigger.py | 1 | 4030 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class MetricTrigger(Model):
"""The trigger that results in a scaling action.
All required parameters must be populated in order to send to Azure.
:param metric_name: Required. the name of the metric that defines what the
rule monitors.
:type metric_name: str
:param metric_resource_uri: Required. the resource identifier of the
resource the rule monitors.
:type metric_resource_uri: str
:param time_grain: Required. the granularity of metrics the rule monitors.
Must be one of the predefined values returned from metric definitions for
the metric. Must be between 12 hours and 1 minute.
:type time_grain: timedelta
:param statistic: Required. the metric statistic type. How the metrics
from multiple instances are combined. Possible values include: 'Average',
'Min', 'Max', 'Sum'
:type statistic: str or ~azure.mgmt.monitor.models.MetricStatisticType
:param time_window: Required. the range of time in which instance data is
collected. This value must be greater than the delay in metric collection,
which can vary from resource-to-resource. Must be between 12 hours and 5
minutes.
:type time_window: timedelta
:param time_aggregation: Required. time aggregation type. How the data
that is collected should be combined over time. The default value is
Average. Possible values include: 'Average', 'Minimum', 'Maximum',
'Total', 'Count'
:type time_aggregation: str or
~azure.mgmt.monitor.models.TimeAggregationType
:param operator: Required. the operator that is used to compare the metric
data and the threshold. Possible values include: 'Equals', 'NotEquals',
'GreaterThan', 'GreaterThanOrEqual', 'LessThan', 'LessThanOrEqual'
:type operator: str or ~azure.mgmt.monitor.models.ComparisonOperationType
:param threshold: Required. the threshold of the metric that triggers the
scale action.
:type threshold: float
"""
_validation = {
'metric_name': {'required': True},
'metric_resource_uri': {'required': True},
'time_grain': {'required': True},
'statistic': {'required': True},
'time_window': {'required': True},
'time_aggregation': {'required': True},
'operator': {'required': True},
'threshold': {'required': True},
}
_attribute_map = {
'metric_name': {'key': 'metricName', 'type': 'str'},
'metric_resource_uri': {'key': 'metricResourceUri', 'type': 'str'},
'time_grain': {'key': 'timeGrain', 'type': 'duration'},
'statistic': {'key': 'statistic', 'type': 'MetricStatisticType'},
'time_window': {'key': 'timeWindow', 'type': 'duration'},
'time_aggregation': {'key': 'timeAggregation', 'type': 'TimeAggregationType'},
'operator': {'key': 'operator', 'type': 'ComparisonOperationType'},
'threshold': {'key': 'threshold', 'type': 'float'},
}
def __init__(self, **kwargs):
super(MetricTrigger, self).__init__(**kwargs)
self.metric_name = kwargs.get('metric_name', None)
self.metric_resource_uri = kwargs.get('metric_resource_uri', None)
self.time_grain = kwargs.get('time_grain', None)
self.statistic = kwargs.get('statistic', None)
self.time_window = kwargs.get('time_window', None)
self.time_aggregation = kwargs.get('time_aggregation', None)
self.operator = kwargs.get('operator', None)
self.threshold = kwargs.get('threshold', None)
| mit | 7,079,760,800,057,811,000 | 46.411765 | 86 | 0.644913 | false |
robe16/kiosk.grandparent-message-board | src/axiscare/url_process.py | 1 | 2732 | from multiprocessing import Process
from bs4 import BeautifulSoup
import time
from google.google_gmail import get_gmail_lists, get_gmail_message_mime, delete_gmail_message
from config.cfg import put_config_axiscare_url
from log.log import log_general, log_error
def eml_list():
return get_gmail_lists()
def get_ids(id_list):
ids = []
for l in id_list:
ids.append(l['id'])
return ids
def get_emails(ids):
emls = []
for id in ids:
e = get_gmail_message_mime(id)
emls.append({'id': id, 'email': e})
return emls
def extract_url(eml):
#
for p in eml.get_payload():
if not isinstance(p.get_payload(), str):
for p2 in p.get_payload():
for h in p2._headers:
if h[0]== 'Content-Type' and h[1].startswith('text/html'):
payload = p2.get_payload()
soup = BeautifulSoup(payload, "html.parser")
a_all = soup.findAll("a")
for a in a_all:
href = a.attrs['href'].replace('3D', '').replace('\"', '')
if href.startswith('https://1000.axiscare.com'):
#Assumption that html version appears before pdf version
return href
#
return False
def process_emls(emls):
#
for e in emls:
#
url = extract_url(e['email'])
#
if url:
put_config_axiscare_url(url)
#Delete email
delete_gmail_message(e['id'])
return True
return False
def url_updater():
#
    while True:
        updatestatus = False
#
try:
eml_lists = eml_list()
#
if len(eml_lists) > 0:
#
eml_ids = get_ids(eml_lists)
#
if len(eml_ids) > 0:
#
emls = get_emails(eml_ids)
updatestatus = process_emls(emls)
#
if updatestatus:
msg_success = 'the url stored in config.json has been updated'
else:
msg_success = 'no new urls received'
log_general('Axiscare URL updater process completed - {msg_success}'.format(msg_success=msg_success))
#
except Exception as e:
log_error('Could not process emails to check for new URL notification - {error}'.format(error=e))
#
time.sleep(300) #5mins
def start_url_updater():
process_urlupdater = Process(target=url_updater)
process_urlupdater.start()
log_general('Axiscare URL updater process started')
| gpl-3.0 | -8,363,137,163,987,299,000 | 27.458333 | 113 | 0.515739 | false |
dariocorral/panoanda | panoanda/hourOffset.py | 1 | 1849 | """
Created on Sun Sep 17 07:26:03 2017
@author: dariocorral
"""
from datetime import datetime, date, timedelta
import pytz
class Hour(object):
"""
Auxiliary class for converting GMT - NY - local time hours
"""
#Local time hour property
@property
def current_local(self):
"""
Returns local current hour
:return:integer
"""
return datetime.now().hour
#New York current hour property
@property
def current_NY(self):
"""
Returns New York current hour
:return:integer
"""
return datetime.now(tz=pytz.timezone('US/Eastern')).hour
#GMT current hour property
@property
def current_GMT(self):
"""
Returns GMT current hour
:return:integer
"""
return datetime.now(tz=pytz.timezone('utc')).hour
#New York hour - GMT hour
@property
def offset_NY_GMT(self):
"""
Returns New York current hour GMT current hour difference
:return: integer
"""
return self.current_NY - self.current_GMT
#New York hour - GMT hour
@property
def offset_local_GMT(self):
"""
Returns Local current hour vs GMT current hour difference
:return: integer
"""
return self.current_local - self.current_GMT
def hour_offset_calculate(self, hour, delta):
"""
Operate with hours
"""
year = date.today().year
month = date.today().month
day = date.today().day
dt_hour = datetime(year, month, day, hour)
dt_hour_offset = dt_hour + timedelta(hours= delta)
return dt_hour_offset.hour
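# A minimal sketch of the wrap-around behaviour of hour_offset_calculate
# (illustrative; the current_* properties depend on the clock at run time):
#
#   h = Hour()
#   h.hour_offset_calculate(23, 2)   # -> 1  (23:00 + 2h wraps past midnight)
#   h.hour_offset_calculate(3, -5)   # -> 22 (03:00 - 5h wraps backwards)
#   # e.g. converting a local hour to GMT:
#   # h.hour_offset_calculate(local_hour, -h.offset_local_GMT)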
| mit | -2,291,861,256,432,731,100 | 20.511628 | 66 | 0.53272 | false |
olcf/pcircle | legacy/pcircle/path.py | 1 | 2783 | import os
class CopyType:
""" A fake enum, define three type of copy job """
FILE2FILE = 0
FILE2DIR = 1
DIR2DIR = 2
def copytype2str(t):
if t == CopyType.FILE2FILE:
return "file to file"
elif t == CopyType.FILE2DIR:
return "file(s) to dir"
elif t == CopyType.DIR2DIR:
return "dir to dir"
else:
return "Unknown type"
def cleanup_path(paths, removedir=True):
""" remove unreable files and directories from the input path collection,
skipped include two type of elements: unwanted directories if removedir is True
or unaccessible files/directories
"""
checked = []
skipped = []
for ele in paths:
ele = os.path.abspath(ele)
if os.path.exists(ele) and os.access(ele, os.R_OK):
if os.path.isdir(ele) and removedir:
skipped.append(ele)
else:
checked.append(ele)
else:
skipped.append(ele)
return checked, skipped
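# Example of the two lists cleanup_path returns (a sketch; actual results
# depend on the filesystem state):
#
#   checked, skipped = cleanup_path(["/etc/hosts", "/etc", "/no/such/file"])
#   # checked -> ["/etc/hosts"]             readable regular file
#   # skipped -> ["/etc", "/no/such/file"]  directory (removedir=True), missing path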
def identify_copytype(isrc, idest):
""" verify and return target destination
case 1: source: multiple files
destination is an existing directory
copytype: FILES2DIR
case 2: source: a file
destination can either be:
a file doesn't exist but writable
a file exists
then FILE2FILE
case 3: source: a directory
destination: a directory doesn't exist, but writable
then DIR2DIR
case 3 used to be the only mode FCP supports.
"""
if not os.path.isabs(idest):
idest = os.path.abspath(idest)
single_src_file = True if len(isrc) == 1 and os.path.isfile(isrc[0]) else False
single_src_dir = True if len(isrc) == 1 and os.path.isdir(isrc[0]) else False
dest_exist_dir = False
dest_exist_file = False
dest_parent_writable = False
if os.path.exists(idest):
if not os.access(idest, os.W_OK):
raise ValueError("Can't access %s" % idest)
if os.path.isfile(idest):
dest_exist_file = True
else:
dest_exist_dir = True
else:
# idest doesn't exist, check its parent
idest_parent = os.path.dirname(idest)
if os.path.exists(idest_parent) and os.access(idest_parent, os.W_OK):
dest_parent_writable = True
    copytype = None
    if single_src_file and (dest_exist_file or dest_parent_writable):
copytype = CopyType.FILE2FILE
elif single_src_dir and (dest_exist_dir or dest_parent_writable):
copytype = CopyType.DIR2DIR
elif not (single_src_dir or single_src_file) and dest_exist_dir:
copytype = CopyType.FILE2DIR
if copytype is None:
raise ValueError("Can't decide the type of copy operations")
return copytype
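# Usage sketch (illustrative; identify_copytype raises ValueError when no
# copy mode applies):
#
#   srcs, _ = cleanup_path(["/data/in.txt"], removedir=False)
#   mode = identify_copytype(srcs, "/data/out.txt")
#   print(copytype2str(mode))   # -> "file to file"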
| apache-2.0 | -2,806,440,448,711,826,000 | 29.922222 | 83 | 0.614445 | false |
phase-dev/phase | libmproxy/protocol/__init__.py | 1 | 3448 | from ..proxy import ServerConnection, AddressPriority
KILL = 0 # const for killed requests
class ConnectionTypeChange(Exception):
"""
    Gets raised if the connection type has been changed (e.g. after HTTP/1.1 101 Switching Protocols).
It's up to the raising ProtocolHandler to specify the new conntype before raising the exception.
"""
pass
class ProtocolHandler(object):
def __init__(self, c):
self.c = c
"""@type: libmproxy.proxy.ConnectionHandler"""
def handle_messages(self):
"""
This method gets called if a client connection has been made. Depending on the proxy settings,
a server connection might already exist as well.
"""
raise NotImplementedError # pragma: nocover
def handle_error(self, error):
"""
This method gets called should there be an uncaught exception during the connection.
This might happen outside of handle_messages, e.g. if the initial SSL handshake fails in transparent mode.
"""
raise error # pragma: nocover
class TemporaryServerChangeMixin(object):
"""
This mixin allows safe modification of the target server,
without any need to expose the ConnectionHandler to the Flow.
"""
def change_server(self, address, ssl):
if address == self.c.server_conn.address():
return
priority = AddressPriority.MANUALLY_CHANGED
if self.c.server_conn.priority > priority:
self.log("Attempt to change server address, "
"but priority is too low (is: %s, got: %s)" % (self.server_conn.priority, priority))
return
self.log("Temporarily change server connection: %s:%s -> %s:%s" % (
self.c.server_conn.address.host,
self.c.server_conn.address.port,
address.host,
address.port
))
if not hasattr(self, "_backup_server_conn"):
self._backup_server_conn = self.c.server_conn
self.c.server_conn = None
else: # This is at least the second temporary change. We can kill the current connection.
self.c.del_server_connection()
self.c.set_server_address(address, priority)
if ssl:
self.establish_ssl(server=True)
def restore_server(self):
if not hasattr(self, "_backup_server_conn"):
return
self.log("Restore original server connection: %s:%s -> %s:%s" % (
self.c.server_conn.address.host,
self.c.server_conn.address.port,
            self._backup_server_conn.address.host,
            self._backup_server_conn.address.port
))
self.c.del_server_connection()
self.c.server_conn = self._backup_server_conn
del self._backup_server_conn
from . import http, tcp
protocols = {
'http': dict(handler=http.HTTPHandler, flow=http.HTTPFlow),
'tcp': dict(handler=tcp.TCPHandler)
} # PyCharm type hinting behaves bad if this is a dict constructor...
def _handler(conntype, connection_handler):
if conntype in protocols:
return protocols[conntype]["handler"](connection_handler)
raise NotImplementedError # pragma: nocover
def handle_messages(conntype, connection_handler):
return _handler(conntype, connection_handler).handle_messages()
def handle_error(conntype, connection_handler, error):
return _handler(conntype, connection_handler).handle_error(error)
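# Dispatch sketch (illustrative; the caller is expected to track the
# negotiated connection type for the ConnectionHandler):
#
#   handle_messages("http", connection_handler)
#   # and, on an uncaught exception during the connection:
#   # handle_error("http", connection_handler, exc)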
| gpl-3.0 | -1,467,776,239,148,009,500 | 32.803922 | 114 | 0.645012 | false |
Davideddu/garden.namedboxes | __init__.py | 1 | 3796 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# __init__.py
# Copyright (C) 2014 Davide Depau <[email protected]>
#
# NamedBoxes is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# NamedBoxes is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
"""A simple box layout with a label on top of it.
Use it like you would use a BoxLayout, but don't change the orientation
property. Add another layout inside of it instead, and change its
orientation.
You can change the title of the box, though (title property), the background
color (background_color property) and the horizontal alignment of the label
(title_align property).
"""
from kivy.lang import Builder
from kivy.properties import Property, NumericProperty, BoundedNumericProperty,\
ObjectProperty, StringProperty, DictProperty,\
ListProperty, OptionProperty, BooleanProperty
from kivy.uix.boxlayout import BoxLayout
kv = """
<NamedBox>:
orientation: "vertical"
content: content
padding: ["10dp", 0, "10dp", "10dp"]
canvas.before:
Color:
rgba: root.background_color
Rectangle:
size: self.size
pos: self.pos
BoxLayout:
size_hint_y: None
height: lab.height
Label:
id: lab
size_hint_y: None
        height: dp(45)
markup: root.markup
text: root.title
text_size: self.width, self.height
valign: 'middle'
halign: root.title_align
BoxLayout:
orientation: "vertical"
id: content
"""
Builder.load_string(kv)
class NamedBox(BoxLayout):
"""BoxLayout with a background color and with a label on top of it.
Use it like you would use a BoxLayout, but don't change the orientation
property. Add another layout inside of it instead, and change its
orientation."""
background_color = ListProperty([.1, .1, .1, .5])
"""The background color for the box, in RGBA.
background_color is a ListProperty, defaults to [.1, .1, .1, .5].
"""
title_align = OptionProperty("center", options=("left", "right", "center"))
"""The horizontal alignment of the text in the title of the box.
title_align is an OptionProperty, defaults to "center" and can be one
of "left", "right", "center".
"""
title = StringProperty("<No title set>")
"""The title of the named box.
title is a StringProperty, defaults to "<No title set>".
"""
markup = BooleanProperty(False)
"""Sets whether the markup should be enabled for the title.
markup is a BooleanProperty, defaults to False.
"""
content = ObjectProperty(None)
def add_widget(self, widget):
if self.content:
self.content.add_widget(widget)
else:
super(NamedBox, self).add_widget(widget)
if __name__ == "__main__":
from kivy.app import App
from kivy.uix.button import Button
class NamedBoxApp(App):
def build(self):
root = BoxLayout(padding="100dp", spacing="100dp")
box = NamedBox(title="Named box")
box.add_widget(Button(text="Button"))
root.add_widget(box)
return root
NamedBoxApp().run() | mit | 8,088,089,841,062,187,000 | 29.620968 | 79 | 0.644889 | false |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-3.0/Lib/tkinter/tix.py | 1 | 74062 | # -*-mode: python; fill-column: 75; tab-width: 8 -*-
#
# $Id: tix.py 63418 2008-05-17 18:39:55Z georg.brandl $
#
# Tix.py -- Tix widget wrappers.
#
# For Tix, see http://tix.sourceforge.net
#
# - Sudhir Shenoy ([email protected]), Dec. 1995.
# based on an idea of Jean-Marc Lugrin ([email protected])
#
# NOTE: In order to minimize changes to Tkinter.py, some of the code here
# (TixWidget.__init__) has been taken from Tkinter (Widget.__init__)
# and will break if there are major changes in Tkinter.
#
# The Tix widgets are represented by a class hierarchy in python with proper
# inheritance of base classes.
#
# As a result after creating a 'w = StdButtonBox', I can write
# w.ok['text'] = 'Who Cares'
# or w.ok['bg'] = w['bg']
# or even w.ok.invoke()
# etc.
#
# Compare the demo tixwidgets.py to the original Tcl program and you will
# appreciate the advantages.
#
from tkinter import *
from tkinter import _flatten, _cnfmerge, _default_root
# WARNING - TkVersion is a limited precision floating point number
if TkVersion < 3.999:
raise ImportError("This version of Tix.py requires Tk 4.0 or higher")
import _tkinter # If this fails your Python may not be configured for Tk
# Some more constants (for consistency with Tkinter)
WINDOW = 'window'
TEXT = 'text'
STATUS = 'status'
IMMEDIATE = 'immediate'
IMAGE = 'image'
IMAGETEXT = 'imagetext'
BALLOON = 'balloon'
AUTO = 'auto'
ACROSSTOP = 'acrosstop'
# Some constants used by Tkinter dooneevent()
TCL_DONT_WAIT = 1 << 1
TCL_WINDOW_EVENTS = 1 << 2
TCL_FILE_EVENTS = 1 << 3
TCL_TIMER_EVENTS = 1 << 4
TCL_IDLE_EVENTS = 1 << 5
TCL_ALL_EVENTS = 0
# BEWARE - this is implemented by copying some code from the Widget class
# in Tkinter (to override Widget initialization) and is therefore
# liable to break.
import tkinter, os
# Could probably add this to Tkinter.Misc
class tixCommand:
"""The tix commands provide access to miscellaneous elements
of Tix's internal state and the Tix application context.
Most of the information manipulated by these commands pertains
to the application as a whole, or to a screen or
display, rather than to a particular window.
This is a mixin class, assumed to be mixed to Tkinter.Tk
that supports the self.tk.call method.
"""
def tix_addbitmapdir(self, directory):
"""Tix maintains a list of directories under which
the tix_getimage and tix_getbitmap commands will
search for image files. The standard bitmap directory
is $TIX_LIBRARY/bitmaps. The addbitmapdir command
adds directory into this list. By using this
        command, the image files of an application can
also be located using the tix_getimage or tix_getbitmap
command.
"""
return self.tk.call('tix', 'addbitmapdir', directory)
def tix_cget(self, option):
"""Returns the current value of the configuration
option given by option. Option may be any of the
options described in the CONFIGURATION OPTIONS section.
"""
return self.tk.call('tix', 'cget', option)
def tix_configure(self, cnf=None, **kw):
"""Query or modify the configuration options of the Tix application
context. If no option is specified, returns a dictionary all of the
available options. If option is specified with no value, then the
command returns a list describing the one named option (this list
will be identical to the corresponding sublist of the value
returned if no option is specified). If one or more option-value
pairs are specified, then the command modifies the given option(s)
to have the given value(s); in this case the command returns an
empty string. Option may be any of the configuration options.
"""
# Copied from Tkinter.py
if kw:
cnf = _cnfmerge((cnf, kw))
elif cnf:
cnf = _cnfmerge(cnf)
if cnf is None:
cnf = {}
for x in self.tk.split(self.tk.call('tix', 'configure')):
cnf[x[0][1:]] = (x[0][1:],) + x[1:]
return cnf
        if isinstance(cnf, str):
x = self.tk.split(self.tk.call('tix', 'configure', '-'+cnf))
return (x[0][1:],) + x[1:]
return self.tk.call(('tix', 'configure') + self._options(cnf))
def tix_filedialog(self, dlgclass=None):
"""Returns the file selection dialog that may be shared among
different calls from this application. This command will create a
file selection dialog widget when it is called the first time. This
dialog will be returned by all subsequent calls to tix_filedialog.
        An optional dlgclass parameter can be passed to specify what type
        of file selection dialog widget is desired. Possible options are
        tixFileSelectDialog or tixExFileSelectDialog.
"""
if dlgclass is not None:
return self.tk.call('tix', 'filedialog', dlgclass)
else:
return self.tk.call('tix', 'filedialog')
def tix_getbitmap(self, name):
"""Locates a bitmap file of the name name.xpm or name in one of the
bitmap directories (see the tix_addbitmapdir command above). By
using tix_getbitmap, you can avoid hard coding the pathnames of the
bitmap files in your application. When successful, it returns the
complete pathname of the bitmap file, prefixed with the character
'@'. The returned value can be used to configure the -bitmap
option of the TK and Tix widgets.
"""
return self.tk.call('tix', 'getbitmap', name)
def tix_getimage(self, name):
"""Locates an image file of the name name.xpm, name.xbm or name.ppm
in one of the bitmap directories (see the addbitmapdir command
above). If more than one file with the same name (but different
        extensions) exists, then the image type is chosen according to the
depth of the X display: xbm images are chosen on monochrome
displays and color images are chosen on color displays. By using
        tix_getimage, you can avoid hard coding the pathnames of the
image files in your application. When successful, this command
returns the name of the newly created image, which can be used to
configure the -image option of the Tk and Tix widgets.
"""
return self.tk.call('tix', 'getimage', name)
def tix_option_get(self, name):
"""Gets the options manitained by the Tix
scheme mechanism. Available options include:
active_bg active_fg bg
bold_font dark1_bg dark1_fg
dark2_bg dark2_fg disabled_fg
fg fixed_font font
inactive_bg inactive_fg input1_bg
input2_bg italic_font light1_bg
light1_fg light2_bg light2_fg
menu_font output1_bg output2_bg
select_bg select_fg selector
"""
# could use self.tk.globalgetvar('tixOption', name)
return self.tk.call('tix', 'option', 'get', name)
def tix_resetoptions(self, newScheme, newFontSet, newScmPrio=None):
"""Resets the scheme and fontset of the Tix application to
newScheme and newFontSet, respectively. This affects only those
widgets created after this call. Therefore, it is best to call the
resetoptions command before the creation of any widgets in a Tix
application.
The optional parameter newScmPrio can be given to reset the
priority level of the Tk options set by the Tix schemes.
        Because of the way Tk handles the X option database, after Tix has
        been imported and inited, it is not possible to reset the color
schemes and font sets using the tix config command. Instead, the
tix_resetoptions command must be used.
"""
if newScmPrio is not None:
return self.tk.call('tix', 'resetoptions', newScheme, newFontSet, newScmPrio)
else:
return self.tk.call('tix', 'resetoptions', newScheme, newFontSet)
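# A short usage sketch for the tixCommand mixin above (illustrative only;
# requires a working Tix installation, and option names/values follow the
# tix man pages):
#
#   root = Tk()                            # the Tix toplevel defined below
#   root.tix_addbitmapdir('/my/bitmaps')
#   bmp = root.tix_getbitmap('folder')     # e.g. '@/my/bitmaps/folder.xpm'
#   root.tix_configure(fontset='14Point')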
class Tk(tkinter.Tk, tixCommand):
"""Toplevel widget of Tix which represents mostly the main window
of an application. It has an associated Tcl interpreter."""
def __init__(self, screenName=None, baseName=None, className='Tix'):
tkinter.Tk.__init__(self, screenName, baseName, className)
tixlib = os.environ.get('TIX_LIBRARY')
self.tk.eval('global auto_path; lappend auto_path [file dir [info nameof]]')
if tixlib is not None:
self.tk.eval('global auto_path; lappend auto_path {%s}' % tixlib)
self.tk.eval('global tcl_pkgPath; lappend tcl_pkgPath {%s}' % tixlib)
# Load Tix - this should work dynamically or statically
# If it's static, tcl/tix8.1/pkgIndex.tcl should have
# 'load {} Tix'
# If it's dynamic under Unix, tcl/tix8.1/pkgIndex.tcl should have
# 'load libtix8.1.8.3.so Tix'
self.tk.eval('package require Tix')
def destroy(self):
# For safety, remove an delete_window binding before destroy
self.protocol("WM_DELETE_WINDOW", "")
tkinter.Tk.destroy(self)
# The Tix 'tixForm' geometry manager
class Form:
"""The Tix Form geometry manager
Widgets can be arranged by specifying attachments to other widgets.
See Tix documentation for complete details"""
def config(self, cnf={}, **kw):
self.tk.call('tixForm', self._w, *self._options(cnf, kw))
form = config
def __setitem__(self, key, value):
Form.form(self, {key: value})
def check(self):
return self.tk.call('tixForm', 'check', self._w)
def forget(self):
self.tk.call('tixForm', 'forget', self._w)
def grid(self, xsize=0, ysize=0):
if (not xsize) and (not ysize):
x = self.tk.call('tixForm', 'grid', self._w)
y = self.tk.splitlist(x)
z = ()
for x in y:
z = z + (self.tk.getint(x),)
return z
return self.tk.call('tixForm', 'grid', self._w, xsize, ysize)
def info(self, option=None):
if not option:
return self.tk.call('tixForm', 'info', self._w)
if option[0] != '-':
option = '-' + option
return self.tk.call('tixForm', 'info', self._w, option)
def slaves(self):
return map(self._nametowidget,
self.tk.splitlist(
self.tk.call(
'tixForm', 'slaves', self._w)))
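# Form usage sketch (illustrative; attachment values such as '%50' follow
# the tixForm man page and mean a percentage of the master's size):
#
#   b1 = Button(root, text='one')
#   b2 = Button(root, text='two')
#   b1.form(top=0, left=0, right='%50')
#   b2.form(top=0, left=b1, right='%100')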
tkinter.Widget.__bases__ = tkinter.Widget.__bases__ + (Form,)
class TixWidget(tkinter.Widget):
"""A TixWidget class is used to package all (or most) Tix widgets.
Widget initialization is extended in two ways:
1) It is possible to give a list of options which must be part of
the creation command (so called Tix 'static' options). These cannot be
given as a 'config' command later.
2) It is possible to give the name of an existing TK widget. These are
child widgets created automatically by a Tix mega-widget. The Tk call
to create these widgets is therefore bypassed in TixWidget.__init__
Both options are for use by subclasses only.
"""
def __init__ (self, master=None, widgetName=None,
static_options=None, cnf={}, kw={}):
# Merge keywords and dictionary arguments
if kw:
cnf = _cnfmerge((cnf, kw))
else:
cnf = _cnfmerge(cnf)
# Move static options into extra. static_options must be
# a list of keywords (or None).
extra=()
# 'options' is always a static option
if static_options:
static_options.append('options')
else:
static_options = ['options']
        for k, v in list(cnf.items()):
if k in static_options:
extra = extra + ('-' + k, v)
del cnf[k]
self.widgetName = widgetName
Widget._setup(self, master, cnf)
# If widgetName is None, this is a dummy creation call where the
# corresponding Tk widget has already been created by Tix
if widgetName:
self.tk.call(widgetName, self._w, *extra)
# Non-static options - to be done via a 'config' command
if cnf:
Widget.config(self, cnf)
# Dictionary to hold subwidget names for easier access. We can't
# use the children list because the public Tix names may not be the
# same as the pathname component
self.subwidget_list = {}
# We set up an attribute access function so that it is possible to
# do w.ok['text'] = 'Hello' rather than w.subwidget('ok')['text'] = 'Hello'
# when w is a StdButtonBox.
# We can even do w.ok.invoke() because w.ok is subclassed from the
# Button class if you go through the proper constructors
def __getattr__(self, name):
if name in self.subwidget_list:
return self.subwidget_list[name]
raise AttributeError(name)
def set_silent(self, value):
"""Set a variable without calling its action routine"""
self.tk.call('tixSetSilent', self._w, value)
def subwidget(self, name):
"""Return the named subwidget (which must have been created by
the sub-class)."""
n = self._subwidget_name(name)
if not n:
raise TclError("Subwidget " + name + " not child of " + self._name)
# Remove header of name and leading dot
n = n[len(self._w)+1:]
return self._nametowidget(n)
def subwidgets_all(self):
"""Return all subwidgets."""
names = self._subwidget_names()
if not names:
return []
retlist = []
for name in names:
name = name[len(self._w)+1:]
try:
retlist.append(self._nametowidget(name))
except:
# some of the widgets are unknown e.g. border in LabelFrame
pass
return retlist
def _subwidget_name(self,name):
"""Get a subwidget name (returns a String, not a Widget !)"""
try:
return self.tk.call(self._w, 'subwidget', name)
except TclError:
return None
def _subwidget_names(self):
"""Return the name of all subwidgets."""
try:
x = self.tk.call(self._w, 'subwidgets', '-all')
return self.tk.split(x)
except TclError:
return None
def config_all(self, option, value):
"""Set configuration options for all subwidgets (and self)."""
if option == '':
return
        elif not isinstance(option, str):
            option = repr(option)
        if not isinstance(value, str):
value = repr(value)
names = self._subwidget_names()
for name in names:
self.tk.call(name, 'configure', '-' + option, value)
# These are missing from Tkinter
def image_create(self, imgtype, cnf={}, master=None, **kw):
if not master:
master = tkinter._default_root
if not master:
raise RuntimeError('Too early to create image')
if kw and cnf: cnf = _cnfmerge((cnf, kw))
elif kw: cnf = kw
options = ()
for k, v in cnf.items():
if hasattr(v, '__call__'):
v = self._register(v)
options = options + ('-'+k, v)
return master.tk.call(('image', 'create', imgtype,) + options)
def image_delete(self, imgname):
try:
self.tk.call('image', 'delete', imgname)
except TclError:
# May happen if the root was destroyed
pass
# Subwidgets are child widgets created automatically by mega-widgets.
# In python, we have to create these subwidgets manually to mirror their
# existence in Tk/Tix.
class TixSubWidget(TixWidget):
"""Subwidget class.
This is used to mirror child widgets automatically created
by Tix/Tk as part of a mega-widget in Python (which is not informed
of this)"""
def __init__(self, master, name,
destroy_physically=1, check_intermediate=1):
if check_intermediate:
path = master._subwidget_name(name)
try:
path = path[len(master._w)+1:]
plist = path.split('.')
except:
plist = []
if not check_intermediate:
# immediate descendant
TixWidget.__init__(self, master, None, None, {'name' : name})
else:
# Ensure that the intermediate widgets exist
parent = master
for i in range(len(plist) - 1):
n = '.'.join(plist[:i+1])
try:
w = master._nametowidget(n)
parent = w
except KeyError:
# Create the intermediate widget
parent = TixSubWidget(parent, plist[i],
destroy_physically=0,
check_intermediate=0)
# The Tk widget name is in plist, not in name
if plist:
name = plist[-1]
TixWidget.__init__(self, parent, None, None, {'name' : name})
self.destroy_physically = destroy_physically
def destroy(self):
# For some widgets e.g., a NoteBook, when we call destructors,
# we must be careful not to destroy the frame widget since this
# also destroys the parent NoteBook thus leading to an exception
# in Tkinter when it finally calls Tcl to destroy the NoteBook
        for c in list(self.children.values()): c.destroy()
if self._name in self.master.children:
del self.master.children[self._name]
if self._name in self.master.subwidget_list:
del self.master.subwidget_list[self._name]
if self.destroy_physically:
# This is bypassed only for a few widgets
self.tk.call('destroy', self._w)
# Useful func. to split Tcl lists and return as a dict. From Tkinter.py
def _lst2dict(lst):
dict = {}
for x in lst:
dict[x[0][1:]] = (x[0][1:],) + x[1:]
return dict
# Useful class to create a display style - later shared by many items.
# Contributed by Steffen Kremser
class DisplayStyle:
"""DisplayStyle - handle configuration options shared by
(multiple) Display Items"""
def __init__(self, itemtype, cnf={}, **kw):
master = _default_root # global from Tkinter
if not master and 'refwindow' in cnf: master=cnf['refwindow']
elif not master and 'refwindow' in kw: master= kw['refwindow']
elif not master: raise RuntimeError("Too early to create display style: no root window")
self.tk = master.tk
self.stylename = self.tk.call('tixDisplayStyle', itemtype,
*self._options(cnf,kw) )
def __str__(self):
return self.stylename
def _options(self, cnf, kw):
if kw and cnf:
cnf = _cnfmerge((cnf, kw))
elif kw:
cnf = kw
opts = ()
for k, v in cnf.items():
opts = opts + ('-'+k, v)
return opts
def delete(self):
self.tk.call(self.stylename, 'delete')
def __setitem__(self,key,value):
self.tk.call(self.stylename, 'configure', '-%s'%key, value)
def config(self, cnf={}, **kw):
return _lst2dict(
self.tk.split(
self.tk.call(
self.stylename, 'configure', *self._options(cnf,kw))))
def __getitem__(self,key):
return self.tk.call(self.stylename, 'cget', '-%s'%key)
######################################################
### The Tix Widget classes - in alphabetical order ###
######################################################
class Balloon(TixWidget):
"""Balloon help widget.
Subwidget Class
--------- -----
label Label
message Message"""
# FIXME: It should inherit -superclass tixShell
def __init__(self, master=None, cnf={}, **kw):
# static seem to be -installcolormap -initwait -statusbar -cursor
static = ['options', 'installcolormap', 'initwait', 'statusbar',
'cursor']
TixWidget.__init__(self, master, 'tixBalloon', static, cnf, kw)
self.subwidget_list['label'] = _dummyLabel(self, 'label',
destroy_physically=0)
self.subwidget_list['message'] = _dummyLabel(self, 'message',
destroy_physically=0)
def bind_widget(self, widget, cnf={}, **kw):
"""Bind balloon widget to another.
One balloon widget may be bound to several widgets at the same time"""
self.tk.call(self._w, 'bind', widget._w, *self._options(cnf, kw))
def unbind_widget(self, widget):
self.tk.call(self._w, 'unbind', widget._w)
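# Balloon usage sketch (illustrative; -balloonmsg and -statusmsg are the
# standard tixBalloon bind options):
#
#   b = Balloon(root, statusbar=status_label)
#   b.bind_widget(save_button,
#                 balloonmsg='Save the current file',
#                 statusmsg='Press this button to save the current file')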
class ButtonBox(TixWidget):
"""ButtonBox - A container for pushbuttons.
Subwidgets are the buttons added with the add method.
"""
def __init__(self, master=None, cnf={}, **kw):
TixWidget.__init__(self, master, 'tixButtonBox',
['orientation', 'options'], cnf, kw)
def add(self, name, cnf={}, **kw):
"""Add a button with given name to box."""
btn = self.tk.call(self._w, 'add', name, *self._options(cnf, kw))
self.subwidget_list[name] = _dummyButton(self, name)
return btn
def invoke(self, name):
if name in self.subwidget_list:
self.tk.call(self._w, 'invoke', name)
class ComboBox(TixWidget):
"""ComboBox - an Entry field with a dropdown menu. The user can select a
    choice by either typing in the entry subwidget or selecting from the
listbox subwidget.
Subwidget Class
--------- -----
entry Entry
arrow Button
slistbox ScrolledListBox
tick Button
cross Button : present if created with the fancy option"""
# FIXME: It should inherit -superclass tixLabelWidget
def __init__ (self, master=None, cnf={}, **kw):
TixWidget.__init__(self, master, 'tixComboBox',
['editable', 'dropdown', 'fancy', 'options'],
cnf, kw)
self.subwidget_list['label'] = _dummyLabel(self, 'label')
self.subwidget_list['entry'] = _dummyEntry(self, 'entry')
self.subwidget_list['arrow'] = _dummyButton(self, 'arrow')
self.subwidget_list['slistbox'] = _dummyScrolledListBox(self,
'slistbox')
try:
self.subwidget_list['tick'] = _dummyButton(self, 'tick')
self.subwidget_list['cross'] = _dummyButton(self, 'cross')
except TypeError:
# unavailable when -fancy not specified
pass
# align
def add_history(self, str):
self.tk.call(self._w, 'addhistory', str)
def append_history(self, str):
self.tk.call(self._w, 'appendhistory', str)
def insert(self, index, str):
self.tk.call(self._w, 'insert', index, str)
def pick(self, index):
self.tk.call(self._w, 'pick', index)
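# ComboBox usage sketch (illustrative):
#
#   cb = ComboBox(root, label='Month:', dropdown=1, editable=1)
#   for m in ('January', 'February', 'March'):
#       cb.insert(END, m)
#   cb.pick(0)             # select 'January' without user interaction
#   cb.pack()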
class Control(TixWidget):
"""Control - An entry field with value change arrows. The user can
adjust the value by pressing the two arrow buttons or by entering
the value directly into the entry. The new value will be checked
against the user-defined upper and lower limits.
Subwidget Class
--------- -----
incr Button
decr Button
entry Entry
label Label"""
# FIXME: It should inherit -superclass tixLabelWidget
def __init__ (self, master=None, cnf={}, **kw):
TixWidget.__init__(self, master, 'tixControl', ['options'], cnf, kw)
self.subwidget_list['incr'] = _dummyButton(self, 'incr')
self.subwidget_list['decr'] = _dummyButton(self, 'decr')
self.subwidget_list['label'] = _dummyLabel(self, 'label')
self.subwidget_list['entry'] = _dummyEntry(self, 'entry')
def decrement(self):
self.tk.call(self._w, 'decr')
def increment(self):
self.tk.call(self._w, 'incr')
def invoke(self):
self.tk.call(self._w, 'invoke')
def update(self):
self.tk.call(self._w, 'update')
class DirList(TixWidget):
"""DirList - displays a list view of a directory, its previous
directories and its sub-directories. The user can choose one of
the directories displayed in the list or change to another directory.
Subwidget Class
--------- -----
hlist HList
hsb Scrollbar
vsb Scrollbar"""
# FIXME: It should inherit -superclass tixScrolledHList
def __init__(self, master, cnf={}, **kw):
TixWidget.__init__(self, master, 'tixDirList', ['options'], cnf, kw)
self.subwidget_list['hlist'] = _dummyHList(self, 'hlist')
self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')
def chdir(self, dir):
self.tk.call(self._w, 'chdir', dir)
class DirTree(TixWidget):
"""DirTree - Directory Listing in a hierarchical view.
Displays a tree view of a directory, its previous directories and its
sub-directories. The user can choose one of the directories displayed
in the list or change to another directory.
Subwidget Class
--------- -----
hlist HList
hsb Scrollbar
vsb Scrollbar"""
# FIXME: It should inherit -superclass tixScrolledHList
def __init__(self, master, cnf={}, **kw):
TixWidget.__init__(self, master, 'tixDirTree', ['options'], cnf, kw)
self.subwidget_list['hlist'] = _dummyHList(self, 'hlist')
self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')
def chdir(self, dir):
self.tk.call(self._w, 'chdir', dir)
class DirSelectBox(TixWidget):
"""DirSelectBox - Motif style file select box.
It is generally used for
the user to choose a file. FileSelectBox stores the files mostly
recently selected into a ComboBox widget so that they can be quickly
selected again.
Subwidget Class
--------- -----
selection ComboBox
filter ComboBox
dirlist ScrolledListBox
filelist ScrolledListBox"""
def __init__(self, master, cnf={}, **kw):
TixWidget.__init__(self, master, 'tixDirSelectBox', ['options'], cnf, kw)
self.subwidget_list['dirlist'] = _dummyDirList(self, 'dirlist')
self.subwidget_list['dircbx'] = _dummyFileComboBox(self, 'dircbx')
class ExFileSelectBox(TixWidget):
"""ExFileSelectBox - MS Windows style file select box.
    It provides a convenient method for the user to select files.
Subwidget Class
--------- -----
cancel Button
ok Button
hidden Checkbutton
types ComboBox
dir ComboBox
file ComboBox
dirlist ScrolledListBox
filelist ScrolledListBox"""
def __init__(self, master, cnf={}, **kw):
TixWidget.__init__(self, master, 'tixExFileSelectBox', ['options'], cnf, kw)
self.subwidget_list['cancel'] = _dummyButton(self, 'cancel')
self.subwidget_list['ok'] = _dummyButton(self, 'ok')
self.subwidget_list['hidden'] = _dummyCheckbutton(self, 'hidden')
self.subwidget_list['types'] = _dummyComboBox(self, 'types')
self.subwidget_list['dir'] = _dummyComboBox(self, 'dir')
self.subwidget_list['dirlist'] = _dummyDirList(self, 'dirlist')
self.subwidget_list['file'] = _dummyComboBox(self, 'file')
self.subwidget_list['filelist'] = _dummyScrolledListBox(self, 'filelist')
def filter(self):
self.tk.call(self._w, 'filter')
def invoke(self):
self.tk.call(self._w, 'invoke')
# Should inherit from a Dialog class
class DirSelectDialog(TixWidget):
"""The DirSelectDialog widget presents the directories in the file
system in a dialog window. The user can use this dialog window to
navigate through the file system to select the desired directory.
Subwidgets Class
---------- -----
dirbox DirSelectDialog"""
# FIXME: It should inherit -superclass tixDialogShell
def __init__(self, master, cnf={}, **kw):
TixWidget.__init__(self, master, 'tixDirSelectDialog',
['options'], cnf, kw)
self.subwidget_list['dirbox'] = _dummyDirSelectBox(self, 'dirbox')
# cancel and ok buttons are missing
def popup(self):
self.tk.call(self._w, 'popup')
def popdown(self):
self.tk.call(self._w, 'popdown')
# Should inherit from a Dialog class
class ExFileSelectDialog(TixWidget):
"""ExFileSelectDialog - MS Windows style file select dialog.
    It provides a convenient method for the user to select files.
Subwidgets Class
---------- -----
fsbox ExFileSelectBox"""
# FIXME: It should inherit -superclass tixDialogShell
def __init__(self, master, cnf={}, **kw):
TixWidget.__init__(self, master, 'tixExFileSelectDialog',
['options'], cnf, kw)
self.subwidget_list['fsbox'] = _dummyExFileSelectBox(self, 'fsbox')
def popup(self):
self.tk.call(self._w, 'popup')
def popdown(self):
self.tk.call(self._w, 'popdown')
class FileSelectBox(TixWidget):
"""ExFileSelectBox - Motif style file select box.
It is generally used for
the user to choose a file. FileSelectBox stores the files mostly
recently selected into a ComboBox widget so that they can be quickly
selected again.
Subwidget Class
--------- -----
selection ComboBox
filter ComboBox
dirlist ScrolledListBox
filelist ScrolledListBox"""
def __init__(self, master, cnf={}, **kw):
TixWidget.__init__(self, master, 'tixFileSelectBox', ['options'], cnf, kw)
self.subwidget_list['dirlist'] = _dummyScrolledListBox(self, 'dirlist')
self.subwidget_list['filelist'] = _dummyScrolledListBox(self, 'filelist')
self.subwidget_list['filter'] = _dummyComboBox(self, 'filter')
self.subwidget_list['selection'] = _dummyComboBox(self, 'selection')
def apply_filter(self): # name of subwidget is same as command
self.tk.call(self._w, 'filter')
def invoke(self):
self.tk.call(self._w, 'invoke')
# Should inherit from a Dialog class
class FileSelectDialog(TixWidget):
"""FileSelectDialog - Motif style file select dialog.
Subwidgets Class
---------- -----
btns StdButtonBox
fsbox FileSelectBox"""
# FIXME: It should inherit -superclass tixStdDialogShell
def __init__(self, master, cnf={}, **kw):
TixWidget.__init__(self, master, 'tixFileSelectDialog',
['options'], cnf, kw)
self.subwidget_list['btns'] = _dummyStdButtonBox(self, 'btns')
self.subwidget_list['fsbox'] = _dummyFileSelectBox(self, 'fsbox')
def popup(self):
self.tk.call(self._w, 'popup')
def popdown(self):
self.tk.call(self._w, 'popdown')
class FileEntry(TixWidget):
"""FileEntry - Entry field with button that invokes a FileSelectDialog.
The user can type in the filename manually. Alternatively, the user can
press the button widget that sits next to the entry, which will bring
up a file selection dialog.
Subwidgets Class
---------- -----
button Button
entry Entry"""
# FIXME: It should inherit -superclass tixLabelWidget
def __init__(self, master, cnf={}, **kw):
TixWidget.__init__(self, master, 'tixFileEntry',
['dialogtype', 'options'], cnf, kw)
self.subwidget_list['button'] = _dummyButton(self, 'button')
self.subwidget_list['entry'] = _dummyEntry(self, 'entry')
def invoke(self):
self.tk.call(self._w, 'invoke')
def file_dialog(self):
# FIXME: return python object
pass
class HList(TixWidget):
"""HList - Hierarchy display widget can be used to display any data
that have a hierarchical structure, for example, file system directory
trees. The list entries are indented and connected by branch lines
    according to their places in the hierarchy.
Subwidgets - None"""
def __init__ (self,master=None,cnf={}, **kw):
TixWidget.__init__(self, master, 'tixHList',
['columns', 'options'], cnf, kw)
def add(self, entry, cnf={}, **kw):
return self.tk.call(self._w, 'add', entry, *self._options(cnf, kw))
def add_child(self, parent=None, cnf={}, **kw):
if not parent:
parent = ''
return self.tk.call(
self._w, 'addchild', parent, *self._options(cnf, kw))
def anchor_set(self, entry):
self.tk.call(self._w, 'anchor', 'set', entry)
def anchor_clear(self):
self.tk.call(self._w, 'anchor', 'clear')
def column_width(self, col=0, width=None, chars=None):
if not chars:
return self.tk.call(self._w, 'column', 'width', col, width)
else:
return self.tk.call(self._w, 'column', 'width', col,
'-char', chars)
def delete_all(self):
self.tk.call(self._w, 'delete', 'all')
def delete_entry(self, entry):
self.tk.call(self._w, 'delete', 'entry', entry)
def delete_offsprings(self, entry):
self.tk.call(self._w, 'delete', 'offsprings', entry)
def delete_siblings(self, entry):
self.tk.call(self._w, 'delete', 'siblings', entry)
def dragsite_set(self, index):
self.tk.call(self._w, 'dragsite', 'set', index)
def dragsite_clear(self):
self.tk.call(self._w, 'dragsite', 'clear')
def dropsite_set(self, index):
self.tk.call(self._w, 'dropsite', 'set', index)
def dropsite_clear(self):
self.tk.call(self._w, 'dropsite', 'clear')
def header_create(self, col, cnf={}, **kw):
self.tk.call(self._w, 'header', 'create', col, *self._options(cnf, kw))
def header_configure(self, col, cnf={}, **kw):
if cnf is None:
return _lst2dict(
self.tk.split(
self.tk.call(self._w, 'header', 'configure', col)))
self.tk.call(self._w, 'header', 'configure', col,
*self._options(cnf, kw))
def header_cget(self, col, opt):
return self.tk.call(self._w, 'header', 'cget', col, opt)
def header_exists(self, col):
return self.tk.call(self._w, 'header', 'exists', col)
def header_delete(self, col):
self.tk.call(self._w, 'header', 'delete', col)
def header_size(self, col):
return self.tk.call(self._w, 'header', 'size', col)
def hide_entry(self, entry):
self.tk.call(self._w, 'hide', 'entry', entry)
def indicator_create(self, entry, cnf={}, **kw):
self.tk.call(
self._w, 'indicator', 'create', entry, *self._options(cnf, kw))
def indicator_configure(self, entry, cnf={}, **kw):
if cnf is None:
return _lst2dict(
self.tk.split(
self.tk.call(self._w, 'indicator', 'configure', entry)))
self.tk.call(
self._w, 'indicator', 'configure', entry, *self._options(cnf, kw))
def indicator_cget(self, entry, opt):
return self.tk.call(self._w, 'indicator', 'cget', entry, opt)
def indicator_exists(self, entry):
return self.tk.call (self._w, 'indicator', 'exists', entry)
def indicator_delete(self, entry):
self.tk.call(self._w, 'indicator', 'delete', entry)
def indicator_size(self, entry):
return self.tk.call(self._w, 'indicator', 'size', entry)
def info_anchor(self):
return self.tk.call(self._w, 'info', 'anchor')
def info_children(self, entry=None):
c = self.tk.call(self._w, 'info', 'children', entry)
return self.tk.splitlist(c)
def info_data(self, entry):
return self.tk.call(self._w, 'info', 'data', entry)
def info_exists(self, entry):
return self.tk.call(self._w, 'info', 'exists', entry)
def info_hidden(self, entry):
return self.tk.call(self._w, 'info', 'hidden', entry)
def info_next(self, entry):
return self.tk.call(self._w, 'info', 'next', entry)
def info_parent(self, entry):
return self.tk.call(self._w, 'info', 'parent', entry)
def info_prev(self, entry):
return self.tk.call(self._w, 'info', 'prev', entry)
def info_selection(self):
c = self.tk.call(self._w, 'info', 'selection')
return self.tk.splitlist(c)
def item_cget(self, entry, col, opt):
return self.tk.call(self._w, 'item', 'cget', entry, col, opt)
def item_configure(self, entry, col, cnf={}, **kw):
if cnf is None:
return _lst2dict(
self.tk.split(
self.tk.call(self._w, 'item', 'configure', entry, col)))
self.tk.call(self._w, 'item', 'configure', entry, col,
*self._options(cnf, kw))
def item_create(self, entry, col, cnf={}, **kw):
self.tk.call(
self._w, 'item', 'create', entry, col, *self._options(cnf, kw))
def item_exists(self, entry, col):
return self.tk.call(self._w, 'item', 'exists', entry, col)
def item_delete(self, entry, col):
self.tk.call(self._w, 'item', 'delete', entry, col)
def entrycget(self, entry, opt):
return self.tk.call(self._w, 'entrycget', entry, opt)
def entryconfigure(self, entry, cnf={}, **kw):
if cnf is None:
return _lst2dict(
self.tk.split(
self.tk.call(self._w, 'entryconfigure', entry)))
self.tk.call(self._w, 'entryconfigure', entry,
*self._options(cnf, kw))
def nearest(self, y):
return self.tk.call(self._w, 'nearest', y)
def see(self, entry):
self.tk.call(self._w, 'see', entry)
def selection_clear(self, cnf={}, **kw):
self.tk.call(self._w, 'selection', 'clear', *self._options(cnf, kw))
def selection_includes(self, entry):
return self.tk.call(self._w, 'selection', 'includes', entry)
def selection_set(self, first, last=None):
self.tk.call(self._w, 'selection', 'set', first, last)
def show_entry(self, entry):
return self.tk.call(self._w, 'show', 'entry', entry)
def xview(self, *args):
self.tk.call(self._w, 'xview', *args)
def yview(self, *args):
self.tk.call(self._w, 'yview', *args)
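# HList usage sketch (illustrative; entry path names encode the hierarchy):
#
#   hl = HList(root, columns=1, separator='.')
#   hl.add('apps', text='Applications')
#   hl.add('apps.editor', text='Editor')      # child of 'apps'
#   hl.add('apps.terminal', text='Terminal')
#   hl.selection_set('apps.editor')
#   hl.pack(fill=BOTH, expand=1)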
class InputOnly(TixWidget):
"""InputOnly - Invisible widget. Unix only.
Subwidgets - None"""
def __init__ (self,master=None,cnf={}, **kw):
TixWidget.__init__(self, master, 'tixInputOnly', None, cnf, kw)
class LabelEntry(TixWidget):
"""LabelEntry - Entry field with label. Packages an entry widget
    and a label into one mega widget. It can be used to simplify
    the creation of ``entry-form'' type of interfaces.
Subwidgets Class
---------- -----
label Label
entry Entry"""
def __init__ (self,master=None,cnf={}, **kw):
TixWidget.__init__(self, master, 'tixLabelEntry',
['labelside','options'], cnf, kw)
self.subwidget_list['label'] = _dummyLabel(self, 'label')
self.subwidget_list['entry'] = _dummyEntry(self, 'entry')
class LabelFrame(TixWidget):
"""LabelFrame - Labelled Frame container. Packages a frame widget
and a label into one mega widget. To create widgets inside a
LabelFrame widget, one creates the new widgets relative to the
    frame subwidget and manages them inside the frame subwidget.
Subwidgets Class
---------- -----
label Label
frame Frame"""
def __init__ (self,master=None,cnf={}, **kw):
TixWidget.__init__(self, master, 'tixLabelFrame',
['labelside','options'], cnf, kw)
self.subwidget_list['label'] = _dummyLabel(self, 'label')
self.subwidget_list['frame'] = _dummyFrame(self, 'frame')
class ListNoteBook(TixWidget):
"""A ListNoteBook widget is very similar to the TixNoteBook widget:
it can be used to display many windows in a limited space using a
notebook metaphor. The notebook is divided into a stack of pages
(windows). At one time only one of these pages can be shown.
The user can navigate through these pages by
choosing the name of the desired page in the hlist subwidget."""
def __init__(self, master, cnf={}, **kw):
TixWidget.__init__(self, master, 'tixListNoteBook', ['options'], cnf, kw)
# Is this necessary? It's not an exposed subwidget in Tix.
self.subwidget_list['pane'] = _dummyPanedWindow(self, 'pane',
destroy_physically=0)
self.subwidget_list['hlist'] = _dummyHList(self, 'hlist')
self.subwidget_list['shlist'] = _dummyScrolledHList(self, 'shlist')
def add(self, name, cnf={}, **kw):
self.tk.call(self._w, 'add', name, *self._options(cnf, kw))
self.subwidget_list[name] = TixSubWidget(self, name)
return self.subwidget_list[name]
def page(self, name):
return self.subwidget(name)
def pages(self):
# Can't call subwidgets_all directly because we don't want .nbframe
names = self.tk.split(self.tk.call(self._w, 'pages'))
ret = []
for x in names:
ret.append(self.subwidget(x))
return ret
def raise_page(self, name): # raise is a python keyword
self.tk.call(self._w, 'raise', name)
class Meter(TixWidget):
"""The Meter widget can be used to show the progress of a background
job which may take a long time to execute.
"""
def __init__(self, master=None, cnf={}, **kw):
TixWidget.__init__(self, master, 'tixMeter',
['options'], cnf, kw)
class NoteBook(TixWidget):
"""NoteBook - Multi-page container widget (tabbed notebook metaphor).
Subwidgets Class
---------- -----
nbframe NoteBookFrame
<pages> page widgets added dynamically with the add method"""
def __init__ (self,master=None,cnf={}, **kw):
TixWidget.__init__(self,master,'tixNoteBook', ['options'], cnf, kw)
self.subwidget_list['nbframe'] = TixSubWidget(self, 'nbframe',
destroy_physically=0)
def add(self, name, cnf={}, **kw):
self.tk.call(self._w, 'add', name, *self._options(cnf, kw))
self.subwidget_list[name] = TixSubWidget(self, name)
return self.subwidget_list[name]
def delete(self, name):
self.tk.call(self._w, 'delete', name)
self.subwidget_list[name].destroy()
del self.subwidget_list[name]
def page(self, name):
return self.subwidget(name)
def pages(self):
# Can't call subwidgets_all directly because we don't want .nbframe
names = self.tk.split(self.tk.call(self._w, 'pages'))
ret = []
for x in names:
ret.append(self.subwidget(x))
return ret
def raise_page(self, name): # raise is a python keyword
self.tk.call(self._w, 'raise', name)
def raised(self):
return self.tk.call(self._w, 'raised')
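# NoteBook usage sketch (illustrative): pages are created with add() and
# populated relative to the returned frame-like subwidget.
#
#   nb = NoteBook(root)
#   page = nb.add('general', label='General', underline=0)
#   Label(page, text='settings go here').pack()
#   nb.raise_page('general')
#   nb.pack(fill=BOTH, expand=1)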
class NoteBookFrame(TixWidget):
# FIXME: This is dangerous to expose to be called on its own.
pass
class OptionMenu(TixWidget):
"""OptionMenu - creates a menu button of options.
Subwidget Class
--------- -----
menubutton Menubutton
menu Menu"""
def __init__(self, master, cnf={}, **kw):
TixWidget.__init__(self, master, 'tixOptionMenu', ['options'], cnf, kw)
self.subwidget_list['menubutton'] = _dummyMenubutton(self, 'menubutton')
self.subwidget_list['menu'] = _dummyMenu(self, 'menu')
def add_command(self, name, cnf={}, **kw):
self.tk.call(self._w, 'add', 'command', name, *self._options(cnf, kw))
def add_separator(self, name, cnf={}, **kw):
self.tk.call(self._w, 'add', 'separator', name, *self._options(cnf, kw))
def delete(self, name):
self.tk.call(self._w, 'delete', name)
def disable(self, name):
self.tk.call(self._w, 'disable', name)
def enable(self, name):
self.tk.call(self._w, 'enable', name)
class PanedWindow(TixWidget):
"""PanedWindow - Multi-pane container widget
allows the user to interactively manipulate the sizes of several
panes. The panes can be arranged either vertically or horizontally.The
user changes the sizes of the panes by dragging the resize handle
between two panes.
Subwidgets Class
---------- -----
<panes> g/p widgets added dynamically with the add method."""
def __init__(self, master, cnf={}, **kw):
TixWidget.__init__(self, master, 'tixPanedWindow', ['orientation', 'options'], cnf, kw)
# add delete forget panecget paneconfigure panes setsize
def add(self, name, cnf={}, **kw):
self.tk.call(self._w, 'add', name, *self._options(cnf, kw))
self.subwidget_list[name] = TixSubWidget(self, name,
check_intermediate=0)
return self.subwidget_list[name]
def delete(self, name):
self.tk.call(self._w, 'delete', name)
self.subwidget_list[name].destroy()
del self.subwidget_list[name]
def forget(self, name):
self.tk.call(self._w, 'forget', name)
def panecget(self, entry, opt):
return self.tk.call(self._w, 'panecget', entry, opt)
def paneconfigure(self, entry, cnf={}, **kw):
if cnf is None:
return _lst2dict(
self.tk.split(
self.tk.call(self._w, 'paneconfigure', entry)))
self.tk.call(self._w, 'paneconfigure', entry, *self._options(cnf, kw))
def panes(self):
        names = self.tk.splitlist(self.tk.call(self._w, 'panes'))
ret = []
for x in names:
ret.append(self.subwidget(x))
return ret
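# PanedWindow usage sketch (illustrative; pane options such as min/size
# follow the tixPanedWindow man page):
#
#   pw = PanedWindow(root, orientation='vertical')
#   top = pw.add('top', min=70, size=100)
#   bottom = pw.add('bottom', min=70)
#   Label(top, text='upper pane').pack()
#   Label(bottom, text='lower pane').pack()
#   pw.pack(fill=BOTH, expand=1)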
class PopupMenu(TixWidget):
"""PopupMenu widget can be used as a replacement of the tk_popup command.
    The advantage of the Tix PopupMenu widget is that it requires less
    application code to manipulate.
Subwidgets Class
---------- -----
menubutton Menubutton
menu Menu"""
# FIXME: It should inherit -superclass tixShell
def __init__(self, master, cnf={}, **kw):
TixWidget.__init__(self, master, 'tixPopupMenu', ['options'], cnf, kw)
self.subwidget_list['menubutton'] = _dummyMenubutton(self, 'menubutton')
self.subwidget_list['menu'] = _dummyMenu(self, 'menu')
def bind_widget(self, widget):
self.tk.call(self._w, 'bind', widget._w)
def unbind_widget(self, widget):
self.tk.call(self._w, 'unbind', widget._w)
def post_widget(self, widget, x, y):
self.tk.call(self._w, 'post', widget._w, x, y)
class ResizeHandle(TixWidget):
"""Internal widget to draw resize handles on Scrolled widgets."""
def __init__(self, master, cnf={}, **kw):
# There seems to be a Tix bug rejecting the configure method
# Let's try making the flags -static
flags = ['options', 'command', 'cursorfg', 'cursorbg',
'handlesize', 'hintcolor', 'hintwidth',
'x', 'y']
# In fact, x y height width are configurable
TixWidget.__init__(self, master, 'tixResizeHandle',
flags, cnf, kw)
def attach_widget(self, widget):
self.tk.call(self._w, 'attachwidget', widget._w)
def detach_widget(self, widget):
self.tk.call(self._w, 'detachwidget', widget._w)
def hide(self, widget):
self.tk.call(self._w, 'hide', widget._w)
def show(self, widget):
self.tk.call(self._w, 'show', widget._w)
class ScrolledHList(TixWidget):
"""ScrolledHList - HList with automatic scrollbars."""
# FIXME: It should inherit -superclass tixScrolledWidget
def __init__(self, master, cnf={}, **kw):
TixWidget.__init__(self, master, 'tixScrolledHList', ['options'],
cnf, kw)
self.subwidget_list['hlist'] = _dummyHList(self, 'hlist')
self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')
class ScrolledListBox(TixWidget):
"""ScrolledListBox - Listbox with automatic scrollbars."""
# FIXME: It should inherit -superclass tixScrolledWidget
def __init__(self, master, cnf={}, **kw):
TixWidget.__init__(self, master, 'tixScrolledListBox', ['options'], cnf, kw)
self.subwidget_list['listbox'] = _dummyListbox(self, 'listbox')
self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')
class ScrolledText(TixWidget):
"""ScrolledText - Text with automatic scrollbars."""
# FIXME: It should inherit -superclass tixScrolledWidget
def __init__(self, master, cnf={}, **kw):
TixWidget.__init__(self, master, 'tixScrolledText', ['options'], cnf, kw)
self.subwidget_list['text'] = _dummyText(self, 'text')
self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')
class ScrolledTList(TixWidget):
"""ScrolledTList - TList with automatic scrollbars."""
# FIXME: It should inherit -superclass tixScrolledWidget
def __init__(self, master, cnf={}, **kw):
TixWidget.__init__(self, master, 'tixScrolledTList', ['options'],
cnf, kw)
self.subwidget_list['tlist'] = _dummyTList(self, 'tlist')
self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')
class ScrolledWindow(TixWidget):
"""ScrolledWindow - Window with automatic scrollbars."""
# FIXME: It should inherit -superclass tixScrolledWidget
def __init__(self, master, cnf={}, **kw):
TixWidget.__init__(self, master, 'tixScrolledWindow', ['options'], cnf, kw)
self.subwidget_list['window'] = _dummyFrame(self, 'window')
self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')
class Select(TixWidget):
"""Select - Container of button subwidgets. It can be used to provide
radio-box or check-box style of selection options for the user.
Subwidgets are buttons added dynamically using the add method."""
# FIXME: It should inherit -superclass tixLabelWidget
def __init__(self, master, cnf={}, **kw):
TixWidget.__init__(self, master, 'tixSelect',
['allowzero', 'radio', 'orientation', 'labelside',
'options'],
cnf, kw)
self.subwidget_list['label'] = _dummyLabel(self, 'label')
def add(self, name, cnf={}, **kw):
self.tk.call(self._w, 'add', name, *self._options(cnf, kw))
self.subwidget_list[name] = _dummyButton(self, name)
return self.subwidget_list[name]
def invoke(self, name):
self.tk.call(self._w, 'invoke', name)
class Shell(TixWidget):
"""Toplevel window.
Subwidgets - None"""
def __init__ (self,master=None,cnf={}, **kw):
TixWidget.__init__(self, master, 'tixShell', ['options', 'title'], cnf, kw)
class DialogShell(TixWidget):
"""Toplevel window, with popup popdown and center methods.
It tells the window manager that it is a dialog window and should be
    treated specially. The exact treatment depends on
the window manager.
Subwidgets - None"""
# FIXME: It should inherit from Shell
def __init__ (self,master=None,cnf={}, **kw):
TixWidget.__init__(self, master,
'tixDialogShell',
['options', 'title', 'mapped',
'minheight', 'minwidth',
'parent', 'transient'], cnf, kw)
def popdown(self):
self.tk.call(self._w, 'popdown')
def popup(self):
self.tk.call(self._w, 'popup')
def center(self):
self.tk.call(self._w, 'center')
class StdButtonBox(TixWidget):
"""StdButtonBox - Standard Button Box (OK, Apply, Cancel and Help) """
def __init__(self, master=None, cnf={}, **kw):
TixWidget.__init__(self, master, 'tixStdButtonBox',
['orientation', 'options'], cnf, kw)
self.subwidget_list['ok'] = _dummyButton(self, 'ok')
self.subwidget_list['apply'] = _dummyButton(self, 'apply')
self.subwidget_list['cancel'] = _dummyButton(self, 'cancel')
self.subwidget_list['help'] = _dummyButton(self, 'help')
def invoke(self, name):
if name in self.subwidget_list:
self.tk.call(self._w, 'invoke', name)
class TList(TixWidget):
"""TList - Hierarchy display widget which can be
used to display data in a tabular format. The list entries of a TList
widget are similar to the entries in the Tk listbox widget. The main
differences are (1) the TList widget can display the list entries in a
two dimensional format and (2) you can use graphical images as well as
multiple colors and fonts for the list entries.
Subwidgets - None"""
def __init__ (self,master=None,cnf={}, **kw):
TixWidget.__init__(self, master, 'tixTList', ['options'], cnf, kw)
def active_set(self, index):
self.tk.call(self._w, 'active', 'set', index)
def active_clear(self):
self.tk.call(self._w, 'active', 'clear')
def anchor_set(self, index):
self.tk.call(self._w, 'anchor', 'set', index)
def anchor_clear(self):
self.tk.call(self._w, 'anchor', 'clear')
def delete(self, from_, to=None):
self.tk.call(self._w, 'delete', from_, to)
def dragsite_set(self, index):
self.tk.call(self._w, 'dragsite', 'set', index)
def dragsite_clear(self):
self.tk.call(self._w, 'dragsite', 'clear')
def dropsite_set(self, index):
self.tk.call(self._w, 'dropsite', 'set', index)
def dropsite_clear(self):
self.tk.call(self._w, 'dropsite', 'clear')
def insert(self, index, cnf={}, **kw):
self.tk.call(self._w, 'insert', index, *self._options(cnf, kw))
def info_active(self):
return self.tk.call(self._w, 'info', 'active')
def info_anchor(self):
return self.tk.call(self._w, 'info', 'anchor')
def info_down(self, index):
return self.tk.call(self._w, 'info', 'down', index)
def info_left(self, index):
return self.tk.call(self._w, 'info', 'left', index)
def info_right(self, index):
return self.tk.call(self._w, 'info', 'right', index)
def info_selection(self):
c = self.tk.call(self._w, 'info', 'selection')
return self.tk.splitlist(c)
def info_size(self):
return self.tk.call(self._w, 'info', 'size')
def info_up(self, index):
return self.tk.call(self._w, 'info', 'up', index)
def nearest(self, x, y):
return self.tk.call(self._w, 'nearest', x, y)
def see(self, index):
self.tk.call(self._w, 'see', index)
def selection_clear(self, cnf={}, **kw):
self.tk.call(self._w, 'selection', 'clear', *self._options(cnf, kw))
def selection_includes(self, index):
return self.tk.call(self._w, 'selection', 'includes', index)
def selection_set(self, first, last=None):
self.tk.call(self._w, 'selection', 'set', first, last)
def xview(self, *args):
self.tk.call(self._w, 'xview', *args)
def yview(self, *args):
self.tk.call(self._w, 'yview', *args)
class Tree(TixWidget):
"""Tree - The tixTree widget can be used to display hierachical
data in a tree form. The user can adjust
the view of the tree by opening or closing parts of the tree."""
# FIXME: It should inherit -superclass tixScrolledWidget
def __init__(self, master=None, cnf={}, **kw):
TixWidget.__init__(self, master, 'tixTree',
['options'], cnf, kw)
self.subwidget_list['hlist'] = _dummyHList(self, 'hlist')
self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')
def autosetmode(self):
'''This command calls the setmode method for all the entries in this
Tree widget: if an entry has no child entries, its mode is set to
none. Otherwise, if the entry has any hidden child entries, its mode is
set to open; otherwise its mode is set to close.'''
self.tk.call(self._w, 'autosetmode')
def close(self, entrypath):
'''Close the entry given by entryPath if its mode is close.'''
self.tk.call(self._w, 'close', entrypath)
def getmode(self, entrypath):
'''Returns the current mode of the entry given by entryPath.'''
return self.tk.call(self._w, 'getmode', entrypath)
def open(self, entrypath):
'''Open the entry given by entryPath if its mode is open.'''
self.tk.call(self._w, 'open', entrypath)
def setmode(self, entrypath, mode='none'):
'''This command is used to indicate whether the entry given by
entryPath has children entries and whether the children are visible. mode
must be one of open, close or none. If mode is set to open, a (+)
        indicator is drawn next to the entry. If mode is set to close, a (-)
        indicator is drawn next to the entry. If mode is set to none, no
indicators will be drawn for this entry. The default mode is none. The
open mode indicates the entry has hidden children and this entry can be
opened by the user. The close mode indicates that all the children of the
entry are now visible and the entry can be closed by the user.'''
self.tk.call(self._w, 'setmode', entrypath, mode)
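# Hedged usage sketch (not part of the original module): exercising the Tree
# modes defined above. The entry paths 'one' and 'one.two' are illustrative,
# and running this requires a live Tk interpreter with Tix available.
def _demo_tree(master):
    tree = Tree(master)
    tree.hlist.add('one', text='one')
    tree.hlist.add('one.two', text='two')
    tree.autosetmode()           # 'one' now has a child, so it gets open/close mode
    tree.open('one')             # reveal 'one.two'
    return tree.getmode('one')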
# Could try subclassing Tree for CheckList - would need another arg to init
class CheckList(TixWidget):
"""The CheckList widget
displays a list of items to be selected by the user. CheckList acts
similarly to the Tk checkbutton or radiobutton widgets, except it is
capable of handling many more items than checkbuttons or radiobuttons.
"""
# FIXME: It should inherit -superclass tixTree
def __init__(self, master=None, cnf={}, **kw):
TixWidget.__init__(self, master, 'tixCheckList',
['options'], cnf, kw)
self.subwidget_list['hlist'] = _dummyHList(self, 'hlist')
self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')
def autosetmode(self):
'''This command calls the setmode method for all the entries in this
        CheckList widget: if an entry has no child entries, its mode is set to
none. Otherwise, if the entry has any hidden child entries, its mode is
set to open; otherwise its mode is set to close.'''
self.tk.call(self._w, 'autosetmode')
def close(self, entrypath):
'''Close the entry given by entryPath if its mode is close.'''
self.tk.call(self._w, 'close', entrypath)
def getmode(self, entrypath):
'''Returns the current mode of the entry given by entryPath.'''
return self.tk.call(self._w, 'getmode', entrypath)
def open(self, entrypath):
'''Open the entry given by entryPath if its mode is open.'''
self.tk.call(self._w, 'open', entrypath)
def getselection(self, mode='on'):
        '''Returns a list of items whose status matches mode. If mode is
        not specified, the list of items in the "on" status will be returned.
        Mode can be on, off or default.'''
c = self.tk.split(self.tk.call(self._w, 'getselection', mode))
return self.tk.splitlist(c)
def getstatus(self, entrypath):
'''Returns the current status of entryPath.'''
return self.tk.call(self._w, 'getstatus', entrypath)
def setstatus(self, entrypath, mode='on'):
'''Sets the status of entryPath to be status. A bitmap will be
displayed next to the entry its status is on, off or default.'''
self.tk.call(self._w, 'setstatus', entrypath, mode)
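# Hedged sketch (not in the original module): a CheckList with one
# hypothetical entry toggled on; a live Tk/Tix interpreter is required.
def _demo_checklist(master):
    cl = CheckList(master)
    cl.hlist.add('opt1', text='Option 1')
    cl.setstatus('opt1', 'on')
    return cl.getselection('on')    # -> tuple of entry paths, e.g. ('opt1',)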
###########################################################################
### The subclassing below is used to instantiate the subwidgets in each ###
### mega widget. This allows us to access their methods directly. ###
###########################################################################
class _dummyButton(Button, TixSubWidget):
def __init__(self, master, name, destroy_physically=1):
TixSubWidget.__init__(self, master, name, destroy_physically)
class _dummyCheckbutton(Checkbutton, TixSubWidget):
def __init__(self, master, name, destroy_physically=1):
TixSubWidget.__init__(self, master, name, destroy_physically)
class _dummyEntry(Entry, TixSubWidget):
def __init__(self, master, name, destroy_physically=1):
TixSubWidget.__init__(self, master, name, destroy_physically)
class _dummyFrame(Frame, TixSubWidget):
def __init__(self, master, name, destroy_physically=1):
TixSubWidget.__init__(self, master, name, destroy_physically)
class _dummyLabel(Label, TixSubWidget):
def __init__(self, master, name, destroy_physically=1):
TixSubWidget.__init__(self, master, name, destroy_physically)
class _dummyListbox(Listbox, TixSubWidget):
def __init__(self, master, name, destroy_physically=1):
TixSubWidget.__init__(self, master, name, destroy_physically)
class _dummyMenu(Menu, TixSubWidget):
def __init__(self, master, name, destroy_physically=1):
TixSubWidget.__init__(self, master, name, destroy_physically)
class _dummyMenubutton(Menubutton, TixSubWidget):
def __init__(self, master, name, destroy_physically=1):
TixSubWidget.__init__(self, master, name, destroy_physically)
class _dummyScrollbar(Scrollbar, TixSubWidget):
def __init__(self, master, name, destroy_physically=1):
TixSubWidget.__init__(self, master, name, destroy_physically)
class _dummyText(Text, TixSubWidget):
def __init__(self, master, name, destroy_physically=1):
TixSubWidget.__init__(self, master, name, destroy_physically)
class _dummyScrolledListBox(ScrolledListBox, TixSubWidget):
def __init__(self, master, name, destroy_physically=1):
TixSubWidget.__init__(self, master, name, destroy_physically)
self.subwidget_list['listbox'] = _dummyListbox(self, 'listbox')
self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')
class _dummyHList(HList, TixSubWidget):
def __init__(self, master, name, destroy_physically=1):
TixSubWidget.__init__(self, master, name, destroy_physically)
class _dummyScrolledHList(ScrolledHList, TixSubWidget):
def __init__(self, master, name, destroy_physically=1):
TixSubWidget.__init__(self, master, name, destroy_physically)
self.subwidget_list['hlist'] = _dummyHList(self, 'hlist')
self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')
class _dummyTList(TList, TixSubWidget):
def __init__(self, master, name, destroy_physically=1):
TixSubWidget.__init__(self, master, name, destroy_physically)
class _dummyComboBox(ComboBox, TixSubWidget):
def __init__(self, master, name, destroy_physically=1):
        TixSubWidget.__init__(self, master, name, ['fancy', destroy_physically])
self.subwidget_list['label'] = _dummyLabel(self, 'label')
self.subwidget_list['entry'] = _dummyEntry(self, 'entry')
self.subwidget_list['arrow'] = _dummyButton(self, 'arrow')
self.subwidget_list['slistbox'] = _dummyScrolledListBox(self,
'slistbox')
try:
self.subwidget_list['tick'] = _dummyButton(self, 'tick')
            # cross Button: present if created with the fancy option
self.subwidget_list['cross'] = _dummyButton(self, 'cross')
except TypeError:
# unavailable when -fancy not specified
pass
class _dummyDirList(DirList, TixSubWidget):
def __init__(self, master, name, destroy_physically=1):
TixSubWidget.__init__(self, master, name, destroy_physically)
self.subwidget_list['hlist'] = _dummyHList(self, 'hlist')
self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')
class _dummyDirSelectBox(DirSelectBox, TixSubWidget):
def __init__(self, master, name, destroy_physically=1):
TixSubWidget.__init__(self, master, name, destroy_physically)
self.subwidget_list['dirlist'] = _dummyDirList(self, 'dirlist')
self.subwidget_list['dircbx'] = _dummyFileComboBox(self, 'dircbx')
class _dummyExFileSelectBox(ExFileSelectBox, TixSubWidget):
def __init__(self, master, name, destroy_physically=1):
TixSubWidget.__init__(self, master, name, destroy_physically)
self.subwidget_list['cancel'] = _dummyButton(self, 'cancel')
self.subwidget_list['ok'] = _dummyButton(self, 'ok')
self.subwidget_list['hidden'] = _dummyCheckbutton(self, 'hidden')
self.subwidget_list['types'] = _dummyComboBox(self, 'types')
self.subwidget_list['dir'] = _dummyComboBox(self, 'dir')
self.subwidget_list['dirlist'] = _dummyScrolledListBox(self, 'dirlist')
self.subwidget_list['file'] = _dummyComboBox(self, 'file')
self.subwidget_list['filelist'] = _dummyScrolledListBox(self, 'filelist')
class _dummyFileSelectBox(FileSelectBox, TixSubWidget):
def __init__(self, master, name, destroy_physically=1):
TixSubWidget.__init__(self, master, name, destroy_physically)
self.subwidget_list['dirlist'] = _dummyScrolledListBox(self, 'dirlist')
self.subwidget_list['filelist'] = _dummyScrolledListBox(self, 'filelist')
self.subwidget_list['filter'] = _dummyComboBox(self, 'filter')
self.subwidget_list['selection'] = _dummyComboBox(self, 'selection')
class _dummyFileComboBox(ComboBox, TixSubWidget):
def __init__(self, master, name, destroy_physically=1):
TixSubWidget.__init__(self, master, name, destroy_physically)
self.subwidget_list['dircbx'] = _dummyComboBox(self, 'dircbx')
class _dummyStdButtonBox(StdButtonBox, TixSubWidget):
def __init__(self, master, name, destroy_physically=1):
TixSubWidget.__init__(self, master, name, destroy_physically)
self.subwidget_list['ok'] = _dummyButton(self, 'ok')
self.subwidget_list['apply'] = _dummyButton(self, 'apply')
self.subwidget_list['cancel'] = _dummyButton(self, 'cancel')
self.subwidget_list['help'] = _dummyButton(self, 'help')
class _dummyNoteBookFrame(NoteBookFrame, TixSubWidget):
def __init__(self, master, name, destroy_physically=0):
TixSubWidget.__init__(self, master, name, destroy_physically)
class _dummyPanedWindow(PanedWindow, TixSubWidget):
def __init__(self, master, name, destroy_physically=1):
TixSubWidget.__init__(self, master, name, destroy_physically)
########################
### Utility Routines ###
########################
# mike: Should tixDestroy be exposed as a wrapper? - but not for widgets.
def OptionName(widget):
'''Returns the qualified path name for the widget. Normally used to set
default options for subwidgets. See tixwidgets.py'''
return widget.tk.call('tixOptionName', widget._w)
# Called with a dictionary argument of the form
# {'*.c':'C source files', '*.txt':'Text Files', '*':'All files'}
# returns a string which can be used to configure the fsbox file types
# in an ExFileSelectBox. i.e.,
# '{{*} {* - All files}} {{*.c} {*.c - C source files}} {{*.txt} {*.txt - Text Files}}'
def FileTypeList(dict):
s = ''
for type in dict.keys():
s = s + '{{' + type + '} {' + type + ' - ' + dict[type] + '}} '
return s
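# Illustrative call (not part of the original module); dict iteration order
# is not guaranteed here, so the segments may come out in any order:
#   FileTypeList({'*.py': 'Python files', '*': 'All files'})
#   -> '{{*.py} {*.py - Python files}} {{*} {* - All files}} '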
# Still to be done:
# tixIconView
class CObjView(TixWidget):
"""This file implements the Canvas Object View widget. This is a base
class of IconView. It implements automatic placement/adjustment of the
scrollbars according to the canvas objects inside the canvas subwidget.
The scrollbars are adjusted so that the canvas is just large enough
to see all the objects.
"""
# FIXME: It should inherit -superclass tixScrolledWidget
pass
class Grid(TixWidget):
'''The Tix Grid command creates a new window and makes it into a
    tixGrid widget. Additional options may be specified on the command
line or in the option database to configure aspects such as its cursor
and relief.
A Grid widget displays its contents in a two dimensional grid of cells.
Each cell may contain one Tix display item, which may be in text,
graphics or other formats. See the DisplayStyle class for more information
about Tix display items. Individual cells, or groups of cells, can be
formatted with a wide range of attributes, such as its color, relief and
border.
Subwidgets - None'''
# valid specific resources as of Tk 8.4
# editdonecmd, editnotifycmd, floatingcols, floatingrows, formatcmd,
# highlightbackground, highlightcolor, leftmargin, itemtype, selectmode,
# selectunit, topmargin,
def __init__(self, master=None, cnf={}, **kw):
        static = []
        self.cnf = cnf
TixWidget.__init__(self, master, 'tixGrid', static, cnf, kw)
# valid options as of Tk 8.4
# anchor, bdtype, cget, configure, delete, dragsite, dropsite, entrycget, edit
# entryconfigure, format, geometryinfo, info, index, move, nearest, selection
# set, size, unset, xview, yview
# def anchor option ?args ...?
def anchor_get(self):
"Get the (x,y) coordinate of the current anchor cell"
return self._getints(self.tk.call(self, 'anchor', 'get'))
# def bdtype
# def delete dim from ?to?
def delete_row(self, from_, to=None):
"""Delete rows between from_ and to inclusive.
If to is not provided, delete only row at from_"""
if to is None:
self.tk.call(self, 'delete', 'row', from_)
else:
self.tk.call(self, 'delete', 'row', from_, to)
def delete_column(self, from_, to=None):
"""Delete columns between from_ and to inclusive.
If to is not provided, delete only column at from_"""
if to is None:
self.tk.call(self, 'delete', 'column', from_)
else:
self.tk.call(self, 'delete', 'column', from_, to)
# def edit apply
# def edit set x y
def entrycget(self, x, y, option):
"Get the option value for cell at (x,y)"
return self.tk.call(self, 'entrycget', x, y, option)
def entryconfigure(self, x, y, **kw):
return self.tk.call(self, 'entryconfigure', x, y, *self._options(None, kw))
# def format
# def index
def info_exists(self, x, y):
"Return True if display item exists at (x,y)"
return bool(int(self.tk.call(self, 'info', 'exists', x, y)))
def info_bbox(self, x, y):
# This seems to always return '', at least for 'text' displayitems
return self.tk.call(self, 'info', 'bbox', x, y)
def nearest(self, x, y):
"Return coordinate of cell nearest pixel coordinate (x,y)"
return self._getints(self.tk.call(self, 'nearest', x, y))
# def selection adjust
# def selection clear
# def selection includes
# def selection set
# def selection toggle
# def move dim from to offset
def set(self, x, y, itemtype=None, **kw):
        args = self._options(self.cnf, kw)
        if itemtype is not None:
            args = ('-itemtype', itemtype) + args
self.tk.call(self, 'set', x, y, *args)
# def size dim index ?option value ...?
# def unset x y
def xview(self):
return self._getdoubles(self.tk.call(self, 'xview'))
def xview_moveto(self, fraction):
        self.tk.call(self, 'xview', 'moveto', fraction)
def xview_scroll(self, count, what="units"):
"Scroll right (count>0) or left <count> of units|pages"
self.tk.call(self, 'xview', 'scroll', count, what)
def yview(self):
return self._getdoubles(self.tk.call(self, 'yview'))
def yview_moveto(self, fraction):
        self.tk.call(self, 'yview', 'moveto', fraction)
def yview_scroll(self, count, what="units"):
"Scroll down (count>0) or up <count> of units|pages"
self.tk.call(self, 'yview', 'scroll', count, what)
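# Hedged sketch (not in the original module): populating a Grid with text
# display items. The cell coordinates and labels are illustrative, and a
# live Tk/Tix interpreter is required.
def _demo_grid(master):
    g = Grid(master)
    g.set(0, 0, itemtype='text', text='name')
    g.set(1, 0, itemtype='text', text='value')
    return g.nearest(0, 0)    # cell index closest to pixel (0, 0)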
class ScrolledGrid(Grid):
'''Scrolled Grid widgets'''
# FIXME: It should inherit -superclass tixScrolledWidget
def __init__(self, master=None, cnf={}, **kw):
        static = []
        self.cnf = cnf
TixWidget.__init__(self, master, 'tixScrolledGrid', static, cnf, kw)
| mit | -4,159,879,401,223,015,400 | 38.165521 | 96 | 0.605074 | false |
BlackLight/evesp | evesp/event/__init__.py | 1 | 2976 | class Event(object):
"""
Base class for events
Fabio Manganiello, 2015 <[email protected]>
"""
def __init__(self, component=None, **kwargs):
"""
Constructor
kwargs -- key-value associations for the attributes of the object
"""
self.__kwargs = kwargs
self.component = component
vars(self).update(kwargs)
def get(self, attr):
" Get an event attribute by name. Return None if the attribute doesn't exist "
return self.__kwargs[attr] if attr in self.__kwargs else None
def serialize(self):
" Serialize the event using pickle "
import pickle
return pickle.dumps(self)
@classmethod
def deserialize(cls, event):
" Deserialize and return the event object using pickle "
import pickle
obj = pickle.loads(event)
assert isinstance(obj, cls)
return obj
def to_json(self):
" Serialize as JSON "
import json
attrs = self.__kwargs
return json.dumps(attrs)
@classmethod
def from_json(cls, attrs):
" Deserialize and initialize from JSON "
import json
attrs = dict(json.loads(attrs))
return Event(**attrs)
def __eq__(self, event):
"""
Return true if event equals self.
Two events are considered "equal" if:
- Their types are the same, or one is a direct subclass of the other;
- All of their constructor parameters are equal, unless a certain attribute is an instance of AttributeValueAny.
"""
if not self.__same_classes(self, event):
return False
for (attr, value) in self.__kwargs.items():
            if attr not in event.__kwargs \
                    or not self.__same_values(value, event.__kwargs[attr]):
                return False
return True
@classmethod
def __same_classes(cls, obj1, obj2):
return True \
if (type(obj1) == Event or type(obj2) == Event) \
else type(obj1) == type(obj2)
@classmethod
def __same_values(cls, value1, value2):
if not cls.__same_classes(value1, value2) \
and not isinstance(value1, AttributeValueAny) \
and not isinstance(value2, AttributeValueAny):
return False
return value1 == value2
class StopEvent(Event):
"""
A special event used to asynchronously stop components, workers and sockets
Fabio Manganiello, 2015 <[email protected]>
"""
class AttributeValueAny(object):
"""
    When an event attribute's value is an AttributeValueAny instance,
    that attribute won't be taken into account when
    two events are compared through the == operator or
    an explicit __eq__ method invocation.
Fabio Manganiello, 2015 <[email protected]>
"""
def __eq__(self, value):
""" Always return True. Any value equals "any" """
return True
def __repr__(self):
return "__ANY__"
# vim:sw=4:ts=4:et:
| apache-2.0 | 6,722,185,896,443,257,000 | 27.615385 | 120 | 0.600806 | false |
Shrews/PyGerrit | webapp/django/core/handlers/wsgi.py | 1 | 8460 | from threading import Lock
from pprint import pformat
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from django import http
from django.core import signals
from django.core.handlers import base
from django.core.urlresolvers import set_script_prefix
from django.utils import datastructures
from django.utils.encoding import force_unicode
# See http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html
STATUS_CODE_TEXT = {
100: 'CONTINUE',
101: 'SWITCHING PROTOCOLS',
200: 'OK',
201: 'CREATED',
202: 'ACCEPTED',
203: 'NON-AUTHORITATIVE INFORMATION',
204: 'NO CONTENT',
205: 'RESET CONTENT',
206: 'PARTIAL CONTENT',
300: 'MULTIPLE CHOICES',
301: 'MOVED PERMANENTLY',
302: 'FOUND',
303: 'SEE OTHER',
304: 'NOT MODIFIED',
305: 'USE PROXY',
306: 'RESERVED',
307: 'TEMPORARY REDIRECT',
400: 'BAD REQUEST',
401: 'UNAUTHORIZED',
402: 'PAYMENT REQUIRED',
403: 'FORBIDDEN',
404: 'NOT FOUND',
405: 'METHOD NOT ALLOWED',
406: 'NOT ACCEPTABLE',
407: 'PROXY AUTHENTICATION REQUIRED',
408: 'REQUEST TIMEOUT',
409: 'CONFLICT',
410: 'GONE',
411: 'LENGTH REQUIRED',
412: 'PRECONDITION FAILED',
413: 'REQUEST ENTITY TOO LARGE',
414: 'REQUEST-URI TOO LONG',
415: 'UNSUPPORTED MEDIA TYPE',
416: 'REQUESTED RANGE NOT SATISFIABLE',
417: 'EXPECTATION FAILED',
500: 'INTERNAL SERVER ERROR',
501: 'NOT IMPLEMENTED',
502: 'BAD GATEWAY',
503: 'SERVICE UNAVAILABLE',
504: 'GATEWAY TIMEOUT',
505: 'HTTP VERSION NOT SUPPORTED',
}
def safe_copyfileobj(fsrc, fdst, length=16*1024, size=0):
"""
A version of shutil.copyfileobj that will not read more than 'size' bytes.
This makes it safe from clients sending more than CONTENT_LENGTH bytes of
data in the body.
"""
if not size:
return
while size > 0:
buf = fsrc.read(min(length, size))
if not buf:
break
fdst.write(buf)
size -= len(buf)
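# Illustrative use (not part of the original module), mirroring
# WSGIRequest._get_raw_post_data below: copy at most CONTENT_LENGTH bytes of
# the request body, no matter how much the client actually sends.
#
#   buf = StringIO()
#   safe_copyfileobj(environ['wsgi.input'], buf, size=content_length)
#   body = buf.getvalue()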
class WSGIRequest(http.HttpRequest):
def __init__(self, environ):
script_name = base.get_script_name(environ)
path_info = force_unicode(environ.get('PATH_INFO', u'/'))
if not path_info:
# Sometimes PATH_INFO exists, but is empty (e.g. accessing
# the SCRIPT_NAME URL without a trailing slash). We really need to
# operate as if they'd requested '/'. Not amazingly nice to force
# the path like this, but should be harmless.
path_info = u'/'
self.environ = environ
self.path_info = path_info
self.path = '%s%s' % (script_name, path_info)
self.META = environ
self.META['PATH_INFO'] = path_info
self.META['SCRIPT_NAME'] = script_name
self.method = environ['REQUEST_METHOD'].upper()
def __repr__(self):
# Since this is called as part of error handling, we need to be very
# robust against potentially malformed input.
try:
get = pformat(self.GET)
except:
get = '<could not parse>'
try:
post = pformat(self.POST)
except:
post = '<could not parse>'
try:
cookies = pformat(self.COOKIES)
except:
cookies = '<could not parse>'
try:
meta = pformat(self.META)
except:
meta = '<could not parse>'
return '<WSGIRequest\nGET:%s,\nPOST:%s,\nCOOKIES:%s,\nMETA:%s>' % \
(get, post, cookies, meta)
def get_full_path(self):
return '%s%s' % (self.path, self.environ.get('QUERY_STRING', '') and ('?' + self.environ.get('QUERY_STRING', '')) or '')
def is_secure(self):
return 'wsgi.url_scheme' in self.environ \
and self.environ['wsgi.url_scheme'] == 'https'
def _load_post_and_files(self):
# Populates self._post and self._files
if self.method == 'POST':
if self.environ.get('CONTENT_TYPE', '').startswith('multipart'):
self._raw_post_data = ''
self._post, self._files = self.parse_file_upload(self.META, self.environ['wsgi.input'])
else:
self._post, self._files = http.QueryDict(self.raw_post_data, encoding=self._encoding), datastructures.MultiValueDict()
else:
self._post, self._files = http.QueryDict('', encoding=self._encoding), datastructures.MultiValueDict()
def _get_request(self):
if not hasattr(self, '_request'):
self._request = datastructures.MergeDict(self.POST, self.GET)
return self._request
def _get_get(self):
if not hasattr(self, '_get'):
# The WSGI spec says 'QUERY_STRING' may be absent.
self._get = http.QueryDict(self.environ.get('QUERY_STRING', ''), encoding=self._encoding)
return self._get
def _set_get(self, get):
self._get = get
def _get_post(self):
if not hasattr(self, '_post'):
self._load_post_and_files()
return self._post
def _set_post(self, post):
self._post = post
def _get_cookies(self):
if not hasattr(self, '_cookies'):
self._cookies = http.parse_cookie(self.environ.get('HTTP_COOKIE', ''))
return self._cookies
def _set_cookies(self, cookies):
self._cookies = cookies
def _get_files(self):
if not hasattr(self, '_files'):
self._load_post_and_files()
return self._files
def _get_raw_post_data(self):
try:
return self._raw_post_data
except AttributeError:
buf = StringIO()
try:
# CONTENT_LENGTH might be absent if POST doesn't have content at all (lighttpd)
content_length = int(self.environ.get('CONTENT_LENGTH', 0))
except (ValueError, TypeError):
# If CONTENT_LENGTH was empty string or not an integer, don't
# error out. We've also seen None passed in here (against all
# specs, but see ticket #8259), so we handle TypeError as well.
content_length = 0
if content_length > 0:
safe_copyfileobj(self.environ['wsgi.input'], buf,
size=content_length)
self._raw_post_data = buf.getvalue()
buf.close()
return self._raw_post_data
GET = property(_get_get, _set_get)
POST = property(_get_post, _set_post)
COOKIES = property(_get_cookies, _set_cookies)
FILES = property(_get_files)
REQUEST = property(_get_request)
raw_post_data = property(_get_raw_post_data)
class WSGIHandler(base.BaseHandler):
initLock = Lock()
request_class = WSGIRequest
def __call__(self, environ, start_response):
from django.conf import settings
# Set up middleware if needed. We couldn't do this earlier, because
# settings weren't available.
if self._request_middleware is None:
self.initLock.acquire()
# Check that middleware is still uninitialised.
if self._request_middleware is None:
self.load_middleware()
self.initLock.release()
set_script_prefix(base.get_script_name(environ))
signals.request_started.send(sender=self.__class__)
try:
try:
request = self.request_class(environ)
except UnicodeDecodeError:
response = http.HttpResponseBadRequest()
else:
response = self.get_response(request)
# Apply response middleware
for middleware_method in self._response_middleware:
response = middleware_method(request, response)
response = self.apply_response_fixes(request, response)
finally:
signals.request_finished.send(sender=self.__class__)
try:
status_text = STATUS_CODE_TEXT[response.status_code]
except KeyError:
status_text = 'UNKNOWN STATUS CODE'
status = '%s %s' % (response.status_code, status_text)
response_headers = [(str(k), str(v)) for k, v in response.items()]
for c in response.cookies.values():
response_headers.append(('Set-Cookie', str(c.output(header=''))))
start_response(status, response_headers)
return response
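# Hedged example (not part of Django itself): serving this handler with the
# stdlib wsgiref server, assuming DJANGO_SETTINGS_MODULE points at a valid
# settings module.
#
#   from wsgiref.simple_server import make_server
#   httpd = make_server('', 8000, WSGIHandler())
#   httpd.serve_forever()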
| apache-2.0 | -8,111,166,442,822,824,000 | 34.546218 | 134 | 0.590662 | false |
drwyrm/Flexget | flexget/tests/conftest.py | 1 | 12285 | from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import jsonschema
from future.utils import PY2
from future.backports.http import client as backport_client
import re
import os
import sys
import yaml
import logging
import shutil
import requests
import itertools
from contextlib import contextmanager
import mock
import pytest
from path import Path
from vcr import VCR
from vcr.stubs import VCRHTTPSConnection, VCRHTTPConnection
import flexget.logger
from flexget.manager import Manager
from flexget.plugin import load_plugins
from flexget.task import Task, TaskAbort
from flexget.webserver import User
from flexget.manager import Session
from flexget.api import api_app
log = logging.getLogger('tests')
VCR_CASSETTE_DIR = os.path.join(os.path.dirname(__file__), 'cassettes')
VCR_RECORD_MODE = os.environ.get('VCR_RECORD_MODE', 'once')
vcr = VCR(
cassette_library_dir=VCR_CASSETTE_DIR,
record_mode=VCR_RECORD_MODE,
custom_patches=(
(backport_client, 'HTTPSConnection', VCRHTTPSConnection),
(backport_client, 'HTTPConnection', VCRHTTPConnection),
)
)
# --- These are the public fixtures tests can ask for ---
@pytest.fixture(scope='class')
def config(request):
"""
If used inside a test class, uses the `config` class attribute of the class.
This is used by `manager` fixture, and can be parametrized.
"""
return request.cls.config
@pytest.yield_fixture()
def manager(request, config, caplog, monkeypatch, filecopy): # enforce filecopy is run before manager
"""
Create a :class:`MockManager` for this test based on `config` argument.
"""
if 'tmpdir' in request.fixturenames:
config = config.replace('__tmp__', request.getfuncargvalue('tmpdir').strpath)
try:
mockmanager = MockManager(config, request.cls.__name__)
except Exception:
# Since we haven't entered the test function yet, pytest won't print the logs on failure. Print them manually.
print(caplog.text())
raise
yield mockmanager
mockmanager.shutdown()
@pytest.fixture()
def execute_task(manager):
"""
A function that can be used to execute and return a named task in `config` argument.
"""
def execute(task_name, abort=False, options=None):
"""
Use to execute one test task from config.
:param abort: If `True` expect (and require) this task to abort.
"""
log.info('********** Running task: %s ********** ' % task_name)
config = manager.config['tasks'][task_name]
task = Task(manager, task_name, config=config, options=options)
try:
if abort:
with pytest.raises(TaskAbort):
task.execute()
else:
task.execute()
finally:
try:
task.session.close()
except Exception:
pass
return task
return execute
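# Hedged usage sketch (not part of the original conftest): a test requests
# this fixture and runs a task defined in its class-level `config` string.
#
#   def test_my_task(execute_task):
#       task = execute_task('my_task')
#       assert task.accepted    # entries accepted during the run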
@pytest.yield_fixture()
def use_vcr(request, monkeypatch):
"""
This fixture is applied automatically to any test using the `online` mark. It will record and playback network
sessions using VCR.
The record mode of VCR can be set using the VCR_RECORD_MODE environment variable when running tests.
"""
if VCR_RECORD_MODE == 'off':
yield None
else:
module = request.module.__name__.split('tests.')[-1]
class_name = request.cls.__name__
cassette_name = '.'.join([module, class_name, request.function.__name__])
cassette_path = os.path.join(VCR_CASSETTE_DIR, cassette_name)
online = True
if vcr.record_mode == 'none':
online = False
elif vcr.record_mode == 'once':
online = not os.path.exists(cassette_path)
# If we are not going online, disable domain limiting during test
if not online:
log.debug('Disabling domain limiters during VCR playback.')
monkeypatch.setattr('flexget.utils.requests.limit_domains', mock.Mock())
with vcr.use_cassette(path=cassette_path) as cassette:
yield cassette
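# Hedged example (not in the original file): pytest_runtest_setup below adds
# this fixture to every test carrying the `online` marker, e.g.:
#
#   @pytest.mark.online
#   def test_fetch_feed(execute_task):
#       execute_task('fetch_feed')
#
# Cassettes can then be re-recorded with something like
# `VCR_RECORD_MODE=all py.test -m online` (exact invocation may vary).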
@pytest.fixture()
def api_client(manager):
with Session() as session:
user = session.query(User).first()
if not user:
user = User(name='flexget', password='flexget')
session.add(user)
session.commit()
return APIClient(user.token)
@pytest.fixture()
def schema_match(manager):
"""
    This fixture enables verifying responses against a JSON schema. The returned matcher gives a list of
    validation error dicts; the list is empty if no errors occurred.
"""
def match(schema, response):
validator = jsonschema.Draft4Validator(schema)
errors = list(validator.iter_errors(response))
return [dict(value=list(e.path), message=e.message) for e in errors]
return match
@pytest.fixture()
def link_headers(manager):
"""
    Parses link headers and returns them in dict form
"""
def headers(response):
links = {}
for link in requests.utils.parse_header_links(response.headers.get('link')):
url = link['url']
            page = int(re.search(r'(?<!per_)page=(\d+)', url).group(1))
links[link['rel']] = dict(url=url, page=page)
return links
return headers
# --- End Public Fixtures ---
def pytest_configure(config):
    # register the filecopy and online markers
    config.addinivalue_line(
        'markers',
        'filecopy(src, dst): mark test to copy a file from `src` to `dst` before running.')
    config.addinivalue_line(
        'markers',
        'online: mark a test that goes online. VCR will automatically be used.')
def pytest_runtest_setup(item):
    # Add the filecopy fixture to any test marked with filecopy
if item.get_marker('filecopy'):
item.fixturenames.append('filecopy')
# Add the online marker to tests that will go online
if item.get_marker('online'):
item.fixturenames.append('use_vcr')
else:
item.fixturenames.append('no_requests')
@pytest.yield_fixture()
def filecopy(request):
out_files = []
marker = request.node.get_marker('filecopy')
if marker is not None:
copy_list = marker.args[0] if len(marker.args) == 1 else [marker.args]
for sources, dst in copy_list:
if isinstance(sources, str):
sources = [sources]
if 'tmpdir' in request.fixturenames:
dst = dst.replace('__tmp__', request.getfuncargvalue('tmpdir').strpath)
dst = Path(dst)
for f in itertools.chain(*(Path().glob(src) for src in sources)):
dest_path = dst
if dest_path.isdir():
dest_path = dest_path / f.basename()
if not os.path.isdir(os.path.dirname(dest_path)):
os.makedirs(os.path.dirname(dest_path))
if os.path.isdir(f):
shutil.copytree(f, dest_path)
else:
shutil.copy(f, dest_path)
out_files.append(dest_path)
yield
if out_files:
for f in out_files:
try:
if os.path.isdir(f):
shutil.rmtree(f)
else:
f.remove()
except OSError as e:
print("couldn't remove %s: %s" % (f, e))
@pytest.fixture()
def no_requests(monkeypatch):
online_funcs = [
'requests.sessions.Session.request',
'future.backports.http.client.HTTPConnection.request',
]
# Don't monkey patch HTTPSConnection if ssl not installed as it won't exist in backports
try:
import ssl # noqa
from ssl import SSLContext # noqa
online_funcs.append('future.backports.http.client.HTTPSConnection.request')
except ImportError:
pass
if PY2:
online_funcs.extend(['httplib.HTTPConnection.request',
'httplib.HTTPSConnection.request'])
else:
online_funcs.extend(['http.client.HTTPConnection.request',
'http.client.HTTPSConnection.request'])
for func in online_funcs:
monkeypatch.setattr(func, mock.Mock(side_effect=Exception('Online tests should use @pytest.mark.online')))
@pytest.fixture(scope='session', autouse=True)
def setup_once(pytestconfig, request):
# os.chdir(os.path.join(pytestconfig.rootdir.strpath, 'flexget', 'tests'))
flexget.logger.initialize(True)
m = MockManager('tasks: {}', 'init') # This makes sure our template environment is set up before any tests are run
m.shutdown()
logging.getLogger().setLevel(logging.DEBUG)
load_plugins()
@pytest.fixture(autouse=True)
def chdir(pytestconfig, request):
"""
By marking test with chdir flag we will change current working directory
to that module location. Task configuration can then assume this being
location for relative paths
"""
if 'chdir' in request.funcargnames:
os.chdir(os.path.dirname(request.module.__file__))
@pytest.fixture(autouse=True)
def setup_loglevel(pytestconfig, caplog):
# set logging level according to pytest verbosity
level = logging.DEBUG
if pytestconfig.getoption('verbose') == 1:
level = flexget.logger.TRACE
elif pytestconfig.getoption('quiet') == 1:
level = logging.INFO
logging.getLogger().setLevel(level)
caplog.setLevel(level)
class CrashReport(Exception):
pass
class MockManager(Manager):
unit_test = True
def __init__(self, config_text, config_name, db_uri=None):
self.config_text = config_text
self._db_uri = db_uri or 'sqlite:///:memory:'
super(MockManager, self).__init__(['execute'])
self.config_name = config_name
self.database_uri = self._db_uri
log.debug('database_uri: %s' % self.database_uri)
self.initialize()
def find_config(self, *args, **kwargs):
"""
Override configuration loading
"""
self.config_base = os.path.dirname(os.path.abspath(sys.path[0]))
def load_config(self, *args, **kwargs):
"""
Just load our config from the text passed in on init
"""
config = yaml.safe_load(self.config_text) or {}
self.update_config(config)
# no lock files with unit testing
@contextmanager
def acquire_lock(self, **kwargs):
self._has_lock = True
yield
def release_lock(self):
pass
def crash_report(self):
# We don't want to silently swallow crash reports during unit tests
log.error('Crash Report Traceback:', exc_info=True)
raise CrashReport('Crash report created during unit test, check log for traceback.')
class APIClient(object):
def __init__(self, api_key):
self.api_key = api_key
self.client = api_app.test_client()
def _append_header(self, key, value, kwargs):
if 'headers' not in kwargs:
kwargs['headers'] = {}
kwargs['headers'][key] = value
def json_post(self, *args, **kwargs):
self._append_header('Content-Type', 'application/json', kwargs)
if kwargs.get('auth', True):
self._append_header('Authorization', 'Token %s' % self.api_key, kwargs)
return self.client.post(*args, **kwargs)
def json_put(self, *args, **kwargs):
self._append_header('Content-Type', 'application/json', kwargs)
if kwargs.get('auth', True):
self._append_header('Authorization', 'Token %s' % self.api_key, kwargs)
return self.client.put(*args, **kwargs)
def get(self, *args, **kwargs):
if kwargs.get('auth', True):
self._append_header('Authorization', 'Token %s' % self.api_key, kwargs)
return self.client.get(*args, **kwargs)
def delete(self, *args, **kwargs):
if kwargs.get('auth', True):
self._append_header('Authorization', 'Token %s' % self.api_key, kwargs)
return self.client.delete(*args, **kwargs)
def head(self, *args, **kwargs):
if kwargs.get('auth', True):
self._append_header('Authorization', 'Token %s' % self.api_key, kwargs)
return self.client.head(*args, **kwargs)
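# Hedged usage sketch (not part of the original conftest): the `api_client`
# fixture above returns an instance of this class; the endpoint path below
# is illustrative.
#
#   def test_version(api_client):
#       rsp = api_client.get('/server/version/')
#       assert rsp.status_code == 200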
| mit | 275,408,962,429,524,500 | 31.414248 | 119 | 0.624094 | false |
petewarden/tensorflow | tensorflow/python/keras/layers/preprocessing/text_vectorization_test.py | 1 | 68032 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras text vectorization preprocessing layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
import os
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python import tf2
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import one_device_strategy
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.keras import backend
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.layers import convolutional
from tensorflow.python.keras.layers import core
from tensorflow.python.keras.layers import embeddings
from tensorflow.python.keras.layers.preprocessing import preprocessing_test_utils
from tensorflow.python.keras.layers.preprocessing import text_vectorization
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.ops import gen_string_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_string_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
def _get_end_to_end_test_cases():
test_cases = (
{
"testcase_name":
"test_simple_tokens_int_mode",
# Create an array where 'earth' is the most frequent term, followed by
# 'wind', then 'and', then 'fire'. This ensures that the vocab
# is sorting by frequency.
"vocab_data":
np.array([["fire"], ["earth"], ["earth"], ["earth"], ["earth"],
["wind"], ["wind"], ["wind"], ["and"], ["and"]]),
"input_data":
np.array([["earth"], ["wind"], ["and"], ["fire"], ["fire"],
["and"], ["earth"], ["michigan"]]),
"kwargs": {
"max_tokens": None,
"standardize": None,
"split": None,
"output_mode": text_vectorization.INT
},
"expected_output": [[2], [3], [4], [5], [5], [4], [2], [1]],
},
{
"testcase_name":
"test_simple_tokens_int_mode_hard_cap",
# Create an array where 'earth' is the most frequent term, followed by
# 'wind', then 'and', then 'fire'. This ensures that the vocab
# is sorting by frequency.
"vocab_data":
np.array([["fire"], ["earth"], ["earth"], ["earth"], ["earth"],
["wind"], ["wind"], ["wind"], ["and"], ["and"]]),
"input_data":
np.array([["earth"], ["wind"], ["and"], ["fire"], ["fire"],
["and"], ["earth"], ["michigan"]]),
"kwargs": {
"max_tokens": 6,
"standardize": None,
"split": None,
"output_mode": text_vectorization.INT
},
"expected_output": [[2], [3], [4], [5], [5], [4], [2], [1]],
},
{
"testcase_name":
"test_special_tokens_int_mode",
          # Mask tokens in the vocab data should be ignored, and mapped to 0 in
          # the input data.
"vocab_data":
np.array([["fire"], ["earth"], ["earth"], ["earth"], ["earth"],
[""], [""], [""], ["[UNK]"], ["[UNK]"], ["[UNK]"],
["wind"], ["wind"], ["wind"], ["and"], ["and"]]),
"input_data":
np.array([["earth"], [""], ["wind"], ["[UNK]"], ["and"], [""],
["fire"], ["and"], ["[UNK]"], ["michigan"]]),
"kwargs": {
"max_tokens": None,
"standardize": None,
"split": None,
"output_mode": text_vectorization.INT
},
"expected_output": [[2], [0], [3], [1], [4], [0], [5], [4], [1], [1]],
},
{
"testcase_name":
"test_documents_int_mode",
"vocab_data":
np.array([["fire earth earth"], ["earth earth"], ["wind wind"],
["and wind and"]]),
"input_data":
np.array([["earth wind and"], ["fire fire"], ["and earth"],
["michigan"]]),
"kwargs": {
"max_tokens": None,
"standardize": None,
"split": text_vectorization.SPLIT_ON_WHITESPACE,
"output_mode": text_vectorization.INT
},
"expected_output": [[2, 3, 4], [5, 5, 0], [4, 2, 0], [1, 0, 0]],
},
{
"testcase_name":
"test_documents_1d_input_int_mode",
"vocab_data":
np.array([
"fire earth earth", "earth earth", "wind wind", "and wind and"
]),
"input_data":
np.array([["earth wind and"], ["fire fire"], ["and earth"],
["michigan"]]),
"kwargs": {
"max_tokens": None,
"standardize": None,
"split": text_vectorization.SPLIT_ON_WHITESPACE,
"output_mode": text_vectorization.INT
},
"expected_output": [[2, 3, 4], [5, 5, 0], [4, 2, 0], [1, 0, 0]],
},
{
"testcase_name":
"test_simple_tokens_binary_mode",
"vocab_data":
np.array([["fire"], ["earth"], ["earth"], ["earth"], ["earth"],
["wind"], ["wind"], ["wind"], ["and"], ["and"]]),
"input_data":
np.array([["earth"], ["wind"], ["and"], ["fire"], ["fire"],
["and"], ["earth"], ["michigan"]]),
"kwargs": {
"max_tokens": 5,
"standardize": None,
"split": None,
"output_mode": text_vectorization.BINARY
},
"expected_output": [[0, 1, 0, 0, 0], [0, 0, 1, 0, 0], [0, 0, 0, 1, 0],
[0, 0, 0, 0, 1], [0, 0, 0, 0, 1], [0, 0, 0, 1, 0],
[0, 1, 0, 0, 0], [1, 0, 0, 0, 0]],
},
{
"testcase_name":
"test_documents_binary_mode",
"vocab_data":
np.array([["fire earth earth"], ["earth earth"], ["wind wind"],
["and wind and"]]),
"input_data":
np.array([["earth wind"], ["and"], ["fire fire"],
["earth michigan"]]),
"kwargs": {
"max_tokens": 5,
"standardize": None,
"split": text_vectorization.SPLIT_ON_WHITESPACE,
"output_mode": text_vectorization.BINARY
},
"expected_output": [[0, 1, 1, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 1],
[1, 1, 0, 0, 0]],
},
{
"testcase_name":
"test_simple_tokens_count_mode",
"vocab_data":
np.array([["fire"], ["earth"], ["earth"], ["earth"], ["earth"],
["wind"], ["wind"], ["wind"], ["and"], ["and"]]),
"input_data":
np.array([["earth"], ["wind"], ["and"], ["fire"], ["fire"],
["and"], ["earth"], ["michigan"]]),
"kwargs": {
"max_tokens": 5,
"standardize": None,
"split": None,
"output_mode": text_vectorization.COUNT
},
"expected_output": [[0, 1, 0, 0, 0], [0, 0, 1, 0, 0], [0, 0, 0, 1, 0],
[0, 0, 0, 0, 1], [0, 0, 0, 0, 1], [0, 0, 0, 1, 0],
[0, 1, 0, 0, 0], [1, 0, 0, 0, 0]],
},
{
"testcase_name":
"test_documents_count_mode",
"vocab_data":
np.array([["fire earth earth"], ["earth earth"], ["wind wind"],
["and wind and"]]),
"input_data":
np.array([["earth wind"], ["and"], ["fire fire"],
["earth michigan"]]),
"kwargs": {
"max_tokens": 5,
"standardize": None,
"split": text_vectorization.SPLIT_ON_WHITESPACE,
"output_mode": text_vectorization.COUNT
},
"expected_output": [[0, 1, 1, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 2],
[1, 1, 0, 0, 0]],
},
{
"testcase_name":
"test_tokens_idf_mode",
"vocab_data":
np.array([["fire"], ["earth"], ["earth"], ["earth"], ["earth"],
["wind"], ["wind"], ["wind"], ["and"], ["and"]]),
"input_data":
np.array([["earth"], ["wind"], ["and"], ["fire"], ["fire"],
["and"], ["earth"], ["michigan"]]),
"kwargs": {
"max_tokens": 5,
"standardize": None,
"split": None,
"output_mode": text_vectorization.TFIDF
},
"expected_output": [[0, 1.098612, 0, 0, 0], [0, 0, 1.252763, 0, 0],
[0, 0, 0, 1.466337, 0], [0, 0, 0, 0, 1.7917595],
[0, 0, 0, 0, 1.7917595], [0, 0, 0, 1.4663371, 0],
[0, 1.098612, 0, 0, 0], [1.402368, 0, 0, 0, 0]],
},
{
"testcase_name":
"test_documents_idf_mode",
"vocab_data":
np.array([["fire earth earth"], ["earth earth"], ["wind wind"],
["and wind and"]]),
"input_data":
np.array([["earth wind"], ["and"], ["fire fire"],
["earth michigan"]]),
"kwargs": {
"max_tokens": 5,
"standardize": None,
"split": text_vectorization.SPLIT_ON_WHITESPACE,
"output_mode": text_vectorization.TFIDF
},
"expected_output": [[0., 0.847298, 0.847298, 0., 0.],
[0., 0., 0., 1.098612, 0.],
[0., 0., 0., 0., 2.197225],
[0.972955, 0.847298, 0., 0., 0.]],
},
)
crossed_test_cases = []
# Cross above test cases with use_dataset in (True, False)
for use_dataset in (True, False):
for case in test_cases:
case = case.copy()
if use_dataset:
case["testcase_name"] = case["testcase_name"] + "_with_dataset"
case["use_dataset"] = use_dataset
crossed_test_cases.append(case)
return crossed_test_cases
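# Illustrative note (not part of the original test file): crossing with
# use_dataset doubles every case above, yielding parametrized names such as
# "test_simple_tokens_int_mode" and "test_simple_tokens_int_mode_with_dataset".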
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TextVectorizationLayerTest(keras_parameterized.TestCase,
preprocessing_test_utils.PreprocessingLayerTest
):
@parameterized.named_parameters(*_get_end_to_end_test_cases())
def test_layer_end_to_end_with_adapt(self, vocab_data, input_data, kwargs,
use_dataset, expected_output):
cls = text_vectorization.TextVectorization
if kwargs.get("output_mode") == text_vectorization.INT:
expected_output_dtype = dtypes.int64
else:
expected_output_dtype = dtypes.float32
input_shape = input_data.shape
if use_dataset:
# Keras APIs expect batched datasets.
# TODO(rachelim): `model.predict` predicts the result on each
# dataset batch separately, then tries to concatenate the results
# together. When the results have different shapes on the non-concat
# axis (which can happen in the output_mode = INT case for
# TextVectorization), the concatenation fails. In real use cases, this may
# not be an issue because users are likely to pipe the preprocessing layer
# into other keras layers instead of predicting it directly. A workaround
# for these unit tests is to have the dataset only contain one batch, so
# no concatenation needs to happen with the result. For consistency with
# numpy input, we should make `predict` join differently shaped results
# together sensibly, with 0 padding.
input_data = dataset_ops.Dataset.from_tensor_slices(input_data).batch(
input_shape[0])
vocab_data = dataset_ops.Dataset.from_tensor_slices(vocab_data).batch(
input_shape[0])
output_data = testing_utils.layer_test(
cls,
kwargs=kwargs,
input_shape=input_shape,
input_data=input_data,
input_dtype=dtypes.string,
expected_output_dtype=expected_output_dtype,
validate_training=False,
adapt_data=vocab_data)
self.assertAllClose(expected_output, output_data)
def test_list_inputs_1d(self):
vocab_data = ["two two two", "two three three", "three four four five"]
input_data = ["two three", "four five"]
layer = text_vectorization.TextVectorization()
layer.adapt(vocab_data)
out = layer(input_data)
if context.executing_eagerly():
self.assertAllClose(out.numpy(), [[2, 3], [4, 5]])
layer.set_vocabulary(["two", "three", "four", "five"])
out = layer(input_data)
if context.executing_eagerly():
self.assertAllClose(out.numpy(), [[2, 3], [4, 5]])
def test_tensor_inputs(self):
vocab_data = constant_op.constant(
["two two two", "two three three", "three four four five"])
input_data = constant_op.constant(["two three", "four five"])
layer = text_vectorization.TextVectorization()
layer.adapt(vocab_data)
out = layer(input_data)
if context.executing_eagerly():
self.assertAllClose(out.numpy(), [[2, 3], [4, 5]])
layer.set_vocabulary(["two", "three", "four", "five"])
out = layer(input_data)
if context.executing_eagerly():
self.assertAllClose(out.numpy(), [[2, 3], [4, 5]])
def test_list_inputs_2d(self):
vocab_data = [
["two two two"], ["two three three"], ["three four four five"]]
input_data = [["two three"], ["four five"]]
layer = text_vectorization.TextVectorization()
layer.adapt(vocab_data)
out = layer(input_data)
if context.executing_eagerly():
self.assertAllClose(out.numpy(), [[2, 3], [4, 5]])
layer.set_vocabulary(["two", "three", "four", "five"])
out = layer(input_data)
if context.executing_eagerly():
self.assertAllClose(out.numpy(), [[2, 3], [4, 5]])
def test_dataset_of_single_strings(self):
vocab_data = ["two two two", "two three three", "three four four five"]
input_data = ["two three", "four five"]
vocab_ds = dataset_ops.Dataset.from_tensor_slices(vocab_data) # unbatched
layer = text_vectorization.TextVectorization()
layer.adapt(vocab_ds)
out = layer(input_data)
if context.executing_eagerly():
self.assertAllClose(out.numpy(), [[2, 3], [4, 5]])
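  # Hedged standalone sketch (not part of the original suite): the same
  # adapt-then-lookup flow outside the test harness, with default settings.
  #
  #   layer = text_vectorization.TextVectorization()
  #   layer.adapt(["earth wind", "wind and fire"])
  #   layer(["wind and"])  # -> int ids; 0 is padding, 1 is the OOV token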
@parameterized.named_parameters(
{
"testcase_name": "1d",
"data": ["0", "a", "b", "c", "d", "e", "a", "b", "c", "d", "f"],
"expected": [1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1]
},
{
"testcase_name": "2d",
"data": [["0", "a", "b", "c", "d"], ["e", "a", "b", "c", "d"], ["f"]],
"expected": [[1, 2, 3, 4, 5], [1, 2, 3, 4, 5], [1, 0, 0, 0, 0]]
},
{
"testcase_name":
"3d",
"data": [[["0", "a", "b"], ["c", "d"]], [["e", "a"], ["b", "c", "d"]],
[["f"]]],
"expected": [[[1, 2, 3], [4, 5, 0]], [[1, 2, 0], [3, 4, 5]],
[[1, 0, 0], [0, 0, 0]]]
},
)
def test_layer_dimensionality_handling(self, data, expected):
vocab = ["a", "b", "c", "d"]
vectorization = text_vectorization.TextVectorization(
max_tokens=None, standardize=None, split=None, pad_to_max_tokens=False)
vectorization.set_vocabulary(vocab)
output = vectorization(ragged_factory_ops.constant(data))
self.assertAllEqual(expected, output)
@parameterized.named_parameters(
{
"testcase_name": "1d",
"data": ["0 a b c d e a b c d f"],
"expected": [[1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1]]
},
{
"testcase_name":
"3d",
"data": [[["0 a b"], ["c d"]], [["e a"], ["b c d"]], [["f"]]],
"expected": [[[1, 2, 3], [4, 5, 0]], [[1, 2, 0], [3, 4, 5]],
[[1, 0, 0], [0, 0, 0]]]
},
)
def test_layer_dimensionality_handling_with_split(self, data, expected):
vocab = ["a", "b", "c", "d"]
vectorization = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=text_vectorization.SPLIT_ON_WHITESPACE,
pad_to_max_tokens=False)
vectorization.set_vocabulary(vocab)
output = vectorization(ragged_factory_ops.constant(data, inner_shape=(1,)))
self.assertAllEqual(expected, output)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TextVectorizationPreprocessingTest(
keras_parameterized.TestCase,
preprocessing_test_utils.PreprocessingLayerTest):
def _write_to_temp_file(self, file_name, vocab_list):
vocab_path = os.path.join(self.get_temp_dir(), file_name + ".txt")
with gfile.GFile(vocab_path, "w") as writer:
for vocab in vocab_list:
writer.write(vocab + "\n")
writer.flush()
writer.close()
return vocab_path
def test_summary_before_adapt(self):
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=10,
standardize=text_vectorization.LOWER_AND_STRIP_PUNCTUATION,
split=None,
ngrams=None,
output_mode=text_vectorization.TFIDF)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
# We are testing that model.summary() can be called without erroring out.
# (b/145726907)
model.summary()
def test_normalization(self):
input_array = np.array([["Earth", "wInD", "aNd", "firE"],
["fire|", "an<>d", "{earth}", "michigan@%$"]])
expected_output = np.array([[b"earth", b"wind", b"and", b"fire"],
[b"fire", b"and", b"earth", b"michigan"]])
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=text_vectorization.LOWER_AND_STRIP_PUNCTUATION,
split=None,
ngrams=None,
output_mode=None)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_normalization_ragged_inputs(self):
input_array = ragged_factory_ops.constant([["Earth", "wInD", "aNd", "firE"],
["fire|", "an<>d", "{earth}"]])
expected_output = [[b"earth", b"wind", b"and", b"fire"],
[b"fire", b"and", b"earth"]]
input_data = keras.Input(shape=(None,), ragged=True, dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=text_vectorization.LOWER_AND_STRIP_PUNCTUATION,
split=None,
ngrams=None,
output_mode=None)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_custom_normalization(self):
input_array = np.array([["Earth", "wInD", "aNd", "firE"],
["fire|", "an<>d", "{earth}", "michigan@%$"]])
expected_output = np.array(
[[b"earth", b"wind", b"and", b"fire"],
[b"fire|", b"an<>d", b"{earth}", b"michigan@%$"]])
custom_standardization = gen_string_ops.string_lower
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=custom_standardization,
split=None,
ngrams=None,
output_mode=None)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_string_splitting(self):
input_array = np.array([["earth wind and fire"],
["\tfire\tand\nearth michigan "]])
expected_output = [[b"earth", b"wind", b"and", b"fire"],
[b"fire", b"and", b"earth", b"michigan"]]
input_data = keras.Input(shape=(1,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=text_vectorization.SPLIT_ON_WHITESPACE,
ngrams=None,
output_mode=None)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_custom_string_splitting(self):
input_array = np.array([["earth>wind>and fire"],
["\tfire>and\nearth>michigan"]])
expected_output = [[b"earth", b"wind", b"and fire"],
[b"\tfire", b"and\nearth", b"michigan"]]
custom_split = lambda x: ragged_string_ops.string_split_v2(x, sep=">")
input_data = keras.Input(shape=(1,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=custom_split,
ngrams=None,
output_mode=None)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_single_ngram_value_ragged_inputs(self):
input_array = ragged_factory_ops.constant([["earth", "wind", "and", "fire"],
["fire", "and", "earth"]])
# pyformat: disable
expected_output = [[b"earth", b"wind", b"and", b"fire",
b"earth wind", b"wind and", b"and fire",
b"earth wind and", b"wind and fire"],
[b"fire", b"and", b"earth",
b"fire and", b"and earth",
b"fire and earth"]]
# pyformat: enable
input_data = keras.Input(shape=(None,), ragged=True, dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=None,
ngrams=3,
output_mode=None)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_single_ngram_value(self):
input_array = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"]])
# pyformat: disable
expected_output = [[b"earth", b"wind", b"and", b"fire",
b"earth wind", b"wind and", b"and fire",
b"earth wind and", b"wind and fire"],
[b"fire", b"and", b"earth", b"michigan",
b"fire and", b"and earth", b"earth michigan",
b"fire and earth", b"and earth michigan"]]
# pyformat: enable
input_data = keras.Input(shape=(4,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=None,
ngrams=3,
output_mode=None)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_multiple_ngram_values(self):
input_array = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"]])
# pyformat: disable
expected_output = [[b"earth wind", b"wind and", b"and fire",
b"earth wind and", b"wind and fire"],
[b"fire and", b"and earth", b"earth michigan",
b"fire and earth", b"and earth michigan"]]
# pyformat: enable
input_data = keras.Input(shape=(4,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=None,
ngrams=(2, 3),
output_mode=None)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_string_multiple_preprocessing_steps(self):
input_array = np.array([["earth wInD and firE"],
["\tfire\tand\nearth!! michig@n "]])
expected_output = [[
b"earth",
b"wind",
b"and",
b"fire",
b"earth wind",
b"wind and",
b"and fire",
],
[
b"fire",
b"and",
b"earth",
b"michign",
b"fire and",
b"and earth",
b"earth michign",
]]
input_data = keras.Input(shape=(1,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=text_vectorization.LOWER_AND_STRIP_PUNCTUATION,
split=text_vectorization.SPLIT_ON_WHITESPACE,
ngrams=2,
output_mode=None)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_string_splitting_with_non_1d_array_fails(self):
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=text_vectorization.SPLIT_ON_WHITESPACE,
output_mode=None)
with self.assertRaisesRegex(RuntimeError,
".*tokenize strings, the innermost dime.*"):
_ = layer(input_data)
def test_string_splitting_with_non_1d_raggedarray_fails(self):
input_data = keras.Input(shape=(None,), ragged=True, dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
vocabulary=["a"],
max_tokens=None,
standardize=None,
split=text_vectorization.SPLIT_ON_WHITESPACE,
output_mode=None)
with self.assertRaisesRegex(RuntimeError,
".*tokenize strings, the innermost dime.*"):
_ = layer(input_data)
def test_standardization_with_invalid_standardize_arg(self):
input_data = keras.Input(shape=(1,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(vocabulary=["a"])
layer._standardize = "unsupported"
with self.assertRaisesRegex(ValueError,
".*is not a supported standardization.*"):
_ = layer(input_data)
def test_splitting_with_invalid_split_arg(self):
input_data = keras.Input(shape=(1,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(vocabulary=["a"])
layer._split = "unsupported"
with self.assertRaisesRegex(ValueError, ".*is not a supported splitting.*"):
_ = layer(input_data)
def test_vocab_setting_via_init(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"]])
expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=None,
output_mode=text_vectorization.INT,
vocabulary=vocab_data)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_vocab_setting_via_init_file(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"]])
expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
vocab_path = self._write_to_temp_file("vocab_file", vocab_data)
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=None,
output_mode=text_vectorization.INT,
vocabulary=vocab_path)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_vocab_setting_via_setter(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"]])
expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
vocab_path = self._write_to_temp_file("vocab_file", vocab_data)
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=None,
output_mode=text_vectorization.INT)
layer.set_vocabulary(vocab_path)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_vocab_setting_with_oov_via_setter(self):
vocab_data = ["", "[UNK]", "earth", "wind", "and", "fire"]
input_array = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"]])
expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
vocab_path = self._write_to_temp_file("vocab_file", vocab_data)
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=None,
output_mode=text_vectorization.INT)
layer.set_vocabulary(vocab_path)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TextVectorizationDistributionTest(
keras_parameterized.TestCase,
preprocessing_test_utils.PreprocessingLayerTest):
def test_distribution_strategy_output(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"]])
expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
strategy = one_device_strategy.OneDeviceStrategy("/cpu:0")
with strategy.scope():
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=None,
output_mode=text_vectorization.INT)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TextVectorizationOutputTest(
keras_parameterized.TestCase,
preprocessing_test_utils.PreprocessingLayerTest):
def test_int_output(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"]])
expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=None,
output_mode=text_vectorization.INT)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_int_output_densifies_with_zeros(self):
vocab_data = ["earth", "wind", "and", "fire"]
# Create an input array that has 5 elements in the first example and 4 in
# the second. This should output a 2x5 tensor with a padding value in the
# second example.
input_array = np.array([["earth wind and also fire"],
["fire and earth michigan"]])
expected_output = [[2, 3, 4, 1, 5], [5, 4, 2, 1, 0]]
# This test doesn't explicitly set an output shape, so the 2nd dimension
# should stay 'None'.
expected_output_shape = [None, None]
# The input shape here is explicitly 1 because we're tokenizing.
input_data = keras.Input(shape=(1,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=text_vectorization.SPLIT_ON_WHITESPACE,
output_mode=text_vectorization.INT)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_int_output_densifies_with_zeros_and_pads(self):
vocab_data = ["earth", "wind", "and", "fire"]
    # Create an input array that has 5 elements in the first example and 4 in
    # the second. This should output a 2x6 tensor with padding values in both
    # examples, since output_sequence_length is set to 6.
input_array = np.array([["earth wind and also fire"],
["fire and earth michigan"]])
expected_output = [[2, 3, 4, 1, 5, 0], [5, 4, 2, 1, 0, 0]]
output_sequence_length = 6
expected_output_shape = [None, output_sequence_length]
# The input shape here is explicitly 1 because we're tokenizing.
input_data = keras.Input(shape=(1,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=text_vectorization.SPLIT_ON_WHITESPACE,
output_mode=text_vectorization.INT,
output_sequence_length=output_sequence_length)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_int_output_densifies_with_zeros_and_strips(self):
vocab_data = ["earth", "wind", "and", "fire"]
    # Create an input array that has 5 elements in the first example and 4 in
    # the second. This should output a 2x3 tensor, truncating both examples,
    # since output_sequence_length is set to 3.
input_array = np.array([["earth wind and also fire"],
["fire and earth michigan"]])
expected_output = [[2, 3, 4], [5, 4, 2]]
output_sequence_length = 3
expected_output_shape = [None, output_sequence_length]
# The input shape here is explicitly 1 because we're tokenizing.
input_data = keras.Input(shape=(1,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=text_vectorization.SPLIT_ON_WHITESPACE,
output_mode=text_vectorization.INT,
output_sequence_length=output_sequence_length)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_int_output_dynamically_strips_and_pads(self):
vocab_data = ["earth", "wind", "and", "fire"]
    # Create an input array that has 5 elements in the first example and 4 in
    # the second. This should output a 2x3 tensor, truncating both examples,
    # since output_sequence_length is set to 3.
input_array = np.array([["earth wind and also fire"],
["fire and earth michigan"]])
expected_output = [[2, 3, 4], [5, 4, 2]]
output_sequence_length = 3
expected_output_shape = [None, output_sequence_length]
# The input shape here is explicitly 1 because we're tokenizing.
input_data = keras.Input(shape=(1,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=text_vectorization.SPLIT_ON_WHITESPACE,
output_mode=text_vectorization.INT,
output_sequence_length=output_sequence_length)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
    # Create an input array that has 1 element in the first example and 2 in
    # the second. This should output a 2x3 tensor with padding values in both
    # examples, since output_sequence_length is set to 3.
input_array_2 = np.array([["wind"], ["fire and"]])
expected_output_2 = [[3, 0, 0], [5, 4, 0]]
output_dataset = model.predict(input_array_2)
self.assertAllEqual(expected_output_2, output_dataset)
def test_binary_output_hard_maximum(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([["earth", "wind", "and", "earth"],
["ohio", "and", "earth", "michigan"]])
# pyformat: disable
expected_output = [[0, 1, 1, 1, 0, 0],
[1, 1, 0, 1, 0, 0]]
# pyformat: enable
max_tokens = 6
expected_output_shape = [None, max_tokens]
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=max_tokens,
standardize=None,
split=None,
output_mode=text_vectorization.BINARY,
pad_to_max_tokens=True)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_binary_output_soft_maximum(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([["earth", "wind", "and", "earth"],
["ohio", "and", "earth", "michigan"]])
# pyformat: disable
expected_output = [[0, 1, 1, 1, 0],
[1, 1, 0, 1, 0]]
# pyformat: enable
max_tokens = 5
expected_output_shape = [None, max_tokens]
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=10,
standardize=None,
split=None,
output_mode=text_vectorization.BINARY,
pad_to_max_tokens=False)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_bag_output_hard_maximum_set_vocabulary_after_build(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([["earth", "wind", "and", "earth"],
["ohio", "and", "earth", "michigan"]])
# pyformat: disable
expected_output = [[0, 1, 1, 1, 0],
[1, 1, 0, 1, 0]]
# pyformat: enable
max_tokens = 5
expected_output_shape = [None, max_tokens]
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=max_tokens,
standardize=None,
split=None,
output_mode=text_vectorization.BINARY,
pad_to_max_tokens=True)
int_data = layer(input_data)
layer.set_vocabulary(vocab_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_bag_output_hard_maximum_adapt_after_build(self):
vocab_data = np.array([
"earth", "earth", "earth", "earth", "wind", "wind", "wind", "and",
"and", "fire"
])
input_array = np.array([["earth", "wind", "and", "earth"],
["ohio", "and", "earth", "michigan"]])
# pyformat: disable
expected_output = [[0, 1, 1, 1, 0],
[1, 1, 0, 1, 0]]
# pyformat: enable
max_tokens = 5
expected_output_shape = [None, max_tokens]
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=max_tokens,
standardize=None,
split=None,
output_mode=text_vectorization.BINARY,
pad_to_max_tokens=True)
int_data = layer(input_data)
layer.adapt(vocab_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_bag_output_hard_maximum_set_state_variables_after_build(self):
state_variables = {
text_vectorization._VOCAB_NAME: ["earth", "wind", "and", "fire"]
}
input_array = np.array([["earth", "wind", "and", "earth"],
["ohio", "and", "earth", "michigan"]])
# pyformat: disable
expected_output = [[0, 1, 1, 1, 0],
[1, 1, 0, 1, 0]]
# pyformat: enable
max_tokens = 5
expected_output_shape = [None, max_tokens]
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=max_tokens,
standardize=None,
split=None,
output_mode=text_vectorization.BINARY,
pad_to_max_tokens=True)
int_data = layer(input_data)
layer._set_state_variables(state_variables)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_bag_output_hard_maximum_multiple_adapts(self):
input_array = np.array([["earth", "wind", "and", "earth"],
["ohio", "and", "earth", "michigan"]])
adapt_data = ["earth", "earth", "earth", "earth", "wind", "wind", "wind"]
first_expected_output = [
[1, 1, 1, 0, 0],
[1, 1, 0, 0, 0],
]
second_adapt_data = [
"earth", "earth", "earth", "earth", "wind", "wind", "wind", "and",
"and", "fire"
]
second_expected_output = [
[0, 1, 1, 1, 0],
[1, 1, 0, 1, 0],
]
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=5,
standardize=None,
split=None,
output_mode=text_vectorization.BINARY,
pad_to_max_tokens=True)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
# Test the first adapt
layer.adapt(adapt_data)
first_output = model.predict(input_array)
# Test the second adapt
layer.adapt(second_adapt_data)
second_output = model.predict(input_array)
self.assertAllEqual(first_expected_output, first_output)
self.assertAllEqual(second_expected_output, second_output)
def test_bag_output_soft_maximum_set_state_after_build(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([["earth", "wind", "and", "earth"],
["ohio", "and", "earth", "michigan"]])
# pyformat: disable
expected_output = [[0, 1, 1, 1, 0],
[1, 1, 0, 1, 0]]
# pyformat: enable
max_tokens = 5
expected_output_shape = [None, max_tokens]
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=10,
standardize=None,
split=None,
output_mode=text_vectorization.BINARY,
pad_to_max_tokens=False)
layer.build(input_data.shape)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_bag_output_soft_maximum_set_vocabulary_after_call_fails(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=None,
output_mode=text_vectorization.BINARY,
pad_to_max_tokens=False)
layer.adapt(vocab_data)
_ = layer(input_data)
with self.assertRaisesRegex(RuntimeError, "vocabulary cannot be changed"):
layer.set_vocabulary(vocab_data)
def test_bag_output_soft_maximum_set_state_variables_after_call_fails(self):
state_variables = {
text_vectorization._VOCAB_NAME: ["earth", "wind", "and", "fire"]
}
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=None,
output_mode=text_vectorization.BINARY,
pad_to_max_tokens=False)
layer.adapt(["earth", "wind"])
_ = layer(input_data)
with self.assertRaisesRegex(RuntimeError, "vocabulary cannot be changed"):
layer._set_state_variables(state_variables)
def test_count_output_hard_maximum(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([["earth", "wind", "and", "earth"],
["ohio", "and", "earth", "michigan"]])
# pyformat: disable
expected_output = [[0, 2, 1, 1, 0, 0],
[2, 1, 0, 1, 0, 0]]
# pyformat: enable
max_tokens = 6
expected_output_shape = [None, max_tokens]
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=6,
standardize=None,
split=None,
output_mode=text_vectorization.COUNT,
pad_to_max_tokens=True)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_count_output_soft_maximum(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([["earth", "wind", "and", "earth"],
["ohio", "and", "earth", "michigan"]])
# pyformat: disable
expected_output = [[0, 2, 1, 1, 0],
[2, 1, 0, 1, 0]]
# pyformat: enable
max_tokens = 5
expected_output_shape = [None, max_tokens]
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=10,
standardize=None,
split=None,
output_mode=text_vectorization.COUNT,
pad_to_max_tokens=False)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_tfidf_output_hard_maximum(self):
vocab_data = ["earth", "wind", "and", "fire"]
    # OOV idf weight (bucket 0) should be 0.5, the average of the passed weights.
idf_weights = [.4, .25, .75, .6]
input_array = np.array([["earth", "wind", "and", "earth"],
["ohio", "fire", "earth", "michigan"]])
# pyformat: disable
# pylint: disable=bad-whitespace
expected_output = [[ 0, .8, .25, .75, 0, 0],
[ 1, .4, 0, 0, .6, 0]]
# pylint: enable=bad-whitespace
# pyformat: enable
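    # Walk-through of the expected values: "earth" appears twice in the first
    # example and has idf weight .4, so its column is 2 * .4 = .8; the second
    # example has two OOV tokens ("ohio", "michigan"), and the OOV bucket uses
    # the averaged weight .5, giving 2 * .5 = 1.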
max_tokens = 6
expected_output_shape = [None, max_tokens]
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=6,
standardize=None,
split=None,
output_mode=text_vectorization.TFIDF,
pad_to_max_tokens=True)
layer.set_vocabulary(vocab_data, idf_weights=idf_weights)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllClose(expected_output, output_dataset)
def test_tfidf_output_soft_maximum(self):
vocab_data = ["earth", "wind", "and", "fire"]
    # OOV idf weight (bucket 0) should be 0.5, the average of the passed weights.
idf_weights = [.4, .25, .75, .6]
input_array = np.array([["earth", "wind", "and", "earth"],
["ohio", "fire", "earth", "michigan"]])
# pyformat: disable
# pylint: disable=bad-whitespace
expected_output = [[ 0, .8, .25, .75, 0],
[ 1, .4, 0, 0, .6]]
# pylint: enable=bad-whitespace
# pyformat: enable
max_tokens = 5
expected_output_shape = [None, max_tokens]
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=10,
standardize=None,
split=None,
output_mode=text_vectorization.TFIDF,
pad_to_max_tokens=False)
layer.set_vocabulary(vocab_data, idf_weights=idf_weights)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllClose(expected_output, output_dataset)
def test_tfidf_output_set_oov_weight(self):
vocab_data = ["[UNK]", "earth", "wind", "and", "fire"]
idf_weights = [.1, .4, .25, .75, .6]
input_array = np.array([["earth", "wind", "and", "earth"],
["ohio", "fire", "earth", "michigan"]])
# pyformat: disable
# pylint: disable=bad-whitespace
expected_output = [[ 0, .8, .25, .75, 0],
[ .2, .4, 0, 0, .6]]
# pylint: enable=bad-whitespace
# pyformat: enable
max_tokens = 5
expected_output_shape = [None, max_tokens]
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=10,
standardize=None,
split=None,
output_mode=text_vectorization.TFIDF,
pad_to_max_tokens=False)
layer.set_vocabulary(vocab_data, idf_weights=idf_weights)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllClose(expected_output, output_dataset)
def test_accept_1D_input(self):
input_array = np.array(["earth wind and fire",
"fire and earth michigan"])
layer = text_vectorization.TextVectorization(
standardize=None, split=None, output_mode="int")
layer.adapt(input_array)
_ = layer(input_array)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TextVectorizationModelBuildingTest(
keras_parameterized.TestCase,
preprocessing_test_utils.PreprocessingLayerTest):
@parameterized.named_parameters(
{
"testcase_name": "count_hard_max",
"pad_to_max_tokens": True,
"output_mode": text_vectorization.COUNT
}, {
"testcase_name": "count_soft_max",
"pad_to_max_tokens": False,
"output_mode": text_vectorization.COUNT
}, {
"testcase_name": "binary_hard_max",
"pad_to_max_tokens": True,
"output_mode": text_vectorization.BINARY
}, {
"testcase_name": "binary_soft_max",
"pad_to_max_tokens": False,
"output_mode": text_vectorization.BINARY
}, {
"testcase_name": "tfidf_hard_max",
"pad_to_max_tokens": True,
"output_mode": text_vectorization.TFIDF
}, {
"testcase_name": "tfidf_soft_max",
"pad_to_max_tokens": False,
"output_mode": text_vectorization.TFIDF
})
def test_end_to_end_bagged_modeling(self, output_mode, pad_to_max_tokens):
vocab_data = ["earth", "wind", "and", "fire"]
idf_weights = [.5, .25, .2, .125]
input_array = np.array([["earth", "wind", "and", "earth"],
["ohio", "and", "earth", "michigan"]])
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=10,
standardize=None,
split=None,
output_mode=output_mode,
pad_to_max_tokens=pad_to_max_tokens)
if output_mode == text_vectorization.TFIDF:
layer.set_vocabulary(vocab_data, idf_weights=idf_weights)
else:
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
float_data = backend.cast(int_data, dtype="float32")
output_data = core.Dense(64)(float_data)
model = keras.Model(inputs=input_data, outputs=output_data)
_ = model.predict(input_array)
def test_end_to_end_vocab_modeling(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([["earth wind and also fire"],
["fire and earth michigan"]])
output_sequence_length = 6
max_tokens = 5
# The input shape here is explicitly 1 because we're tokenizing.
input_data = keras.Input(shape=(1,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=text_vectorization.SPLIT_ON_WHITESPACE,
output_mode=text_vectorization.INT,
output_sequence_length=output_sequence_length)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
embedded_data = embeddings.Embedding(
input_dim=max_tokens + 1, output_dim=32)(
int_data)
output_data = convolutional.Conv1D(
250, 3, padding="valid", activation="relu", strides=1)(
embedded_data)
model = keras.Model(inputs=input_data, outputs=output_data)
_ = model.predict(input_array)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TextVectorizationErrorTest(keras_parameterized.TestCase,
preprocessing_test_utils.PreprocessingLayerTest
):
def test_too_long_vocab_fails_in_single_setting(self):
vocab_data = ["earth", "wind", "and", "fire"]
layer = text_vectorization.TextVectorization(
max_tokens=4,
standardize=None,
split=None,
output_mode=text_vectorization.INT)
with self.assertRaisesRegex(ValueError,
"vocabulary larger than the maximum vocab.*"):
layer.set_vocabulary(vocab_data)
def test_setting_vocab_without_idf_weights_fails_in_tfidf_mode(self):
vocab_data = ["earth", "wind", "and", "fire"]
layer = text_vectorization.TextVectorization(
max_tokens=5,
standardize=None,
split=None,
output_mode=text_vectorization.TFIDF)
with self.assertRaisesRegex(
ValueError, "`idf_weights` must be set if output_mode is TFIDF"):
layer.set_vocabulary(vocab_data)
def test_idf_weights_length_mismatch_fails(self):
vocab_data = ["earth", "wind", "and", "fire"]
idf_weights = [1, 2, 3]
layer = text_vectorization.TextVectorization(
max_tokens=5,
standardize=None,
split=None,
output_mode=text_vectorization.TFIDF)
with self.assertRaisesRegex(
ValueError, "`idf_weights` must be the same length as vocab"):
layer.set_vocabulary(vocab_data, idf_weights)
def test_set_tfidf_in_non_tfidf_fails(self):
vocab_data = ["earth", "wind", "and", "fire"]
idf_weights = [1, 2, 3, 4]
layer = text_vectorization.TextVectorization(
max_tokens=5,
standardize=None,
split=None,
output_mode=text_vectorization.BINARY)
with self.assertRaisesRegex(ValueError,
"`idf_weights` should only be set if"):
layer.set_vocabulary(vocab_data, idf_weights)
def test_zero_max_tokens_fails(self):
with self.assertRaisesRegex(ValueError, "max_tokens.*"):
_ = text_vectorization.TextVectorization(max_tokens=0)
def test_non_string_dtype_fails(self):
with self.assertRaisesRegex(ValueError, "dtype of string.*"):
_ = text_vectorization.TextVectorization(dtype=dtypes.int64)
def test_unknown_standardize_arg_fails(self):
with self.assertRaisesRegex(ValueError,
"standardize arg.*unsupported_value"):
_ = text_vectorization.TextVectorization(standardize="unsupported_value")
def test_unknown_split_arg_fails(self):
with self.assertRaisesRegex(ValueError, "split arg.*unsupported_value"):
_ = text_vectorization.TextVectorization(split="unsupported_value")
def test_unknown_output_mode_arg_fails(self):
with self.assertRaisesRegex(ValueError,
"output_mode arg.*unsupported_value"):
_ = text_vectorization.TextVectorization(output_mode="unsupported_value")
def test_unknown_ngrams_arg_fails(self):
with self.assertRaisesRegex(ValueError, "ngrams.*unsupported_value"):
_ = text_vectorization.TextVectorization(ngrams="unsupported_value")
def test_float_ngrams_arg_fails(self):
with self.assertRaisesRegex(ValueError, "ngrams.*2.9"):
_ = text_vectorization.TextVectorization(ngrams=2.9)
def test_float_tuple_ngrams_arg_fails(self):
with self.assertRaisesRegex(ValueError, "ngrams.*(1.3, 2.9)"):
_ = text_vectorization.TextVectorization(ngrams=(1.3, 2.9))
def test_non_int_output_sequence_length_dtype_fails(self):
with self.assertRaisesRegex(ValueError, "output_sequence_length.*2.0"):
_ = text_vectorization.TextVectorization(
output_mode="int", output_sequence_length=2.0)
def test_non_none_output_sequence_length_fails_if_output_type_not_int(self):
with self.assertRaisesRegex(ValueError,
"`output_sequence_length` must not be set"):
_ = text_vectorization.TextVectorization(
output_mode="count", output_sequence_length=2)
# Custom functions for the custom callable serialization test. Declared here
# to avoid multiple registrations from run_all_keras_modes().
@generic_utils.register_keras_serializable(package="Test")
def custom_standardize_fn(x):
return gen_string_ops.string_lower(x)
@generic_utils.register_keras_serializable(package="Test")
def custom_split_fn(x):
return ragged_string_ops.string_split_v2(x, sep=">")
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TextVectorizationSavingTest(
keras_parameterized.TestCase,
preprocessing_test_utils.PreprocessingLayerTest):
def tearDown(self):
keras.backend.clear_session()
gc.collect()
super(TextVectorizationSavingTest, self).tearDown()
def test_saving(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"]])
expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
# Build and validate a golden model.
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=None,
output_mode=text_vectorization.INT)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
# Save the model to disk.
output_path = os.path.join(self.get_temp_dir(), "tf_keras_saved_model")
model.save(output_path, save_format="tf")
# Delete the session and graph to ensure that the loaded model is generated
# from scratch.
# TODO(b/149526183): Can't clear session when TF2 is disabled.
if tf2.enabled():
keras.backend.clear_session()
loaded_model = keras.models.load_model(output_path)
self.assertAllEqual(loaded_model.predict(input_array), expected_output)
def test_saving_when_nested(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"]])
expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
# Build and validate a golden model.
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=None,
output_mode=text_vectorization.INT)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
outer_input = keras.Input(shape=(None,), dtype=dtypes.string)
outer_output = model(outer_input)
outer_model = keras.Model(inputs=outer_input, outputs=outer_output)
# Save the model to disk.
output_path = os.path.join(self.get_temp_dir(), "tf_keras_saved_model")
outer_model.save(output_path, save_format="tf")
# Delete the session and graph to ensure that the loaded model is generated
# from scratch.
# TODO(b/149526183): Can't clear session when TF2 is disabled.
if tf2.enabled():
keras.backend.clear_session()
loaded_model = keras.models.load_model(output_path)
self.assertAllEqual(loaded_model.predict(input_array), expected_output)
def test_saving_with_tfidf(self):
vocab_data = ["earth", "wind", "and", "fire"]
    # OOV idf weight (bucket 0) should be 0.5, the average of the passed weights.
idf_weights = [.4, .25, .75, .6]
input_array = np.array([["earth", "wind", "and", "earth"],
["ohio", "fire", "earth", "michigan"]])
# pyformat: disable
# pylint: disable=bad-whitespace
expected_output = [[ 0, .8, .25, .75, 0],
[ 1, .4, 0, 0, .6]]
vocab_data = ["earth", "wind", "and", "fire"]
# pylint: enable=bad-whitespace
# pyformat: enable
# Build and validate a golden model.
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=5,
standardize=None,
split=None,
output_mode=text_vectorization.TFIDF)
layer.set_vocabulary(vocab_data, idf_weights=idf_weights)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllClose(output_dataset, expected_output)
# Save the model to disk.
output_path = os.path.join(self.get_temp_dir(), "tf_keras_saved_model")
model.save(output_path, save_format="tf")
loaded_model = keras.models.load_model(output_path)
# Ensure that the loaded model is unique (so that the save/load is real)
self.assertIsNot(model, loaded_model)
# Validate correctness of the new model.
new_output_dataset = loaded_model.predict(input_array)
self.assertAllClose(new_output_dataset, expected_output)
def test_serialization_with_custom_callables(self):
input_array = np.array([["earth>wind>and Fire"],
["\tfire>And\nearth>michigan"]])
expected_output = [[b"earth", b"wind", b"and fire"],
[b"\tfire", b"and\nearth", b"michigan"]]
input_data = keras.Input(shape=(1,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=custom_standardize_fn,
split=custom_split_fn,
ngrams=None,
output_mode=None)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
serialized_model_data = model.get_config()
new_model = keras.Model.from_config(serialized_model_data)
new_output_dataset = new_model.predict(input_array)
self.assertAllEqual(expected_output, new_output_dataset)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TextVectorizationE2ETest(keras_parameterized.TestCase,
preprocessing_test_utils.PreprocessingLayerTest):
def test_keras_vocab_trimming_example(self):
vocab_data = np.array([
"earth", "earth", "earth", "earth", "wind", "wind", "wind", "and",
"and", "fire"
])
input_array = np.array([["earth", "wind", "and", "earth"],
["ohio", "and", "earth", "michigan"]])
# pyformat: disable
expected_output = [[1, 2, 1],
[3, 1, 0]]
# pyformat: enable
max_tokens = 3
expected_output_shape = [None, max_tokens]
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=max_tokens,
standardize=None,
split=None,
output_mode=text_vectorization.COUNT,
pad_to_max_tokens=True)
int_data = layer(input_data)
layer.adapt(vocab_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(input_data, int_data)
output = model.predict(input_array)
self.assertAllEqual(expected_output, output)
if __name__ == "__main__":
test.main()
| apache-2.0 | 1,295,234,925,009,800,400 | 39.08957 | 81 | 0.594367 | false |
Sapphirine/Predicting-The-United-States-Presidential-Election-Results-Using-TwitterSentiment | src/calculate_state_prob.py | 1 | 1641 | import pandas as pd
import csv
#Open file to save Trump tweets sentiment used to estimate probability
csvfile=open("/home/ubuntu/project/output_data/trump_pos_sentiment.csv", "w")
csvwriter=csv.writer(csvfile, delimiter=",")
#Assign header row
csvwriter.writerow(["Index"]+["State"]+["Sentiment"])
#Initialize counter for tweets
index=0
#Open Trump results and load in file
with open("/home/ubuntu/project/output_data/trump_tweets_results.csv","r") as infile:
for line in infile:
csvwriter.writerow([index]+[str(line).split(",")[0].strip()]+[str(line).split(",")[1].strip()])
index+=1
#Open Clinton results, flip sentiment (so 1.0 consistently means pro-Trump) and load into file
with open("/home/ubuntu/project/output_data/clinton_tweets_results.csv","r") as infile:
for line in infile:
if str(line).split(",")[1].rstrip()=="1.0":
csvwriter.writerow([index]+[str(line).split(",")[0].strip()]+[0.0])
index+=1
else:
csvwriter.writerow([index]+[str(line).split(",")[0].strip()]+[1.0])
index+=1
#Close csv file
csvfile.close()
#Load data into data frame
data=pd.DataFrame.from_csv("/home/ubuntu/project/output_data/trump_pos_sentiment.csv")
#print data
#Group sentiment by state
grouped_data=data.groupby("State")["Sentiment"].mean()
#aggregations = {
# "Sentiment":'mean'
#}
#grouped_data=data.groupby("State").agg(aggregations)
#grouped_data=data.groupby(["State", "Sentiment"]).mean()
print grouped_data
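#grouped_data is a pandas Series indexed by state name, holding the mean
#pro-Trump sentiment per state, e.g. (illustrative values only):
#alabama 0.62, ohio 0.48, ...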
#Load into data frame
prob = pd.DataFrame(grouped_data)
#Load into csv file
prob.to_csv("/home/ubuntu/project/output_data/trump_win_prob.csv", sep=",", encoding="utf-8")
| apache-2.0 | -6,014,421,773,318,569,000 | 33.1875 | 103 | 0.678245 | false |
jaantollander/CrowdDynamics | crowddynamics/core/motion/tests/test_motion.py | 1 | 2448 | import numpy as np
from hypothesis import given
from crowddynamics.core.motion.adjusting import force_adjust, torque_adjust
from crowddynamics.core.motion.contact import force_contact
from crowddynamics.core.motion.fluctuation import force_fluctuation, \
torque_fluctuation
from crowddynamics.core.motion.helbing import \
force_social_helbing
from crowddynamics.testing import reals
SIZE = 10
@given(mass=reals(min_value=0, shape=SIZE),
scale=reals(min_value=0, shape=SIZE))
def test_force_fluctuation(mass, scale):
ans = force_fluctuation(mass, scale)
assert isinstance(ans, np.ndarray)
assert ans.dtype.type is np.float64
assert ans.shape == (SIZE, 2)
@given(mass=reals(min_value=0),
tau_adj=reals(min_value=0, exclude_zero='near'),
v0=reals(min_value=0),
e0=reals(shape=2),
v=reals(shape=2))
def test_force_adjust(mass, tau_adj, v0, e0, v):
ans = force_adjust(mass, tau_adj, v0, e0, v)
assert isinstance(ans, np.ndarray)
assert ans.dtype.type is np.float64
assert ans.shape == (2,)
@given(h=reals(),
n=reals(shape=2),
a=reals(min_value=0),
b=reals(min_value=0, exclude_zero='near'))
def test_force_social_helbing(h, n, a, b):
ans = force_social_helbing(h, n, a, b)
assert isinstance(ans, np.ndarray)
assert ans.dtype.type is np.float64
assert ans.shape == (2,)
@given(h=reals(),
n=reals(shape=2),
v=reals(shape=2),
t=reals(shape=2),
mu=reals(min_value=0),
kappa=reals(min_value=0),
damping=reals(min_value=0))
def test_force_contact(h, n, v, t, mu, kappa, damping):
ans = force_contact(h, n, v, t, mu, kappa, damping)
assert isinstance(ans, np.ndarray)
assert ans.dtype.type is np.float64
assert ans.shape == (2,)
@given(inertia_rot=reals(0, shape=SIZE), scale=reals(0, shape=SIZE))
def test_torque_fluctuation(inertia_rot, scale):
ans = torque_fluctuation(inertia_rot, scale)
assert isinstance(ans, np.ndarray)
assert ans.dtype.type is np.float64
assert ans.shape == (SIZE,)
@given(inertia_rot=reals(0),
tau_rot=reals(0, exclude_zero='near'),
phi_0=reals(),
phi=reals(),
omega_0=reals(),
omega=reals())
def test_torque_adjust(inertia_rot, tau_rot, phi_0, phi, omega_0, omega):
ans = torque_adjust(inertia_rot, tau_rot, phi_0, phi, omega_0,
omega)
assert isinstance(ans, float)
| gpl-3.0 | 1,157,326,579,588,317,200 | 30.384615 | 75 | 0.658497 | false |
stoq/kiwi | examples/validation/datatypes.py | 1 | 1096 | import datetime
from gi.repository import Gtk
from kiwi.currency import currency
from kiwi.ui.widgets.entry import ProxyEntry
from kiwi.ui.widgets.label import ProxyLabel
window = Gtk.Window()
window.connect('delete-event', Gtk.main_quit)
window.set_border_width(6)
vbox = Gtk.VBox()
window.add(vbox)
data_types = [
(True, bool),
(42, int),
(22.0 / 7.0, float),
(3000, int),
('THX', str),
(datetime.datetime.now(), datetime.datetime),
(datetime.date.today(), datetime.date),
(datetime.time(11, 38, 00), datetime.time),
(currency('50.1'), currency),
]
for data, data_type in data_types:
hbox = Gtk.HBox(True)
vbox.pack_start(hbox, False, False, 6)
label = ProxyLabel(data_type.__name__.capitalize())
label.set_bold(True)
hbox.pack_start(label, True, True, 0)
label = ProxyLabel(data_type=data_type)
label.update(data)
hbox.pack_start(label, False, False, 6)
entry = ProxyEntry(data_type=data_type)
entry.update(data)
entry.validate()
hbox.pack_start(entry, False, False, 6)
window.show_all()
Gtk.main()
| lgpl-2.1 | -7,734,861,046,727,665,000 | 22.826087 | 55 | 0.666058 | false |
vinta/django-email-confirm-la | email_confirm_la/models.py | 1 | 7024 | # coding: utf-8
import datetime
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.core.mail import EmailMessage
from django.db import IntegrityError
from django.db import models
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from email_confirm_la import signals
from email_confirm_la.conf import configs
from email_confirm_la.compat import GenericForeignKey
from email_confirm_la.exceptions import ExpiredError
from email_confirm_la.utils import generate_random_token
class EmailConfirmationManager(models.Manager):
def verify_email_for_object(self, email, content_object, email_field_name='email'):
"""
Create an email confirmation for `content_object` and send a confirmation mail.
The email will be directly saved to `content_object.email_field_name` when `is_primary` and `skip_verify` both are true.
"""
confirmation_key = generate_random_token()
try:
confirmation = EmailConfirmation()
confirmation.content_object = content_object
confirmation.email_field_name = email_field_name
confirmation.email = email
confirmation.confirmation_key = confirmation_key
confirmation.save()
except IntegrityError:
confirmation = EmailConfirmation.objects.get_for_object(content_object, email_field_name)
confirmation.email = email
confirmation.confirmation_key = confirmation_key
confirmation.save(update_fields=['email', 'confirmation_key'])
confirmation.send()
return confirmation
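    # Minimal usage sketch (``user_profile`` is a hypothetical model instance):
    #
    #     EmailConfirmation.objects.verify_email_for_object(
    #         email='user@example.com', content_object=user_profile)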
def get_unverified_email_for_object(self, content_object, email_field_name='email'):
try:
confirmation = EmailConfirmation.objects.get_for_object(content_object, email_field_name)
except EmailConfirmation.DoesNotExist:
unverified_email = ''
else:
unverified_email = confirmation.email
return unverified_email
def get_for_object(self, content_object, email_field_name='email'):
content_type = ContentType.objects.get_for_model(content_object)
confirmation = EmailConfirmation.objects.get(content_type=content_type, object_id=content_object.id, email_field_name=email_field_name)
return confirmation
def get_queryset_for_object(self, content_object, email_field_name='email'):
content_type = ContentType.objects.get_for_model(content_object)
queryset = EmailConfirmation.objects.filter(content_type=content_type, object_id=content_object.id, email_field_name=email_field_name)
return queryset
def get_for_email(self, email, content_object_model, email_field_name='email'):
content_type = ContentType.objects.get_for_model(content_object_model)
confirmation = EmailConfirmation.objects.get(content_type=content_type, email_field_name=email_field_name, email=email)
return confirmation
class EmailConfirmation(models.Model):
"""
    Once an email is confirmed, it will be deleted from this table. In other words, there are only unconfirmed emails in the database.
"""
ExpiredError = ExpiredError
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
email_field_name = models.CharField(verbose_name=_('Email field name'), max_length=32, default='email')
email = models.EmailField(verbose_name=_('Email'), db_index=True)
confirmation_key = models.CharField(verbose_name=_('Confirmation_key'), max_length=64, unique=True)
send_at = models.DateTimeField(null=True, blank=True, db_index=True)
objects = EmailConfirmationManager()
class Meta:
verbose_name = _('Email confirmation')
verbose_name_plural = _('Email confirmation')
unique_together = (('content_type', 'object_id', 'email_field_name'), )
def __repr__(self):
return '<EmailConfirmation {0}>'.format(self.email)
def __str__(self):
return 'Confirmation for {0}'.format(self.email)
def send(self, template_context=None):
default_template_context = dict(configs.EMAIL_CONFIRM_LA_TEMPLATE_CONTEXT)
default_template_context['email_confirmation'] = self
if isinstance(template_context, dict):
            template_context = dict(default_template_context, **template_context)  # merge dictionaries, caller's values win
else:
template_context = default_template_context
subject = render_to_string('email_confirm_la/email/email_confirmation_subject.txt', template_context)
subject = ''.join(subject.splitlines()).strip() # remove unnecessary line breaks
body = render_to_string('email_confirm_la/email/email_confirmation_message.html', template_context)
message = EmailMessage(subject, body, settings.DEFAULT_FROM_EMAIL, [self.email, ])
message.content_subtype = 'html'
message.send()
self.send_at = timezone.now()
self.save(update_fields=('send_at', ))
signals.post_email_confirmation_send.send(
sender=self.__class__,
confirmation=self,
)
def get_confirmation_url(self, full=True):
url_reverse_name = configs.EMAIL_CONFIRM_LA_CONFIRM_URL_REVERSE_NAME
url = reverse(url_reverse_name, kwargs={'confirmation_key': self.confirmation_key})
if full:
confirmation_url = '{0}://{1}{2}'.format(configs.EMAIL_CONFIRM_LA_HTTP_PROTOCOL, configs.EMAIL_CONFIRM_LA_DOMAIN, url)
else:
confirmation_url = url
return confirmation_url
def confirm(self, ignore_expiration=False, save_to_content_object=True):
if not ignore_expiration and self.is_expired:
raise ExpiredError()
old_email = getattr(self.content_object, self.email_field_name, '')
if save_to_content_object:
setattr(self.content_object, self.email_field_name, self.email)
self.content_object.save(update_fields=(self.email_field_name, ))
signals.post_email_confirmation_confirm.send(
sender=self.__class__,
confirmation=self,
save_to_content_object=save_to_content_object,
old_email=old_email,
)
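    # Sketch of a confirming view (hypothetical; the package's actual view is
    # defined elsewhere):
    #
    #     def confirm_email(request, confirmation_key):
    #         confirmation = EmailConfirmation.objects.get(
    #             confirmation_key=confirmation_key)
    #         confirmation.confirm()  # may raise ExpiredError
    #         confirmation.delete()   # confirmed emails are removed (see class docstring)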
def clean(self):
"""
        Delete all confirmations for the same content_object and the same field.
"""
EmailConfirmation.objects.filter(content_type=self.content_type, object_id=self.object_id, email_field_name=self.email_field_name).delete()
@property
def is_expired(self):
if not self.send_at:
return False
expiration_time = self.send_at + datetime.timedelta(seconds=configs.EMAIL_CONFIRM_LA_CONFIRM_EXPIRE_SEC)
return expiration_time <= timezone.now()
| mit | -1,843,030,772,394,282,500 | 39.837209 | 147 | 0.685222 | false |
h2020-endeavour/iSDX | xctrl/flowmodmsg.py | 1 | 2846 | #!/usr/bin/env python
# Author:
# Rudiger Birkner (Networked Systems Group ETH Zurich)
class FlowModMsgBuilder(object):
def __init__(self, participant, key):
self.participant = participant
self.key = key
self.flow_mods = []
    def add_flow_mod(self, mod_type, rule_type, priority, match, action, datapath=None, cookie=None):
if cookie is None:
cookie = (len(self.flow_mods)+1, 65535)
fm = {
"cookie": cookie,
"datapath": datapath,
"mod_type": mod_type,
"rule_type": rule_type,
"priority": priority,
"match": match,
"action": action
}
self.flow_mods.append(fm)
return cookie
def delete_flow_mod(self, mod_type, rule_type, cookie, cookie_mask):
fm = {
"cookie": (cookie, cookie_mask),
"mod_type": mod_type,
"rule_type": rule_type,
}
self.flow_mods.append(fm)
def get_msg(self):
msg = {
"auth_info": {
"participant" : self.participant,
"key" : self.key
},
"flow_mods": self.flow_mods
}
return msg
def reset_flow_mod(self):
self.flow_mods = []
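# Example (hypothetical participant id and key): building one outbound rule and
# serializing it into the request body documented below.
#
#     builder = FlowModMsgBuilder(participant=1, key="xyz")
#     builder.add_flow_mod(mod_type="insert",
#                          rule_type="outbound",
#                          priority=1,
#                          match={"tcp_dst": 179},
#                          action={"fwd": ["main-in"]})
#     msg = builder.get_msg()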
# request body format:
# {"auth_info": {
# "participant": 1,
# "key": "xyz"
# }
# "flow_mods": [
# { "cookie": (1, 2**16-1),
# "mod_type": "insert/remove",
# "rule_type": "inbound/outbound/main",
# "priority": 1,
# "match" : {
# "eth_type" : 0x0806,
# "arp_tpa" : ("172.1.0.0", "255.255.255.0"),
# "in_port" : 5,
# "eth_dst" : "ff:ff:ff:ff:ff:ff",
# "eth_src" : "80:23:ff:98:10:01",
# "ipv4_src" : "192.168.1.1",
# "ipv4_dst" : "192.168.1.2",
# "tcp_src" : 80,
# "tcp_dst" : 179,
# "udp_src" : 23,
# "udp_dst" : 22,
# },
# "action" : {
# "fwd": ["inbound"/"outbound"/"main-in"/main-out"],
# "set_eth_src": "80:23:ff:98:10:01",
# "set_eth_dst": ("00:00:00:00:00:01","00:00:00:00:03:ff")
# }
# },
# { "cookie": (2, 2**16-1),
# "mod_type": "insert/remove",
# "rule_type": "inbound/outbound/main",
# "match" : {"tcp_dst" : 80},
# "action" : {"fwd": [3]}
# }
# ...]
# }
| apache-2.0 | 162,722,739,256,632,800 | 30.977528 | 105 | 0.382642 | false |
Nadeflore/dakara-player-vlc | tests/test_text_generator.py | 1 | 10404 | from unittest import TestCase
from unittest.mock import mock_open, patch
from dakara_base.resources_manager import get_file
from path import Path
from dakara_player_vlc.text_generator import (
IDLE_TEMPLATE_NAME,
TextGenerator,
TRANSITION_TEMPLATE_NAME,
)
from dakara_player_vlc.resources_manager import get_template
class TextGeneratorTestCase(TestCase):
"""Test the text generator class unitary
"""
@patch.object(TextGenerator, "load_templates")
@patch.object(TextGenerator, "load_icon_map")
def test_load(self, mocked_load_icon_map, mocked_load_templates):
"""Test the load method
"""
        # create object
text_generator = TextGenerator({})
# call the method
text_generator.load()
# assert the call
mocked_load_icon_map.assert_called_once_with()
mocked_load_templates.assert_called_once_with()
@patch.object(Path, "open", new_callable=mock_open)
@patch("dakara_player_vlc.text_generator.ICON_MAP_FILE", "icon_map_file")
@patch("dakara_player_vlc.text_generator.get_file", autospec=True)
@patch("dakara_player_vlc.text_generator.json.load", autospec=True)
def test_load_icon_map(self, mocked_load, mocked_get_file, mocked_open):
"""Test to load the icon map
"""
# create the mock
mocked_load.return_value = {"name": "value"}
mocked_get_file.return_value = Path("path/to/icon_map_file")
# create the object
text_generator = TextGenerator({})
        # pre assert there is no icon map
self.assertDictEqual(text_generator.icon_map, {})
# call the method
text_generator.load_icon_map()
# assert there is an icon map
self.assertDictEqual(text_generator.icon_map, {"name": "value"})
# assert the mock
mocked_load.assert_called_with(mocked_open.return_value)
mocked_get_file.assert_called_with(
"dakara_player_vlc.resources", "icon_map_file"
)
mocked_open.assert_called_with()
def test_load_templates_default(self):
"""Test to load default templates for text
In that case, the templates come from the fallback directory.
"""
# create object
text_generator = TextGenerator({})
# pre assert there are no templates
self.assertIsNone(text_generator.idle_template)
self.assertIsNone(text_generator.transition_template)
# call the method
text_generator.load_templates()
# assert there are templates defined
self.assertEqual(
text_generator.idle_template.filename, get_template(IDLE_TEMPLATE_NAME)
)
self.assertEqual(
text_generator.transition_template.filename,
get_template(TRANSITION_TEMPLATE_NAME),
)
def test_load_templates_custom_directory_success(self):
"""Test to load custom templates using an existing directory
In that case, the templates come from this directory.
"""
# create object
text_generator = TextGenerator({"directory": get_file("tests.resources", "")})
# pre assert there are no templates
self.assertIsNone(text_generator.idle_template)
self.assertIsNone(text_generator.transition_template)
# call the method
text_generator.load_templates()
# assert there are templates defined
self.assertEqual(
text_generator.idle_template.filename,
get_file("tests.resources", IDLE_TEMPLATE_NAME),
)
self.assertEqual(
text_generator.transition_template.filename,
get_file("tests.resources", TRANSITION_TEMPLATE_NAME),
)
def test_load_templates_custom_directory_fail(self):
"""Test to load templates using a directory that does not exist
In that case, the templates come from the fallback directory.
"""
# create object
text_generator = TextGenerator({"directory": "nowhere"})
# pre assert there are no templates
self.assertIsNone(text_generator.idle_template)
self.assertIsNone(text_generator.transition_template)
# call the method
text_generator.load_templates()
# assert there are templates defined
self.assertEqual(
text_generator.idle_template.filename, get_template(IDLE_TEMPLATE_NAME)
)
self.assertEqual(
text_generator.transition_template.filename,
get_template(TRANSITION_TEMPLATE_NAME),
)
def test_load_templates_custom_names_success(self):
"""Test to load templates using existing names
In that case, the templates come from the custom directory and have the
correct name.
"""
# create object
text_generator = TextGenerator(
{
"directory": get_file("tests.resources", ""),
"idle_template_name": "song.ass",
"transition_template_name": "song.ass",
}
)
# pre assert there are no templates
self.assertIsNone(text_generator.idle_template)
self.assertIsNone(text_generator.transition_template)
# call the method
text_generator.load_templates()
# assert there are templates defined
self.assertEqual(
text_generator.idle_template.filename,
get_file("tests.resources", "song.ass"),
)
self.assertEqual(
text_generator.transition_template.filename,
get_file("tests.resources", "song.ass"),
)
def test_load_templates_custom_names_fail(self):
"""Test to load templates using names that do not exist
In that case, the templates come from the custom directory and have
the default name.
"""
# create object
text_generator = TextGenerator(
{
"directory": get_file("tests.resources", ""),
"idle_template_name": "nothing",
"transition_template_name": "nothing",
}
)
# pre assert there are no templates
self.assertIsNone(text_generator.idle_template)
self.assertIsNone(text_generator.transition_template)
# call the method
text_generator.load_templates()
# assert there are templates defined
self.assertEqual(
text_generator.idle_template.filename,
get_file("tests.resources", IDLE_TEMPLATE_NAME),
)
self.assertEqual(
text_generator.transition_template.filename,
get_file("tests.resources", TRANSITION_TEMPLATE_NAME),
)
def test_convert_icon(self):
"""Test the convertion of an available icon name to its code
"""
# create object
text_generator = TextGenerator({})
text_generator.icon_map = {"music": "0xf001"}
self.assertEqual(text_generator.convert_icon("music"), "\uf001")
self.assertEqual(text_generator.convert_icon("other"), " ")
def test_convert_icon_unavailable(self):
"""Test the convertion of an unavailable icon name to a generic code
"""
# create object
text_generator = TextGenerator({})
self.assertEqual(text_generator.convert_icon("unavailable"), " ")
def test_convert_icon_none(self):
"""Test the convertion of a null icon name is handled
"""
# create object
text_generator = TextGenerator({})
self.assertEqual(text_generator.convert_icon(None), "")
def test_convert_link_type_name(self):
"""Test the convertion of a link type to its long name
"""
# create object
text_generator = TextGenerator({})
self.assertEqual(text_generator.convert_link_type_name("OP"), "Opening")
self.assertEqual(text_generator.convert_link_type_name("ED"), "Ending")
self.assertEqual(text_generator.convert_link_type_name("IN"), "Insert song")
self.assertEqual(text_generator.convert_link_type_name("IS"), "Image song")
class TextGeneratorIntegrationTestCase(TestCase):
"""Test the text generator class in real conditions
"""
def setUp(self):
# create info dictionary
self.idle_info = {"notes": ["VLC 0.0.0", "Dakara player 0.0.0"]}
# create playlist entry
self.playlist_entry = {
"song": {
"title": "Song title",
"artists": [{"name": "Artist name"}],
"works": [
{
"work": {
"title": "Work title",
"subtitle": "Subtitle of the work",
"work_type": {
"name": "Work type name",
"icon_name": "music",
},
},
"link_type": "OP",
"link_type_number": 1,
"episodes": "1, 2, 3",
}
],
"file_path": "path/of/the/file",
},
"owner": {"username": "User"},
"date_created": "1970-01-01T00:00:00.00",
}
# create idle text content
self.idle_text_path = get_file("tests.resources", "idle.ass")
# create transition text content
self.transition_text_path = get_file("tests.resources", "transition.ass")
# create text generator object
self.text_generator = TextGenerator({})
self.text_generator.load()
def test_create_idle_text(self):
"""Test the generation of an idle text
"""
# call method
result = self.text_generator.create_idle_text(self.idle_info)
# check file content
idle_text_content = self.idle_text_path.text(encoding="utf8")
self.assertEqual(idle_text_content, result)
def test_create_transition_text(self):
"""Test the generation of a transition text
"""
# call method
result = self.text_generator.create_transition_text(self.playlist_entry)
# check file content
transition_text_content = self.transition_text_path.text(encoding="utf8")
self.assertEqual(transition_text_content, result)
| mit | 6,179,296,149,975,221,000 | 33.450331 | 86 | 0.597174 | false |
ben-jones/centinel | centinel/vpn/openvpn.py | 1 | 2675 | #!/usr/bin/python
# openvpn.py: library to handle starting and stopping openvpn instances
import subprocess
import threading
import time
class OpenVPN():
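    """Wrap an openvpn client process running in a daemon thread.
    Connection state is inferred by parsing openvpn's stdout; see
    output_callback for the lines that flip the started/error/stopped flags.
    """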
def __init__(self, config_file=None, auth_file=None, timeout=10):
self.started = False
self.stopped = False
self.error = False
self.notifications = ""
self.auth_file = auth_file
self.config_file = config_file
self.thread = threading.Thread(target=self._invoke_openvpn)
self.thread.setDaemon(1)
self.timeout = timeout
def _invoke_openvpn(self):
if self.auth_file is None:
cmd = ['sudo', 'openvpn', '--script-security', '2',
'--config', self.config_file]
else:
cmd = ['sudo', 'openvpn', '--script-security', '2',
'--config', self.config_file,
'--auth-user-pass', self.auth_file]
self.process = subprocess.Popen(cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
self.kill_switch = self.process.terminate
self.starting = True
while True:
line = self.process.stdout.readline().strip()
if not line:
break
self.output_callback(line, self.process.terminate)
def output_callback(self, line, kill_switch):
"""Set status of openvpn according to what we process"""
self.notifications += line + "\n"
if "Initialization Sequence Completed" in line:
self.started = True
if "ERROR:" in line:
self.error = True
if "process exiting" in line:
self.stopped = True
def start(self, timeout=None):
"""Start openvpn and block until the connection is opened or there is
an error
"""
if not timeout:
timeout = self.timeout
self.thread.start()
start_time = time.time()
while start_time + timeout > time.time():
self.thread.join(1)
if self.error or self.started:
break
if self.started:
print "openvpn started"
else:
print "openvpn not started"
print self.notifications
def stop(self, timeout=None):
"""Stop openvpn"""
if not timeout:
timeout = self.timeout
self.kill_switch()
self.thread.join(timeout)
if self.stopped:
print "stopped"
else:
print "not stopped"
print self.notifications
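# Illustrative usage (a sketch, not part of the module; the config and auth
# paths below are assumptions):
#
#     vpn = OpenVPN(config_file='/etc/openvpn/client.conf',
#                   auth_file='/etc/openvpn/auth.txt', timeout=30)
#     vpn.start()          # blocks until connected, errored, or timed out
#     if vpn.started:
#         pass             # run whatever needs the tunnel here
#     vpn.stop()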
| mit | -5,255,765,130,357,951,000 | 31.621951 | 77 | 0.541308 | false |
abramhindle/UnnaturalCodeFork | python/testdata/launchpad/lib/lp/services/verification/tests/test_token_creation.py | 1 | 1773 | # Copyright 2009, 2011 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
__metaclass__ = type
import random
import testtools
from lp.services.database.constants import UTC_NOW
from lp.services.tokens import (
create_token,
create_unique_token_for_table,
)
from lp.services.verification.interfaces.authtoken import LoginTokenType
from lp.services.verification.model.logintoken import LoginToken
from lp.testing.layers import DatabaseFunctionalLayer
class Test_create_token(testtools.TestCase):
def test_length(self):
token = create_token(99)
self.assertEquals(len(token), 99)
class Test_create_unique_token_for_table(testtools.TestCase):
layer = DatabaseFunctionalLayer
def test_token_uniqueness(self):
orig_state = random.getstate()
self.addCleanup(lambda: random.setstate(orig_state))
# Calling create_unique_token_for_table() twice with the same
# random.seed() will generate two identical tokens, as the token was
# never inserted in the table.
random.seed(0)
token1 = create_unique_token_for_table(99, LoginToken.token)
random.seed(0)
token2 = create_unique_token_for_table(99, LoginToken.token)
self.assertEquals(token1, token2)
# Now insert the token in the table so that the next time we call
# create_unique_token_for_table() we get a different token.
LoginToken(
requester=None, token=token2, email='[email protected]',
tokentype=LoginTokenType.ACCOUNTMERGE, created=UTC_NOW)
random.seed(0)
token3 = create_unique_token_for_table(99, LoginToken.token)
self.assertNotEquals(token1, token3)
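# Illustrative pattern (not an additional test): callers pick a length and a
# table column, and the helper keeps drawing random tokens until it finds one
# absent from that column, e.g.
#
#     token = create_unique_token_for_table(20, LoginToken.token)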
| agpl-3.0 | -8,056,143,793,439,895,000 | 35.183673 | 76 | 0.707276 | false |
openstack/smaug | karbor/tests/unit/protection/test_cinder_protection_plugin.py | 1 | 18217 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinderclient import exceptions as cinder_exc
import collections
from karbor.common import constants
from karbor.context import RequestContext
from karbor import exception
from karbor.resource import Resource
from karbor.services.protection import bank_plugin
from karbor.services.protection import client_factory
from karbor.services.protection.protection_plugins.volume. \
cinder_protection_plugin import CinderBackupProtectionPlugin
from karbor.services.protection.protection_plugins.volume \
import volume_plugin_cinder_schemas as cinder_schemas
from karbor.tests import base
from karbor.tests.unit.protection import fakes
import mock
from oslo_config import cfg
from oslo_config import fixture
ResourceNode = collections.namedtuple(
"ResourceNode",
["value",
"child_nodes"]
)
Image = collections.namedtuple(
"Image",
["disk_format",
"container_format",
"status"]
)
def call_hooks(operation, checkpoint, resource, context, parameters, **kwargs):
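    """Invoke each lifecycle hook of a protection operation in order.
    Hooks missing from the operation fall back to a no-op, so partially
    implemented operations can still be driven with the full call sequence.
    """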
def noop(*args, **kwargs):
pass
hooks = (
'on_prepare_begin',
'on_prepare_finish',
'on_main',
'on_complete',
)
for hook_name in hooks:
hook = getattr(operation, hook_name, noop)
hook(checkpoint, resource, context, parameters, **kwargs)
class FakeCheckpoint(object):
def __init__(self, section):
super(FakeCheckpoint, self).__init__()
self.bank_section = section
self.id = "fake_id"
def get_resource_bank_section(self, resource_id=None):
return self.bank_section
class BackupResponse(object):
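    """Callable standing in for a cinder backup/snapshot API call.
    Returns a mock whose status is working_status for the first time_to_work
    calls and final_status afterwards; a final status of 'not-found' raises
    cinder_exc.NotFound instead of returning.
    """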
def __init__(self, bkup_id, final_status, working_status, time_to_work):
super(BackupResponse, self).__init__()
self._final_status = final_status
self._working_status = working_status
self._time_to_work = time_to_work
self._id = bkup_id
def __call__(self, *args, **kwargs):
res = mock.Mock()
res.id = self._id
if self._time_to_work > 0:
self._time_to_work -= 1
res.status = self._working_status
else:
res.status = self._final_status
if res.status == 'not-found':
raise cinder_exc.NotFound(403)
return res
class RestoreResponse(object):
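    """Callable standing in for a cinder restore API call.
    Raises KarborException when raise_except is set; otherwise returns a
    mock restore object carrying the given volume_id.
    """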
def __init__(self, volume_id, raise_except=False):
self._volume_id = volume_id
self._raise_except = raise_except
def __call__(self, *args, **kwargs):
if self._raise_except:
raise exception.KarborException()
res = mock.Mock()
res.volume_id = self._volume_id
return res
class CinderProtectionPluginTest(base.TestCase):
def setUp(self):
super(CinderProtectionPluginTest, self).setUp()
plugin_config = cfg.ConfigOpts()
plugin_config_fixture = self.useFixture(fixture.Config(plugin_config))
plugin_config_fixture.load_raw_values(
group='cinder_backup_protection_plugin',
poll_interval=0,
)
self.plugin = CinderBackupProtectionPlugin(plugin_config)
cfg.CONF.set_default('cinder_endpoint',
'http://127.0.0.1:8776/v2',
'cinder_client')
self.cntxt = RequestContext(user_id='demo',
project_id='abcd',
auth_token='efgh')
self.cinder_client = client_factory.ClientFactory.create_client(
"cinder", self.cntxt)
def _get_checkpoint(self):
fake_bank = bank_plugin.Bank(fakes.FakeBankPlugin())
fake_bank_section = bank_plugin.BankSection(
bank=fake_bank,
section="fake"
)
return FakeCheckpoint(fake_bank_section)
def test_get_options_schema(self):
options_schema = self.plugin.get_options_schema(
'OS::Cinder::Volume')
self.assertEqual(options_schema, cinder_schemas.OPTIONS_SCHEMA)
def test_get_restore_schema(self):
options_schema = self.plugin.get_restore_schema(
'OS::Cinder::Volume')
self.assertEqual(options_schema, cinder_schemas.RESTORE_SCHEMA)
def test_get_saved_info_schema(self):
options_schema = self.plugin.get_saved_info_schema(
'OS::Cinder::Volume')
self.assertEqual(options_schema,
cinder_schemas.SAVED_INFO_SCHEMA)
@mock.patch('karbor.services.protection.clients.cinder.create')
def test_protect_succeed(self, mock_cinder_create):
resource = Resource(
id="123",
type=constants.VOLUME_RESOURCE_TYPE,
name="test",
)
checkpoint = self._get_checkpoint()
section = checkpoint.get_resource_bank_section()
operation = self.plugin.get_protect_operation(resource)
section.update_object = mock.MagicMock()
mock_cinder_create.return_value = self.cinder_client
with mock.patch.multiple(
self.cinder_client,
volumes=mock.DEFAULT,
backups=mock.DEFAULT,
volume_snapshots=mock.DEFAULT,
) as mocks:
mocks['volumes'].get.return_value = mock.Mock()
mocks['volumes'].get.return_value.status = 'available'
mocks['backups'].create = BackupResponse(
'456', 'creating', '---', 0)
mocks['backups'].get = BackupResponse(
'456', 'available', 'creating', 2)
mocks['volume_snapshots'].get.return_value = BackupResponse(
'789', 'creating', '---', 0)
mocks['volume_snapshots'].get = BackupResponse(
'789', 'available', 'creating', 2)
call_hooks(operation, checkpoint, resource, self.cntxt, {})
@mock.patch('karbor.services.protection.clients.cinder.create')
def test_protect_fail_backup(self, mock_cinder_create):
resource = Resource(
id="123",
type=constants.VOLUME_RESOURCE_TYPE,
name="test",
)
checkpoint = self._get_checkpoint()
operation = self.plugin.get_protect_operation(resource)
mock_cinder_create.return_value = self.cinder_client
with mock.patch.multiple(
self.cinder_client,
volumes=mock.DEFAULT,
backups=mock.DEFAULT,
volume_snapshots=mock.DEFAULT,
) as mocks:
mocks['volumes'].get.return_value = mock.Mock()
mocks['volumes'].get.return_value.status = 'available'
mocks['backups'].backups.create = BackupResponse(
'456', 'creating', '---', 0)
mocks['backups'].backups.get = BackupResponse(
'456', 'error', 'creating', 2)
mocks['volume_snapshots'].get.return_value = BackupResponse(
'789', 'creating', '---', 0)
mocks['volume_snapshots'].get = BackupResponse(
'789', 'available', 'creating', 2)
self.assertRaises(
exception.CreateResourceFailed,
call_hooks,
operation,
checkpoint,
resource,
self.cntxt,
{}
)
@mock.patch('karbor.services.protection.clients.cinder.create')
def test_protect_fail_snapshot(self, mock_cinder_create):
resource = Resource(
id="123",
type=constants.VOLUME_RESOURCE_TYPE,
name="test",
)
checkpoint = self._get_checkpoint()
operation = self.plugin.get_protect_operation(resource)
mock_cinder_create.return_value = self.cinder_client
with mock.patch.multiple(
self.cinder_client,
volumes=mock.DEFAULT,
backups=mock.DEFAULT,
volume_snapshots=mock.DEFAULT,
) as mocks:
mocks['volumes'].get.return_value = mock.Mock()
mocks['volumes'].get.return_value.status = 'available'
mocks['backups'].backups.create = BackupResponse(
'456', 'creating', '---', 0)
mocks['backups'].backups.get = BackupResponse(
'456', 'available', 'creating', 2)
mocks['volume_snapshots'].get.return_value = BackupResponse(
'789', 'creating', '---', 0)
mocks['volume_snapshots'].get = BackupResponse(
'789', 'error', 'creating', 2)
self.assertRaises(
exception.CreateResourceFailed,
call_hooks,
operation,
checkpoint,
resource,
self.cntxt,
{}
)
@mock.patch('karbor.services.protection.clients.cinder.create')
def test_protect_fail_volume(self, mock_cinder_create):
resource = Resource(
id="123",
type=constants.VOLUME_RESOURCE_TYPE,
name="test",
)
checkpoint = self._get_checkpoint()
operation = self.plugin.get_protect_operation(resource)
mock_cinder_create.return_value = self.cinder_client
with mock.patch.multiple(
self.cinder_client,
volumes=mock.DEFAULT,
backups=mock.DEFAULT,
volume_snapshots=mock.DEFAULT,
) as mocks:
mocks['volumes'].get.return_value = mock.Mock()
mocks['volumes'].get.return_value.status = 'error'
mocks['backups'].backups.create = BackupResponse(
'456', 'creating', '---', 0)
mocks['backups'].backups.get = BackupResponse(
'456', 'error', 'creating', 2)
mocks['volume_snapshots'].get.return_value = BackupResponse(
'789', 'creating', '---', 0)
mocks['volume_snapshots'].get = BackupResponse(
'789', 'available', 'creating', 2)
self.assertRaises(
exception.CreateResourceFailed,
call_hooks,
operation,
checkpoint,
resource,
self.cntxt,
{}
)
@mock.patch('karbor.services.protection.clients.cinder.create')
def test_delete_succeed(self, mock_cinder_create):
resource = Resource(
id="123",
type=constants.VOLUME_RESOURCE_TYPE,
name="test",
)
checkpoint = self._get_checkpoint()
section = checkpoint.get_resource_bank_section()
section.update_object('metadata', {
'backup_id': '456',
})
operation = self.plugin.get_delete_operation(resource)
mock_cinder_create.return_value = self.cinder_client
with mock.patch.object(self.cinder_client, 'backups') as backups:
backups.delete = BackupResponse('456', 'deleting', '---', 0)
backups.get = BackupResponse('456', 'not-found', 'deleting', 2)
call_hooks(operation, checkpoint, resource, self.cntxt, {})
@mock.patch('karbor.services.protection.clients.cinder.create')
def test_delete_fail(self, mock_cinder_create):
resource = Resource(
id="123",
type=constants.VOLUME_RESOURCE_TYPE,
name="test",
)
checkpoint = self._get_checkpoint()
section = checkpoint.get_resource_bank_section()
section.update_object('metadata', {
'backup_id': '456',
})
operation = self.plugin.get_delete_operation(resource)
mock_cinder_create.return_value = self.cinder_client
with mock.patch.object(self.cinder_client, 'backups') as backups:
backups.delete = BackupResponse('456', 'deleting', '---', 0)
backups.get = BackupResponse('456', 'error', 'deleting', 2)
self.assertRaises(
exception.DeleteResourceFailed,
call_hooks,
operation,
checkpoint,
resource,
self.cntxt,
{}
)
@mock.patch('karbor.services.protection.clients.cinder.create')
@mock.patch('karbor.services.protection.protection_plugins.utils.'
'update_resource_restore_result')
def test_restore_succeed(self, mock_update_restore, mock_cinder_create):
resource = Resource(
id="123",
type=constants.VOLUME_RESOURCE_TYPE,
name="fake",
)
checkpoint = self._get_checkpoint()
section = checkpoint.get_resource_bank_section()
section.update_object('metadata', {
'backup_id': '456',
})
parameters = {
"restore_name": "karbor restore volume",
"restore_description": "karbor restore",
}
operation = self.plugin.get_restore_operation(resource)
mock_cinder_create.return_value = self.cinder_client
with mock.patch.multiple(
self.cinder_client,
volumes=mock.DEFAULT,
restores=mock.DEFAULT,
) as mocks:
volume_id = 456
mocks['volumes'].get.return_value = mock.Mock()
mocks['volumes'].get.return_value.status = 'available'
mocks['restores'].restore = RestoreResponse(volume_id)
call_hooks(operation, checkpoint, resource, self.cntxt, parameters,
**{'restore': None, 'new_resources': {}})
mocks['volumes'].update.assert_called_with(
volume_id,
**{'name': parameters['restore_name'],
'description': parameters['restore_description']})
mock_update_restore.assert_called_with(
None, resource.type, volume_id, 'available')
@mock.patch('karbor.services.protection.clients.cinder.create')
@mock.patch('karbor.services.protection.protection_plugins.utils.'
'update_resource_verify_result')
def test_verify_succeed(self, mock_update_verify, mock_cinder_create):
resource = Resource(
id="123",
type=constants.VOLUME_RESOURCE_TYPE,
name="fake",
)
checkpoint = self._get_checkpoint()
section = checkpoint.get_resource_bank_section()
section.update_object('metadata', {
'backup_id': '456',
})
parameters = {}
operation = self.plugin.get_verify_operation(resource)
mock_cinder_create.return_value = self.cinder_client
with mock.patch.multiple(
self.cinder_client,
backups=mock.DEFAULT,
volumes=mock.DEFAULT,
) as mocks:
volume_id = '123'
mocks['backups'].get.return_value = mock.Mock()
mocks['backups'].get.return_value.status = 'available'
call_hooks(operation, checkpoint, resource, self.cntxt, parameters,
**{'verify': None, 'new_resources': {}})
mock_update_verify.assert_called_with(
None, resource.type, volume_id, 'available')
@mock.patch('karbor.services.protection.clients.cinder.create')
def test_restore_fail_volume_0(self, mock_cinder_create):
resource = Resource(
id="123",
type=constants.VOLUME_RESOURCE_TYPE,
name="fake",
)
checkpoint = self._get_checkpoint()
section = checkpoint.get_resource_bank_section()
section.update_object('metadata', {
'backup_id': '456',
})
operation = self.plugin.get_restore_operation(resource)
mock_cinder_create.return_value = self.cinder_client
with mock.patch.multiple(
self.cinder_client,
restores=mock.DEFAULT,
) as mocks:
mocks['restores'].restore = RestoreResponse(0, True)
self.assertRaises(
exception.KarborException, call_hooks,
operation, checkpoint, resource, self.cntxt,
{}, **{'restore': None})
@mock.patch('karbor.services.protection.clients.cinder.create')
@mock.patch('karbor.services.protection.protection_plugins.utils.'
'update_resource_restore_result')
def test_restore_fail_volume_1(self, mock_update_restore,
mock_cinder_create):
resource = Resource(
id="123",
type=constants.VOLUME_RESOURCE_TYPE,
name="fake",
)
checkpoint = self._get_checkpoint()
section = checkpoint.get_resource_bank_section()
section.update_object('metadata', {
'backup_id': '456',
})
operation = self.plugin.get_restore_operation(resource)
mock_cinder_create.return_value = self.cinder_client
with mock.patch.multiple(
self.cinder_client,
volumes=mock.DEFAULT,
restores=mock.DEFAULT,
) as mocks:
volume_id = 456
mocks['volumes'].get.return_value = mock.Mock()
mocks['volumes'].get.return_value.status = 'error'
mocks['restores'].restore = RestoreResponse(volume_id)
self.assertRaises(
exception.RestoreResourceFailed, call_hooks,
operation, checkpoint, resource, self.cntxt,
{}, **{'restore': None})
mock_update_restore.assert_called_with(
None, resource.type, volume_id,
constants.RESOURCE_STATUS_ERROR, 'Error creating volume')
def test_get_supported_resources_types(self):
types = self.plugin.get_supported_resources_types()
self.assertEqual([constants.VOLUME_RESOURCE_TYPE], types)
| apache-2.0 | -7,685,870,856,915,176,000 | 37.925214 | 79 | 0.582697 | false |
DTOcean/dtocean-core | dtocean_core/utils/moorings.py | 1 | 21601 | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2018 Mathew Topper
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pandas as pd
def get_component_dict(component_type,
data_table,
rope_data=None,
sand_data=None,
soft_data=None,
check_keys=None):
valid_components = ["cable",
"chain",
"drag anchor",
"forerunner assembly",
"pile",
"rope",
"shackle",
"swivel"]
if component_type not in valid_components:
valid_str = ", ".join(valid_components)
errStr = ("Argument system_type must be one of '{}' not "
"'{}'").format(valid_str, component_type)
raise ValueError(errStr)
if component_type in ["drag anchor", "pile"]:
system_type = "foundation system"
else:
system_type = "mooring system"
compdict = {}
if check_keys is None: check_keys = []
key_ids = data_table["Key Identifier"]
for key_id in key_ids:
# Check for duplicates
if key_id in check_keys:
errStr = "Key identifier {} has been duplicated".format(key_id)
raise KeyError(errStr)
# Start building the value dictionary
data_dict = {"item1": system_type,
"item2": component_type}
record = data_table.loc[data_table['Key Identifier'] == key_id]
# Build shared items
data_dict["item3"] = record.iloc[0]["Name"]
# Build component specific items
if component_type in ["chain", "forerunner assembly"]:
data_dict["item5"] = [record.iloc[0]["Min Break Load"],
record.iloc[0]["Axial Stiffness"]]
data_dict["item6"] = [record.iloc[0]["Diameter"],
record.iloc[0]["Connecting Length"]]
data_dict["item7"] = [record.iloc[0]["Dry Mass per Unit Length"],
record.iloc[0]["Wet Mass per Unit Length"]]
data_dict["item11"] = record.iloc[0]["Cost per Unit Length"]
elif component_type in ["shackle", "swivel"]:
data_dict["item5"] = [record.iloc[0]["Min Break Load"],
record.iloc[0]["Axial Stiffness"]]
data_dict["item6"] = [record.iloc[0]["Nominal Diameter"],
record.iloc[0]["Connecting Length"]]
data_dict["item7"] = [record.iloc[0]["Dry Unit Mass"],
record.iloc[0]["Wet Unit Mass"]]
data_dict["item11"] = record.iloc[0]["Cost"]
elif component_type == "pile":
data_dict["item5"] = [record.iloc[0]["Yield Stress"],
record.iloc[0]["Youngs Modulus"]]
data_dict["item6"] = [record.iloc[0]["Diameter"],
record.iloc[0]["Wall Thickness"]]
data_dict["item7"] = [record.iloc[0]["Dry Mass per Unit Length"],
record.iloc[0]["Wet Mass per Unit Length"]]
data_dict["item11"] = record.iloc[0]["Cost per Unit Length"]
elif component_type == "drag anchor":
if sand_data is None or soft_data is None:
errStr = ("Arguments 'sand_data' and 'soft_data' must be "
"supplied if component_type is 'drag anchor'")
raise ValueError(errStr)
data_dict["item5"] = [record.iloc[0]["Min Break Load"],
record.iloc[0]["Axial Stiffness"]]
data_dict["item6"] = [record.iloc[0]["Width"],
record.iloc[0]["Depth"],
record.iloc[0]["Height"],
record.iloc[0]["Connecting Size"]]
data_dict["item7"] = [record.iloc[0]["Dry Unit Mass"],
record.iloc[0]["Wet Unit Mass"]]
# Add anchor coefficients
sand_coeffs = sand_data.loc[sand_data['Key Identifier'] == key_id]
            soft_coeffs = soft_data.loc[soft_data['Key Identifier'] == key_id]
sand_df = sand_coeffs[['Holding Capacity Coefficient 1',
'Holding Capacity Coefficient 2',
'Penetration Coefficient 1',
'Penetration Coefficient 2']]
soft_df = soft_coeffs[['Holding Capacity Coefficient 1',
'Holding Capacity Coefficient 2',
'Penetration Coefficient 1',
'Penetration Coefficient 2']]
data_dict["item9"] = {'sand': sand_df.values.tolist()[0],
'soft': soft_df.values.tolist()[0]}
data_dict["item11"] = record.iloc[0]["Cost"]
elif component_type == "rope":
# Build rope axial stiffness list
if rope_data is None:
errStr = ("Argument 'rope_data' must be supplied if "
"component_type is 'rope'")
raise ValueError(errStr)
rope_array = rope_data[key_id]
data_dict["item4"] = [record.iloc[0]["Material"]]
data_dict["item5"] = [record.iloc[0]["Min Break Load"],
rope_array.tolist()]
data_dict["item6"] = [record.iloc[0]["Diameter"]]
data_dict["item7"] = [record.iloc[0]["Dry Mass per Unit Length"],
record.iloc[0]["Wet Mass per Unit Length"]]
data_dict["item11"] = record.iloc[0]["Cost per Unit Length"]
elif component_type == "cable":
data_dict["item5"] = [record.iloc[0]["Min Break Load"],
record.iloc[0]["Min Bend Radius"]]
data_dict["item6"] = [record.iloc[0]["Diameter"]]
data_dict["item7"] = [record.iloc[0]["Dry Mass per Unit Length"],
record.iloc[0]["Wet Mass per Unit Length"]]
data_dict["item11"] = record.iloc[0]["Cost per Unit Length"]
else:
errStr = "RUN FOR THE HILLS!!!!1!!"
raise RuntimeError(errStr)
compdict[key_id] = data_dict
check_keys.append(key_id)
return compdict
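# Illustrative helper (a sketch, not used by DTOcean itself): builds a minimal
# chain table carrying the columns get_component_dict reads for chains, then
# converts it. All values below are made up.
def _example_chain_compdict():
    chain_table = pd.DataFrame({
        "Key Identifier": ["chain001"],
        "Name": ["studlink chain"],
        "Min Break Load": [1.0e6],
        "Axial Stiffness": [1.0e8],
        "Diameter": [0.05],
        "Connecting Length": [0.2],
        "Dry Mass per Unit Length": [50.0],
        "Wet Mass per Unit Length": [43.0],
        "Cost per Unit Length": [100.0]})
    return get_component_dict("chain", chain_table)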
def get_moorings_tables(compdict):
cable_df = pd.DataFrame(columns=[
'Key Identifier',
'Name',
'Min Break Load',
'Min Bend Radius',
'Diameter',
'Dry Mass per Unit Length',
'Wet Mass per Unit Length',
'Cost per Unit Length',
'Environmental Impact'])
chain_df = pd.DataFrame(columns=[
'Key Identifier',
'Name',
'Min Break Load',
'Axial Stiffness',
'Diameter',
'Connecting Length',
'Dry Mass per Unit Length',
'Wet Mass per Unit Length',
'Cost per Unit Length',
'Environmental Impact'])
forerunner_df = pd.DataFrame(columns=[
'Key Identifier',
'Name',
'Min Break Load',
'Axial Stiffness',
'Diameter',
'Connecting Length',
'Dry Mass per Unit Length',
'Wet Mass per Unit Length',
'Cost per Unit Length',
'Environmental Impact'])
shackle_df = pd.DataFrame(columns=[
'Key Identifier',
'Name',
'Min Break Load',
'Axial Stiffness',
'Width',
'Depth',
'Height',
'Nominal Diameter',
'Connecting Length',
'Dry Unit Mass',
'Wet Unit Mass',
'Cost',
'Environmental Impact'])
swivel_df = pd.DataFrame(columns=[
'Key Identifier',
'Name',
'Min Break Load',
'Axial Stiffness',
'Width',
'Depth',
'Height',
'Nominal Diameter',
'Connecting Length',
'Dry Unit Mass',
'Wet Unit Mass',
'Cost',
'Environmental Impact'])
pile_df = pd.DataFrame(columns=[
'Key Identifier',
'Name',
'Yield Stress',
'Youngs Modulus',
'Diameter',
'Wall Thickness',
'Dry Mass per Unit Length',
'Wet Mass per Unit Length',
'Cost per Unit Length',
'Environmental Impact'])
anchor_df = pd.DataFrame(columns=[
'Key Identifier',
'Name',
'Min Break Load',
'Axial Stiffness',
'Width',
'Depth',
'Height',
'Connecting Size',
'Dry Unit Mass',
'Wet Unit Mass',
'Cost',
'Environmental Impact'])
anchor_sand_df = pd.DataFrame(columns=[
'Key Identifier',
'Holding Capacity Coefficient 1',
'Holding Capacity Coefficient 2',
'Penetration Coefficient 1',
'Penetration Coefficient 2'])
anchor_soft_df = pd.DataFrame(columns=[
'Key Identifier',
'Holding Capacity Coefficient 1',
'Holding Capacity Coefficient 2',
'Penetration Coefficient 1',
'Penetration Coefficient 2'])
rope_df = pd.DataFrame(columns=[
'Key Identifier',
'Name',
'Material',
'Min Break Load',
'Diameter',
'Dry Mass per Unit Length',
'Wet Mass per Unit Length',
'Cost per Unit Length',
'Environmental Impact'])
rope_dict = {}
for key_id, data_dict in compdict.iteritems():
values = []
columns = []
# Get component type
component_type = data_dict["item2"]
# Build shared items
columns.append("Key Identifier")
values.append(key_id)
columns.append("Name")
values.append(data_dict["item3"])
# Build component specific items
if component_type in ["chain", "forerunner assembly"]:
columns.append("Min Break Load")
values.append(data_dict["item5"][0])
columns.append("Axial Stiffness")
values.append(data_dict["item5"][1])
columns.append("Diameter")
values.append(data_dict["item6"][0])
columns.append("Connecting Length")
values.append(data_dict["item6"][1])
columns.append("Dry Mass per Unit Length")
values.append(data_dict["item7"][0])
columns.append("Wet Mass per Unit Length")
values.append(data_dict["item7"][1])
columns.append("Cost per Unit Length")
values.append(data_dict["item11"])
record = pd.Series(values, index=columns)
if component_type == "chain":
chain_df = chain_df.append(record, ignore_index=True)
else:
forerunner_df = forerunner_df.append(record, ignore_index=True)
elif component_type in ["shackle", "swivel"]:
columns.append("Min Break Load")
values.append(data_dict["item5"][0])
columns.append("Axial Stiffness")
values.append(data_dict["item5"][1])
columns.append("Width")
values.append(data_dict["item6"][0])
columns.append("Depth")
values.append(data_dict["item6"][0])
columns.append("Height")
values.append(data_dict["item6"][0])
columns.append("Nominal Diameter")
values.append(data_dict["item6"][0])
columns.append("Connecting Length")
values.append(data_dict["item6"][1])
columns.append("Dry Unit Mass")
values.append(data_dict["item7"][0])
columns.append("Wet Unit Mass")
values.append(data_dict["item7"][1])
columns.append("Cost")
values.append(data_dict["item11"])
record = pd.Series(values, index=columns)
if component_type == "shackle":
shackle_df = shackle_df.append(record, ignore_index=True)
else:
swivel_df = swivel_df.append(record, ignore_index=True)
elif component_type == "pile":
columns.append("Yield Stress")
values.append(data_dict["item5"][0])
columns.append("Youngs Modulus")
values.append(data_dict["item5"][1])
columns.append("Diameter")
values.append(data_dict["item6"][0])
columns.append("Wall Thickness")
values.append(data_dict["item6"][1])
columns.append("Dry Mass per Unit Length")
values.append(data_dict["item7"][0])
columns.append("Wet Mass per Unit Length")
values.append(data_dict["item7"][1])
columns.append("Cost per Unit Length")
values.append(data_dict["item11"])
record = pd.Series(values, index=columns)
pile_df = pile_df.append(record, ignore_index=True)
elif component_type == "drag anchor":
columns.append("Min Break Load")
values.append(data_dict["item5"][0])
columns.append("Axial Stiffness")
values.append(data_dict["item5"][1])
columns.append("Width")
values.append(data_dict["item6"][0])
columns.append("Depth")
values.append(data_dict["item6"][1])
columns.append("Height")
values.append(data_dict["item6"][2])
columns.append("Connecting Size")
values.append(data_dict["item6"][3])
columns.append("Dry Unit Mass")
values.append(data_dict["item7"][0])
columns.append("Wet Unit Mass")
values.append(data_dict["item7"][1])
columns.append("Cost")
values.append(data_dict["item11"])
record = pd.Series(values, index=columns)
anchor_df = anchor_df.append(record, ignore_index=True)
# Anchor coefficients
coef_cols = ['Key Identifier',
'Holding Capacity Coefficient 1',
'Holding Capacity Coefficient 2',
'Penetration Coefficient 1',
'Penetration Coefficient 2']
sand_list = [key_id]
soft_list = [key_id]
sand_list.extend(data_dict["item9"]["sand"])
soft_list.extend(data_dict["item9"]["soft"])
# Fix error in data
if len(sand_list) == 4: sand_list.append(0.)
if len(soft_list) == 4: soft_list.append(0.)
sand_record = pd.Series(sand_list, index=coef_cols)
soft_record = pd.Series(soft_list, index=coef_cols)
anchor_sand_df = anchor_sand_df.append(sand_record,
ignore_index=True)
            anchor_soft_df = anchor_soft_df.append(soft_record,
                                                   ignore_index=True)
elif component_type == "rope":
columns.append("Material")
values.append(data_dict["item4"][0])
columns.append("Min Break Load")
values.append(data_dict["item5"][0])
columns.append("Diameter")
values.append(data_dict["item6"][0])
columns.append("Dry Mass per Unit Length")
values.append(data_dict["item7"][0])
columns.append("Wet Mass per Unit Length")
values.append(data_dict["item7"][1])
columns.append("Cost per Unit Length")
values.append(data_dict["item11"])
record = pd.Series(values, index=columns)
rope_df = rope_df.append(record, ignore_index=True)
# Collect the rope axial stress data
rope_dict[key_id] = data_dict["item5"][1]
elif component_type == "cable":
columns.append("Min Break Load")
values.append(data_dict["item5"][0])
columns.append("Min Bend Radius")
values.append(data_dict["item5"][1])
columns.append("Diameter")
values.append(data_dict["item6"][0])
columns.append("Dry Mass per Unit Length")
values.append(data_dict["item7"][0])
columns.append("Wet Mass per Unit Length")
values.append(data_dict["item7"][1])
columns.append("Cost per Unit Length")
values.append(data_dict["item11"])
record = pd.Series(values, index=columns)
cable_df = cable_df.append(record, ignore_index=True)
else:
errStr = ("The blue meanies are coming! Or, there was an unknown "
"component type: {}").format(component_type)
raise RuntimeError(errStr)
tables = {"cable": cable_df,
"chain": chain_df,
"forerunner assembly": forerunner_df,
"shackle": shackle_df,
"swivel": swivel_df,
"pile": pile_df,
"drag anchor": anchor_df,
"drag anchor sand": anchor_sand_df,
"drag anchor soft": anchor_soft_df,
"rope": rope_df,
"rope axial stiffness": rope_dict}
return tables
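# Illustrative round trip (a sketch, not used by DTOcean itself): a compdict
# produced by get_component_dict can be turned back into per-type DataFrames.
def _example_moorings_tables():
    compdict = _example_chain_compdict()
    tables = get_moorings_tables(compdict)
    return tables["chain"]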
| gpl-3.0 | -1,758,551,616,762,855,000 | 39.679849 | 81 | 0.432711 | false |
Letractively/portable-movie-organizer | movie-organizer/MovieDataEditor.py | 1 | 9673 | #
# portable-movie-organizer
#
# Copyright (c) 2010 Ali Aafee
#
# This file is part of portable-movie-organizer.
#
# portable-movie-organizer is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# portable-movie-organizer is distributed in the hope that it will
# be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with portable-movie-organizer.
# If not, see <http://www.gnu.org/licenses/>.
import wx
import FieldDataList
import ImdbAPI
import os.path
import thread
dirName = os.path.dirname(os.path.abspath(__file__))
dirName, fileName = os.path.split(dirName)
resDir = os.path.join(dirName, 'res')
class MovieDataEditor(wx.Dialog):
def __init__(self, parent, postersPath, catchPath, title='Edit Movie Metadata'):
self.title = title
self.postersPath = postersPath
self.catchPath = catchPath
self._init_ctrls(parent)
def _init_ctrls(self, parent):
wx.Dialog.__init__(self, name='MovieEditor', parent=parent,
style=wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER,
title=self.title, size=wx.Size(640,480))
self.fieldWindow = wx.ScrolledWindow(self, size=wx.Size(200,200), style=wx.HSCROLL)
self.fieldWindow.SetScrollbars(0,10,0,65)
gridSizer = wx.FlexGridSizer(7,4,10,10)
gridSizer.AddGrowableCol(1,1)
gridSizer.AddGrowableCol(3,1)
labelWidth = -1
gridSizer.AddSpacer(5)
gridSizer.AddSpacer(5)
gridSizer.AddSpacer(5)
gridSizer.AddSpacer(5)
self.lblTitle = wx.StaticText(self.fieldWindow, label='Title', size=wx.Size(labelWidth,-1))
self.txtTitle = wx.TextCtrl(self.fieldWindow)
gridSizer.Add(self.lblTitle, 1, wx.ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL)
gridSizer.Add(self.txtTitle, 1, wx.EXPAND)
self.lblSort = wx.StaticText(self.fieldWindow, label='Sort', size=wx.Size(labelWidth,-1))
self.txtSort = wx.TextCtrl(self.fieldWindow)
gridSizer.Add(self.lblSort, 1, wx.ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL)
gridSizer.Add(self.txtSort, 1, wx.EXPAND)
self.lblImage = wx.StaticText(self.fieldWindow, label='Poster', size=wx.Size(labelWidth,-1))
self.txtImage = wx.TextCtrl(self.fieldWindow)
gridSizer.Add(self.lblImage, 1, wx.ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL)
gridSizer.Add(self.txtImage, 1, wx.EXPAND)
self.lblReleased = wx.StaticText(self.fieldWindow, label='Released', size=wx.Size(labelWidth,-1))
self.txtReleased = wx.TextCtrl(self.fieldWindow)
gridSizer.Add(self.lblReleased, 1, wx.ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL)
gridSizer.Add(self.txtReleased, 1, wx.EXPAND)
self.lblRuntime = wx.StaticText(self.fieldWindow, label='Runtime', size=wx.Size(labelWidth,-1))
self.txtRuntime = wx.TextCtrl(self.fieldWindow)
gridSizer.Add(self.lblRuntime, 1, wx.ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL)
gridSizer.Add(self.txtRuntime, 1, wx.EXPAND)
self.lblRated = wx.StaticText(self.fieldWindow, label='Rated', size=wx.Size(labelWidth,-1))
self.txtRated = wx.TextCtrl(self.fieldWindow)
gridSizer.Add(self.lblRated, 1, wx.ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL)
gridSizer.Add(self.txtRated, 1, wx.EXPAND)
self.lblSummary = wx.StaticText(self.fieldWindow, label='Summary', size=wx.Size(labelWidth,-1))
self.txtSummary = wx.TextCtrl(self.fieldWindow, style=wx.TE_MULTILINE, size=wx.Size(-1,80))
gridSizer.Add(self.lblSummary, 1, wx.ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL)
gridSizer.Add(self.txtSummary, 1, wx.EXPAND)
self.lblGenres = wx.StaticText(self.fieldWindow, label='Genres', size=wx.Size(labelWidth,-1))
self.lstGenres = FieldDataList.FieldDataList(self.fieldWindow, size=wx.Size(-1,100))
gridSizer.Add(self.lblGenres, 1, wx.ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL)
gridSizer.Add(self.lstGenres, 1, wx.EXPAND)
self.lblActors = wx.StaticText(self.fieldWindow, label='Actors', size=wx.Size(labelWidth,-1))
self.lstActors = FieldDataList.FieldDataList(self.fieldWindow, size=wx.Size(-1,100))
gridSizer.Add(self.lblActors, 1, wx.ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL)
gridSizer.Add(self.lstActors, 1, wx.EXPAND)
self.lblDirectors = wx.StaticText(self.fieldWindow, label='Directors', size=wx.Size(labelWidth,-1))
self.lstDirectors = FieldDataList.FieldDataList(self.fieldWindow)
gridSizer.Add(self.lblDirectors, 1, wx.ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL)
gridSizer.Add(self.lstDirectors, 1, wx.EXPAND)
self.lblFiles = wx.StaticText(self.fieldWindow, label='Files', size=wx.Size(labelWidth,-1))
self.lstFiles = FieldDataList.FieldDataList(self.fieldWindow)
gridSizer.Add(self.lblFiles, 1, wx.ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL)
gridSizer.Add(self.lstFiles, 1, wx.EXPAND)
gridSizer.Add(wx.StaticText(self.fieldWindow, label=''))
self.fieldWindow.SetSizer(gridSizer)
self.fieldWindow.Layout()
self.btnSizer = self.CreateButtonSizer(wx.CANCEL)
self.btnSave = wx.Button(self, label="Save")
self.btnSave.Bind(wx.EVT_BUTTON, self.OnSave)
self.btnSizer.Add(self.btnSave)
self.mainTb = self._create_main_tb(self)
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add(self.mainTb, 0, wx.ALL | wx.ALIGN_LEFT | wx.EXPAND, 0 )
vbox.Add(self.fieldWindow, 1, wx.EXPAND)
vbox.Add(wx.StaticText(self,label=""))
vbox.Add(self.btnSizer, 0, wx.ALIGN_CENTER)
self.SetSizer(vbox)
self.Layout()
def _create_main_tb(self, parent):
tb = wx.ToolBar(parent, style=wx.TB_TEXT|wx.TB_NODIVIDER|wx.TB_HORIZONTAL|wx.TB_FLAT)
tb.SetToolBitmapSize((21, 21))
self.tb_search = wx.NewId()
tb.DoAddTool(
bitmap=wx.Bitmap(os.path.join(resDir,'web.png'), wx.BITMAP_TYPE_PNG),
#bitmap=wx.ArtProvider.GetBitmap(wx.ART_FIND),
bmpDisabled=wx.NullBitmap,
id=self.tb_search,
kind=wx.ITEM_NORMAL,
label='',
longHelp='',
shortHelp='Get Metadata from IMDB')
self.Bind(wx.EVT_TOOL, self.OnGetMetadata,
id=self.tb_search)
self.statusText = wx.StaticText(tb, label="")
tb.AddControl(self.statusText)
tb.Realize()
return tb
def SetData(self, data):
self.txtTitle.SetValue(data['title'])
self.txtSort.SetValue(data['sort'])
self.txtImage.SetValue(data['image'])
self.txtReleased.SetValue(data['released'])
self.txtRuntime.SetValue(data['runtime'])
self.txtRated.SetValue(data['rated'])
self.txtSummary.SetValue(data['summary'])
self.lstGenres.DeleteAllItems()
self.lstGenres.AddValues(data['genres'])
self.lstActors.DeleteAllItems()
self.lstActors.AddValues(data['actors'])
self.lstDirectors.DeleteAllItems()
self.lstDirectors.AddValues(data['directors'])
self.lstFiles.DeleteAllItems()
self.lstFiles.AddValues(data['files'])
def GetData(self):
data = {}
data['title'] = self.txtTitle.GetValue()
data['sort'] = self.txtSort.GetValue()
data['image'] = self.txtImage.GetValue()
data['released'] = self.txtReleased.GetValue()
data['runtime'] = self.txtRuntime.GetValue()
data['rated'] = self.txtRated.GetValue()
data['summary'] = self.txtSummary.GetValue()
data['genres'] = self.lstGenres.GetValues()
data['actors'] = self.lstActors.GetValues()
data['directors'] = self.lstDirectors.GetValues()
data['files'] = self.lstFiles.GetValues()
return data
def OnSave(self, event):
if self.txtTitle.GetValue() == '':
msg = wx.MessageDialog(self,
'Movie metadata cannot be saved without a Title. Cannot continue',
'Movie Title Missing', wx.OK|wx.ICON_INFORMATION)
msg.ShowModal()
msg.Destroy()
else:
self.EndModal(wx.ID_OK)
def OnGetMetadata(self, event):
title = self.txtTitle.GetValue()
year = self.txtReleased.GetValue()
if title=='':
dlg = wx.MessageDialog(self,
"Enter the title of the movie. Optionally enter the year(approximate).",
"Get metadata from IMDB",
wx.OK|wx.ICON_INFORMATION)
result = dlg.ShowModal()
dlg.Destroy()
return
self.mainTb.EnableTool(self.tb_search, False)
self.statusText.SetLabel("Getting metadata from IMDB...")
thread.start_new_thread(self._get_metadata, (title, year, self.postersPath, self.catchPath))
def _get_metadata(self, title, year, postersPath, catchPath):
try:
metadata = ImdbAPI.GetMetadata(title, year, postersPath, catchPath)
wx.CallAfter(self._done_get_metadata, metadata)
except wx._core.PyDeadObjectError, e:
print "dialog closed before thread could complete"
def _done_get_metadata(self, metadata):
self.statusText.SetLabel("")
if metadata != None:
print "Success"
self.txtTitle.SetValue(metadata['title'])
self.txtImage.SetValue(metadata['image'])
self.txtReleased.SetValue(metadata['released'])
self.txtRuntime.SetValue(metadata['runtime'])
self.txtRated.SetValue(metadata['rated'])
self.txtSummary.SetValue(metadata['summary'])
print "Genres"
self.lstGenres.DeleteAllItems()
self.lstGenres.AddValuesSimple(metadata['genres'])
print "Actors"
self.lstActors.DeleteAllItems()
self.lstActors.AddValuesSimple(metadata['actors'])
print "Directors"
self.lstDirectors.DeleteAllItems()
self.lstDirectors.AddValuesSimple(metadata['directors'])
else:
dlg = wx.MessageDialog(self,
"No results were found for the given title and year. (this may be due to a network error)",
"Get metadata from IMDB",
wx.OK|wx.ICON_INFORMATION)
result = dlg.ShowModal()
dlg.Destroy()
self.mainTb.EnableTool(self.tb_search, True)
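# Illustrative usage (a sketch; the parent frame and directory paths are
# assumptions):
#
#     dlg = MovieDataEditor(parent, postersPath='posters', catchPath='cache')
#     dlg.SetData(movie_data)         # dict shaped as returned by GetData()
#     if dlg.ShowModal() == wx.ID_OK:
#         movie_data = dlg.GetData()  # persist with caller-side code
#     dlg.Destroy()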
| gpl-3.0 | -6,006,860,567,625,273,000 | 33.546429 | 101 | 0.721493 | false |
IsCoolEntertainment/debpkg_libcloud | libcloud/compute/drivers/openstack.py | 1 | 57541 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
OpenStack driver
"""
try:
import simplejson as json
except ImportError:
import json
import warnings
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import b
from libcloud.utils.py3 import next
from libcloud.utils.py3 import urlparse
import base64
from xml.etree import ElementTree as ET
from libcloud.common.openstack import OpenStackBaseConnection
from libcloud.common.openstack import OpenStackDriverMixin
from libcloud.common.types import MalformedResponseError
from libcloud.compute.types import NodeState, Provider
from libcloud.compute.base import NodeSize, NodeImage
from libcloud.compute.base import NodeDriver, Node, NodeLocation
from libcloud.pricing import get_size_price
from libcloud.common.base import Response
from libcloud.utils.xml import findall
__all__ = [
'OpenStack_1_0_Response',
'OpenStack_1_0_Connection',
'OpenStack_1_0_NodeDriver',
'OpenStack_1_0_SharedIpGroup',
'OpenStack_1_0_NodeIpAddresses',
'OpenStack_1_1_Response',
'OpenStack_1_1_Connection',
'OpenStack_1_1_NodeDriver',
'OpenStackNodeDriver'
]
ATOM_NAMESPACE = "http://www.w3.org/2005/Atom"
DEFAULT_API_VERSION = '1.1'
class OpenStackResponse(Response):
node_driver = None
def success(self):
i = int(self.status)
return i >= 200 and i <= 299
def has_content_type(self, content_type):
content_type_value = self.headers.get('content-type') or ''
content_type_value = content_type_value.lower()
return content_type_value.find(content_type.lower()) > -1
def parse_body(self):
if self.status == httplib.NO_CONTENT or not self.body:
return None
if self.has_content_type('application/xml'):
try:
return ET.XML(self.body)
            except Exception:
raise MalformedResponseError(
'Failed to parse XML',
body=self.body,
driver=self.node_driver)
elif self.has_content_type('application/json'):
try:
return json.loads(self.body)
            except Exception:
raise MalformedResponseError(
'Failed to parse JSON',
body=self.body,
driver=self.node_driver)
else:
return self.body
def parse_error(self):
text = None
body = self.parse_body()
if self.has_content_type('application/xml'):
text = "; ".join([err.text or '' for err in body.getiterator()
if err.text])
elif self.has_content_type('application/json'):
values = body.values()
if len(values) > 0 and 'message' in values[0]:
text = ';'.join([fault_data['message'] for fault_data
in values])
else:
text = body
else:
# while we hope a response is always one of xml or json, we have
            # seen html or text in the past, it's not clear we can really do
# something to make it more readable here, so we will just pass
# it along as the whole response body in the text variable.
text = body
return '%s %s %s' % (self.status, self.error, text)
class OpenStackComputeConnection(OpenStackBaseConnection):
# default config for http://devstack.org/
service_type = 'compute'
service_name = 'nova'
service_region = 'RegionOne'
def request(self, action, params=None, data='', headers=None,
method='GET'):
if not headers:
headers = {}
if not params:
params = {}
if method in ("POST", "PUT"):
headers = {'Content-Type': self.default_content_type}
if method == "GET":
self._add_cache_busting_to_params(params)
return super(OpenStackComputeConnection, self).request(
action=action,
params=params, data=data,
method=method, headers=headers)
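# Deployments other than devstack usually expose different catalog entries;
# an illustrative override (the region name is an assumption):
#
#     class MyCloudConnection(OpenStackComputeConnection):
#         service_region = 'eu-west-1'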
class OpenStackNodeDriver(NodeDriver, OpenStackDriverMixin):
"""
Base OpenStack node driver. Should not be used directly.
"""
api_name = 'openstack'
name = 'OpenStack'
website = 'http://openstack.org/'
NODE_STATE_MAP = {
'BUILD': NodeState.PENDING,
'REBUILD': NodeState.PENDING,
'ACTIVE': NodeState.RUNNING,
'SUSPENDED': NodeState.TERMINATED,
'DELETED': NodeState.TERMINATED,
'QUEUE_RESIZE': NodeState.PENDING,
'PREP_RESIZE': NodeState.PENDING,
'VERIFY_RESIZE': NodeState.RUNNING,
'PASSWORD': NodeState.PENDING,
'RESCUE': NodeState.PENDING,
'REBOOT': NodeState.REBOOTING,
'HARD_REBOOT': NodeState.REBOOTING,
'SHARE_IP': NodeState.PENDING,
'SHARE_IP_NO_CONFIG': NodeState.PENDING,
'DELETE_IP': NodeState.PENDING,
'UNKNOWN': NodeState.UNKNOWN
}
def __new__(cls, key, secret=None, secure=True, host=None, port=None,
api_version=DEFAULT_API_VERSION, **kwargs):
if cls is OpenStackNodeDriver:
if api_version == '1.0':
cls = OpenStack_1_0_NodeDriver
elif api_version == '1.1':
cls = OpenStack_1_1_NodeDriver
else:
raise NotImplementedError(
"No OpenStackNodeDriver found for API version %s" %
(api_version))
return super(OpenStackNodeDriver, cls).__new__(cls)
def __init__(self, *args, **kwargs):
OpenStackDriverMixin.__init__(self, **kwargs)
super(OpenStackNodeDriver, self).__init__(*args, **kwargs)
def destroy_node(self, node):
uri = '/servers/%s' % (node.id)
resp = self.connection.request(uri, method='DELETE')
# The OpenStack and Rackspace documentation both say this API will
# return a 204, but in-fact, everyone everywhere agrees it actually
# returns a 202, so we are going to accept either, and someday,
# someone will fix either the implementation or the documentation to
# agree.
return resp.status in (httplib.NO_CONTENT, httplib.ACCEPTED)
def reboot_node(self, node):
return self._reboot_node(node, reboot_type='HARD')
def list_nodes(self):
return self._to_nodes(
self.connection.request('/servers/detail').object)
def list_images(self, location=None, ex_only_active=True):
"""
@inherits: L{NodeDriver.list_images}
@param ex_only_active: True if list only active
@type ex_only_active: C{bool}
"""
return self._to_images(
self.connection.request('/images/detail').object, ex_only_active)
def list_sizes(self, location=None):
return self._to_sizes(
self.connection.request('/flavors/detail').object)
def list_locations(self):
return [NodeLocation(0, '', '', self)]
def _ex_connection_class_kwargs(self):
return self.openstack_connection_kwargs()
def ex_get_node_details(self, node_id):
"""
Lists details of the specified server.
@param node_id: ID of the node which should be used
@type node_id: C{str}
@rtype: L{Node}
"""
# @TODO: Remove this if in 0.6
if isinstance(node_id, Node):
node_id = node_id.id
uri = '/servers/%s' % (node_id)
resp = self.connection.request(uri, method='GET')
if resp.status == httplib.NOT_FOUND:
return None
return self._to_node_from_obj(resp.object)
def ex_soft_reboot_node(self, node):
"""
Soft reboots the specified server
@param node: node
@type node: L{Node}
@rtype: C{bool}
"""
return self._reboot_node(node, reboot_type='SOFT')
def ex_hard_reboot_node(self, node):
"""
Hard reboots the specified server
@param node: node
@type node: L{Node}
@rtype: C{bool}
"""
return self._reboot_node(node, reboot_type='HARD')
class OpenStackNodeSize(NodeSize):
"""
NodeSize class for the OpenStack.org driver.
Following the example of OpenNebula.org driver
and following guidelines:
https://issues.apache.org/jira/browse/LIBCLOUD-119
"""
def __init__(self, id, name, ram, disk, bandwidth, price, driver,
vcpus=None):
super(OpenStackNodeSize, self).__init__(id=id, name=name, ram=ram,
disk=disk,
bandwidth=bandwidth,
price=price, driver=driver)
self.vcpus = vcpus
def __repr__(self):
return (('<OpenStackNodeSize: id=%s, name=%s, ram=%s, disk=%s, '
'bandwidth=%s, price=%s, driver=%s, vcpus=%s, ...>')
% (self.id, self.name, self.ram, self.disk, self.bandwidth,
self.price, self.driver.name, self.vcpus))
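# Illustrative construction (drivers normally build sizes from the flavor API
# response in _to_sizes rather than by hand):
#
#     size = OpenStackNodeSize(id='2', name='m1.small', ram=2048, disk=20,
#                              bandwidth=None, price=0.0, driver=driver,
#                              vcpus=1)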
class OpenStack_1_0_Response(OpenStackResponse):
def __init__(self, *args, **kwargs):
# done because of a circular reference from
# NodeDriver -> Connection -> Response
self.node_driver = OpenStack_1_0_NodeDriver
super(OpenStack_1_0_Response, self).__init__(*args, **kwargs)
class OpenStack_1_0_Connection(OpenStackComputeConnection):
responseCls = OpenStack_1_0_Response
default_content_type = 'application/xml; charset=UTF-8'
accept_format = 'application/xml'
XML_NAMESPACE = 'http://docs.rackspacecloud.com/servers/api/v1.0'
class OpenStack_1_0_NodeDriver(OpenStackNodeDriver):
"""
OpenStack node driver.
Extra node attributes:
- password: root password, available after create.
- hostId: represents the host your cloud server runs on
- imageId: id of image
- flavorId: id of flavor
"""
connectionCls = OpenStack_1_0_Connection
type = Provider.OPENSTACK
features = {"create_node": ["generates_password"]}
def __init__(self, *args, **kwargs):
self._ex_force_api_version = str(kwargs.pop('ex_force_api_version',
None))
self.XML_NAMESPACE = self.connectionCls.XML_NAMESPACE
super(OpenStack_1_0_NodeDriver, self).__init__(*args, **kwargs)
def _to_images(self, object, ex_only_active):
images = []
for image in findall(object, 'image', self.XML_NAMESPACE):
if ex_only_active and image.get('status') != 'ACTIVE':
continue
images.append(self._to_image(image))
return images
def _to_image(self, element):
return NodeImage(id=element.get('id'),
name=element.get('name'),
driver=self.connection.driver,
extra={'updated': element.get('updated'),
'created': element.get('created'),
'status': element.get('status'),
'serverId': element.get('serverId'),
'progress': element.get('progress'),
'minDisk': element.get('minDisk'),
'minRam': element.get('minRam')
}
)
def _change_password_or_name(self, node, name=None, password=None):
uri = '/servers/%s' % (node.id)
if not name:
name = node.name
body = {'xmlns': self.XML_NAMESPACE,
'name': name}
if password is not None:
body['adminPass'] = password
server_elm = ET.Element('server', body)
resp = self.connection.request(
uri, method='PUT', data=ET.tostring(server_elm))
if resp.status == httplib.NO_CONTENT and password is not None:
node.extra['password'] = password
return resp.status == httplib.NO_CONTENT
def create_node(self, **kwargs):
"""
Create a new node
@inherits: L{NodeDriver.create_node}
@keyword ex_metadata: Key/Value metadata to associate with a node
@type ex_metadata: C{dict}
@keyword ex_files: File Path => File contents to create on
the node
@type ex_files: C{dict}
@keyword ex_shared_ip_group_id: The server is launched into
that shared IP group
@type ex_shared_ip_group_id: C{str}
"""
name = kwargs['name']
image = kwargs['image']
size = kwargs['size']
attributes = {'xmlns': self.XML_NAMESPACE,
'name': name,
'imageId': str(image.id),
'flavorId': str(size.id)}
if 'ex_shared_ip_group' in kwargs:
# Deprecate this. Be explicit and call the variable
# ex_shared_ip_group_id since user needs to pass in the id, not the
# name.
warnings.warn('ex_shared_ip_group argument is deprecated.'
' Please use ex_shared_ip_group_id')
if 'ex_shared_ip_group_id' in kwargs:
shared_ip_group_id = kwargs['ex_shared_ip_group_id']
attributes['sharedIpGroupId'] = shared_ip_group_id
server_elm = ET.Element('server', attributes)
metadata_elm = self._metadata_to_xml(kwargs.get("ex_metadata", {}))
if metadata_elm:
server_elm.append(metadata_elm)
files_elm = self._files_to_xml(kwargs.get("ex_files", {}))
if files_elm:
server_elm.append(files_elm)
resp = self.connection.request("/servers",
method='POST',
data=ET.tostring(server_elm))
return self._to_node(resp.object)
def ex_set_password(self, node, password):
"""
Sets the Node's root password.
This will reboot the instance to complete the operation.
L{Node.extra['password']} will be set to the new value if the
operation was successful.
@param node: node to set password
@type node: L{Node}
@param password: new password.
@type password: C{str}
@rtype: C{bool}
"""
return self._change_password_or_name(node, password=password)
def ex_set_server_name(self, node, name):
"""
Sets the Node's name.
This will reboot the instance to complete the operation.
@param node: node to set name
@type node: L{Node}
@param name: new name
@type name: C{str}
@rtype: C{bool}
"""
return self._change_password_or_name(node, name=name)
def ex_resize(self, node, size):
"""
Change an existing server flavor / scale the server up or down.
@param node: node to resize.
@type node: L{Node}
@param size: new size.
@type size: L{NodeSize}
@rtype: C{bool}
"""
elm = ET.Element(
'resize',
{'xmlns': self.XML_NAMESPACE,
'flavorId': str(size.id)}
)
resp = self.connection.request("/servers/%s/action" % (node.id),
method='POST',
data=ET.tostring(elm))
return resp.status == httplib.ACCEPTED
def ex_confirm_resize(self, node):
"""
Confirm a resize request which is currently in progress. If a resize
request is not explicitly confirmed or reverted it's automatically
confirmed after 24 hours.
For more info refer to the API documentation: http://goo.gl/zjFI1
@param node: node for which the resize request will be confirmed.
@type node: L{Node}
@rtype: C{bool}
"""
elm = ET.Element(
'confirmResize',
{'xmlns': self.XML_NAMESPACE},
)
resp = self.connection.request("/servers/%s/action" % (node.id),
method='POST',
data=ET.tostring(elm))
return resp.status == httplib.NO_CONTENT
def ex_revert_resize(self, node):
"""
Revert a resize request which is currently in progress.
All resizes are automatically confirmed after 24 hours if they have
not already been confirmed explicitly or reverted.
For more info refer to the API documentation: http://goo.gl/AizBu
@param node: node for which the resize request will be reverted.
@type node: L{Node}
@rtype: C{bool}
"""
elm = ET.Element(
'revertResize',
{'xmlns': self.XML_NAMESPACE}
)
resp = self.connection.request("/servers/%s/action" % (node.id),
method='POST',
data=ET.tostring(elm))
return resp.status == httplib.NO_CONTENT
def ex_rebuild(self, node_id, image_id):
"""
Rebuilds the specified server.
@param node_id: ID of the node which should be used
@type node_id: C{str}
@param image_id: ID of the image which should be used
@type image_id: C{str}
@rtype: C{bool}
"""
# @TODO: Remove those ifs in 0.6
if isinstance(node_id, Node):
node_id = node_id.id
if isinstance(image_id, NodeImage):
image_id = image_id.id
elm = ET.Element(
'rebuild',
{'xmlns': self.XML_NAMESPACE,
'imageId': image_id}
)
resp = self.connection.request("/servers/%s/action" % node_id,
method='POST',
data=ET.tostring(elm))
return resp.status == httplib.ACCEPTED
def ex_create_ip_group(self, group_name, node_id=None):
"""
Creates a shared IP group.
@param group_name: group name which should be used
@type group_name: C{str}
@param node_id: ID of the node which should be used
@type node_id: C{str}
@rtype: C{bool}
"""
# @TODO: Remove this if in 0.6
if isinstance(node_id, Node):
node_id = node_id.id
group_elm = ET.Element(
'sharedIpGroup',
{'xmlns': self.XML_NAMESPACE,
'name': group_name}
)
if node_id:
ET.SubElement(
group_elm,
'server',
{'id': node_id}
)
resp = self.connection.request('/shared_ip_groups',
method='POST',
data=ET.tostring(group_elm))
return self._to_shared_ip_group(resp.object)
def ex_list_ip_groups(self, details=False):
"""
Lists IDs and names for shared IP groups.
        If details is True, lists all details for shared IP groups.
@param details: True if details is required
@type details: C{bool}
@rtype: C{list} of L{OpenStack_1_0_SharedIpGroup}
"""
uri = '/shared_ip_groups/detail' if details else '/shared_ip_groups'
resp = self.connection.request(uri,
method='GET')
groups = findall(resp.object, 'sharedIpGroup',
self.XML_NAMESPACE)
return [self._to_shared_ip_group(el) for el in groups]
def ex_delete_ip_group(self, group_id):
"""
Deletes the specified shared IP group.
@param group_id: group id which should be used
@type group_id: C{str}
@rtype: C{bool}
"""
uri = '/shared_ip_groups/%s' % group_id
resp = self.connection.request(uri, method='DELETE')
return resp.status == httplib.NO_CONTENT
def ex_share_ip(self, group_id, node_id, ip, configure_node=True):
"""
Shares an IP address to the specified server.
@param group_id: group id which should be used
@type group_id: C{str}
@param node_id: ID of the node which should be used
@type node_id: C{str}
@param ip: ip which should be used
@type ip: C{str}
@param configure_node: configure node
@type configure_node: C{bool}
@rtype: C{bool}
"""
# @TODO: Remove this if in 0.6
if isinstance(node_id, Node):
node_id = node_id.id
if configure_node:
str_configure = 'true'
else:
str_configure = 'false'
elm = ET.Element(
'shareIp',
{'xmlns': self.XML_NAMESPACE,
'sharedIpGroupId': group_id,
'configureServer': str_configure},
)
uri = '/servers/%s/ips/public/%s' % (node_id, ip)
resp = self.connection.request(uri,
method='PUT',
data=ET.tostring(elm))
return resp.status == httplib.ACCEPTED
def ex_unshare_ip(self, node_id, ip):
"""
Removes a shared IP address from the specified server.
@param node_id: ID of the node which should be used
@type node_id: C{str}
@param ip: ip which should be used
@type ip: C{str}
@rtype: C{bool}
"""
# @TODO: Remove this if in 0.6
if isinstance(node_id, Node):
node_id = node_id.id
uri = '/servers/%s/ips/public/%s' % (node_id, ip)
resp = self.connection.request(uri,
method='DELETE')
return resp.status == httplib.ACCEPTED
def ex_list_ip_addresses(self, node_id):
"""
List all server addresses.
@param node_id: ID of the node which should be used
@type node_id: C{str}
        @rtype: L{OpenStack_1_0_NodeIpAddresses}
"""
# @TODO: Remove this if in 0.6
if isinstance(node_id, Node):
node_id = node_id.id
uri = '/servers/%s/ips' % node_id
resp = self.connection.request(uri,
method='GET')
return self._to_ip_addresses(resp.object)
def _metadata_to_xml(self, metadata):
if len(metadata) == 0:
return None
metadata_elm = ET.Element('metadata')
for k, v in list(metadata.items()):
meta_elm = ET.SubElement(metadata_elm, 'meta', {'key': str(k)})
meta_elm.text = str(v)
return metadata_elm
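    # Illustrative sketch (not part of the upstream driver): a metadata dict
    # such as {'role': 'web'} serializes to XML equivalent to
    #   <metadata><meta key="role">web</meta></metadata>
    # which create_node then appends to the <server> element.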
def _files_to_xml(self, files):
if len(files) == 0:
return None
personality_elm = ET.Element('personality')
for k, v in list(files.items()):
file_elm = ET.SubElement(personality_elm,
'file',
{'path': str(k)})
file_elm.text = base64.b64encode(b(v))
return personality_elm
def _reboot_node(self, node, reboot_type='SOFT'):
resp = self._node_action(node, ['reboot', ('type', reboot_type)])
return resp.status == httplib.ACCEPTED
def _node_action(self, node, body):
if isinstance(body, list):
attr = ' '.join(['%s="%s"' % (item[0], item[1])
for item in body[1:]])
body = '<%s xmlns="%s" %s/>' % (body[0], self.XML_NAMESPACE, attr)
uri = '/servers/%s/action' % (node.id)
resp = self.connection.request(uri, method='POST', data=body)
return resp
def _to_nodes(self, object):
node_elements = findall(object, 'server', self.XML_NAMESPACE)
return [self._to_node(el) for el in node_elements]
def _to_node_from_obj(self, obj):
return self._to_node(findall(obj, 'server', self.XML_NAMESPACE)[0])
def _to_node(self, el):
def get_ips(el):
return [ip.get('addr') for ip in el]
def get_meta_dict(el):
d = {}
for meta in el:
d[meta.get('key')] = meta.text
return d
public_ip = get_ips(findall(el, 'addresses/public/ip',
self.XML_NAMESPACE))
private_ip = get_ips(findall(el, 'addresses/private/ip',
self.XML_NAMESPACE))
metadata = get_meta_dict(findall(el, 'metadata/meta',
self.XML_NAMESPACE))
n = Node(id=el.get('id'),
name=el.get('name'),
state=self.NODE_STATE_MAP.get(
el.get('status'), NodeState.UNKNOWN),
public_ips=public_ip,
private_ips=private_ip,
driver=self.connection.driver,
extra={
'password': el.get('adminPass'),
'hostId': el.get('hostId'),
'imageId': el.get('imageId'),
'flavorId': el.get('flavorId'),
'uri': "https://%s%s/servers/%s" % (
self.connection.host,
self.connection.request_path, el.get('id')),
'metadata': metadata,
})
return n
def _to_sizes(self, object):
elements = findall(object, 'flavor', self.XML_NAMESPACE)
return [self._to_size(el) for el in elements]
def _to_size(self, el):
vcpus = int(el.get('vcpus')) if el.get('vcpus', None) else None
return OpenStackNodeSize(id=el.get('id'),
name=el.get('name'),
ram=int(el.get('ram')),
disk=int(el.get('disk')),
# XXX: needs hardcode
vcpus=vcpus,
bandwidth=None,
# Hardcoded
price=self._get_size_price(el.get('id')),
driver=self.connection.driver)
def ex_limits(self):
"""
Extra call to get account's limits, such as
rates (for example amount of POST requests per day)
and absolute limits like total amount of available
RAM to be used by servers.
@return: dict with keys 'rate' and 'absolute'
@rtype: C{dict}
"""
def _to_rate(el):
rate = {}
for item in list(el.items()):
rate[item[0]] = item[1]
return rate
def _to_absolute(el):
return {el.get('name'): el.get('value')}
limits = self.connection.request("/limits").object
rate = [_to_rate(el) for el in findall(limits, 'rate/limit',
self.XML_NAMESPACE)]
absolute = {}
for item in findall(limits, 'absolute/limit',
self.XML_NAMESPACE):
absolute.update(_to_absolute(item))
return {"rate": rate, "absolute": absolute}
def ex_save_image(self, node, name):
"""Create an image for node.
@param node: node to use as a base for image
@type node: L{Node}
@param name: name for new image
@type name: C{str}
@rtype: L{NodeImage}
"""
image_elm = ET.Element(
'image',
{'xmlns': self.XML_NAMESPACE,
'name': name,
'serverId': node.id}
)
return self._to_image(
self.connection.request("/images", method="POST",
data=ET.tostring(image_elm)).object)
def ex_delete_image(self, image):
"""Delete an image for node.
@param image: the image to be deleted
@type image: L{NodeImage}
@rtype: C{bool}
"""
uri = '/images/%s' % image.id
resp = self.connection.request(uri, method='DELETE')
return resp.status == httplib.NO_CONTENT
def _to_shared_ip_group(self, el):
servers_el = findall(el, 'servers', self.XML_NAMESPACE)
if servers_el:
servers = [s.get('id')
for s in findall(servers_el[0], 'server',
self.XML_NAMESPACE)]
else:
servers = None
return OpenStack_1_0_SharedIpGroup(id=el.get('id'),
name=el.get('name'),
servers=servers)
def _to_ip_addresses(self, el):
public_ips = [ip.get('addr') for ip in findall(
findall(el, 'public', self.XML_NAMESPACE)[0],
'ip', self.XML_NAMESPACE)]
private_ips = [ip.get('addr') for ip in findall(
findall(el, 'private', self.XML_NAMESPACE)[0],
'ip', self.XML_NAMESPACE)]
return OpenStack_1_0_NodeIpAddresses(public_ips, private_ips)
def _get_size_price(self, size_id):
try:
return get_size_price(driver_type='compute',
driver_name=self.api_name,
size_id=size_id)
except KeyError:
return 0.0
class OpenStack_1_0_SharedIpGroup(object):
"""
Shared IP group info.
"""
def __init__(self, id, name, servers=None):
self.id = str(id)
self.name = name
self.servers = servers
class OpenStack_1_0_NodeIpAddresses(object):
"""
List of public and private IP addresses of a Node.
"""
def __init__(self, public_addresses, private_addresses):
self.public_addresses = public_addresses
self.private_addresses = private_addresses
class OpenStack_1_1_Response(OpenStackResponse):
def __init__(self, *args, **kwargs):
# done because of a circular reference from
# NodeDriver -> Connection -> Response
self.node_driver = OpenStack_1_1_NodeDriver
super(OpenStack_1_1_Response, self).__init__(*args, **kwargs)
class OpenStackNetwork(object):
"""
A Virtual Network.
"""
def __init__(self, id, name, cidr, driver, extra=None):
self.id = str(id)
self.name = name
self.cidr = cidr
self.driver = driver
self.extra = extra or {}
def __repr__(self):
return '<OpenStackNetwork id="%s" name="%s" cidr="%s">' % (self.id,
self.name, self.cidr,)
class OpenStackSecurityGroup(object):
"""
A Security Group.
"""
def __init__(self, id, tenant_id, name, description, driver, rules=None,
extra=None):
"""
Constructor.
@keyword id: Group id.
@type id: C{str}
@keyword tenant_id: Owner of the security group.
@type tenant_id: C{str}
@keyword name: Human-readable name for the security group. Might
not be unique.
@type name: C{str}
@keyword description: Human-readable description of a security
group.
@type description: C{str}
@keyword rules: Rules associated with this group.
@type description: C{list} of L{OpenStackSecurityGroupRule}
@keyword extra: Extra attributes associated with this group.
@type extra: C{dict}
"""
self.id = id
self.tenant_id = tenant_id
self.name = name
self.description = description
self.driver = driver
self.rules = rules or []
self.extra = extra or {}
def __repr__(self):
        return ('<OpenStackSecurityGroup id=%s tenant_id=%s name=%s '
                'description=%s>' % (self.id, self.tenant_id, self.name,
                                     self.description))
class OpenStackSecurityGroupRule(object):
"""
A Rule of a Security Group.
"""
def __init__(self, id, parent_group_id, ip_protocol, from_port, to_port,
driver, ip_range=None, group=None, tenant_id=None,
extra=None):
"""
Constructor.
@keyword id: Rule id.
@type id: C{str}
@keyword parent_group_id: ID of the parent security group.
@type parent_group_id: C{str}
@keyword ip_protocol: IP Protocol (icmp, tcp, udp, etc).
@type ip_protocol: C{str}
@keyword from_port: Port at start of range.
@type from_port: C{int}
@keyword to_port: Port at end of range.
@type to_port: C{int}
@keyword ip_range: CIDR for address range.
@type ip_range: C{str}
@keyword group: Name of a source security group to apply to rule.
@type group: C{str}
@keyword tenant_id: Owner of the security group.
@type tenant_id: C{str}
@keyword extra: Extra attributes associated with this rule.
@type extra: C{dict}
"""
self.id = id
self.parent_group_id = parent_group_id
self.ip_protocol = ip_protocol
self.from_port = from_port
self.to_port = to_port
self.driver = driver
self.ip_range = ''
self.group = {}
if group is None:
self.ip_range = ip_range
else:
self.group = {'name': group, 'tenant_id': tenant_id}
self.tenant_id = tenant_id
self.extra = extra or {}
def __repr__(self):
        return ('<OpenStackSecurityGroupRule id=%s parent_group_id=%s '
                'ip_protocol=%s from_port=%s to_port=%s>' %
                (self.id, self.parent_group_id, self.ip_protocol,
                 self.from_port, self.to_port))
class OpenStack_1_1_Connection(OpenStackComputeConnection):
responseCls = OpenStack_1_1_Response
accept_format = 'application/json'
default_content_type = 'application/json; charset=UTF-8'
def encode_data(self, data):
return json.dumps(data)
class OpenStack_1_1_NodeDriver(OpenStackNodeDriver):
"""
OpenStack node driver.
"""
connectionCls = OpenStack_1_1_Connection
type = Provider.OPENSTACK
features = {"create_node": ["generates_password"]}
def __init__(self, *args, **kwargs):
self._ex_force_api_version = str(kwargs.pop('ex_force_api_version',
None))
super(OpenStack_1_1_NodeDriver, self).__init__(*args, **kwargs)
def create_node(self, **kwargs):
"""Create a new node
@inherits: L{NodeDriver.create_node}
@keyword ex_metadata: Key/Value metadata to associate with a node
@type ex_metadata: C{dict}
        @keyword ex_files: File Path => File contents to create on
                           the node
@type ex_files: C{dict}
@keyword ex_keyname: Name of existing public key to inject into
instance
@type ex_keyname: C{str}
@keyword ex_userdata: String containing user data
see
https://help.ubuntu.com/community/CloudInit
@type ex_userdata: C{str}
@keyword networks: The server is launched into a set of Networks.
@type networks: L{OpenStackNetwork}
@keyword ex_security_groups: List of security groups to assign to
the node
@type ex_security_groups: C{list} of L{OpenStackSecurityGroup}
"""
server_params = self._create_args_to_params(None, **kwargs)
resp = self.connection.request("/servers",
method='POST',
data={'server': server_params})
create_response = resp.object['server']
server_resp = self.connection.request(
'/servers/%s' % create_response['id'])
server_object = server_resp.object['server']
# adminPass is not always present
# http://docs.openstack.org/essex/openstack-compute/admin/
# content/configuring-compute-API.html#d6e1833
server_object['adminPass'] = create_response.get('adminPass', None)
return self._to_node(server_object)
def _to_images(self, obj, ex_only_active):
images = []
for image in obj['images']:
if ex_only_active and image.get('status') != 'ACTIVE':
continue
images.append(self._to_image(image))
return images
def _to_image(self, api_image):
server = api_image.get('server', {})
return NodeImage(
id=api_image['id'],
name=api_image['name'],
driver=self,
extra=dict(
updated=api_image['updated'],
created=api_image['created'],
status=api_image['status'],
progress=api_image.get('progress'),
metadata=api_image.get('metadata'),
serverId=server.get('id'),
minDisk=api_image.get('minDisk'),
minRam=api_image.get('minRam'),
)
)
def _to_nodes(self, obj):
servers = obj['servers']
return [self._to_node(server) for server in servers]
def _to_sizes(self, obj):
flavors = obj['flavors']
return [self._to_size(flavor) for flavor in flavors]
def _create_args_to_params(self, node, **kwargs):
server_params = {
'name': kwargs.get('name'),
'metadata': kwargs.get('ex_metadata', {}),
'personality': self._files_to_personality(kwargs.get("ex_files",
{}))
}
if 'ex_keyname' in kwargs:
server_params['key_name'] = kwargs['ex_keyname']
if 'ex_userdata' in kwargs:
server_params['user_data'] = base64.b64encode(
b(kwargs['ex_userdata'])).decode('ascii')
if 'networks' in kwargs:
networks = kwargs['networks']
networks = [{'uuid': network.id} for network in networks]
server_params['networks'] = networks
if 'ex_security_groups' in kwargs:
server_params['security_groups'] = []
for security_group in kwargs['ex_security_groups']:
name = security_group.name
server_params['security_groups'].append({'name': name})
if 'name' in kwargs:
server_params['name'] = kwargs.get('name')
else:
server_params['name'] = node.name
if 'image' in kwargs:
server_params['imageRef'] = kwargs.get('image').id
else:
server_params['imageRef'] = node.extra.get('imageId')
if 'size' in kwargs:
server_params['flavorRef'] = kwargs.get('size').id
else:
server_params['flavorRef'] = node.extra.get('flavorId')
return server_params
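    # Illustrative sketch (hypothetical values): for a call such as
    #   driver.create_node(name='srv1', image=image, size=size)
    # this helper yields a dict along the lines of
    #   {'name': 'srv1', 'metadata': {}, 'personality': [],
    #    'imageRef': image.id, 'flavorRef': size.id}
    # which create_node then posts as {'server': server_params}.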
def _files_to_personality(self, files):
rv = []
for k, v in list(files.items()):
rv.append({'path': k, 'contents': base64.b64encode(b(v))})
return rv
def _reboot_node(self, node, reboot_type='SOFT'):
resp = self._node_action(node, 'reboot', type=reboot_type)
return resp.status == httplib.ACCEPTED
def ex_set_password(self, node, password):
"""
Changes the administrator password for a specified server.
@param node: Node to rebuild.
@type node: L{Node}
@param password: The administrator password.
@type password: C{str}
@rtype: C{bool}
"""
resp = self._node_action(node, 'changePassword', adminPass=password)
node.extra['password'] = password
return resp.status == httplib.ACCEPTED
def ex_rebuild(self, node, image):
"""
Rebuild a Node.
@param node: Node to rebuild.
@type node: L{Node}
@param image: New image to use.
@type image: L{NodeImage}
@rtype: C{bool}
"""
server_params = self._create_args_to_params(node, image=image)
resp = self._node_action(node, 'rebuild', **server_params)
return resp.status == httplib.ACCEPTED
def ex_resize(self, node, size):
"""
Change a node size.
@param node: Node to resize.
@type node: L{Node}
@type size: L{NodeSize}
@param size: New size to use.
@rtype: C{bool}
"""
server_params = self._create_args_to_params(node, size=size)
resp = self._node_action(node, 'resize', **server_params)
return resp.status == httplib.ACCEPTED
def ex_confirm_resize(self, node):
"""
Confirms a pending resize action.
@param node: Node to resize.
@type node: L{Node}
@rtype: C{bool}
"""
resp = self._node_action(node, 'confirmResize')
return resp.status == httplib.NO_CONTENT
def ex_revert_resize(self, node):
"""
Cancels and reverts a pending resize action.
@param node: Node to resize.
@type node: L{Node}
@rtype: C{bool}
"""
resp = self._node_action(node, 'revertResize')
return resp.status == httplib.ACCEPTED
def ex_save_image(self, node, name, metadata=None):
"""
Creates a new image.
@param node: Node
@type node: L{Node}
@param name: The name for the new image.
@type name: C{str}
@param metadata: Key and value pairs for metadata.
@type metadata: C{dict}
@rtype: L{NodeImage}
"""
optional_params = {}
if metadata:
optional_params['metadata'] = metadata
resp = self._node_action(node, 'createImage', name=name,
**optional_params)
image_id = self._extract_image_id_from_url(resp.headers['location'])
return self.ex_get_image(image_id=image_id)
def ex_set_server_name(self, node, name):
"""
Sets the Node's name.
@param node: Node
@type node: L{Node}
@param name: The name of the server.
@type name: C{str}
@rtype: L{Node}
"""
return self._update_node(node, name=name)
def ex_get_metadata(self, node):
"""
Get a Node's metadata.
@param node: Node
@type node: L{Node}
@return: Key/Value metadata associated with node.
@rtype: C{dict}
"""
return self.connection.request(
'/servers/%s/metadata' % (node.id,),
method='GET',).object['metadata']
def ex_set_metadata(self, node, metadata):
"""
Sets the Node's metadata.
@param node: Node
@type node: L{Node}
@param metadata: Key/Value metadata to associate with a node
@type metadata: C{dict}
@rtype: C{dict}
"""
return self.connection.request(
'/servers/%s/metadata' % (node.id,), method='PUT',
data={'metadata': metadata}
).object['metadata']
def ex_update_node(self, node, **node_updates):
"""
Update the Node's editable attributes. The OpenStack API currently
supports editing name and IPv4/IPv6 access addresses.
The driver currently only supports updating the node name.
@param node: Node
@type node: L{Node}
@keyword name: New name for the server
@type name: C{str}
@rtype: L{Node}
"""
potential_data = self._create_args_to_params(node, **node_updates)
updates = {'name': potential_data['name']}
return self._update_node(node, **updates)
def _to_networks(self, obj):
networks = obj['networks']
return [self._to_network(network) for network in networks]
def _to_network(self, obj):
return OpenStackNetwork(id=obj['id'],
name=obj['label'],
cidr=obj.get('cidr', None),
driver=self)
def ex_list_networks(self):
"""
Get a list of Networks that are available.
@rtype: C{list} of L{OpenStackNetwork}
"""
return self._to_networks(
self.connection.request('/os-networksv2').object)
def ex_create_network(self, name, cidr):
"""
Create a new Network
@param name: Name of network which should be used
@type name: C{str}
@param cidr: cidr of network which should be used
@type cidr: C{str}
@rtype: L{OpenStackNetwork}
"""
return self._to_network(self.connection.request(
'/os-networksv2', method='POST',
data={'network': {'cidr': cidr, 'label': name}}
).object['network'])
def ex_delete_network(self, network):
"""
        Delete a Network.
        @param network: Network which should be deleted
@type network: L{OpenStackNetwork}
@rtype: C{bool}
"""
resp = self.connection.request('/os-networksv2/%s' % (network.id),
method='DELETE')
return resp.status == httplib.ACCEPTED
def _to_security_group_rules(self, obj):
return [self._to_security_group_rule(security_group_rule) for
security_group_rule in obj]
def _to_security_group_rule(self, obj):
ip_range = group = tenant_id = None
if obj['group'] == {}:
ip_range = obj['ip_range'].get('cidr', None)
else:
group = obj['group'].get('name', None)
tenant_id = obj['group'].get('tenant_id', None)
        return OpenStackSecurityGroupRule(
            id=obj['id'],
            parent_group_id=obj['parent_group_id'],
            ip_protocol=obj['ip_protocol'],
            from_port=obj['from_port'],
            to_port=obj['to_port'],
            driver=self,
            ip_range=ip_range,
            group=group,
            tenant_id=tenant_id)
def _to_security_groups(self, obj):
security_groups = obj['security_groups']
return [self._to_security_group(security_group) for security_group in
security_groups]
def _to_security_group(self, obj):
return OpenStackSecurityGroup(id=obj['id'],
tenant_id=obj['tenant_id'],
name=obj['name'],
description=obj.get('description', ''),
rules=self._to_security_group_rules(
obj.get('rules', [])),
driver=self)
def ex_list_security_groups(self):
"""
Get a list of Security Groups that are available.
@rtype: C{list} of L{OpenStackSecurityGroup}
"""
return self._to_security_groups(
self.connection.request('/os-security-groups').object)
def ex_get_node_security_groups(self, node):
"""
Get Security Groups of the specified server.
@rtype: C{list} of L{OpenStackSecurityGroup}
"""
return self._to_security_groups(
self.connection.request('/servers/%s/os-security-groups' %
(node.id)).object)
def ex_create_security_group(self, name, description):
"""
Create a new Security Group
@param name: Name of the new Security Group
@type name: C{str}
@param description: Description of the new Security Group
@type description: C{str}
@rtype: L{OpenStackSecurityGroup}
"""
return self._to_security_group(self.connection.request(
'/os-security-groups', method='POST',
data={'security_group': {'name': name, 'description': description}}
).object['security_group'])
def ex_delete_security_group(self, security_group):
"""
Delete a Security Group.
@param security_group: Security Group should be deleted
@type security_group: L{OpenStackSecurityGroup}
@rtype: C{bool}
"""
resp = self.connection.request('/os-security-groups/%s' %
(security_group.id),
method='DELETE')
return resp.status == httplib.NO_CONTENT
def ex_create_security_group_rule(self, security_group, ip_protocol,
from_port, to_port, cidr=None,
source_security_group=None):
"""
Create a new Rule in a Security Group
@param security_group: Security Group in which to add the rule
@type security_group: L{OpenStackSecurityGroup}
@param ip_protocol: Protocol to which this rule applies
Examples: tcp, udp, ...
@type ip_protocol: C{str}
@param from_port: First port of the port range
@type from_port: C{int}
@param to_port: Last port of the port range
@type to_port: C{int}
@param cidr: CIDR notation of the source IP range for this rule
@type cidr: C{str}
@param source_security_group: Existing Security Group to use as the
source (instead of CIDR)
        @type source_security_group: L{OpenStackSecurityGroup}
@rtype: L{OpenStackSecurityGroupRule}
"""
source_security_group_id = None
        if isinstance(source_security_group, OpenStackSecurityGroup):
source_security_group_id = source_security_group.id
return self._to_security_group_rule(self.connection.request(
'/os-security-group-rules', method='POST',
data={'security_group_rule': {
'ip_protocol': ip_protocol,
'from_port': from_port,
'to_port': to_port,
'cidr': cidr,
'group_id': source_security_group_id,
'parent_group_id': security_group.id}}
).object['security_group_rule'])
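    # Usage sketch (hypothetical group object): allow inbound SSH from
    # anywhere on an existing security group.
    #   rule = driver.ex_create_security_group_rule(
    #       security_group, 'tcp', 22, 22, cidr='0.0.0.0/0')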
def ex_delete_security_group_rule(self, rule):
"""
Delete a Rule from a Security Group.
@param rule: Rule should be deleted
@type rule: L{OpenStackSecurityGroupRule}
@rtype: C{bool}
"""
resp = self.connection.request('/os-security-group-rules/%s' %
(rule.id), method='DELETE')
return resp.status == httplib.NO_CONTENT
def ex_get_size(self, size_id):
"""
Get a NodeSize
@param size_id: ID of the size which should be used
@type size_id: C{str}
@rtype: L{NodeSize}
"""
return self._to_size(self.connection.request(
            '/flavors/%s' % (size_id,)).object['flavor'])
def ex_get_image(self, image_id):
"""
Get a NodeImage
@param image_id: ID of the image which should be used
@type image_id: C{str}
@rtype: L{NodeImage}
"""
return self._to_image(self.connection.request(
'/images/%s' % (image_id,)).object['image'])
def ex_delete_image(self, image):
"""
Delete a NodeImage
        @param image: image which should be deleted
@type image: L{NodeImage}
@rtype: C{bool}
"""
resp = self.connection.request('/images/%s' % (image.id,),
method='DELETE')
return resp.status == httplib.NO_CONTENT
def _node_action(self, node, action, **params):
params = params or None
return self.connection.request('/servers/%s/action' % (node.id,),
method='POST', data={action: params})
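    # Illustrative example: _node_action(node, 'reboot', type='SOFT') posts
    # the JSON body {'reboot': {'type': 'SOFT'}} to /servers/<id>/action.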
def _update_node(self, node, **node_updates):
"""
Updates the editable attributes of a server, which currently include
its name and IPv4/IPv6 access addresses.
"""
return self._to_node(
self.connection.request(
'/servers/%s' % (node.id,), method='PUT',
data={'server': node_updates}
).object['server']
)
def _to_node_from_obj(self, obj):
return self._to_node(obj['server'])
def _to_node(self, api_node):
public_networks_labels = ['public', 'internet']
public_ips, private_ips = [], []
for label, values in api_node['addresses'].items():
ips = [v['addr'] for v in values]
if label in public_networks_labels:
public_ips.extend(ips)
else:
private_ips.extend(ips)
return Node(
id=api_node['id'],
name=api_node['name'],
state=self.NODE_STATE_MAP.get(api_node['status'],
NodeState.UNKNOWN),
public_ips=public_ips,
private_ips=private_ips,
driver=self,
extra=dict(
hostId=api_node['hostId'],
# Docs says "tenantId", but actual is "tenant_id". *sigh*
# Best handle both.
tenantId=api_node.get('tenant_id') or api_node['tenantId'],
imageId=api_node['image']['id'],
flavorId=api_node['flavor']['id'],
uri=next(link['href'] for link in api_node['links'] if
link['rel'] == 'self'),
metadata=api_node['metadata'],
password=api_node.get('adminPass', None),
created=api_node['created'],
updated=api_node['updated'],
key_name=api_node.get('key_name', None),
),
)
def _to_size(self, api_flavor, price=None, bandwidth=None):
# if provider-specific subclasses can get better values for
# price/bandwidth, then can pass them in when they super().
if not price:
price = self._get_size_price(str(api_flavor['id']))
return OpenStackNodeSize(
id=api_flavor['id'],
name=api_flavor['name'],
ram=api_flavor['ram'],
disk=api_flavor['disk'],
vcpus=api_flavor['vcpus'],
bandwidth=bandwidth,
price=price,
driver=self,
)
def _get_size_price(self, size_id):
try:
return get_size_price(
driver_type='compute',
driver_name=self.api_name,
size_id=size_id,
)
except KeyError:
            return 0.0
def _extract_image_id_from_url(self, location_header):
path = urlparse.urlparse(location_header).path
image_id = path.split('/')[-1]
return image_id
def ex_rescue(self, node, password=None):
# Requires Rescue Mode extension
"""
Rescue a node
@param node: node
@type node: L{Node}
@param password: password
@type password: C{str}
@rtype: L{Node}
"""
if password:
resp = self._node_action(node, 'rescue', adminPass=password)
else:
resp = self._node_action(node, 'rescue')
password = json.loads(resp.body)['adminPass']
node.extra['password'] = password
return node
def ex_unrescue(self, node):
"""
Unrescue a node
@param node: node
@type node: L{Node}
@rtype: C{bool}
"""
resp = self._node_action(node, 'unrescue')
return resp.status == httplib.ACCEPTED
| apache-2.0 | -4,493,782,231,637,883,000 | 32.37645 | 79 | 0.53063 | false |
Cinntax/home-assistant | homeassistant/components/zha/core/channels/security.py | 1 | 6806 | """
Security channels module for Zigbee Home Automation.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/zha/
"""
import logging
import zigpy.zcl.clusters.security as security
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_send
from . import ZigbeeChannel
from .. import registries
from ..const import (
CLUSTER_COMMAND_SERVER,
SIGNAL_ATTR_UPDATED,
WARNING_DEVICE_MODE_EMERGENCY,
WARNING_DEVICE_SOUND_HIGH,
WARNING_DEVICE_SQUAWK_MODE_ARMED,
WARNING_DEVICE_STROBE_HIGH,
WARNING_DEVICE_STROBE_YES,
)
_LOGGER = logging.getLogger(__name__)
@registries.ZIGBEE_CHANNEL_REGISTRY.register(security.IasAce.cluster_id)
class IasAce(ZigbeeChannel):
"""IAS Ancillary Control Equipment channel."""
@registries.CHANNEL_ONLY_CLUSTERS.register(security.IasWd.cluster_id)
@registries.ZIGBEE_CHANNEL_REGISTRY.register(security.IasWd.cluster_id)
class IasWd(ZigbeeChannel):
"""IAS Warning Device channel."""
@staticmethod
def set_bit(destination_value, destination_bit, source_value, source_bit):
"""Set the specified bit in the value."""
if IasWd.get_bit(source_value, source_bit):
return destination_value | (1 << destination_bit)
return destination_value
@staticmethod
def get_bit(value, bit):
"""Get the specified bit from the value."""
return (value & (1 << bit)) != 0
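    # Worked example (illustrative): IasWd.get_bit(0b0101, 2) is True, and
    # IasWd.set_bit(0, 3, 0b1, 0) returns 0b1000 -- bit 0 of the source is
    # copied into bit 3 of the destination.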
async def squawk(
self,
mode=WARNING_DEVICE_SQUAWK_MODE_ARMED,
strobe=WARNING_DEVICE_STROBE_YES,
squawk_level=WARNING_DEVICE_SOUND_HIGH,
):
"""Issue a squawk command.
This command uses the WD capabilities to emit a quick audible/visible pulse called a
"squawk". The squawk command has no effect if the WD is currently active
(warning in progress).
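
        A worked packing example (illustrative bit values, since the real
        constants live in ..const): with mode bits 0b0000, strobe bit 0b1 and
        squawk_level bits 0b11 the payload byte becomes 0b00001011 (0x0B),
        i.e. level in bits 0-1, strobe in bit 3 and mode in bits 4-7.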
"""
value = 0
value = IasWd.set_bit(value, 0, squawk_level, 0)
value = IasWd.set_bit(value, 1, squawk_level, 1)
value = IasWd.set_bit(value, 3, strobe, 0)
value = IasWd.set_bit(value, 4, mode, 0)
value = IasWd.set_bit(value, 5, mode, 1)
value = IasWd.set_bit(value, 6, mode, 2)
value = IasWd.set_bit(value, 7, mode, 3)
await self.device.issue_cluster_command(
self.cluster.endpoint.endpoint_id,
self.cluster.cluster_id,
0x0001,
CLUSTER_COMMAND_SERVER,
[value],
)
async def start_warning(
self,
mode=WARNING_DEVICE_MODE_EMERGENCY,
strobe=WARNING_DEVICE_STROBE_YES,
siren_level=WARNING_DEVICE_SOUND_HIGH,
warning_duration=5, # seconds
strobe_duty_cycle=0x00,
strobe_intensity=WARNING_DEVICE_STROBE_HIGH,
):
"""Issue a start warning command.
This command starts the WD operation. The WD alerts the surrounding area by audible
(siren) and visual (strobe) signals.
strobe_duty_cycle indicates the length of the flash cycle. This provides a means
of varying the flash duration for different alarm types (e.g., fire, police, burglar).
Valid range is 0-100 in increments of 10. All other values SHALL be rounded to the
nearest valid value. Strobe SHALL calculate duty cycle over a duration of one second.
The ON state SHALL precede the OFF state. For example, if Strobe Duty Cycle Field specifies
“40,” then the strobe SHALL flash ON for 4/10ths of a second and then turn OFF for
6/10ths of a second.
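
        Usage sketch (hypothetical values): a 10 second warning that flashes
        ON for 40% of each second could be requested with
        start_warning(warning_duration=10, strobe_duty_cycle=40).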
"""
value = 0
value = IasWd.set_bit(value, 0, siren_level, 0)
value = IasWd.set_bit(value, 1, siren_level, 1)
value = IasWd.set_bit(value, 2, strobe, 0)
value = IasWd.set_bit(value, 4, mode, 0)
value = IasWd.set_bit(value, 5, mode, 1)
value = IasWd.set_bit(value, 6, mode, 2)
value = IasWd.set_bit(value, 7, mode, 3)
await self.device.issue_cluster_command(
self.cluster.endpoint.endpoint_id,
self.cluster.cluster_id,
0x0000,
CLUSTER_COMMAND_SERVER,
[value, warning_duration, strobe_duty_cycle, strobe_intensity],
)
@registries.BINARY_SENSOR_CLUSTERS.register(security.IasZone.cluster_id)
@registries.ZIGBEE_CHANNEL_REGISTRY.register(security.IasZone.cluster_id)
class IASZoneChannel(ZigbeeChannel):
"""Channel for the IASZone Zigbee cluster."""
@callback
def cluster_command(self, tsn, command_id, args):
"""Handle commands received to this cluster."""
if command_id == 0:
state = args[0] & 3
async_dispatcher_send(
self._zha_device.hass, f"{self.unique_id}_{SIGNAL_ATTR_UPDATED}", state
)
self.debug("Updated alarm state: %s", state)
elif command_id == 1:
self.debug("Enroll requested")
res = self._cluster.enroll_response(0, 0)
self._zha_device.hass.async_create_task(res)
async def async_configure(self):
"""Configure IAS device."""
# Xiaomi devices don't need this and it disrupts pairing
if self._zha_device.manufacturer == "LUMI":
self.debug("finished IASZoneChannel configuration")
return
from zigpy.exceptions import DeliveryError
self.debug("started IASZoneChannel configuration")
await self.bind()
ieee = self.cluster.endpoint.device.application.ieee
try:
res = await self._cluster.write_attributes({"cie_addr": ieee})
self.debug(
"wrote cie_addr: %s to '%s' cluster: %s",
str(ieee),
self._cluster.ep_attribute,
res[0],
)
except DeliveryError as ex:
self.debug(
"Failed to write cie_addr: %s to '%s' cluster: %s",
str(ieee),
self._cluster.ep_attribute,
str(ex),
)
self.debug("finished IASZoneChannel configuration")
await self.get_attribute_value("zone_type", from_cache=False)
@callback
def attribute_updated(self, attrid, value):
"""Handle attribute updates on this cluster."""
if attrid == 2:
value = value & 3
async_dispatcher_send(
self._zha_device.hass, f"{self.unique_id}_{SIGNAL_ATTR_UPDATED}", value
)
async def async_initialize(self, from_cache):
"""Initialize channel."""
await self.get_attribute_value("zone_status", from_cache=from_cache)
await self.get_attribute_value("zone_state", from_cache=from_cache)
await super().async_initialize(from_cache)
| apache-2.0 | 7,192,171,406,314,994,000 | 34.612565 | 99 | 0.626286 | false |
SRabbelier/Melange | scripts/gci_statistic_seeder.py | 1 | 7157 | #!/usr/bin/python2.5
#
# Copyright 2010 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Starts an interactive shell which allows to create statistic entities.
Usage is simple:
In order to seed all available statistics, just type:
>>> seed_all()
In order to seed one statistic:
>>> seed_one(link_id)
where link_id is for the desired statistic
In order to change program in scope:
>>> set_program(key_name)
where key_name represents a new program
In order to terminate the script:
>>> exit()
"""
__authors__ = [
'"Daniel Hans" <[email protected]>',
]
import sys
import interactive
interactive.setup()
from django.utils import simplejson
from soc.logic import dicts
from soc.modules.gci.logic.models.program import logic as program_logic
from soc.modules.statistic.logic.models.statistic import logic as \
statistic_logic
from soc.modules.statistic.models.statistic import Statistic
SUCCESS_MSG_FMT = 'Statistic %s has been successfully added.'
FAILURE_MSG_FMT = 'An error occurred while adding %s statistic.'
DOES_NOT_EXISTS_MSG_FMT = 'Statistic %s does not exist.'
VISUALIZATION_SETS = {
"cumulative_standard": [
"Table",
"BarChart",
"ColumnChart",
"ImageChartBar",
],
"cumulative_countries": [
"Table"
],
"single_standard": [
"Table",
"BarChart",
"ColumnChart",
"ImageChartBar",
"ImageChartP",
"ImageChartP3",
"PieChart",
"ScatterChart"
],
"single_countries": [
"Table",
"GeoMap"
]
}
STATISTIC_PROPERTIES = {
"mentors_per_continent": (
"Mentors Per Continent",
{
"type": "per_field",
"field": "continent",
"model": "gci_mentor",
"subsets": [("all", {}), ("referenced", {}), ("no-referenced", {})],
"filter": "property_filter",
"params": {
"ref_logic": "gci_task",
"ref_field": "mentors",
"program_field": "program",
"property_conditions": {
"status": ["active", "inactive"]
},
}
},
{
"description": [("continent", "string", "Continent"),
("all_mentors", "number", "Mentors"),
("pro_mentors", "number", "Mentors with tasks"),
("nop_mentors", "number", "Mentors without tasks")],
"options": {
'Mentors Per Continent (cumulative)': {
"visualizations": VISUALIZATION_SETS['cumulative_standard'],
"columns": [0, 1, 2]
},
'Mentors Per Continent (all)': {
"visualizations": VISUALIZATION_SETS['single_standard'],
"columns": [0]
},
'Mentors Per Continent (with tasks)': {
"visualizations": VISUALIZATION_SETS['single_standard'],
"columns": [1]
},
'Mentors Per Continent (without tasks)': {
"visualizations": VISUALIZATION_SETS['single_standard'],
"columns": [2]
}
}
},
"org_admin"),
"students_per_age": (
"Students Per Age",
{
"type": "per_field",
"field": "age",
"model": "gci_student",
"transformer": "remove-out-of-range",
"filter": "property_filter",
"params": {
"program_field": "scope",
"property_conditions": {
"status": ['active', 'inactive']
},
}
},
{
"description": [("age", "number", "Age"),
("number", "number", "Number")],
"options": {
        'Students Per Age': {
"visualizations": VISUALIZATION_SETS['single_standard']
}
}
},
"host"),
}
STATISTICS_LIST = [k for k in STATISTIC_PROPERTIES]
NAMES_DICT = dict((k, v) for k, (v, _, _, _)
in STATISTIC_PROPERTIES.iteritems())
INSTRUCTIONS_DICT = dict((k, v) for k, (_, v, _, _)
in STATISTIC_PROPERTIES.iteritems())
CHARTS_DICT = dict((k, v) for k, (_, _, v, _)
in STATISTIC_PROPERTIES.iteritems())
ACCESS_DICT = dict((k, v) for k, (_, _, _, v)
in STATISTIC_PROPERTIES.iteritems())
def _getCommonProperties():
"""Returns properties that are common for all statistic entities.
"""
program = program_logic.getFromKeyName(program_keyname)
properties = {
'access_for_other_programs': 'invisible',
'scope': program,
'scope_path': program_keyname,
}
return properties
def _getSpecificProperties(link_id):
"""Returns properties that are specific to a particular statistic.
"""
properties = {
'link_id': link_id,
'name': NAMES_DICT[link_id],
'chart_json': simplejson.dumps(CHARTS_DICT[link_id]),
'instructions_json': simplejson.dumps(INSTRUCTIONS_DICT[link_id]),
'read_access': ACCESS_DICT[link_id]
}
return properties
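# Illustrative example: _getSpecificProperties('students_per_age') returns a
# dict whose 'name' is 'Students Per Age' and whose 'read_access' is 'host',
# with chart_json/instructions_json holding the JSON-serialized entries from
# CHARTS_DICT and INSTRUCTIONS_DICT.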
def _seedStatistic(properties):
"""Saves a new statistic entity, described by properties, in data store.
"""
entity = statistic_logic.updateOrCreateFromFields(properties, silent=True)
if entity:
print SUCCESS_MSG_FMT % properties['link_id']
else:
    print FAILURE_MSG_FMT % properties['link_id']
def exit():
"""Terminates the script.
"""
sys.exit(0)
def seedOne(link_id):
"""Seeds a single statistic to the data store.
Args:
link_id: link_id of the statistic that should be added.
"""
if link_id not in STATISTICS_LIST:
print DOES_NOT_EXISTS_MSG_FMT % link_id
else:
properties = _getCommonProperties()
new_properties = _getSpecificProperties(link_id)
properties.update(new_properties)
_seedStatistic(properties)
def seedAll():
"""Seeds all available statistics to the data store.
"""
properties = _getCommonProperties()
for statistic in STATISTICS_LIST:
new_properties = _getSpecificProperties(statistic)
properties.update(new_properties)
_seedStatistic(properties)
def setProgram(keyname):
  """Sets program key name.
  """
  global program_keyname
  program_keyname = keyname
def main(args):
context = {
'exit': exit,
'seed_all': seedAll,
'seed_one': seedOne,
'statistics_list': STATISTICS_LIST,
'set_program': setProgram,
}
interactive.remote(args, context)
program_keyname = 'melange/gcirunthrough'
if __name__ == '__main__':
if len(sys.argv) < 2:
print "Usage: %s app_id [host]" % (sys.argv[0],)
sys.exit(1)
main(sys.argv[1:])
| apache-2.0 | -4,528,827,782,894,037,000 | 25.3125 | 76 | 0.586139 | false |
shuque/pydig | pydiglib/rr_svcb.py | 1 | 3490 | """
SVCB and HTTPS RR Types class.
"""
import socket
import struct
from .name import name_from_wire_message
# SVCB (Service Binding RR) Parameter Types
SVCB_PARAM = {
0: "mandatory",
1: "alpn",
2: "no-default-alpn",
3: "port",
4: "ipv4hint",
5: "echconfig",
6: "ipv6hint",
}
class RdataSVCB:
"""SVCB RR RDATA Class"""
def __init__(self, pkt, offset, rdlen):
self.pkt = pkt
self.offset = offset
self.end_rdata = offset + rdlen
self.rdata = pkt[offset:self.end_rdata]
self.priority = None
self.targetname = None
self.params = [] # list(key=value strings)
self.decode()
def decode(self):
self.priority, = struct.unpack("!H", self.rdata[:2])
d, self.offset = name_from_wire_message(self.pkt, self.offset+2)
self.targetname = d.text()
self.decode_params(self.pkt[self.offset:self.end_rdata])
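    # Wire-format sketch (illustrative bytes): each SvcParam is a 2-byte key,
    # then a 2-byte length, then that many bytes of value.  For instance
    # b'\x00\x03\x00\x02\x01\xbb' decodes to the single parameter "port=443"
    # (key 3, length 2, value 0x01bb).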
def decode_params(self, params_wire):
lastkey = None
while params_wire:
pkey, plen = struct.unpack('!HH', params_wire[:4])
pdata = params_wire[4:4+plen]
pdata_text = None
if lastkey is not None:
if not pkey > lastkey:
print("ERROR: HTTPS RR keys are not in ascending order")
else:
lastkey = pkey
if pkey in SVCB_PARAM:
pkey_text = SVCB_PARAM[pkey]
else:
pkey_text = "key{:d}".format(pkey)
if pkey == 0: ## mandatory
keylist = []
while pdata:
                    key, = struct.unpack("!H", pdata[:2])
                    keylist.append(str(key))
pdata = pdata[2:]
pdata_text = ','.join(keylist)
elif pkey == 1: ## alpn
alpn_list = []
while pdata:
alpn_len = pdata[0]
alpn = pdata[1:1+alpn_len].decode()
alpn_list.append(alpn)
pdata = pdata[1+alpn_len:]
pdata_text = ','.join(alpn_list)
elif pkey == 3: ## port
                port, = struct.unpack("!H", pdata[:2])
                pdata_text = str(port)
elif pkey == 4: ## ipv4hint
ip4list = []
while pdata:
ip4 = socket.inet_ntop(socket.AF_INET, pdata[:4])
ip4list.append(ip4)
pdata = pdata[4:]
pdata_text = ','.join(ip4list)
elif pkey == 6: ## ipv6hint
ip6list = []
while pdata:
ip6 = socket.inet_ntop(socket.AF_INET6, pdata[:16])
ip6list.append(ip6)
pdata = pdata[16:]
pdata_text = ','.join(ip6list)
else:
pdata_text = pdata.hex()
if not pdata_text:
self.params.append(pkey_text)
else:
self.params.append(("{}={}".format(pkey_text, pdata_text)))
params_wire = params_wire[4+plen:]
def __str__(self):
return "%s %s %s" % (self.priority,
self.targetname,
" ".join(self.params))
| gpl-2.0 | -2,864,752,358,938,569,000 | 32.883495 | 77 | 0.439542 | false |
hbs/python-oauth2 | oauth2/__init__.py | 1 | 24945 | """
The MIT License
Copyright (c) 2007-2010 Leah Culver, Joe Stump, Mark Paschal, Vic Fryzel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import urllib
import time
import random
import urlparse
import hmac
import binascii
import httplib2
try:
from urlparse import parse_qs, parse_qsl
except ImportError:
from cgi import parse_qs, parse_qsl
VERSION = '1.0' # Hi Blaine!
HTTP_METHOD = 'GET'
SIGNATURE_METHOD = 'PLAINTEXT'
class Error(RuntimeError):
"""Generic exception class."""
def __init__(self, message='OAuth error occurred.'):
self._message = message
@property
def message(self):
"""A hack to get around the deprecation errors in 2.6."""
return self._message
def __str__(self):
return self._message
class MissingSignature(Error):
pass
def build_authenticate_header(realm=''):
"""Optional WWW-Authenticate header (401 error)"""
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
def build_xoauth_string(url, consumer, token=None):
"""Build an XOAUTH string for use in SMTP/IMPA authentication."""
request = Request.from_consumer_and_token(consumer, token,
"GET", url)
signing_method = SignatureMethod_HMAC_SHA1()
request.sign_request(signing_method, consumer, token)
params = []
for k, v in sorted(request.iteritems()):
if v is not None:
params.append('%s="%s"' % (k, escape(v)))
return "%s %s %s" % ("GET", url, ','.join(params))
def escape(s):
"""Escape a URL including any /."""
return urllib.quote(s, safe='~')
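# For example, escape('a/b c') yields 'a%2Fb%20c' -- unlike urllib.quote's
# default, '/' is percent-encoded here and only '~' is left untouched.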
def generate_timestamp():
"""Get seconds since epoch (UTC)."""
return int(time.time())
def generate_nonce(length=8):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
def generate_verifier(length=8):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
class Consumer(object):
"""A consumer of OAuth-protected services.
The OAuth consumer is a "third-party" service that wants to access
protected resources from an OAuth service provider on behalf of an end
user. It's kind of the OAuth client.
Usually a consumer must be registered with the service provider by the
developer of the consumer software. As part of that process, the service
provider gives the consumer a *key* and a *secret* with which the consumer
software can identify itself to the service. The consumer will include its
key in each request to identify itself, but will use its secret only when
signing requests, to prove that the request is from that particular
registered consumer.
Once registered, the consumer can then use its consumer credentials to ask
the service provider for a request token, kicking off the OAuth
authorization process.
"""
key = None
secret = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
if self.key is None or self.secret is None:
raise ValueError("Key and secret must be set.")
def __str__(self):
data = {'oauth_consumer_key': self.key,
'oauth_consumer_secret': self.secret}
return urllib.urlencode(data)
class Token(object):
"""An OAuth credential used to request authorization or a protected
resource.
Tokens in OAuth comprise a *key* and a *secret*. The key is included in
requests to identify the token being used, but the secret is used only in
the signature, to prove that the requester is who the server gave the
token to.
When first negotiating the authorization, the consumer asks for a *request
token* that the live user authorizes with the service provider. The
consumer then exchanges the request token for an *access token* that can
be used to access protected resources.
"""
key = None
secret = None
callback = None
callback_confirmed = None
verifier = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
if self.key is None or self.secret is None:
raise ValueError("Key and secret must be set.")
def set_callback(self, callback):
self.callback = callback
self.callback_confirmed = 'true'
def set_verifier(self, verifier=None):
if verifier is not None:
self.verifier = verifier
else:
self.verifier = generate_verifier()
def get_callback_url(self):
if self.callback and self.verifier:
# Append the oauth_verifier.
parts = urlparse.urlparse(self.callback)
scheme, netloc, path, params, query, fragment = parts[:6]
if query:
query = '%s&oauth_verifier=%s' % (query, self.verifier)
else:
query = 'oauth_verifier=%s' % self.verifier
return urlparse.urlunparse((scheme, netloc, path, params,
query, fragment))
return self.callback
def to_string(self):
"""Returns this token as a plain string, suitable for storage.
The resulting string includes the token's secret, so you should never
send or store this string where a third party can read it.
"""
data = {
'oauth_token': self.key,
'oauth_token_secret': self.secret,
}
if self.callback_confirmed is not None:
data['oauth_callback_confirmed'] = self.callback_confirmed
return urllib.urlencode(data)
@staticmethod
def from_string(s):
"""Deserializes a token from a string like one returned by
`to_string()`."""
if not len(s):
raise ValueError("Invalid parameter string.")
params = parse_qs(s, keep_blank_values=False)
if not len(params):
raise ValueError("Invalid parameter string.")
try:
key = params['oauth_token'][0]
except Exception:
raise ValueError("'oauth_token' not found in OAuth request.")
try:
secret = params['oauth_token_secret'][0]
except Exception:
raise ValueError("'oauth_token_secret' not found in "
"OAuth request.")
token = Token(key, secret)
try:
token.callback_confirmed = params['oauth_callback_confirmed'][0]
except KeyError:
pass # 1.0, no callback confirmed.
return token
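    # Round-trip sketch (illustrative values; urlencode key order may vary):
    #   s = Token('abc', 'xyz').to_string()
    #   # e.g. 'oauth_token_secret=xyz&oauth_token=abc'
    #   t = Token.from_string(s)
    #   assert (t.key, t.secret) == ('abc', 'xyz')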
def __str__(self):
return self.to_string()
def setter(attr):
name = attr.__name__
def getter(self):
try:
return self.__dict__[name]
except KeyError:
raise AttributeError(name)
def deleter(self):
del self.__dict__[name]
return property(getter, attr, deleter)
class Request(dict):
"""The parameters and information for an HTTP request, suitable for
authorizing with OAuth credentials.
When a consumer wants to access a service's protected resources, it does
so using a signed HTTP request identifying itself (the consumer) with its
key, and providing an access token authorized by the end user to access
those resources.
"""
version = VERSION
def __init__(self, method=HTTP_METHOD, url=None, parameters=None):
self.method = method
self.url = url
if parameters is not None:
self.update(parameters)
@setter
def url(self, value):
self.__dict__['url'] = value
if value is not None:
scheme, netloc, path, params, query, fragment = urlparse.urlparse(value)
# Exclude default port numbers.
if scheme == 'http' and netloc[-3:] == ':80':
netloc = netloc[:-3]
elif scheme == 'https' and netloc[-4:] == ':443':
netloc = netloc[:-4]
if scheme not in ('http', 'https'):
raise ValueError("Unsupported URL %s (%s)." % (value, scheme))
# Normalized URL excludes params, query, and fragment.
self.normalized_url = urlparse.urlunparse((scheme, netloc, path, None, None, None))
else:
self.normalized_url = None
self.__dict__['url'] = None
@setter
def method(self, value):
self.__dict__['method'] = value.upper()
def _get_timestamp_nonce(self):
return self['oauth_timestamp'], self['oauth_nonce']
def get_nonoauth_parameters(self):
"""Get any non-OAuth parameters."""
return dict([(k, v) for k, v in self.iteritems()
if not k.startswith('oauth_')])
def to_header(self, realm=''):
"""Serialize as a header for an HTTPAuth request."""
oauth_params = ((k, v) for k, v in self.items()
if k.startswith('oauth_'))
stringy_params = ((k, escape(str(v))) for k, v in oauth_params)
header_params = ('%s="%s"' % (k, v) for k, v in stringy_params)
params_header = ', '.join(header_params)
auth_header = 'OAuth realm="%s"' % realm
if params_header:
auth_header = "%s, %s" % (auth_header, params_header)
return {'Authorization': auth_header}
def to_postdata(self):
"""Serialize as post data for a POST request."""
# tell urlencode to deal with sequence values and map them correctly
# to resulting querystring. for example self["k"] = ["v1", "v2"] will
# result in 'k=v1&k=v2' and not k=%5B%27v1%27%2C+%27v2%27%5D
return urllib.urlencode(self, True).replace('+', '%20')
def to_url(self):
"""Serialize as a URL for a GET request."""
base_url = urlparse.urlparse(self.url)
query = parse_qs(base_url.query)
for k, v in self.items():
query.setdefault(k, []).append(v)
url = (base_url.scheme, base_url.netloc, base_url.path, base_url.params,
urllib.urlencode(query, True), base_url.fragment)
return urlparse.urlunparse(url)
def get_parameter(self, parameter):
ret = self.get(parameter)
if ret is None:
raise Error('Parameter not found: %s' % parameter)
return ret
def get_normalized_parameters(self):
"""Return a string that contains the parameters that must be signed."""
items = []
for key, value in self.iteritems():
if key == 'oauth_signature':
continue
# 1.0a/9.1.1 states that kvp must be sorted by key, then by value,
# so we unpack sequence values into multiple items for sorting.
if hasattr(value, '__iter__'):
items.extend((key, item) for item in value)
else:
items.append((key, value))
# Include any query string parameters from the provided URL
query = urlparse.urlparse(self.url)[4]
items.extend(self._split_url_string(query).items())
encoded_str = urllib.urlencode(sorted(items))
# Encode signature parameters per Oauth Core 1.0 protocol
# spec draft 7, section 3.6
# (http://tools.ietf.org/html/draft-hammer-oauth-07#section-3.6)
# Spaces must be encoded with "%20" instead of "+"
return encoded_str.replace('+', '%20').replace('%7E', '~')
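    # Illustrative example: for parameters {'b': '2', 'a': 'x y'} on a URL
    # with no query string, the normalized form is 'a=x%20y&b=2' -- keys are
    # sorted and spaces are encoded as %20, never '+'.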
def sign_request(self, signature_method, consumer, token):
"""Set the signature parameter to the result of sign."""
if 'oauth_consumer_key' not in self:
self['oauth_consumer_key'] = consumer.key
if token and 'oauth_token' not in self:
self['oauth_token'] = token.key
self['oauth_signature_method'] = signature_method.name
self['oauth_signature'] = signature_method.sign(self, consumer, token)
@classmethod
def make_timestamp(cls):
"""Get seconds since epoch (UTC)."""
return str(int(time.time()))
@classmethod
def make_nonce(cls):
"""Generate pseudorandom number."""
return str(random.randint(0, 100000000))
@classmethod
def from_request(cls, http_method, http_url, headers=None, parameters=None,
query_string=None):
"""Combines multiple parameter sources."""
if parameters is None:
parameters = {}
# Headers
if headers and 'Authorization' in headers:
auth_header = headers['Authorization']
# Check that the authorization header is OAuth.
if auth_header[:6] == 'OAuth ':
auth_header = auth_header[6:]
try:
# Get the parameters from the header.
header_params = cls._split_header(auth_header)
parameters.update(header_params)
except:
raise Error('Unable to parse OAuth parameters from '
'Authorization header.')
# GET or POST query string.
if query_string:
query_params = cls._split_url_string(query_string)
parameters.update(query_params)
# URL parameters.
param_str = urlparse.urlparse(http_url)[4] # query
url_params = cls._split_url_string(param_str)
parameters.update(url_params)
if parameters:
return cls(http_method, http_url, parameters)
return None
@classmethod
def from_consumer_and_token(cls, consumer, token=None,
http_method=HTTP_METHOD, http_url=None, parameters=None):
if not parameters:
parameters = {}
defaults = {
'oauth_consumer_key': consumer.key,
'oauth_timestamp': cls.make_timestamp(),
'oauth_nonce': cls.make_nonce(),
'oauth_version': cls.version,
}
defaults.update(parameters)
parameters = defaults
if token:
parameters['oauth_token'] = token.key
if token.verifier:
parameters['oauth_verifier'] = token.verifier
return Request(http_method, http_url, parameters)
@classmethod
def from_token_and_callback(cls, token, callback=None,
http_method=HTTP_METHOD, http_url=None, parameters=None):
if not parameters:
parameters = {}
parameters['oauth_token'] = token.key
if callback:
parameters['oauth_callback'] = callback
return cls(http_method, http_url, parameters)
@staticmethod
def _split_header(header):
"""Turn Authorization: header into parameters."""
params = {}
parts = header.split(',')
for param in parts:
# Ignore realm parameter.
if param.find('realm') > -1:
continue
# Remove whitespace.
param = param.strip()
# Split key-value.
param_parts = param.split('=', 1)
# Remove quotes and unescape the value.
params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"'))
return params
@staticmethod
def _split_url_string(param_str):
"""Turn URL string into parameters."""
parameters = parse_qs(param_str, keep_blank_values=False)
for k, v in parameters.iteritems():
parameters[k] = urllib.unquote(v[0])
return parameters
class Client(httplib2.Http):
"""OAuthClient is a worker to attempt to execute a request."""
def __init__(self, consumer, token=None, cache=None, timeout=None,
proxy_info=None):
if consumer is not None and not isinstance(consumer, Consumer):
raise ValueError("Invalid consumer.")
if token is not None and not isinstance(token, Token):
raise ValueError("Invalid token.")
self.consumer = consumer
self.token = token
self.method = SignatureMethod_HMAC_SHA1()
httplib2.Http.__init__(self, cache=cache, timeout=timeout,
proxy_info=proxy_info)
def set_signature_method(self, method):
if not isinstance(method, SignatureMethod):
raise ValueError("Invalid signature method.")
self.method = method
def request(self, uri, method="GET", body=None, headers=None,
redirections=httplib2.DEFAULT_MAX_REDIRECTS, connection_type=None):
DEFAULT_CONTENT_TYPE = 'application/x-www-form-urlencoded'
if not isinstance(headers, dict):
headers = {}
is_multipart = method == 'POST' and headers.get('Content-Type',
DEFAULT_CONTENT_TYPE) != DEFAULT_CONTENT_TYPE
if body and method == "POST" and not is_multipart:
parameters = dict(parse_qsl(body))
else:
parameters = None
req = Request.from_consumer_and_token(self.consumer,
token=self.token, http_method=method, http_url=uri,
parameters=parameters)
req.sign_request(self.method, self.consumer, self.token)
if method == "POST":
headers['Content-Type'] = headers.get('Content-Type',
DEFAULT_CONTENT_TYPE)
if is_multipart:
headers.update(req.to_header())
else:
body = req.to_postdata()
elif method == "GET":
uri = req.to_url()
else:
headers.update(req.to_header())
return httplib2.Http.request(self, uri, method=method, body=body,
headers=headers, redirections=redirections,
connection_type=connection_type)
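    # Usage sketch (hypothetical endpoint): a two-legged request signed with
    # the default HMAC-SHA1 method.
    #   consumer = Consumer('key', 'secret')
    #   client = Client(consumer)
    #   resp, content = client.request('https://api.example.com/resource',
    #                                  'GET')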
class Server(object):
"""A skeletal implementation of a service provider, providing protected
resources to requests from authorized consumers.
This class implements the logic to check requests for authorization. You
can use it with your web server or web framework to protect certain
resources with OAuth.
"""
timestamp_threshold = 300 # In seconds, five minutes.
version = VERSION
signature_methods = None
def __init__(self, signature_methods=None):
self.signature_methods = signature_methods or {}
def add_signature_method(self, signature_method):
self.signature_methods[signature_method.name] = signature_method
return self.signature_methods
def verify_request(self, request, consumer, token):
"""Verifies an api call and checks all the parameters."""
version = self._get_version(request)
self._check_signature(request, consumer, token)
parameters = request.get_nonoauth_parameters()
return parameters
def build_authenticate_header(self, realm=''):
"""Optional support for the authenticate header."""
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
def _get_version(self, request):
"""Verify the correct version request for this server."""
try:
version = request.get_parameter('oauth_version')
except:
version = VERSION
if version and version != self.version:
raise Error('OAuth version %s not supported.' % str(version))
return version
def _get_signature_method(self, request):
"""Figure out the signature with some defaults."""
try:
signature_method = request.get_parameter('oauth_signature_method')
except:
signature_method = SIGNATURE_METHOD
try:
# Get the signature method object.
signature_method = self.signature_methods[signature_method]
except:
signature_method_names = ', '.join(self.signature_methods.keys())
raise Error('Signature method %s not supported try one of the following: %s' % (signature_method, signature_method_names))
return signature_method
def _get_verifier(self, request):
return request.get_parameter('oauth_verifier')
def _check_signature(self, request, consumer, token):
timestamp, nonce = request._get_timestamp_nonce()
self._check_timestamp(timestamp)
signature_method = self._get_signature_method(request)
try:
signature = request.get_parameter('oauth_signature')
except:
raise MissingSignature('Missing oauth_signature.')
# Validate the signature.
valid = signature_method.check(request, consumer, token, signature)
if not valid:
key, base = signature_method.signing_base(request, consumer, token)
raise Error('Invalid signature. Expected signature base '
'string: %s' % base)
built = signature_method.sign(request, consumer, token)
def _check_timestamp(self, timestamp):
"""Verify that timestamp is recentish."""
timestamp = int(timestamp)
now = int(time.time())
lapsed = now - timestamp
if lapsed > self.timestamp_threshold:
raise Error('Expired timestamp: given %d and now %s has a '
'greater difference than threshold %d' % (timestamp, now,
self.timestamp_threshold))
class SignatureMethod(object):
"""A way of signing requests.
The OAuth protocol lets consumers and service providers pick a way to sign
requests. This interface shows the methods expected by the other `oauth`
modules for signing requests. Subclass it and implement its methods to
provide a new way to sign requests.
"""
def signing_base(self, request, consumer, token):
"""Calculates the string that needs to be signed.
This method returns a 2-tuple containing the starting key for the
signing and the message to be signed. The latter may be used in error
messages to help clients debug their software.
"""
raise NotImplementedError
def sign(self, request, consumer, token):
"""Returns the signature for the given request, based on the consumer
and token also provided.
You should use your implementation of `signing_base()` to build the
message to sign. Otherwise it may be less useful for debugging.
"""
raise NotImplementedError
def check(self, request, consumer, token, signature):
"""Returns whether the given signature is the correct signature for
the given consumer and token signing the given request."""
built = self.sign(request, consumer, token)
return built == signature
class SignatureMethod_HMAC_SHA1(SignatureMethod):
name = 'HMAC-SHA1'
def signing_base(self, request, consumer, token):
if request.normalized_url is None:
raise ValueError("Base URL for request is not set.")
sig = (
escape(request.method),
escape(request.normalized_url),
escape(request.get_normalized_parameters()),
)
key = '%s&' % escape(consumer.secret)
if token:
key += escape(token.secret)
raw = '&'.join(sig)
return key, raw
def sign(self, request, consumer, token):
"""Builds the base signature string."""
key, raw = self.signing_base(request, consumer, token)
# HMAC object.
try:
from hashlib import sha1 as sha
except ImportError:
import sha # Deprecated
hashed = hmac.new(key, raw, sha)
# Calculate the digest base 64.
return binascii.b2a_base64(hashed.digest())[:-1]
class SignatureMethod_PLAINTEXT(SignatureMethod):
name = 'PLAINTEXT'
def signing_base(self, request, consumer, token):
"""Concatenates the consumer key and secret with the token's
secret."""
sig = '%s&' % escape(consumer.secret)
if token:
sig = sig + escape(token.secret)
return sig, sig
def sign(self, request, consumer, token):
key, raw = self.signing_base(request, consumer, token)
return raw
| mit | -6,773,699,701,043,492,000 | 32.938776 | 134 | 0.616837 | false |
GrognardsFromHell/TemplePlus | tpdatasrc/tpgamefiles/rules/races/race011_troll.py | 1 | 1219 | from toee import *
import race_defs
###################################################
def GetCategory():
return "Core 3.5 Ed Classes"
print "Registering race: Troll"
raceEnum = race_troll
raceSpec = race_defs.RaceSpec()
raceSpec.modifier_name = "Troll" # Python modifier to be applied
raceSpec.flags = 2
raceSpec.hit_dice = dice_new("6d8")
raceSpec.level_modifier = 5 # basic level modifier
raceSpec.stat_modifiers = [12, 4, 12, -4, -2, -4] # str, dex, con, int, wis, cha
raceSpec.natural_armor = 5
raceSpec.proto_id = 13016
raceSpec.help_topic = "TAG_TROLL"
raceSpec.height_male = [100, 120]
raceSpec.height_female = [100, 120]
raceSpec.weight_male = [870, 1210]
raceSpec.weight_female = [800, 1200]
raceSpec.feats = [feat_simple_weapon_proficiency, feat_martial_weapon_proficiency_all]
raceSpec.material_offset = 0 # offset into rules/material_ext.mes file
###################################################
def RegisterRace():
raceSpec.register(raceEnum)
def GetFavoredClass(obj = OBJ_HANDLE_NULL):
return stat_level_fighter
def GetLevelModifier(obj = OBJ_HANDLE_NULL):
return 5 | mit | -6,833,448,202,129,764,000 | 32.888889 | 96 | 0.604594 | false |
sauloal/cnidaria | scripts/venv/lib/python2.7/site-packages/cogent/util/modules.py | 1 | 1377 | #!/usr/bin/env python
"""Compiled modules may be out of date or missing"""
import os, sys
__author__ = "Peter Maxwell"
__copyright__ = "Copyright 2007-2012, The Cogent Project"
__credits__ = ["Peter Maxwell"]
__license__ = "GPL"
__version__ = "1.5.3"
__maintainer__ = "Peter Maxwell"
__email__ = "[email protected]"
__status__ = "Production"
class ExpectedImportError(ImportError):
pass
def fail(msg):
print >>sys.stderr, msg
raise ExpectedImportError
def importVersionedModule(name, globals, min_version, alt_desc):
if os.environ.has_key('COGENT_PURE_PYTHON'):
fail('Not using compiled module "%s". Will use %s.' %
(name, alt_desc))
try:
m = __import__(name, globals)
except ImportError:
fail('Compiled module "%s" not found. Will use %s.' %
(name, alt_desc))
version = getattr(m, 'version_info', (0, 0))
desc = '.'.join(str(n) for n in version)
min_desc = '.'.join(str(n) for n in min_version)
max_desc = str(min_version[0])+'.x'
if version < min_version:
fail('Compiled module "%s" is too old as %s < %s. '
'Will use %s.' % (name, desc, min_desc, alt_desc))
if version[0] > min_version[0]:
fail('Compiled module "%s" is too new as %s > %s. '
'Will use %s.' % (name, desc, max_desc, alt_desc))
return m
| mit | 7,593,111,555,751,964,000 | 32.585366 | 66 | 0.580973 | false |
fiam/blangoblog | blango/forms.py | 1 | 1444 | from django.utils.translation import ugettext as _
from django.utils.safestring import mark_safe
from django.contrib.auth.models import User
from django import forms
from blango.models import Comment
from blango.jsforms import JSModelForm
# This violates the DRY principe, but it's the only
# way I found for editing staff comments from
# the Django admin application
class CommentForm(JSModelForm):
author = forms.CharField(label=_('Name'), max_length=256)
author_uri = forms.CharField(label=_('Website'), max_length=256, required=False)
author_email = forms.EmailField(label=_('Email'), help_text=mark_safe('<span class="small">%s</span>' % _('(Won\'t be published)')))
class Meta:
model = Comment
fields = ('author', 'author_uri', 'author_email', 'body')
def save(self, entry):
self.instance.entry = entry
super(CommentForm, self).save()
def clean_author(self):
author = self.cleaned_data['author']
try:
User.objects.get(username=author)
raise forms.ValidationError(_('This username belongs to a registered user'))
except User.DoesNotExist:
return author
class UserCommentForm(forms.ModelForm):
class Meta:
model = Comment
fields = ('body', )
def save(self, entry):
self.instance.user = self.user
self.instance.entry = entry
super(UserCommentForm, self).save(entry)
| bsd-3-clause | -577,320,069,726,871,300 | 31.088889 | 136 | 0.668975 | false |
stryder199/RyarkAssignments | Assignment2/web2py/applications/cqg/question/hamming.py | 1 | 4192 | import os
import file_util
import html_util
import hamming_util
# table properties
BORDER = 1
MIN_CELL_WIDTH = 36
MIN_CELL_HEIGHT = 16
# superclass for our two question types
class hamming:
def __init__(self,question_library_path,question_path):
self.question_library_path = question_library_path
self.question_path = question_path
config = file_util.dynamic_import(os.path.join(
question_library_path,question_path,'cqg_config.py'))
self.parity = config.parity
self.code_word = list(
hamming_util.generate_code_word(config.message,config.parity))
self.code_word_indexes = config.code_word_indexes
def get_question_library_path(self):
return self.question_library_path
def get_question_path(self):
return self.question_path
def get_css(self,answer):
return ('#hamming_table td { '
'text-align:center;'
'width:%i; height:%i; }'%(MIN_CELL_WIDTH,MIN_CELL_HEIGHT)
+ html_util.make_css_borders(BORDER,'hamming_table'))
class fill(hamming):
def __init__(self,question_library_path,question_path):
hamming.__init__(self,question_library_path,question_path)
# replace code_word hotspots with None
for i in self.code_word_indexes:
self.code_word[i-1] = None
def get_html(self,answer):
# generate question description
if self.parity == 0:
parity_string = 'even'
else:
parity_string = 'odd'
html = "<p>Fill in the bits for a valid Hamming code " + \
'using <b>' + parity_string + '</b> parity:</p>'
# generate a list of selects with bits specified by answer
# bits = list of len(code_word) items where bits[i]:
# code_word[i] if code_word[i] in [0,1]
# a select box if code_word[i] is None
indexes = range(1,len(self.code_word)+1) # one-relative
bits = []
for i,bit in enumerate(self.code_word,1):
if bit == None:
name = 'bit_' + str(i)
bit = html_util.get_select(
name,['','0','1'],answer[name])
bits.append(bit)
# generate table containing select lists
html += '<center>'
html += html_util.get_table([indexes,bits],'id="hamming_table"')
html += '</center>'
return html
def get_input_element_ids(self):
ids = []
for i,code in enumerate(self.code_word,1):
if code == None:
ids.append('bit_' + str(i))
return ids
def check_answer(self,answer):
# fill code_word with bits specified by answer
new_code_word = ''
for i,bit in enumerate(self.code_word,1):
if bit == None:
bit = answer['bit_' + str(i)]
# every input must be binary
if bit not in ['0','1']:
return False
new_code_word += bit
# check correctness of new_code_word
return hamming_util.check_code_word(
new_code_word,self.parity) == 0
class find_error(hamming):
def __init__(self,question_library_path,question_path):
hamming.__init__(self,question_library_path,question_path)
# flip bit specified by code_word_indexes
if self.code_word[self.code_word_indexes-1] == '0':
self.code_word[self.code_word_indexes-1] = '1'
else:
self.code_word[self.code_word_indexes-1] = '0'
def get_html(self,answer):
# generate question description
if self.parity == 0:
parity_string = 'even'
else:
parity_string = 'odd'
html = '<p>Assume exactly one bit is incorrect.</p>' + \
'Indicate the incorrect bit ' + \
'using <b>' + parity_string + '</b> parity:'
# generate list of radio buttons with
# the bit specified by answer set
indexes = range(1,len(self.code_word)+1) # one-relative
radio_buttons = []
for i in indexes:
is_set = answer['incorrect_bit'] == str(i)
radio_buttons.append(html_util.get_radio_button(
'incorrect_bit',str(i),is_set))
# generate table containing radio buttons
html += '<center>'
html += html_util.get_table(
[indexes,self.code_word,radio_buttons],'id="hamming_table"')
html += '</center>'
return html
def get_input_element_ids(self):
return ['incorrect_bit']
def check_answer(self,answer):
if not (answer['incorrect_bit'] != None and
answer['incorrect_bit'].isdigit()):
return False
code_word_string = ''
for code in self.code_word:
code_word_string += code
return int(answer['incorrect_bit']) == \
hamming_util.check_code_word(code_word_string,self.parity)
| mit | 5,480,421,653,543,299,000 | 28.111111 | 66 | 0.671756 | false |
gwct/core | core2/isofilter.py | 1 | 9979 | #!/usr/bin/python
########################################################################################
#Script to filter out isoforms from peptide files in FASTA ENSEMBL or NCBI format. This
#script can also add an easier to read species label to each sequence within the file.
#
#Sample ENSEMBL usage: python isoform_filter.py -i [input_fasta_file] -t ens -l [species_label] -o [output_filename]
#
#Sample NCBI usage: python isoform_filter.py -i [input_fasta_file] -t ncbi -g [toplevel_gff_file] -l [species_label] -o [output_filename]
#
#To just do the relabeling, set -f 0. You shouldn't need a gff file for the NCBI file in
#this case. For NCBI relabeling, the gene ID is also moved to the front of the title line.
#
#Written by: Gregg Thomas, Summer 2014
#
#NCBI filter command kindly provided by the folks at NCBI.
#
########################################################################################
import sys, re, os, argparse
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + "/corelib/")
import core
############################################
#Function definitions.
def optParse():
#This function handles the command line options.
parser = argparse.ArgumentParser()
parser.add_argument("-i", dest="input_file", help="An input file containing peptides from a species in FASTA format");
parser.add_argument("-t", dest="file_type", help="Currently supported file types are ENSEMBL and NCBI peptide files. Enter as 'ens' or 'ncbi' here. Note: If file type is NCBI you will also need to specify the top level gff file with -g")
parser.add_argument("-g", dest="gff_file", help="If file type is NCBI, the top level gff file is also needed and should be specified here.");
parser.add_argument("-l", dest="spec_label", help="A species label to add to the gene ID of each sequence.", default="");
parser.add_argument("-o", dest="output_file", help="The desired name of the output file. If none is specified the default is [input_filename]_isofiltered.fa or [input_filename]_isofiltered_relabel.fa");
args = parser.parse_args();
if None in [args.input_file, args.file_type, args.output_file]:
sys.exit(core.errorOut(1, "An input file (-i), input file type (-t), and an output file (-o) must all be specified"));
if args.file_type not in ['ens', 'ncbi']:
sys.exit(core.errorOut(2, "File type (-t) must be one of either 'ens' (Ensembl) or 'ncbi'"));
if args.file_type == "ens" and args.gff_file != None:
sys.exit(core.errorOut(3, "A gff file (-g) should not be specified with file type ens"));
if args.file_type == "ncbi" and args.gff_file == None:
sys.exit(core.errorOut(4, "A gff file (-g) must be specified with file type ncbi"));
return args.input_file, args.file_type, args.gff_file, args.spec_label, args.output_file;
############################################
def ensFilter(inseqs, spec_label, outfilename):
print "Indexing", len(inseqs), "sequences to be filtered.";
print "Parsing identifiers...";
for title in inseqs:
geneid = title[title.index("gene:") + 5:title.index("gene:") + 23];
if geneid in identDict:
identDict[geneid].append((title, inseqs[title]));
else:
identDict[geneid] = [];
identDict[geneid].append((title, inseqs[title]));
sys.stderr.write('\b');
print "Filtering and writing sequences...";
numbars, donepercent, i = 0,[],0;
for key in identDict:
numbars, donepercent = core.loadingBar(i, len(identDict), donepercent, numbars);
if len(identDict[key]) == 1:
long_title, long_seq = identDict[key][0];
else:
titlelist = [];
seqlist = [];
for tup in identDict[key]:
cur_itle, cur_seq = tup;
titlelist.append(cur_itle);
seqlist.append(cur_seq);
long_seq = max(seqlist, key=len)
long_title = titlelist[seqlist.index(long_seq)];
new_title = ">" + spec_label + "_" + long_title[1:];
core.writeSeq(outfilename, long_seq, new_title);
i += 1;
pstring = "100.0% complete.";
sys.stderr.write('\b' * len(pstring) + pstring);
print "\nDone!";
print i, "sequences written.";
print len(inseqs) - i, "sequences filtered.";
############################################
def ncbiFilter(inseqs, gff_file, spec_label, outfilename):
numbars, donepercent, i = 0, [], 0;
print "Obtaining longest isoforms from .gff file...";
cmd = "zcat " + gff_file + " | awk \'BEGIN{FS=\" \";OFS=\"|\"}$3==\"CDS\"{if($4<$5){print $5-$4+1,$9}else{print $4-$5+1,$9}}\' | grep \"[NX]P[_]\" | sed \'s/\([0-9]*\).*GeneID:\([0-9]*\).*\([NX]P[_][0-9]*\.[0-9]*\).*/\\1|\\2|\\3/\' | awk \'BEGIN{FS=\"|\";OFS=\"\t\";gene=\"\";acc=\"\";len=0}{if(acc!=$3){print gene,acc,len/3-1;gene=$2;acc=$3;len=$1}else{len=len+$1}}END{print gene,acc,len/3-1}\' | sort -k1,1n -k3,3nr -k2,2 | awk \'BEGIN{FS=\" \";OFS=\" \";gene=\"\";acc=\"\";len=0}{if(gene!=$1){print $1,$2,$3};gene=$1;acc=$2;len=$3}\' > ncbi_isoform_filter_tmp11567.txt"
os.system(cmd);
tmpFile = open("ncbi_isoform_filter_tmp11567.txt", "r");
tmpLines = tmpFile.readlines();
tmpFile.close();
os.system("rm ncbi_isoform_filter_tmp11567.txt");
longest_isos = [];
for each in tmpLines:
longest_isos.append(each.split("\t")[1]);
longest_isos = filter(None, longest_isos);
print "Writing longest isoforms to output file...";
count = 0;
for title in inseqs:
numbars, donepercent = core.loadingBar(i, len(inseqs), donepercent, numbars);
i += 1;
found = 0;
for gid in longest_isos:
if gid in title:
gid = title[title.index("P_")-1:title.index("|",title.index("P_"))]
new_title = ">" + spec_label + "_" + gid + " |" + title[1:title.index("P_")-1] + title[title.index("|",title.index("P_"))+1:];
core.writeSeq(outfilename, inseqs[title], new_title);
count += 1;
break;
pstring = "100.0% complete.";
sys.stderr.write('\b' * len(pstring) + pstring);
print "\nDone!";
print count, "sequences written.";
print len(inseqs) - count, "sequences filtered.";
############################################
#Main Block
############################################
infilename, in_type, gff_file, label, outfilename = optParse();
pad = 50;
print "=======================================================================";
print "\t\t\t" + core.getDateTime();
print core.spacedOut("Filtering isoforms from:", pad), infilename;
if in_type == "ens":
print core.spacedOut("File type:", pad), "Ensembl";
if in_type == "ncbi":
print core.spacedOut("File type:", pad), "NCBI";
print core.spacedOut("Using GFF file:", pad), gff_file;
if in_type == "crow":
print core.spacedOut("File type:", pad), "Crow";
if label != "":
print core.spacedOut("Adding label to beginning of FASTA headers:", pad), label;
print core.spacedOut("Writing output to:", pad), outfilename;
core.filePrep(outfilename);
print "--------------------------";
identDict = {};
ins, skip_flag = core.fastaReader(infilename);
if in_type == "ens":
ensFilter(ins, label, outfilename);
elif in_type == "ncbi":
ncbiFilter(ins, gff_file, label, outfilename);
print "=======================================================================";
## DEFUNCT FILTER FOR THE CROW FILES
# elif in_type == "crow":
# crowFilter(ins, label, outfilename);
# def crowFilter(inSeqs, filterflag, speclabel, outFilename):
# rotator = 0;
# numbars = 0;
# donepercent = [];
# i = 0;
# if filterflag == 1:
# print "Indexing", len(inSeqs), "sequences to be filtered.";
# print "Parsing identifiers...";
# for each in inSeqs:
# rotator = core.loadingRotator(i, rotator, 100)
# curTitle, curSeq = core.getFastafromInd(inFilename, each[0], each[1], each[2], each[3]);
# if "gene=" not in curTitle:
# print curTitle;
# continue;
# geneid = curTitle[curTitle.index("gene=") + 5:].strip();
# if geneid in identDict:
# identDict[geneid].append(each);
# else:
# identDict[geneid] = [];
# identDict[geneid].append(each);
# i = i + 1;
# sys.stderr.write('\b');
# print "Filtering and writing sequences...";
# i = 0;
# count = 0;
# for key in identDict:
# numbars, donepercent = core.loadingBar(i, len(identDict), donepercent, numbars);
# if len(identDict[key]) == 1:
# curTitle, curSeq = core.getFastafromInd(inFilename, identDict[key][0][0], identDict[key][0][1], identDict[key][0][2], identDict[key][0][3]);
# if speclabel != "":
# newTitle = ">" + speclabel + "_" + curTitle[1:];
# core.writeSeq(outFilename, curSeq, newTitle);
# else:
# core.writeSeq(outFilename, curSeq, curTitle);
# count = count + 1;
# else:
# titlelist = [];
# seqlist = [];
# for inds in identDict[key]:
# aTitle, aSeq = core.getFastafromInd(inFilename, inds[0], inds[1], inds[2], inds[3]);
# titlelist.append(aTitle);
# seqlist.append(aSeq);
# longseq = max(seqlist, key=len)
# for inds in identDict[key]:
# aTitle, aSeq = core.getFastafromInd(inFilename, inds[0], inds[1], inds[2], inds[3]);
# if aSeq == longseq:
# curTitle, curSeq = core.getFastafromInd(inFilename, inds[0], inds[1], inds[2], inds[3]);
# if speclabel != "":
# newTitle = ">" + speclabel + "_" + curTitle[1:];
# core.writeSeq(outFilename, curSeq, newTitle);
# else:
# core.writeSeq(outFilename, curSeq, curTitle);
# count = count + 1;
# break;
# i = i + 1;
# pstring = "100.0% complete.";
# sys.stderr.write('\b' * len(pstring) + pstring);
# print "\nDone!";
# print count, "out of", len(identDict), "identifiers written.";
# print len(inSeqs) - count, "sequences filtered.";
# else:
# print "Relabeling...";
# for seq in inSeqs:
# numbars, donepercent = core.loadingBar(i, len(inSeqs), donepercent, numbars);
# i = i + 1;
# curTitle, curSeq = core.getFastafromInd(inFilename, seq[0], seq[1], seq[2], seq[3]);
# newTitle = ">" + speclabel + "_" + curTitle[1:];
# core.writeSeq(outFilename, curSeq, newTitle);
# pstring = "100.0% complete.";
# sys.stderr.write('\b' * len(pstring) + pstring);
# print "\nDone!";
| gpl-3.0 | 8,496,657,265,609,689,000 | 32.712838 | 573 | 0.605772 | false |
dodger487/MIST | data/makeSnippets.py | 1 | 5346 | #!/usr/bin/env python
# Chris Riederer
# Google, Inc
# 2014-07-25
import test_detect
import numpy as np
import os
import json
import random
import sys
def makeNegativeSnippets(runData, number, snipPrefixTime=100000000, snipPostfixTime=500000000):
return makeSnippets(runData, True, numberNegative=number, snipPrefixTime=snipPrefixTime, snipPostfixTime=snipPostfixTime)
def makePositiveSnippets(runData, snipPrefixTime=100000000, snipPostfixTime=500000000):
return makeSnippets(runData, False, snipPrefixTime=snipPrefixTime, snipPostfixTime=snipPostfixTime)
def makeSnippets(runData, isNegative, numberNegative=None, snipPrefixTime=10000000, snipPostfixTime=100000000):
"""Given a runData file, makes smaller snippets of positive examples for training
runData: the JSON object representation of a recording
snipPrefixTime: the time, in NANOSECONDS, preceding the label time that we're
putting in the snippet
snipPrefixTime: the time, in NANOSECONDS, after the label time that we're
putting in the snippet
"""
data = np.array(runData["magnetometer"])
data = data[data[:, 2:].any(1)]
domain = data[:,0]
if isNegative and len(runData['labels']) != 0:
raise Exception("Length of labels should be 0 when generating negative examples")
elif not isNegative and len(runData['labels']) == 0:
raise Exception("Length of labels cannot be 0 when generating positive examples")
elif isNegative:
# generate start point for snippets, and ensure snippet is entirely in recorded data
possibleStartPoints = domain[domain < domain[-1] - snipPostfixTime - snipPostfixTime]
labels = [[labelTime, 1] for labelTime in random.sample(possibleStartPoints, numberNegative)]
else:
labels = runData['labels']
snippets = []
for index, (labelTime, label) in enumerate(labels):
snippet = runData.copy()
if isNegative:
snippet['labels'] = []
else:
snippet['labels'] = [[labelTime, label]]
snippet['filename'] = "%s-%02d.json" % (runData['filename'].rsplit('.')[0], index)
snippetIndices = (domain >= labelTime-snipPrefixTime) & (domain < labelTime+snipPostfixTime)
snippet['magnetometer'] = list(map(list, data[snippetIndices, :])) # convert back to python list, so JSON can serialize
snippets.append(snippet)
return snippets
def makeSnippet(runData, snipId, startTime, snipLength=600000000):
data = np.array(runData["magnetometer"])
data = data[data[:, 2:].any(1)]
domain = data[:,0]
snippet = runData.copy()
labels = [[labelTime, label] for labelTime, label in runData['labels'] if startTime < labelTime < startTime+snipLength]
snippet['labels'] = labels
# todo: filename
snippet['filename'] = "%s-hn-%02d.json" % (runData['filename'].rsplit('.')[0], snipId)
snippetIndices = (domain >= startTime) & (domain < startTime+snipLength)
snippet['magnetometer'] = list(map(list, data[snippetIndices, :])) # convert back to python list, so JSON can serialize
return snippet
def findHardNegatives(runData, snipLength=600000000):
"""Find portions of a signal that are difficult for our detector to realize are negative"""
# TODO: initially writing this just for negative runData files... should make it work with everything
detector = test_detect.OriginalDetector()
snippet = runData.copy()
data = np.array(runData["magnetometer"])
data = data[data[:, 2:].any(1)]
domain = data[:,0]
min_cost = float('inf')
for startTime in domain[(domain < domain[-1] - snipLength)]:
snippetIndices = (domain >= startTime) & (domain < startTime+snipLength)
snippet['magnetometer'] = list(map(list, data[snippetIndices, :])) # convert back to python list, so JSON can serialize
snippet['labels'] = []
cost = detector.evaluateCost(snippet, True)
if cost < min_cost:
min_cost = cost
worst_snip = snippet.copy()
return worst_snip
def createSnippetsFromRunDataList(runDataList):
runDataList = test_detect.GetRunDataFromArgs(sys.argv[1:])
for runData in runDataList:
snips = createSnippetsFromPlot(runData)
for snip in snips:
newFilename = os.path.join('relabeled', snip['filename'])
with open(newFilename, 'w') as f:
print newFilename
json.dump(snip, f)
def createSnippetsFromPlot(runData, inputLabels=[], snipLength=600000000):
"""This creates a plot from runData. When the user clicks on the plot, a snippet
of length snipLength nanoseconds is created and plotted. The user can repeat
this process as many times as he or she likes. When the user closes the
original plot, the list of the created snippets is returned.
"""
snippets = []
def onclick(event):
startTime = event.xdata
print "Start time of snippet: %16d" % int(startTime)
snipId = len(snippets)
snip = makeSnippet(runData, snipId, startTime, snipLength=snipLength)
snippets.append(snip) # add to snippets
test_detect.PlotData(snip) # plot new snip
test_detect.pl.show()
test_detect.PlotData(runData, inputLabels=inputLabels)
fig = test_detect.pl.gcf()
cid = fig.canvas.mpl_connect('button_press_event', onclick)
test_detect.pl.show()
return snippets
if __name__ == '__main__':
runDataList = test_detect.GetRunDataFromArgs(sys.argv[1:])
createSnippetsFromRunDataList(runDataList)
# print sum([len(runData['labels']) for runData in runDataList])
| apache-2.0 | -2,253,044,168,712,271,600 | 39.195489 | 123 | 0.721848 | false |
rhildred/rhildred.github.io | tag_generator.py | 1 | 1427 | #!/usr/bin/env python
'''
tag_generator.py
Copyright 2017 Long Qian
Contact: [email protected]
This script creates tags for your Jekyll blog hosted by Github page.
No plugins required.
'''
import glob
import os
import re
post_dir = '_posts/'
tag_dir = 'tag/'
filenames = glob.glob(post_dir + '*')
total_tags = []
for filename in filenames:
f = open(filename, 'r')
crawl = False
for line in f:
if crawl:
current_tags = line.strip().split()
if current_tags[0] == 'tags:':
total_tags.extend(current_tags[1:])
crawl = False
break
if line.strip() == '---':
if not crawl:
crawl = True
else:
crawl = False
break
f.close()
total_tags = set(total_tags)
old_tags = glob.glob(tag_dir + '*.md')
for tag in old_tags:
os.remove(tag)
for tag in total_tags:
sTag = re.sub("^\.", "", tag)
tag_filename = tag_dir + sTag.lower().replace('.', '-') + '.md'
f = open(tag_filename, 'a')
write_str = '---\nlayout: tagpage\ntitle: \"Tag: ' + tag + '\"\ntag: ' + tag + '\nrobots: noindex\nexclude_from_search: true\ntagline: \'"Creative Active Individuals can only grow up in a society that emphasizes learning instead of teaching." - Chris Alexander\'\n---\n'
f.write(write_str)
f.close()
print("Tags generated, count", total_tags.__len__())
| mit | 1,607,549,321,355,180,300 | 25.924528 | 274 | 0.573931 | false |
Samfox2/motioneye | motioneye/v4l2ctl.py | 1 | 11312 |
# Copyright (c) 2013 Calin Crisan
# This file is part of motionEye.
#
# motionEye is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import fcntl
import logging
import os.path
import re
import stat
import subprocess
import time
import utils
_resolutions_cache = {}
_ctrls_cache = {}
_ctrl_values_cache = {}
_DEV_V4L_BY_ID = '/dev/v4l/by-id/'
def find_v4l2_ctl():
try:
return subprocess.check_output('which v4l2-ctl', shell=True).strip()
except subprocess.CalledProcessError: # not found
return None
def list_devices():
global _resolutions_cache, _ctrls_cache, _ctrl_values_cache
logging.debug('listing v4l2 devices...')
try:
output = ''
started = time.time()
p = subprocess.Popen('v4l2-ctl --list-devices 2>/dev/null', shell=True, stdout=subprocess.PIPE, bufsize=1)
fd = p.stdout.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
while True:
try:
data = p.stdout.read(1024)
if not data:
break
except IOError:
data = ''
time.sleep(0.01)
output += data
if len(output) > 10240:
logging.warn('v4l2-ctl command returned more than 10k of output')
break
if time.time() - started > 3:
logging.warn('v4l2-ctl command ran for more than 3 seconds')
break
except subprocess.CalledProcessError:
logging.debug('failed to list devices (probably no devices installed)')
return []
try:
# try to kill the v4l2-ctl subprocess
p.kill()
except:
pass # nevermind
name = None
devices = []
for line in output.split('\n'):
if line.startswith('\t'):
device = line.strip()
persistent_device = find_persistent_device(device)
devices.append((device, persistent_device, name))
logging.debug('found device %(name)s: %(device)s, %(persistent_device)s' % {
'name': name, 'device': device, 'persistent_device': persistent_device})
else:
name = line.split('(')[0].strip()
# clear the cache
_resolutions_cache = {}
_ctrls_cache = {}
_ctrl_values_cache = {}
return devices
def list_resolutions(device):
global _resolutions_cache
device = utils.make_str(device)
if device in _resolutions_cache:
return _resolutions_cache[device]
logging.debug('listing resolutions of device %(device)s...' % {'device': device})
resolutions = set()
output = ''
started = time.time()
p = subprocess.Popen('v4l2-ctl -d "%(device)s" --list-formats-ext | grep -vi stepwise | grep -oE "[0-9]+x[0-9]+" || true' % {
'device': device}, shell=True, stdout=subprocess.PIPE, bufsize=1)
fd = p.stdout.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
while True:
try:
data = p.stdout.read(1024)
if not data:
break
except IOError:
data = ''
time.sleep(0.01)
output += data
if len(output) > 10240:
logging.warn('v4l2-ctl command returned more than 10k of output')
break
if time.time() - started > 3:
logging.warn('v4l2-ctl command ran for more than 3 seconds')
break
try:
# try to kill the v4l2-ctl subprocess
p.kill()
except:
pass # nevermind
for pair in output.split('\n'):
pair = pair.strip()
if not pair:
continue
width, height = pair.split('x')
width = int(width)
height = int(height)
if (width, height) in resolutions:
continue # duplicate resolution
if width < 96 or height < 96: # some reasonable minimal values
continue
if width % 16 or height % 16: # ignore non-modulo 16 resolutions
continue
resolutions.add((width, height))
logging.debug('found resolution %(width)sx%(height)s for device %(device)s' % {
'device': device, 'width': width, 'height': height})
if not resolutions:
logging.debug('no resolutions found for device %(device)s, using common values' % {'device': device})
# no resolution returned by v4l2-ctl call, add common default resolutions
resolutions = utils.COMMON_RESOLUTIONS
resolutions = list(sorted(resolutions, key=lambda r: (r[0], r[1])))
_resolutions_cache[device] = resolutions
return resolutions
def device_present(device):
device = utils.make_str(device)
try:
st = os.stat(device)
return stat.S_ISCHR(st.st_mode)
except:
return False
def find_persistent_device(device):
device = utils.make_str(device)
try:
devs_by_id = os.listdir(_DEV_V4L_BY_ID)
except OSError:
return device
for p in devs_by_id:
p = os.path.join(_DEV_V4L_BY_ID, p)
if os.path.realpath(p) == device:
return p
return device
def get_brightness(device):
return _get_ctrl(device, 'brightness')
def set_brightness(device, value):
_set_ctrl(device, 'brightness', value)
def get_contrast(device):
return _get_ctrl(device, 'contrast')
def set_contrast(device, value):
_set_ctrl(device, 'contrast', value)
def get_saturation(device):
return _get_ctrl(device, 'saturation')
def set_saturation(device, value):
_set_ctrl(device, 'saturation', value)
def get_hue(device):
return _get_ctrl(device, 'hue')
def set_hue(device, value):
_set_ctrl(device, 'hue', value)
def _get_ctrl(device, control):
global _ctrl_values_cache
device = utils.make_str(device)
if not device_present(device):
return None
if device in _ctrl_values_cache and control in _ctrl_values_cache[device]:
return _ctrl_values_cache[device][control]
controls = _list_ctrls(device)
properties = controls.get(control)
if properties is None:
logging.warn('control %(control)s not found for device %(device)s' % {
'control': control, 'device': device})
return None
value = int(properties['value'])
# adjust the value range
if 'min' in properties and 'max' in properties:
min_value = int(properties['min'])
max_value = int(properties['max'])
value = int(round((value - min_value) * 100.0 / (max_value - min_value)))
else:
logging.warn('min and max values not found for control %(control)s of device %(device)s' % {
'control': control, 'device': device})
logging.debug('control %(control)s of device %(device)s is %(value)s%%' % {
'control': control, 'device': device, 'value': value})
return value
def _set_ctrl(device, control, value):
global _ctrl_values_cache
device = utils.make_str(device)
if not device_present(device):
return
controls = _list_ctrls(device)
properties = controls.get(control)
if properties is None:
logging.warn('control %(control)s not found for device %(device)s' % {
'control': control, 'device': device})
return
_ctrl_values_cache.setdefault(device, {})[control] = value
# adjust the value range
if 'min' in properties and 'max' in properties:
min_value = int(properties['min'])
max_value = int(properties['max'])
value = int(round(min_value + value * (max_value - min_value) / 100.0))
else:
logging.warn('min and max values not found for control %(control)s of device %(device)s' % {
'control': control, 'device': device})
logging.debug('setting control %(control)s of device %(device)s to %(value)s' % {
'control': control, 'device': device, 'value': value})
output = ''
started = time.time()
p = subprocess.Popen('v4l2-ctl -d "%(device)s" --set-ctrl %(control)s=%(value)s' % {
'device': device, 'control': control, 'value': value}, shell=True, stdout=subprocess.PIPE, bufsize=1)
fd = p.stdout.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
while True:
try:
data = p.stdout.read(1024)
if not data:
break
except IOError:
data = ''
time.sleep(0.01)
output += data
if len(output) > 10240:
logging.warn('v4l2-ctl command returned more than 10k of output')
break
if time.time() - started > 3:
logging.warn('v4l2-ctl command ran for more than 3 seconds')
break
try:
# try to kill the v4l2-ctl subprocess
p.kill()
except:
pass # nevermind
def _list_ctrls(device):
global _ctrls_cache
device = utils.make_str(device)
if device in _ctrls_cache:
return _ctrls_cache[device]
output = ''
started = time.time()
p = subprocess.Popen('v4l2-ctl -d "%(device)s" --list-ctrls' % {
'device': device}, shell=True, stdout=subprocess.PIPE, bufsize=1)
fd = p.stdout.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
while True:
try:
data = p.stdout.read(1024)
if not data:
break
except IOError:
data = ''
time.sleep(0.01)
output += data
if len(output) > 10240:
logging.warn('v4l2-ctl command returned more than 10k of output')
break
if time.time() - started > 3:
logging.warn('v4l2-ctl command ran for more than 3 seconds')
break
try:
# try to kill the v4l2-ctl subprocess
p.kill()
except:
pass # nevermind
controls = {}
for line in output.split('\n'):
if not line:
continue
match = re.match('^\s*(\w+)\s+\(\w+\)\s+\:\s*(.+)', line)
if not match:
continue
(control, properties) = match.groups()
properties = dict([v.split('=', 1) for v in properties.split(' ') if v.count('=')])
controls[control] = properties
_ctrls_cache[device] = controls
return controls
| gpl-3.0 | -9,183,610,515,678,452,000 | 26.062201 | 129 | 0.573285 | false |
oshtaier/robottelo | tests/foreman/cli/test_host_collection.py | 1 | 20256 | # -*- encoding: utf-8 -*-
# vim: ts=4 sw=4 expandtab ai
"""Test class for Host Collection CLI"""
from ddt import ddt
from fauxfactory import gen_string
from robottelo.cli.contentview import ContentView
from robottelo.cli.lifecycleenvironment import LifecycleEnvironment
from robottelo.cli.factory import (
CLIFactoryError, make_org, make_host_collection, make_content_view,
make_lifecycle_environment, make_content_host)
from robottelo.cli.hostcollection import HostCollection
from robottelo.common.decorators import data, skip_if_bug_open
from robottelo.test import CLITestCase
@ddt
class TestHostCollection(CLITestCase):
"""Host Collection CLI tests."""
org = None
new_cv = None
promoted_cv = None
new_lifecycle = None
library = None
default_cv = None
def setUp(self): # noqa
"""Tests for Host Collections via Hammer CLI"""
super(TestHostCollection, self).setUp()
if TestHostCollection.org is None:
TestHostCollection.org = make_org(cached=True)
if TestHostCollection.new_lifecycle is None:
TestHostCollection.new_lifecycle = make_lifecycle_environment(
{u'organization-id': TestHostCollection.org['id']},
cached=True)
if TestHostCollection.library is None:
library_result = LifecycleEnvironment.info(
{u'organization-id': TestHostCollection.org['id'],
u'name': u'Library'}
)
TestHostCollection.library = library_result.stdout
if TestHostCollection.default_cv is None:
cv_result = ContentView.info(
{u'organization-id': TestHostCollection.org['id'],
u'name': u'Default Organization View'}
)
TestHostCollection.default_cv = cv_result.stdout
if TestHostCollection.new_cv is None:
TestHostCollection.new_cv = make_content_view(
{u'organization-id': TestHostCollection.org['id']}
)
TestHostCollection.promoted_cv = None
cv_id = TestHostCollection.new_cv['id']
ContentView.publish({u'id': cv_id})
result = ContentView.version_list({u'content-view-id': cv_id})
version_id = result.stdout[0]['id']
promotion = ContentView.version_promote({
u'id': version_id,
u'to-lifecycle-environment-id': (
TestHostCollection.new_lifecycle['id']),
u'organization-id': TestHostCollection.org['id']
})
if promotion.stderr == []:
TestHostCollection.promoted_cv = TestHostCollection.new_cv
def _new_host_collection(self, options=None):
"""Make a host collection and asserts its success"""
if options is None:
options = {}
if not options.get('organization-id', None):
options['organization-id'] = self.org['id']
group = make_host_collection(options)
# Fetch it
result = HostCollection.info(
{
'id': group['id']
}
)
self.assertEqual(
result.return_code,
0,
"Host collection was not found")
self.assertEqual(
len(result.stderr), 0, "No error was expected")
# Return the host collection dictionary
return group
@data(
{'name': gen_string('alpha', 15)},
{'name': gen_string('alphanumeric', 15)},
{'name': gen_string('numeric', 15)},
{'name': gen_string('latin1', 15)},
{'name': gen_string('utf8', 15)},
{'name': gen_string('html', 15)},
)
def test_positive_create_1(self, test_data):
"""@Test: Check if host collection can be created with random names
@Feature: Host Collection
@Assert: Host collection is created and has random name
"""
new_host_col = self._new_host_collection({'name': test_data['name']})
# Assert that name matches data passed
self.assertEqual(
new_host_col['name'],
test_data['name'],
"Names don't match"
)
@data(
{'description': gen_string('alpha', 15)},
{'description': gen_string('alphanumeric', 15)},
{'description': gen_string('numeric', 15)},
{'description': gen_string('latin1', 15)},
{'description': gen_string('utf8', 15)},
{'description': gen_string('html', 15)},
)
def test_positive_create_2(self, test_data):
"""@Test: Check if host collection can be created with random description
@Feature: Host Collection
@Assert: Host collection is created and has random description
"""
new_host_col = self._new_host_collection(
{'description': test_data['description']})
# Assert that description matches data passed
self.assertEqual(
new_host_col['description'],
test_data['description'],
"Descriptions don't match"
)
@data('1', '3', '5', '10', '20')
def test_positive_create_3(self, test_data):
"""@Test: Check if host collection can be created with random limits
@Feature: Host Collection
@Assert: Host collection is created and has random limits
"""
new_host_col = self._new_host_collection(
{'max-content-hosts': test_data})
# Assert that limit matches data passed
self.assertEqual(
new_host_col['max-content-hosts'],
str(test_data),
("Limits don't match '%s' != '%s'" %
(new_host_col['max-content-hosts'], str(test_data)))
)
@data(
{'name': gen_string('alpha', 300)},
{'name': gen_string('alphanumeric', 300)},
{'name': gen_string('numeric', 300)},
{'name': gen_string('latin1', 300)},
{'name': gen_string('utf8', 300)},
{'name': gen_string('html', 300)},
)
def test_negative_create_1(self, test_data):
"""@Test: Check if host collection can be created with random names
@Feature: Host Collection
@Assert: Host collection is created and has random name
"""
with self.assertRaises(Exception):
self._new_host_collection({'name': test_data['name']})
@data(
{'name': gen_string('alpha', 15)},
{'name': gen_string('alphanumeric', 15)},
{'name': gen_string('numeric', 15)},
{'name': gen_string('latin1', 15)},
{'name': gen_string('utf8', 15)},
{'name': gen_string('html', 15)},
)
def test_positive_update_1(self, test_data):
"""@Test: Check if host collection name can be updated
@Feature: Host Collection
@Assert: Host collection is created and name is updated
"""
new_host_col = self._new_host_collection()
# Assert that name does not matches data passed
self.assertNotEqual(
new_host_col['name'],
test_data['name'],
"Names should not match"
)
# Update host collection
result = HostCollection.update(
{
'id': new_host_col['id'],
'organization-id': self.org['id'],
'name': test_data['name']
}
)
self.assertEqual(
result.return_code,
0,
"Host collection was not updated")
self.assertEqual(
len(result.stderr), 0, "No error was expected")
# Fetch it
result = HostCollection.info(
{
'id': new_host_col['id'],
}
)
self.assertEqual(
result.return_code,
0,
"Host collection was not updated")
self.assertEqual(
len(result.stderr), 0, "No error was expected")
# Assert that name matches new value
self.assertIsNotNone(
result.stdout.get('name', None),
"The name field was not returned"
)
self.assertEqual(
result.stdout['name'],
test_data['name'],
"Names should match"
)
# Assert that name does not match original value
self.assertNotEqual(
new_host_col['name'],
result.stdout['name'],
"Names should not match"
)
@skip_if_bug_open('bugzilla', 1171669)
@data(
{'description': gen_string('alpha', 15)},
{'description': gen_string('alphanumeric', 15)},
{'description': gen_string('numeric', 15)},
{'description': gen_string('latin1', 15)},
{'description': gen_string('utf8', 15)},
{'description': gen_string('html', 15)},
)
def test_positive_update_2(self, test_data):
"""@Test: Check if host collection description can be updated
@Feature: Host Collection
@Assert: Host collection is created and description is updated
@BZ: 1171669
"""
new_host_col = self._new_host_collection()
# Assert that description does not match data passed
self.assertNotEqual(
new_host_col['description'],
test_data['description'],
"Descriptions should not match"
)
# Update sync plan
result = HostCollection.update(
{
'id': new_host_col['id'],
'organization-id': self.org['id'],
'description': test_data['description']
}
)
self.assertEqual(
result.return_code,
0,
"Host collection was not updated")
self.assertEqual(
len(result.stderr), 0, "No error was expected")
# Fetch it
result = HostCollection.info(
{
'id': new_host_col['id'],
}
)
self.assertEqual(
result.return_code,
0,
"Host collection was not updated")
self.assertEqual(
len(result.stderr), 0, "No error was expected")
# Assert that description matches new value
self.assertIsNotNone(
result.stdout.get('description', None),
"The description field was not returned"
)
self.assertEqual(
result.stdout['description'],
test_data['description'],
"Descriptions should match"
)
# Assert that description does not matches original value
self.assertNotEqual(
new_host_col['description'],
result.stdout['description'],
"Descriptions should not match"
)
@skip_if_bug_open('bugzilla', 1171669)
@data('3', '6', '9', '12', '15', '17', '19')
def test_positive_update_3(self, test_data):
"""@Test: Check if host collection limits be updated
@Feature: Host Collection
@Assert: Host collection limits is updated
@BZ: 1171669
"""
new_host_col = self._new_host_collection()
# Update sync interval
result = HostCollection.update(
{
'id': new_host_col['id'],
'organization-id': self.org['id'],
'max-content-hosts': test_data
}
)
self.assertEqual(
result.return_code,
0,
"Host collection was not updated")
self.assertEqual(
len(result.stderr), 0, "No error was expected")
# Fetch it
result = HostCollection.info(
{
'id': new_host_col['id'],
}
)
self.assertEqual(
result.return_code,
0,
"Host collection was not updated")
self.assertEqual(
len(result.stderr), 0, "No error was expected")
# Assert that limit was updated
self.assertEqual(
result.stdout['max-content-hosts'],
test_data,
"Limits don't match"
)
self.assertNotEqual(
new_host_col['max-content-hosts'],
result.stdout['max-content-hosts'],
"Limits don't match"
)
@data(
{'name': gen_string('alpha', 15)},
{'name': gen_string('alphanumeric', 15)},
{'name': gen_string('numeric', 15)},
{'name': gen_string('latin1', 15)},
{'name': gen_string('utf8', 15)},
{'name': gen_string('html', 15)},
)
def test_positive_delete_1(self, test_data):
"""@Test: Check if host collection can be created and deleted
@Feature: Host Collection
@Assert: Host collection is created and then deleted
"""
new_host_col = self._new_host_collection({'name': test_data['name']})
# Assert that name matches data passed
self.assertEqual(
new_host_col['name'],
test_data['name'],
"Names don't match"
)
# Delete it
result = HostCollection.delete(
{'id': new_host_col['id'],
'organization-id': self.org['id']})
self.assertEqual(
result.return_code,
0,
"Host collection was not deleted")
self.assertEqual(
len(result.stderr), 0, "No error was expected")
# Fetch it
result = HostCollection.info(
{
'id': new_host_col['id'],
}
)
self.assertNotEqual(
result.return_code,
0,
"Host collection should not be found"
)
self.assertGreater(
len(result.stderr),
0,
"Expected an error here"
)
def test_add_content_host(self):
"""@Test: Check if content host can be added to host collection
@Feature: Host Collection
@Assert: Host collection is created and content-host is added
"""
host_col_name = gen_string('alpha', 15)
content_host_name = gen_string('alpha', 15)
try:
new_host_col = self._new_host_collection({'name': host_col_name})
new_system = make_content_host({
u'name': content_host_name,
u'organization-id': self.org['id'],
u'content-view-id': self.default_cv['id'],
u'lifecycle-environment-id': self.library['id'],
})
except CLIFactoryError as err:
self.fail(err)
result = HostCollection.info({
u'id': new_host_col['id'],
u'organization-id': self.org['id']
})
no_of_content_host = result.stdout['total-content-hosts']
result = HostCollection.add_content_host({
u'id': new_host_col['id'],
u'organization-id': self.org['id'],
u'content-host-ids': new_system['id']
})
self.assertEqual(result.return_code, 0,
"Content Host not added to host collection")
self.assertEqual(len(result.stderr), 0,
"No error was expected")
result = HostCollection.info({
u'id': new_host_col['id'],
u'organization-id': self.org['id']
})
self.assertEqual(
result.return_code, 0, 'Failed to get info for host collection')
self.assertEqual(
len(result.stderr), 0, 'There should not be an error here')
self.assertGreater(result.stdout['total-content-hosts'],
no_of_content_host,
"There should not be an exception here")
def test_remove_content_host(self):
"""@Test: Check if content host can be removed from host collection
@Feature: Host Collection
@Assert: Host collection is created and content-host is removed
"""
host_col_name = gen_string('alpha', 15)
content_host_name = gen_string('alpha', 15)
try:
new_host_col = self._new_host_collection({'name': host_col_name})
new_system = make_content_host({
u'name': content_host_name,
u'organization-id': self.org['id'],
u'content-view-id': self.default_cv['id'],
u'lifecycle-environment-id': self.library['id'],
})
except CLIFactoryError as err:
self.fail(err)
result = HostCollection.add_content_host({
u'id': new_host_col['id'],
u'organization-id': self.org['id'],
u'content-host-ids': new_system['id']
})
self.assertEqual(result.return_code, 0,
"Content Host not added to host collection")
self.assertEqual(len(result.stderr), 0,
"No error was expected")
result = HostCollection.info({
u'id': new_host_col['id'],
u'organization-id': self.org['id']
})
no_of_content_host = result.stdout['total-content-hosts']
result = HostCollection.remove_content_host({
u'id': new_host_col['id'],
u'organization-id': self.org['id'],
u'content-host-ids': new_system['id']
})
self.assertEqual(result.return_code, 0,
"Content Host not removed host collection")
self.assertEqual(len(result.stderr), 0,
"No error was expected")
result = HostCollection.info({
u'id': new_host_col['id'],
u'organization-id': self.org['id']
})
self.assertEqual(
result.return_code, 0, 'Failed to get info for host collection')
self.assertEqual(
len(result.stderr), 0, 'There should not be an error here')
self.assertGreater(no_of_content_host,
result.stdout['total-content-hosts'],
"There should not be an exception here")
def test_content_hosts(self):
"""@Test: Check if content hosts added to host collection is listed
@Feature: Host Collection
@Assert: Content-host added to host-collection is listed
"""
host_col_name = gen_string('alpha', 15)
content_host_name = gen_string('alpha', 15)
try:
new_host_col = self._new_host_collection({'name': host_col_name})
new_system = make_content_host({
u'name': content_host_name,
u'organization-id': self.org['id'],
u'content-view-id': self.default_cv['id'],
u'lifecycle-environment-id': self.library['id'],
})
except CLIFactoryError as err:
self.fail(err)
no_of_content_host = new_host_col['total-content-hosts']
result = HostCollection.add_content_host({
u'id': new_host_col['id'],
u'organization-id': self.org['id'],
u'content-host-ids': new_system['id']
})
self.assertEqual(result.return_code, 0,
"Content Host not added to host collection")
self.assertEqual(len(result.stderr), 0,
"No error was expected")
result = HostCollection.info({
u'id': new_host_col['id'],
u'organization-id': self.org['id']
})
self.assertEqual(
result.return_code, 0, 'Failed to get info for host collection')
self.assertEqual(
len(result.stderr), 0, 'There should not be an error here')
self.assertGreater(result.stdout['total-content-hosts'],
no_of_content_host,
"There should not be an exception here")
result = HostCollection.content_hosts({
u'name': host_col_name,
u'organization-id': self.org['id']
})
self.assertEqual(
result.return_code, 0, 'Failed to get list of content-host')
self.assertEqual(
len(result.stderr), 0, 'There should not be an error here')
self.assertEqual(
new_system['id'], result.stdout[0]['id'],
'There should not be an error here')
| gpl-3.0 | 1,622,137,752,100,094,200 | 32.370675 | 81 | 0.54063 | false |
Atush/py_learning | generator/contact.py | 1 | 1799 | from model.contact import Contact
import random
import string
import os.path
import jsonpickle
import getopt
import sys
try:
opts, args = getopt.getopt(sys.argv[1:], "n:f:", ["number of contacts", 'file'])
except getopt.GetoptError as err:
getopt.usage()
sys.exit(2)
n = 5
f = "data/contacts.json"
for o, a in opts:
if o == "-n":
n = int(a)
elif o == "-f":
f = a
def random_string(prefix, maxlen):
symbols = string.ascii_letters + string.digits + " "*10
return prefix + "".join(random.choice(symbols) for i in range(random.randrange(maxlen)))
def random_phone(maxlen):
symbols = string.digits
return "".join(random.choice(symbols) for i in range(random.randrange(maxlen)))
def random_email(maxlen_username, maxlen_domain):
symbols = string.ascii_letters
username = "".join(random.choice(symbols) for i in range(random.randrange(maxlen_username)))
domain = "".join(random.choice(symbols) for i in range(random.randrange(maxlen_domain))) + "." + "".join(random.choice(string.ascii_letters) for i in range(random.randrange(4)))
return username + "@" + domain
testdata = [Contact(firstname="", lastname="", address="", homephone="", mobile="", workphone="", email="", email2="", email3="", phone2="")] + [Contact(firstname = random_string("FN", 10), lastname=random_string("LN", 10), address=random_string("address", 20), homephone=random_phone(10), mobile=random_phone(10), workphone=random_phone(10), email=random_email(15,5), email2=random_email(15,5), email3=random_email(15,5), phone2=random_phone(10)) for i in range(5)]
file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", f)
with open(file, "w") as out:
jsonpickle.set_encoder_options("json", indent = 2)
out.write(jsonpickle.encode(testdata)) | apache-2.0 | -7,085,304,235,291,700,000 | 39.909091 | 466 | 0.679266 | false |
yoshizow/global-pygments-plugin | test/test_global.py | 1 | 1089 | #
# Copyright (c) 2014
# Yoshitaro Makise
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import unittest
sys.path.append('..')
from pygments_parser import *
class GlobalTestCase(unittest.TestCase):
def test_parse_langmap(self):
langmap = parse_langmap('Ruby:.rb,C++:.cc.hh')
self.assertEqual(langmap, {'.rb': 'Ruby',
'.cc': 'C++',
'.hh': 'C++'})
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 4,583,215,890,235,020,000 | 33.03125 | 71 | 0.660239 | false |
Debian/dput-ng | dput/uploader.py | 1 | 10756 | # -*- coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
# Copyright (c) 2012 dput authors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""
Uploader implementation. The code in here surrounds the uploaders'
implementations, and properly invokes the uploader with correct
arguments, etc.
"""
import os
import abc
import sys
import tempfile
import shutil
from contextlib import contextmanager
import dput.profile
from dput.changes import parse_changes_file
from dput.core import logger, _write_upload_log
from dput.hook import run_pre_hooks, run_post_hooks
from dput.util import (run_command, get_obj)
from dput.overrides import (make_delayed_upload, force_passive_ftp_upload)
from dput.exceptions import (DputConfigurationError, DputError,
UploadException)
class AbstractUploader(object):
"""
Abstract base class for all concrete uploader implementations.
"""
__metaclass__ = abc.ABCMeta
def __init__(self, profile):
self._config = profile
interface = 'cli'
if 'interface' in profile:
interface = profile['interface']
logger.trace("Using interface %s" % (interface))
interface_obj = get_obj('interfaces', interface)
if interface_obj is None:
raise DputConfigurationError("No such interface: `%s'" % (
interface
))
self.interface = interface_obj()
self.interface.initialize()
def _pre_hook(self):
self._run_hook("pre_upload_command")
def _post_hook(self):
self._run_hook("post_upload_command")
def _run_hook(self, hook):
if hook in self._config and self._config[hook] != "":
cmd = self._config[hook]
(output, stderr, ret) = run_command(cmd)
if ret == -1:
if not os.path.exists(cmd):
logger.warning(
"Error: You've set a hook (%s) to run (`%s`), "
"but it can't be found (and doesn't appear to exist)."
" Please verify the path and correct it." % (
hook,
self._config[hook]
)
)
return
sys.stdout.write(output) # XXX: Fixme
sys.stdout.flush()
if ret != 0:
raise DputError(
"Command `%s' returned an error: %s [err=%d]" % (
self._config[hook],
stderr,
ret
)
)
def __del__(self):
self.interface.shutdown()
def upload_write_error(self, e):
"""
.. warning::
don't call this.
please don't call this
"""
# XXX: Refactor this, please god, refactor this.
logger.warning("""Upload permissions error
You either don't have the rights to upload a file, or, if this is on
ftp-master, you may have tried to overwrite a file already on the server.
Continuing anyway in case you want to recover from an incomplete upload.
No file was uploaded, however.""")
@abc.abstractmethod
def initialize(self, **kwargs):
"""
Setup the things needed to upload a file. Usually this means creating
a network connection & authenticating.
"""
pass
@abc.abstractmethod
def upload_file(self, filename, upload_filename=None):
"""
Upload a single file (``filename``) to the server.
"""
pass
@abc.abstractmethod
def shutdown(self):
"""
Disconnect and shutdown.
"""
pass
@contextmanager
def uploader(uploader_method, profile, simulate=True):
"""
Context-managed uploader implementation.
Invoke sorta like::
with uploader() as obj:
obj.upload_file('filename')
This will automatically call that object's
:meth:`dput.uploader.AbstractUploader.initialize`,
pre-hook, yield the object, call the post hook and invoke it's
:meth:`dput.uploader.AbstractUploader.shutdown`.
"""
cls = get_obj('uploaders', uploader_method)
if not cls:
logger.error(
"Failed to resolve method %s to an uploader class" % (
uploader_method
)
)
raise DputConfigurationError(
"Failed to resolve method %s to an uploader class" % (
uploader_method
)
)
obj = cls(profile)
if not simulate or simulate >= 2:
obj.initialize()
obj._pre_hook()
try:
yield obj
finally:
if not simulate:
obj._post_hook()
if not simulate or simulate >= 2:
obj.shutdown()
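# Usage sketch (hypothetical host and file names; assumes the profile's
# 'method' resolves to an installed uploader plugin):
#
#     profile = dput.profile.load_profile('ftp-master')
#     with uploader(profile['method'], profile, simulate=False) as up:
#         up.upload_file('foo_1.0_amd64.deb')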
def determine_logfile(changes, conf, args):
"""
Figure out what logfile to write to. This is mostly an internal
implementation. Returns the file to log to, given a changes and
profile.
"""
# dak requires '<package>_<version>_<[a-zA-Z0-9+-]+>.changes'
# XXX: Correct --force behavior
logfile = changes.get_changes_file() # XXX: Check for existing one
xtns = [".changes", ".dud"]
for xtn in xtns:
if logfile.endswith(xtn):
logfile = "%s.%s.upload" % (logfile[:-len(xtn)], conf['name'])
break
else:
raise UploadException("File %s does not look like a .changes file" % (
changes.get_filename()
))
if (
os.access(logfile, os.R_OK) and
os.stat(logfile).st_size > 0 and
not args.force
):
raise UploadException("""Package %s was already uploaded to %s
If you want to upload nonetheless, use --force or remove %s""" % (
changes.get_package_name(),
conf['name'],
logfile
))
logger.debug("Writing log to %s" % (logfile))
return logfile
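# Illustrative result: for 'foo_1.0_amd64.changes' uploaded through a
# profile named 'ftp-master', the log file becomes
# 'foo_1.0_amd64.ftp-master.upload' next to the .changes file.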
def should_write_logfile(args):
return not args.simulate and not args.check_only and not args.no_upload_log
def check_modules(profile):
if 'hooks' in profile:
for hook in profile['hooks']:
obj = get_obj('hooks', hook)
if obj is None:
raise DputConfigurationError(
"Error: no such hook '%s'" % (
hook
)
)
class DputNamespace(dict):
def __getattr__(self, key):
return self[key]
def __setattr__(self, key, val):
self[key] = val
def invoke_dput_simple(changes, host, **kwargs):
changes = parse_changes_file(changes, os.path.dirname(changes))
# XXX: Abspath???
config = {
"host": host,
"debug": False,
"config": None,
"force": False,
"simulate": False,
"check_only": None,
"no_upload_log": None,
"full_upload_log": None,
"delayed": None,
"passive": None,
}
config.update(kwargs)
config = DputNamespace(config)
return invoke_dput(changes, config)
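# Minimal call sketch (hypothetical path and host; simulate avoids any
# real network transfer):
#
#     invoke_dput_simple('build/foo_1.0_amd64.changes', 'ftp-master',
#                        simulate=True)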
def invoke_dput(changes, args):
"""
.. warning::
This method may change names. Please use it via :func:`dput.upload`.
also, please don't depend on args, that's likely to change shortly.
Given a changes file ``changes``, and arguments to dput ``args``,
upload a package to the archive that makes sense.
"""
profile = dput.profile.load_profile(args.host)
check_modules(profile)
fqdn = None
if "fqdn" in profile:
fqdn = profile['fqdn']
else:
fqdn = profile['name']
logfile = determine_logfile(changes, profile, args)
tmp_logfile = tempfile.NamedTemporaryFile()
if should_write_logfile(args):
full_upload_log = profile["full_upload_log"]
if args.full_upload_log:
full_upload_log = args.full_upload_log
_write_upload_log(tmp_logfile.name, full_upload_log)
if "unchecked" in args and args.unchecked:
profile['allow_unsigned_uploads'] = True
if args.delayed is not None:
make_delayed_upload(profile, args.delayed)
if args.simulate:
logger.warning("Not uploading for real - dry run")
if args.passive:
force_passive_ftp_upload(profile)
logger.info("Uploading %s using %s to %s (host: %s; directory: %s)" % (
changes.get_package_name(),
profile['method'],
profile['name'],
fqdn,
profile['incoming']
))
if changes.get_changes_file().endswith(".changes"):
if 'hooks' in profile:
run_pre_hooks(changes, profile)
else:
logger.trace(profile)
logger.warning("No hooks defined in the profile. "
"Not checking upload.")
# check only is a special case of -s
if args.check_only:
args.simulate = 1
with uploader(profile['method'], profile,
simulate=args.simulate) as obj:
if args.check_only:
logger.info("Package %s passes all checks" % (
changes.get_package_name()
))
return
if args.no_upload_log:
logger.info("Not writing upload log upon request")
files = changes.get_files() + [changes.get_changes_file()]
for path in files:
logger.info("Uploading %s%s" % (
os.path.basename(path),
" (simulation)" if args.simulate else ""
))
if not args.simulate:
obj.upload_file(path)
if args.simulate:
return
if changes.get_changes_file().endswith(".changes"):
if 'hooks' in profile:
run_post_hooks(changes, profile)
else:
logger.trace(profile)
logger.warning("No hooks defined in the profile. "
"Not post-processing upload.")
if should_write_logfile(args):
tmp_logfile.flush()
shutil.copy(tmp_logfile.name, logfile)
#print(tmp_logfile.name)
tmp_logfile.close()
| gpl-2.0 | 3,489,147,929,488,707,600 | 28.961003 | 79 | 0.577538 | false |
BBN-Q/Auspex | src/auspex/instruments/hall_probe.py | 1 | 1712 | # Copyright 2016 Raytheon BBN Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
import numpy as np
class HallProbe(object):
"""Simple wrapper for converting Hall probe voltage measurements to
actual fields values."""
def __init__(self, calibration_file, supply_voltage_method, readout_voltage_method):
super(HallProbe, self).__init__()
self.name = "Lakeshore Hall Probe"
with open(calibration_file) as cf:
lines = [l for l in cf.readlines() if l[0] != '#']
if len(lines) != 2:
raise Exception("Invalid Hall probe calibration file, must contain two lines.")
try:
self.output_voltage = float(lines[0])
except:
raise TypeError("Could not convert output voltage to floating point value.")
try:
            poly_coeffs = np.array(lines[1].split(), dtype=float)
self.field_vs_voltage = np.poly1d(poly_coeffs)
except:
raise TypeError("Could not convert calibration coefficients into list of floats")
self.getter = readout_voltage_method
self.setter = supply_voltage_method
self.setter(self.output_voltage)
@property
def field(self):
return self.get_field()
def get_field(self):
return self.field_vs_voltage( self.getter() )
    def __repr__(self):
        return "Mystery Instrument" if self.name == "" else self.name
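# Usage sketch with an illustrative calibration file; `supply.set_voltage`
# and `meter.read_voltage` are hypothetical bound methods. The two
# non-comment lines of the file are the probe supply voltage and the
# field-vs-voltage polynomial coefficients, highest order first:
#
#     cal.txt:
#         5.0
#         1023.4 -0.7
#
#     probe = HallProbe('cal.txt', supply.set_voltage, meter.read_voltage)
#     field_now = probe.field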
| apache-2.0 | -244,677,329,928,552,860 | 37.909091 | 97 | 0.620327 | false |
avastjohn/maventy_new | registration/urls.py | 1 | 3567 | """
URLConf for Django user registration and authentication.
If the default behavior of the registration views is acceptable to
you, simply use a line like this in your root URLConf to set up the
default URLs for registration::
(r'^accounts/', include('registration.urls')),
This will also automatically set up the views in
``django.contrib.auth`` at sensible default locations.
But if you'd like to customize the behavior (e.g., by passing extra
arguments to the various views) or split up the URLs, feel free to set
up your own URL patterns for these views instead. If you do, it's a
good idea to use the names ``registration_activate``,
``registration_complete`` and ``registration_register`` for the
various steps of the user-signup process.
"""
import functools
from django.conf.urls.defaults import *
from django.views.generic.simple import direct_to_template
from django.contrib.auth import views as auth_views
from registration.views import activate
from registration.views import register
from registration.forms import RegistrationFormUniqueEmailWithCaptcha
# register2 is register with the form_class given
register2 = functools.partial(register,
form_class = RegistrationFormUniqueEmailWithCaptcha)
urlpatterns = patterns('',
# Activation keys get matched by \w+ instead of the more specific
# [a-fA-F0-9]{40} because a bad activation key should still get to the view;
# that way it can return a sensible "invalid key" message instead of a
# confusing 404.
url(r'^activate/(?P<activation_key>\w+)/$',
activate,
name='registration_activate'),
url(r'^login/$',
auth_views.login,
{'template_name': 'registration/login.html'},
name='auth_login'),
url(r'^logout/$',
auth_views.logout,
name='auth_logout'),
url(r'^password/change/$',
auth_views.password_change,
name='auth_password_change'),
url(r'^password/change/done/$',
auth_views.password_change_done,
name='auth_password_change_done'),
url(r'^password/reset/$',
auth_views.password_reset,
name='auth_password_reset'),
url(r'^password/reset/confirm/(?P<uidb36>.+)/(?P<token>.+)/$',
auth_views.password_reset_confirm,
name='auth_password_reset_confirm'),
url(r'^password/reset/complete/$',
auth_views.password_reset_complete,
name='auth_password_reset_complete'),
url(r'^password/reset/done/$',
auth_views.password_reset_done,
name='auth_password_reset_done'),
url(r'^register/$',
register2,
name='registration_register'),
url(r'^register/complete/$',
direct_to_template,
{'template': 'registration/registration_complete.html'},
name='registration_complete'),
)
| bsd-3-clause | -5,240,740,255,448,144,000 | 45.324675 | 99 | 0.540791 | false |
France-ioi/taskgrader | tools/testSelect/zipImport.py | 1 | 4094 | #!/usr/bin/env python3
# Copyright (c) 2016 France-IOI, MIT license
#
# http://opensource.org/licenses/MIT
# This tool imports solutions and test cases from a zip file.
import os, subprocess, sys, zipfile
SELFDIR = os.path.normpath(os.path.dirname(os.path.abspath(__file__)))
CFG_TESTSELECT = os.path.join(SELFDIR, 'testSelect.py')
CFG_LANGEXTS = {
'.c': 'c',
'.cpp': 'cpp',
'.py': 'python',
'.py2': 'python2',
'.py3': 'python3',
'.ml': 'ocaml',
'.java': 'java',
'.js': 'nodejs',
'.jvs': 'javascool',
'.pas': 'pascal',
'.sh': 'shell',
'': 'sh'
}
def unzip(zipFile, testsPath, solsPath):
"""Unzip a zip export into testsPath and solsPath."""
newTests = []
newSols = []
# Explore list of files in the zip archive
for name in zipFile.namelist():
folder, filename = os.path.split(name)
if folder:
# Rename a/b/c.ext to b-c.ext
newFilename = '%s-%s' % (os.path.split(folder)[1], filename)
else:
newFilename = filename
r, ext = os.path.splitext(newFilename)
# Check type of file from extension
if ext == '.in':
newTestPath = os.path.join(testsPath, newFilename)
newTests.append(newTestPath)
newFile = open(newTestPath, 'wb')
elif ext in ['.c', '.cpp', '.py', '.py2', '.py3', '.ml', '.pas', '.js', '.java', '.jvs', '.sh']:
newSolPath = os.path.join(solsPath, newFilename)
newSols.append(newSolPath)
newFile = open(newSolPath, 'wb')
else:
# Not a test nor solution
continue
# Extract file directly to target path
newFile.write(zipFile.open(name).read())
newFile.close()
return (newTests, newSols)
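# Call sketch (hypothetical archive layout): a zip holding 'group1/01.in'
# and 'sols/brute.cpp' yields 'group1-01.in' under testsPath and
# 'sols-brute.cpp' under solsPath.
#
#     zf = zipfile.ZipFile('export.zip')
#     newTests, newSols = unzip(zf, 'tests/importedtests/', 'tests/importedsols/')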
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Please specify a zip file to import.")
sys.exit(1)
if not (os.path.isfile('taskSettings.json') or os.path.isfile('testSelect.json') or os.path.isdir('tests')):
print("Current folder isn't a task. Aborting.")
sys.exit(1)
try:
zipFile = zipfile.ZipFile(sys.argv[1])
except:
print("Unable to open zip file '%s'." % sys.argv[1])
sys.exit(1)
# Paths to store the tests and solutions in
testsPath = 'tests/importedtests/'
solsPath = 'tests/importedsols/'
try:
os.makedirs(testsPath)
except:
pass
try:
os.makedirs(solsPath)
except:
pass
print("*** Extracting files from zip...")
newTests, newSols = unzip(zipFile, testsPath, solsPath)
print("Extracted %d test cases, %d solutions." % (len(newTests), len(newSols)))
# Import into testSelect
print("\n*** Importing into testSelect...")
if len(newTests) > 0:
subprocess.check_call([CFG_TESTSELECT, 'addtest'] + newTests)
if len(newSols) > 0:
# Fetch language for each solution
# We optimize the number of calls to testSelect by grouping solutions for each language
solLangs = {}
for sol in newSols:
r, ext = os.path.splitext(sol)
try:
lang = CFG_LANGEXTS[ext]
except:
print("""
Warning: Couldn't detect language for `%s`.
Please import manually with the command:
testSelect.py addsol -l [LANG] %s""" % (sol, sol))
continue
if lang in solLangs:
solLangs[lang].append(sol)
else:
solLangs[lang] = [sol]
# Launch testSelect for each language/solutions
for lang in solLangs.keys():
subprocess.check_call([CFG_TESTSELECT, 'addsol', '-l', lang] + solLangs[lang])
print("\n*** Computing new coverage information...")
subprocess.check_call([CFG_TESTSELECT, 'compute'])
print("\n*** Selecting tests...")
subprocess.check_call([CFG_TESTSELECT, 'compute'])
print("""
All done!
Use `testSelect.py serve` to see current solutions/tests coverage,
and `testSelect.py export` to export selected tests into the task.""")
| mit | 5,931,554,227,149,325,000 | 30.736434 | 112 | 0.581094 | false |
chenders/deadonfilm | app/deadonfilm.py | 1 | 3643 | import imdb
import json
import os
import logging
from logging.handlers import RotatingFileHandler
from urllib.parse import urlparse
from flask import (
Flask,
redirect,
make_response,
request,
send_from_directory,
render_template
)
import psycopg2.extras
url = urlparse(os.environ.get('IMDB_DB'))
insecure_redirect = os.environ.get('SECURE_REDIRECT_URL', False)
app = Flask(__name__, root_path='./')
i = imdb.IMDb()
conn = psycopg2.connect(
database=url.path[1:],
user=url.username,
password=url.password,
host=url.hostname,
port=url.port
)
conn.autocommit = True
cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
@app.before_first_request
def setup_logging():
logger = RotatingFileHandler('app/logs/deadonfilm.log', maxBytes=1000000, backupCount=2)
logger = logging.getLogger('deadonfilm')
logger.setLevel(logging.DEBUG)
app.logger.addHandler(logger)
app.logger.setLevel(logging.DEBUG)
@app.route('/')
def index():
if insecure_redirect and not request.is_secure:
return redirect(insecure_redirect, code=301)
return render_template('index.html')
@app.route('/search/')
def search():
"""
Find movie by title search (using IMDb API). Query argument ``q``.
"""
app.logger.info('Searching for %s' % request.args.get('q'))
movie = request.args.get('q')
m = i.search_movie(movie)
resp = make_response(json.dumps(
[{
'value': mt['long imdb title'],
'id': mt.getID()
} for mt in m if mt.get('kind') == 'movie']))
resp.headers['Content-Type'] = 'application/json'
resp.headers['Access-Control-Allow-Origin'] = '*'
return resp
@app.route('/died/', methods=['POST'])
def died():
"""
Who died from the movie with the given IMDb id?
"""
movie_id = request.form['id']
movie = i.get_movie(movie_id, info=["full credits"])
if movie is None:
        resp = make_response("Movie not found: {}".format(movie_id), 404)
else:
actors = movie.data['cast']
actors_by_id = {}
for actor in actors:
actors_by_id[int(actor.getID())] = actor
cursor.execute("""SELECT
* from name_basics WHERE
person_id IN %s AND
death_year NOTNULL
""", (tuple(actors_by_id.keys()),))
pastos = []
for person in cursor.fetchall():
person_id = person['person_id']
character = str(actors_by_id[person_id].currentRole)
pastos.append({
'person_id': person['person_id'],
'birth': person['birth_year'],
'death': person['death_year'],
'character': character,
'name': person['primary_name']
})
pastos = sorted(pastos, key=lambda pasto: pasto['death'], reverse=True)
resp = make_response(json.dumps(pastos))
resp.headers['Content-Type'] = 'application/json'
resp.headers['Access-Control-Allow-Origin'] = '*'
return resp
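# Response sketch (illustrative values): a JSON array ordered by death
# year, most recent first, e.g.
#
#     [{"person_id": 12345, "birth": 1925, "death": 2010,
#       "character": "Sheriff", "name": "Some Actor"}, ...]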
if __name__ == '__main__':
@app.route('/static/js/<path:path>')
def send_js(path):
return send_from_directory('./static/js', path)
@app.route('/static/css/<path:path>')
def send_css(path):
return send_from_directory('./static/css', path)
@app.route('/static/images/<path:path>')
def send_img(path):
return send_from_directory('./static/images', path)
@app.route('/dist/<path:path>')
def send_dist(path):
return send_from_directory('./dist', path)
app.run()
| mit | -4,903,017,006,393,662,000 | 27.912698 | 92 | 0.59429 | false |
admk/soap | soap/parser/program.py | 1 | 1972 | import re
import sh
from soap.datatype import type_cast
from soap.expression import is_variable
from soap.program import ProgramFlow, PragmaInputFlow, PragmaOutputFlow
from soap.parser.common import _lift_child, _lift_dontcare, CommonVisitor
from soap.parser.expression import DeclarationVisitor, ExpressionVisitor
from soap.parser.grammar import compiled_grammars
from soap.parser.statement import StatementVisitor
class PragmaVisitor(object):
def _visit_comma_seperated_list(self, node, children):
item, comma_item_list = children
return [item] + [each for _, each in comma_item_list]
def visit_pragma_input_statement(self, node, children):
pragma_lit, input_lit, input_list = children
return PragmaInputFlow(input_list)
def visit_pragma_output_statement(self, node, children):
pragma_lit, output_lit, output_list = children
return PragmaOutputFlow(output_list)
def visit_input_assign_expr(self, node, children):
variable, _, number = children
return variable, number
def visit_input_expr(self, node, children):
child = _lift_child(self, node, children)
if not is_variable(child):
return child
return child, type_cast(child.dtype, top=True)
visit_input_list = visit_output_list = _visit_comma_seperated_list
visit_input = visit_output = visit_pragma = _lift_dontcare
class _ProgramVisitor(
CommonVisitor, DeclarationVisitor, ExpressionVisitor,
StatementVisitor, PragmaVisitor):
grammar = compiled_grammars['statement']
def _preprocess(text):
text = re.sub(r'#\s*pragma', '__pragma', text)
text = sh.cpp('-E', '-P', _in=text).stdout.decode('utf-8')
text = re.sub(r'__pragma', '#pragma', text)
return text
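# Round-trip sketch (the pragma wording is illustrative): a line such as
# '#pragma soap input x' is rewritten to '__pragma soap input x' so that
# `cpp -E -P` preserves it, then restored to '#pragma soap input x' in
# the preprocessed text handed to the parser.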
def parse(program, decl=None):
decl = decl or {}
visitor = _ProgramVisitor(decl)
program = _preprocess(program)
flow = visitor.parse(program)
return ProgramFlow(flow)
| mit | 7,840,282,045,129,042,000 | 32.423729 | 73 | 0.698783 | false |
beeftornado/sentry | src/sentry/templatetags/sentry_avatars.py | 2 | 2789 | from __future__ import absolute_import
from django import template
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils.safestring import mark_safe
from six.moves.urllib.parse import urlencode
from sentry.models import User, UserAvatar
from sentry.utils.avatar import get_email_avatar, get_gravatar_url, get_letter_avatar
register = template.Library()
# Adapted from http://en.gravatar.com/site/implement/images/django/
# The "mm" default is for the grey, "mystery man" icon. See:
# http://en.gravatar.com/site/implement/images/
@register.simple_tag(takes_context=True)
def gravatar_url(context, email, size, default="mm"):
return get_gravatar_url(email, size, default)
@register.simple_tag(takes_context=True)
def letter_avatar_svg(context, display_name, identifier, size=None):
return mark_safe(get_letter_avatar(display_name, identifier, size=size))
@register.simple_tag(takes_context=True)
def profile_photo_url(context, user_id, size=None):
try:
avatar = UserAvatar.objects.get_from_cache(user=user_id)
except UserAvatar.DoesNotExist:
return
url = reverse("sentry-user-avatar-url", args=[avatar.ident])
if size:
url += "?" + urlencode({"s": size})
return settings.SENTRY_URL_PREFIX + url
# Don't use this in any situations where you're rendering more
# than 1-2 avatars. It will make a request for every user!
@register.simple_tag(takes_context=True)
def email_avatar(context, display_name, identifier, size=None, try_gravatar=True):
return mark_safe(get_email_avatar(display_name, identifier, size, try_gravatar))
@register.inclusion_tag("sentry/partial/avatar.html")
def avatar(user, size=36):
# user can be User or OrganizationMember
if isinstance(user, User):
user_id = user.id
email = user.email
else:
user_id = user.user_id
email = user.email
if user_id:
email = user.user.email
return {
"email": email,
"user_id": user_id,
"size": size,
"avatar_type": user.get_avatar_type(),
"display_name": user.get_display_name(),
"label": user.get_label(),
}
@register.inclusion_tag("sentry/partial/avatar.html")
def avatar_for_email(user, size=36):
# user can be User or OrganizationMember
if isinstance(user, User):
user_id = user.id
email = user.email
else:
user_id = user.user_id
email = user.email
if user_id:
email = user.user.email
return {
"for_email": True,
"email": email,
"user_id": user_id,
"size": size,
"avatar_type": user.get_avatar_type(),
"display_name": user.get_display_name(),
"label": user.get_label(),
}
| bsd-3-clause | 8,308,949,113,032,601,000 | 31.057471 | 85 | 0.66583 | false |
zfrenchee/pandas | pandas/tests/series/test_alter_axes.py | 1 | 9506 | # coding=utf-8
# pylint: disable-msg=E1101,W0612
import pytest
from datetime import datetime
import numpy as np
import pandas as pd
from pandas import Index, Series
from pandas.core.index import MultiIndex, RangeIndex
from pandas.compat import lrange, range, zip
from pandas.util.testing import assert_series_equal, assert_frame_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesAlterAxes(TestData):
def test_setindex(self):
# wrong type
series = self.series.copy()
pytest.raises(TypeError, setattr, series, 'index', None)
# wrong length
series = self.series.copy()
pytest.raises(Exception, setattr, series, 'index',
np.arange(len(series) - 1))
# works
series = self.series.copy()
series.index = np.arange(len(series))
assert isinstance(series.index, Index)
def test_rename(self):
renamer = lambda x: x.strftime('%Y%m%d')
renamed = self.ts.rename(renamer)
assert renamed.index[0] == renamer(self.ts.index[0])
# dict
rename_dict = dict(zip(self.ts.index, renamed.index))
renamed2 = self.ts.rename(rename_dict)
assert_series_equal(renamed, renamed2)
# partial dict
s = Series(np.arange(4), index=['a', 'b', 'c', 'd'], dtype='int64')
renamed = s.rename({'b': 'foo', 'd': 'bar'})
tm.assert_index_equal(renamed.index, Index(['a', 'foo', 'c', 'bar']))
# index with name
renamer = Series(np.arange(4),
index=Index(['a', 'b', 'c', 'd'], name='name'),
dtype='int64')
renamed = renamer.rename({})
assert renamed.index.name == renamer.index.name
def test_rename_by_series(self):
s = Series(range(5), name='foo')
renamer = Series({1: 10, 2: 20})
result = s.rename(renamer)
expected = Series(range(5), index=[0, 10, 20, 3, 4], name='foo')
tm.assert_series_equal(result, expected)
def test_rename_set_name(self):
s = Series(range(4), index=list('abcd'))
for name in ['foo', 123, 123., datetime(2001, 11, 11), ('foo',)]:
result = s.rename(name)
assert result.name == name
tm.assert_numpy_array_equal(result.index.values, s.index.values)
assert s.name is None
def test_rename_set_name_inplace(self):
s = Series(range(3), index=list('abc'))
for name in ['foo', 123, 123., datetime(2001, 11, 11), ('foo',)]:
s.rename(name, inplace=True)
assert s.name == name
exp = np.array(['a', 'b', 'c'], dtype=np.object_)
tm.assert_numpy_array_equal(s.index.values, exp)
def test_set_name_attribute(self):
s = Series([1, 2, 3])
s2 = Series([1, 2, 3], name='bar')
for name in [7, 7., 'name', datetime(2001, 1, 1), (1,), u"\u05D0"]:
s.name = name
assert s.name == name
s2.name = name
assert s2.name == name
def test_set_name(self):
s = Series([1, 2, 3])
s2 = s._set_name('foo')
assert s2.name == 'foo'
assert s.name is None
assert s is not s2
def test_rename_inplace(self):
renamer = lambda x: x.strftime('%Y%m%d')
expected = renamer(self.ts.index[0])
self.ts.rename(renamer, inplace=True)
assert self.ts.index[0] == expected
def test_set_index_makes_timeseries(self):
idx = tm.makeDateIndex(10)
s = Series(lrange(10))
s.index = idx
assert s.index.is_all_dates
def test_reset_index(self):
df = tm.makeDataFrame()[:5]
ser = df.stack()
ser.index.names = ['hash', 'category']
ser.name = 'value'
df = ser.reset_index()
assert 'value' in df
df = ser.reset_index(name='value2')
assert 'value2' in df
# check inplace
s = ser.reset_index(drop=True)
s2 = ser
s2.reset_index(drop=True, inplace=True)
assert_series_equal(s, s2)
# level
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
labels=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]])
s = Series(np.random.randn(6), index=index)
rs = s.reset_index(level=1)
assert len(rs.columns) == 2
rs = s.reset_index(level=[0, 2], drop=True)
tm.assert_index_equal(rs.index, Index(index.get_level_values(1)))
assert isinstance(rs, Series)
def test_reset_index_level(self):
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]],
columns=['A', 'B', 'C'])
for levels in ['A', 'B'], [0, 1]:
# With MultiIndex
s = df.set_index(['A', 'B'])['C']
result = s.reset_index(level=levels[0])
tm.assert_frame_equal(result, df.set_index('B'))
result = s.reset_index(level=levels[:1])
tm.assert_frame_equal(result, df.set_index('B'))
result = s.reset_index(level=levels)
tm.assert_frame_equal(result, df)
result = df.set_index(['A', 'B']).reset_index(level=levels,
drop=True)
tm.assert_frame_equal(result, df[['C']])
with tm.assert_raises_regex(KeyError, 'Level E '):
s.reset_index(level=['A', 'E'])
# With single-level Index
s = df.set_index('A')['B']
result = s.reset_index(level=levels[0])
tm.assert_frame_equal(result, df[['A', 'B']])
result = s.reset_index(level=levels[:1])
tm.assert_frame_equal(result, df[['A', 'B']])
result = s.reset_index(level=levels[0], drop=True)
tm.assert_series_equal(result, df['B'])
with tm.assert_raises_regex(IndexError, 'Too many levels'):
s.reset_index(level=[0, 1, 2])
def test_reset_index_range(self):
# GH 12071
s = pd.Series(range(2), name='A', dtype='int64')
series_result = s.reset_index()
assert isinstance(series_result.index, RangeIndex)
series_expected = pd.DataFrame([[0, 0], [1, 1]],
columns=['index', 'A'],
index=RangeIndex(stop=2))
assert_frame_equal(series_result, series_expected)
def test_reorder_levels(self):
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
labels=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]],
names=['L0', 'L1', 'L2'])
s = Series(np.arange(6), index=index)
# no change, position
result = s.reorder_levels([0, 1, 2])
assert_series_equal(s, result)
# no change, labels
result = s.reorder_levels(['L0', 'L1', 'L2'])
assert_series_equal(s, result)
# rotate, position
result = s.reorder_levels([1, 2, 0])
e_idx = MultiIndex(levels=[['one', 'two', 'three'], [0, 1], ['bar']],
labels=[[0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 0]],
names=['L1', 'L2', 'L0'])
expected = Series(np.arange(6), index=e_idx)
assert_series_equal(result, expected)
def test_rename_axis_inplace(self):
# GH 15704
series = self.ts.copy()
expected = series.rename_axis('foo')
result = series.copy()
no_return = result.rename_axis('foo', inplace=True)
assert no_return is None
assert_series_equal(result, expected)
def test_set_axis_inplace(self):
# GH14636
s = Series(np.arange(4), index=[1, 3, 5, 7], dtype='int64')
expected = s.copy()
expected.index = list('abcd')
for axis in 0, 'index':
# inplace=True
# The FutureWarning comes from the fact that we would like to have
# inplace default to False some day
for inplace, warn in (None, FutureWarning), (True, None):
result = s.copy()
kwargs = {'inplace': inplace}
with tm.assert_produces_warning(warn):
result.set_axis(list('abcd'), axis=axis, **kwargs)
tm.assert_series_equal(result, expected)
# inplace=False
result = s.set_axis(list('abcd'), axis=0, inplace=False)
tm.assert_series_equal(expected, result)
# omitting the "axis" parameter
with tm.assert_produces_warning(None):
result = s.set_axis(list('abcd'), inplace=False)
tm.assert_series_equal(result, expected)
# wrong values for the "axis" parameter
for axis in 2, 'foo':
with tm.assert_raises_regex(ValueError, 'No axis named'):
s.set_axis(list('abcd'), axis=axis, inplace=False)
def test_set_axis_prior_to_deprecation_signature(self):
s = Series(np.arange(4), index=[1, 3, 5, 7], dtype='int64')
expected = s.copy()
expected.index = list('abcd')
for axis in 0, 'index':
with tm.assert_produces_warning(FutureWarning):
result = s.set_axis(0, list('abcd'), inplace=False)
tm.assert_series_equal(result, expected)
| bsd-3-clause | 8,677,358,192,297,557,000 | 34.33829 | 78 | 0.528508 | false |
teamfx/openjfx-9-dev-rt | modules/javafx.web/src/main/native/Tools/Scripts/webkitpy/common/system/filesystem.py | 1 | 11234 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Wrapper object for the file system / source tree."""
import codecs
import errno
import exceptions
import filecmp
import glob
import hashlib
import os
import shutil
import sys
import tempfile
import time
class FileSystem(object):
"""FileSystem interface for webkitpy.
Unless otherwise noted, all paths are allowed to be either absolute
or relative."""
sep = os.sep
pardir = os.pardir
def abspath(self, path):
# FIXME: This gross hack is needed while we transition from Cygwin to native Windows, because we
# have some mixing of file conventions from different tools:
if sys.platform == 'cygwin':
path = os.path.normpath(path)
path_components = path.split(os.sep)
if path_components and len(path_components[0]) == 2 and path_components[0][1] == ':':
path_components[0] = path_components[0][0]
path = os.path.join('/', 'cygdrive', *path_components)
return os.path.abspath(path)
def realpath(self, path):
return os.path.realpath(path)
def path_to_module(self, module_name):
"""A wrapper for all calls to __file__ to allow easy unit testing."""
# FIXME: This is the only use of sys in this file. It's possible this function should move elsewhere.
return sys.modules[module_name].__file__ # __file__ is always an absolute path.
def expanduser(self, path):
return os.path.expanduser(path)
def basename(self, path):
return os.path.basename(path)
def chdir(self, path):
return os.chdir(path)
def copyfile(self, source, destination):
shutil.copyfile(source, destination)
def dirname(self, path):
return os.path.dirname(path)
def exists(self, path):
return os.path.exists(path)
def dirs_under(self, path, dir_filter=None):
"""Return the list of all directories under the given path in topdown order.
Args:
dir_filter: if not None, the filter will be invoked
with the filesystem object and the path of each dirfound.
The dir is included in the result if the callback returns True.
"""
def filter_all(fs, dirpath):
return True
dir_filter = dir_filter or filter_all
dirs = []
for (dirpath, dirnames, filenames) in os.walk(path):
if dir_filter(self, dirpath):
dirs.append(dirpath)
return dirs
def files_under(self, path, dirs_to_skip=[], file_filter=None):
"""Return the list of all files under the given path in topdown order.
Args:
dirs_to_skip: a list of directories to skip over during the
traversal (e.g., .svn, resources, etc.)
file_filter: if not None, the filter will be invoked
with the filesystem object and the dirname and basename of
each file found. The file is included in the result if the
callback returns True.
"""
def filter_all(fs, dirpath, basename):
return True
file_filter = file_filter or filter_all
files = []
if self.isfile(path):
if file_filter(self, self.dirname(path), self.basename(path)):
files.append(path)
return files
if self.basename(path) in dirs_to_skip:
return []
for (dirpath, dirnames, filenames) in os.walk(path):
for d in dirs_to_skip:
if d in dirnames:
dirnames.remove(d)
for filename in filenames:
if file_filter(self, dirpath, filename):
files.append(self.join(dirpath, filename))
return files
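    # Filter sketch (illustrative): collect only Python sources.
    #
    #     def python_only(fs, dirpath, basename):
    #         return basename.endswith('.py')
    #
    #     sources = fs.files_under(path, file_filter=python_only)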
def getcwd(self):
return os.getcwd()
def glob(self, path):
return glob.glob(path)
def isabs(self, path):
return os.path.isabs(path)
def isfile(self, path):
return os.path.isfile(path)
def getsize(self, path):
return os.path.getsize(path)
def isdir(self, path):
return os.path.isdir(path)
def join(self, *comps):
return os.path.join(*comps)
def listdir(self, path):
return os.listdir(path)
def mkdtemp(self, **kwargs):
"""Create and return a uniquely named directory.
This is like tempfile.mkdtemp, but if used in a with statement
the directory will self-delete at the end of the block (if the
directory is empty; non-empty directories raise errors). The
directory can be safely deleted inside the block as well, if so
desired.
Note that the object returned is not a string and does not support all of the string
methods. If you need a string, coerce the object to a string and go from there.
"""
class TemporaryDirectory(object):
def __init__(self, **kwargs):
self._kwargs = kwargs
self._directory_path = tempfile.mkdtemp(**self._kwargs)
def __str__(self):
return self._directory_path
def __enter__(self):
return self._directory_path
def __exit__(self, type, value, traceback):
# Only self-delete if necessary.
# FIXME: Should we delete non-empty directories?
if os.path.exists(self._directory_path):
os.rmdir(self._directory_path)
return TemporaryDirectory(**kwargs)
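    # Usage sketch (assumes `fs` is a FileSystem instance; __enter__
    # yields the directory path as a string):
    #
    #     with fs.mkdtemp(suffix='webkitpy') as temp_dir:
    #         stamp = fs.join(temp_dir, 'stamp.txt')
    #         fs.write_text_file(stamp, u'data')
    #         fs.remove(stamp)
    #     # the now-empty directory deletes itself on exit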
def maybe_make_directory(self, *path):
"""Create the specified directory if it doesn't already exist."""
try:
os.makedirs(self.join(*path))
except OSError, e:
if e.errno != errno.EEXIST:
raise
def move(self, source, destination):
shutil.move(source, destination)
def mtime(self, path):
return os.stat(path).st_mtime
def normpath(self, path):
return os.path.normpath(path)
def open_binary_tempfile(self, suffix):
"""Create, open, and return a binary temp file. Returns a tuple of the file and the name."""
temp_fd, temp_name = tempfile.mkstemp(suffix)
f = os.fdopen(temp_fd, 'wb')
return f, temp_name
def open_binary_file_for_reading(self, path):
return codecs.open(path, 'rb')
def read_binary_file(self, path):
"""Return the contents of the file at the given path as a byte string."""
with file(path, 'rb') as f:
return f.read()
def write_binary_file(self, path, contents):
with file(path, 'wb') as f:
f.write(contents)
def open_text_file_for_reading(self, path, errors='strict'):
# Note: There appears to be an issue with the returned file objects
# not being seekable. See http://stackoverflow.com/questions/1510188/can-seek-and-tell-work-with-utf-8-encoded-documents-in-python .
return codecs.open(path, 'r', 'utf8', errors)
def open_text_file_for_writing(self, path):
return codecs.open(path, 'w', 'utf8')
def open_stdin(self):
return codecs.StreamReaderWriter(sys.stdin,
codecs.getreader('utf8'),
codecs.getwriter('utf8'),
'replace')
def read_text_file(self, path):
"""Return the contents of the file at the given path as a Unicode string.
The file is read assuming it is a UTF-8 encoded file with no BOM."""
with codecs.open(path, 'r', 'utf8') as f:
return f.read()
def write_text_file(self, path, contents):
"""Write the contents to the file at the given location.
The file is written encoded as UTF-8 with no BOM."""
with codecs.open(path, 'w', 'utf-8') as f:
f.write(contents.decode('utf-8') if type(contents) == str else contents)
def sha1(self, path):
contents = self.read_binary_file(path)
return hashlib.sha1(contents).hexdigest()
def relpath(self, path, start='.'):
return os.path.relpath(path, start)
class _WindowsError(exceptions.OSError):
"""Fake exception for Linux and Mac."""
pass
def remove(self, path, osremove=os.remove):
"""On Windows, if a process was recently killed and it held on to a
file, the OS will hold on to the file for a short while. This makes
attempts to delete the file fail. To work around that, this method
will retry for a few seconds until Windows is done with the file."""
try:
exceptions.WindowsError
except AttributeError:
exceptions.WindowsError = FileSystem._WindowsError
retry_timeout_sec = 3.0
sleep_interval = 0.1
while True:
try:
osremove(path)
return True
except exceptions.WindowsError, e:
time.sleep(sleep_interval)
retry_timeout_sec -= sleep_interval
if retry_timeout_sec < 0:
raise e
def rmtree(self, path):
"""Delete the directory rooted at path, whether empty or not."""
shutil.rmtree(path, ignore_errors=True)
def copytree(self, source, destination):
shutil.copytree(source, destination)
def split(self, path):
"""Return (dirname, basename + '.' + ext)"""
return os.path.split(path)
def splitext(self, path):
"""Return (dirname + os.sep + basename, '.' + ext)"""
return os.path.splitext(path)
def compare(self, path1, path2):
return filecmp.cmp(path1, path2)
| gpl-2.0 | -5,104,576,357,975,560,000 | 35.23871 | 140 | 0.619904 | false |
ekadhanda/bin | python/coda-scrip.py | 1 | 9971 | #! /usr/bin/env python
# Written by Vasaant S/O Krishnan Friday, 19 May 2017
# Run without arguments for instructions.
import sys
usrFile = sys.argv[1:]
if len(usrFile) == 0:
print ""
print "# Script to read in file of the CODA format and perform some basic"
print "# statistical computations. An index.txt and chain.txt file must be"
print "# provided and the script will automatically identify them for internal"
print "# use. Options are:"
print ""
print "# print = Outputs mean, std and confidence interval (default 95%)."
print "# var = Specify your required variable for hist, trace."
print "# per = Specify your required confidence interval (requires var=)."
print "# hist = Plot histogram (requires var=)."
print "# bins = Choose bin size (default bins=100)"
print "# trace = Trace plot (requires var=)."
print ""
print " -->$ coda-script.py CODAindex.txt CODAchain.txt per=xx var=xx bins=xx print hist trace"
print ""
exit()
import re
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
#=====================================================================
# Define variables.
#
ints = '\s+?([+-]?\d+)' # Integers for regex
#floats = '\s+?([+-]?\d+(?:\.\d+)?)' # Floats or int
floats = '\s+?([+-]?\d+(?:\.\d+)?|\.\d+)([eE][+-]?\d+)?' # Floats or int or scientific
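# e.g. ' -1.23e-04' is matched with the mantissa '-1.23' in one group and
# the exponent 'e-04' in the following group; both pieces must be
# recombined to recover the full value.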
codaFiles = [] # CODAindex and CODAchain files
indexFileFnd = False # CODAindex file identified?
chainFileFnd = False # CODAchain file identified?
indexCodes = {} # Dictionary containing CODAindex info.
# chainIndx = [] # Indexes/Column 1 of CODAchain.txt file
chainData = [] # Data/Column 2 of CODAchain.txt file
percentile = 95.0 # Default percentile
bins = 100 # Default number of bins for histogram
reqIndxCode = '' # User requested varible for hist, trace
#=====================================================================
#=====================================================================
# Determine which are the CODAindex and CODAchain files and
# automatically assign them to their respective variables.
#
for i in usrFile:
codaSearch = re.search('.txt',i)
if codaSearch:
codaFiles.append(i)
if len(codaFiles) == 2: # Assuming 1 index and 1 chain file
for j in codaFiles:
with open(j,'r') as chkTyp: # Run a quick check on the first line only
firstLine = chkTyp.readline()
codaIndex = re.search('^(\S+)' + ints + ints + '$', firstLine)
codaChain = re.search('^(\d+)' + floats + '$', firstLine)
if codaIndex:
indexFile = j
indexFileFnd = True
if codaChain:
chainFile = j
chainFileFnd = True
else:
    print "Insufficient files of CODA*.txt format."
print "Check your input files."
#=====================================================================
#=====================================================================
# Determine percentile
#
for i in usrFile:
userPercentile = re.search('per=([+-]?\d+(?:\.\d+)?)',i)
if userPercentile:
percentile = abs(float(userPercentile.group(1)))
usrFile.append('print')
#=====================================================================
#=====================================================================
# Determine user requested variable from CODAIndex file
#
for i in usrFile:
userReqCodaIndx = re.search('var=(\S+)',i)
if userReqCodaIndx:
reqIndxCode = str(userReqCodaIndx.group(1))
# ... same for number of bins:
for i in usrFile:
userReqBins = re.search('bins=(\d+)',i)
if userReqBins:
bins = int(userReqBins.group(1))
usrFile.append('hist')
#=====================================================================
if indexFileFnd and chainFileFnd:
#=====================================================================
# Harvest index file for the variable list and corresponding
# [start,stop] coords:
#
for line in open(indexFile, 'r'):
reqIndex = re.search('^(\S+)' + ints + ints + '$', line)
if reqIndex:
key = str(reqIndex.group(1))
value = [int(reqIndex.group(2)), int(reqIndex.group(3))]
indexCodes[key] = value
maxElement = max(indexCodes, key = indexCodes.get) # The key with the largest value
chainLen = max(indexCodes[maxElement]) # The largest value (expected amt. of data)
#=====================================================================
#=====================================================================
# I thought that initialising the arrays before filling them
# would be faster. It is not.
#
# chainIndx = np.zeros(chainLen)
# chainData = np.zeros(chainLen)
# with open(chainFile, 'r') as harvestVals:
# for i in range(chainLen):
# currLine = harvestVals.readline()
# reqChain = re.search('^(\d+)' + floats + '$', currLine)
# if reqChain:
# chainIndx[i] = int(reqChain.group(1))
# chainData[i] = float(reqChain.group(2))
#=====================================================================
#=====================================================================
# Harvest chain file
#
for line in open(chainFile, 'r'):
reqChain = re.search('^(\d+)' + floats + '$', line)
if reqChain:
#chainIndx.append( int(reqChain.group(1)))
            chainData.append(float(reqChain.group(2) + (reqChain.group(3) or '')))
#chainIndx = np.array(chainIndx)
chainData = np.array(chainData)
#=====================================================================
#=====================================================================
# Basic check on the harvest by comparing harvested vs. expected
# no. of data.
#
if len(chainData) != chainLen:
print " Warning! "
print " %10d lines expected from %s."%(chainLen,indexFile)
print " %10d lines harvested from %s."%(len(chainData),chainFile)
#=====================================================================
#=====================================================================
# Output some basic statistics to the terminal.
#
if 'print' in usrFile:
print "\n%20s %10s %10s"%("mean","std",str(percentile)+"%")
for i in indexCodes:
strtIndx = indexCodes[i][0] - 1 # Python starts from 0. CODAindex from 1
stopIndx = indexCodes[i][1] # ... but np.array needs this to get to the end
npPerTile = np.percentile(chainData[strtIndx:stopIndx],[0,percentile]) # Numpy sorts internally
minPer = npPerTile[0]
maxPer = npPerTile[1]
print "%8s %10.4f %10.4f %6d, %6.3f"%(i, chainData[strtIndx:stopIndx].mean(),
chainData[strtIndx:stopIndx].std(),
minPer,maxPer
)
print ""
#=====================================================================
#=====================================================================
# Trace plot that gives the variable value as a function of its
# rank (or position in the chain)
#
if 'trace' in usrFile:
if reqIndxCode != '':
for i in indexCodes:
if reqIndxCode == i:
strtIndx = indexCodes[i][0] - 1 # Python starts from 0. CODAindex from 1
stopIndx = indexCodes[i][1] # ... but np.array needs this to get to the end
traceRank = range(stopIndx-strtIndx)
plt.plot(traceRank,chainData[strtIndx:stopIndx])
plt.xlabel('Rank')
plt.ylabel('Variable: '+i)
plt.show()
else:
print "No variable selected by user for trace plot."
#=====================================================================
#=====================================================================
# Histogram
#
if 'hist' in usrFile:
if reqIndxCode != '':
for i in indexCodes:
if reqIndxCode == i:
strtIndx = indexCodes[i][0] - 1 # Python starts from 0. CODAindex from 1
stopIndx = indexCodes[i][1] # ... but np.array needs this to get to the end
[n, bins, patches] = plt.hist(chainData[strtIndx:stopIndx],
bins = bins,
normed = True,
histtype= 'step'
)
y = mlab.normpdf(bins, chainData[strtIndx:stopIndx].mean(),
chainData[strtIndx:stopIndx].std()
)
npPerTile = np.percentile(chainData[strtIndx:stopIndx],[0,percentile])
maxPer = npPerTile[1]
plt.axvline(x=maxPer, color='k', label=str(percentile)+'%',ls=':',lw=0.8)
plt.plot(bins,y,'--')
plt.ylabel('Variable: '+i)
plt.legend(frameon=False)
plt.show()
else:
print "No variable selected by user for histogram."
#=====================================================================
| mit | 5,365,606,739,219,322,000 | 40.032922 | 110 | 0.446094 | false |
flacjacket/dotfiles | jupyter/ipython_kernel_config.py | 1 | 1849 | import os
import sys
import matplotlib as mpl
from matplotlib import cm
from colormaps import (
inferno, magma, plasma, viridis, inferno_r, magma_r, plasma_r, viridis_r
)
# Add the notebook dir to the path
if os.name == 'nt':
nbdir = 'D:/Dropbox/Notebooks'
else:
nbdir = '~/notebooks'
nbdir = os.path.expanduser(nbdir)
nbdir = os.path.normpath(nbdir)
sys.path.append(nbdir)
# Let's fix jet even before matplotlib 2.0
mpl.cm.cmap_d["inferno"] = inferno
mpl.cm.cmap_d["inferno_r"] = inferno_r
mpl.cm.cmap_d["magma"] = magma
mpl.cm.cmap_d["magma_r"] = magma_r
mpl.cm.cmap_d["plasma"] = plasma
mpl.cm.cmap_d["plasma_r"] = plasma_r
mpl.cm.cmap_d["viridis"] = viridis
mpl.cm.cmap_d["viridis_r"] = viridis_r
mpl.cm.inferno = inferno
mpl.cm.inferno_r = inferno_r
mpl.cm.magma = magma
mpl.cm.magma_r = magma_r
mpl.cm.plasma = plasma
mpl.cm.plasma_r = plasma_r
mpl.cm.viridis = viridis
mpl.cm.viridis_r = viridis_r
mpl.rcParams["image.cmap"] = "viridis"
# Load the default config
load_subconfig("ipython_config.py")
# Set a bunch of stuff to import automatically
c = get_config()
app = c.IPKernelApp
app.matplotlib = "inline"
c.InlineBackend.rc = {'figure.dpi': 80, 'figure.facecolor': 'white'}
app.exec_lines.append("import numpy as np")
app.exec_lines.append("import scipy as sp")
app.exec_lines.append("import pandas as pd")
app.exec_lines.append("import matplotlib as mpl")
app.exec_lines.append("import matplotlib.pyplot as plt")
app.exec_lines.append("from scipy import optimize")
app.exec_lines.append("from scipy import interpolate")
app.exec_lines.append("nbdir = r'{}'".format(nbdir))
app.exec_lines.append("from sympy import init_printing")
# Setup the SymPy pretty printing
if os.name == 'nt':
app.exec_lines.append("init_printing(use_latex='mathjax')")
else:
app.exec_lines.append("init_printing(use_latex=True)")
| mit | -2,205,698,834,725,386,200 | 27.015152 | 76 | 0.716604 | false |
ESOedX/edx-platform | common/djangoapps/third_party_auth/management/commands/remove_social_auth_users.py | 1 | 2126 | """
Management command to remove social auth users. Intended for use in masters
integration sandboxes to allow partners reset users and enrollment data.
"""
from __future__ import absolute_import
import logging
from django.conf import settings
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from six.moves import input
from third_party_auth.models import SAMLProviderConfig
log = logging.getLogger(__name__)
class Command(BaseCommand):
"""
Management command to remove all social auth entries AND the corresponding edX
users for a given IDP.
Usage:
manage.py remove_social_auth_users gtx
"""
confirmation_prompt = "Type 'confirm' to continue with deletion\n"
def add_arguments(self, parser):
parser.add_argument('IDP', help='slug for the idp to remove all users from')
parser.add_argument(
'--force',
action='store_true',
help='Skip manual confirmation step before deleting objects',
)
@transaction.atomic
def handle(self, *args, **options):
slug = options['IDP']
if not settings.FEATURES.get('ENABLE_ENROLLMENT_RESET'):
raise CommandError('ENABLE_ENROLLMENT_RESET feature not enabled on this enviroment')
try:
SAMLProviderConfig.objects.current_set().get(slug=slug)
except SAMLProviderConfig.DoesNotExist:
raise CommandError(u'No SAML provider found for slug {}'.format(slug))
users = User.objects.filter(social_auth__provider=slug)
user_count = len(users)
        # Delete immediately so the exact record counts can be logged
        # below; the @transaction.atomic wrapper rolls the deletion back
        # if the confirmation prompt fails and CommandError is raised.
        count, models = users.delete()
log.info(
u'\n%s users and their related models will be deleted:\n%s\n',
user_count,
models,
)
if not options['force']:
confirmation = input(self.confirmation_prompt)
if confirmation != 'confirm':
raise CommandError('User confirmation required. No records have been modified')
log.info(u'Deleting %s records...', count)
| agpl-3.0 | -630,919,036,611,812,900 | 32.21875 | 96 | 0.663688 | false |
akretion/odoo | addons/mrp/models/mrp_routing.py | 7 | 5147 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, _
class MrpRouting(models.Model):
""" Specifies routings of work centers """
_name = 'mrp.routing'
_description = 'Routings'
name = fields.Char('Routing', required=True)
active = fields.Boolean(
'Active', default=True,
help="If the active field is set to False, it will allow you to hide the routing without removing it.")
code = fields.Char(
'Reference',
copy=False, default=lambda self: _('New'), readonly=True)
note = fields.Text('Description')
operation_ids = fields.One2many(
'mrp.routing.workcenter', 'routing_id', 'Operations',
copy=True, oldname='workcenter_lines')
location_id = fields.Many2one(
'stock.location', 'Raw Materials Location',
help="Keep empty if you produce at the location where you find the raw materials. "
"Set a location if you produce at a fixed location. This can be a partner location "
"if you subcontract the manufacturing operations.")
company_id = fields.Many2one(
'res.company', 'Company',
default=lambda self: self.env['res.company']._company_default_get('mrp.routing'))
@api.model
def create(self, vals):
if 'code' not in vals or vals['code'] == _('New'):
vals['code'] = self.env['ir.sequence'].next_by_code('mrp.routing') or _('New')
return super(MrpRouting, self).create(vals)
class MrpRoutingWorkcenter(models.Model):
_name = 'mrp.routing.workcenter'
_description = 'Work Center Usage'
_order = 'sequence, id'
name = fields.Char('Operation', required=True)
workcenter_id = fields.Many2one('mrp.workcenter', 'Work Center', required=True)
sequence = fields.Integer(
'Sequence', default=100,
help="Gives the sequence order when displaying a list of routing Work Centers.")
routing_id = fields.Many2one(
'mrp.routing', 'Parent Routing',
index=True, ondelete='cascade', required=True,
help="The routing contains all the Work Centers used and for how long. This will create work orders afterwards "
"which alters the execution of the manufacturing order.")
note = fields.Text('Description')
company_id = fields.Many2one(
'res.company', 'Company',
readonly=True, related='routing_id.company_id', store=True)
worksheet = fields.Binary('worksheet')
time_mode = fields.Selection([
('auto', 'Compute based on real time'),
('manual', 'Set duration manually')], string='Duration Computation',
default='auto')
time_mode_batch = fields.Integer('Based on', default=10)
time_cycle_manual = fields.Float(
'Manual Duration', default=60,
help="Time in minutes. Is the time used in manual mode, or the first time supposed in real time when there are not any work orders yet.")
time_cycle = fields.Float('Duration', compute="_compute_time_cycle")
workorder_count = fields.Integer("# Work Orders", compute="_compute_workorder_count")
batch = fields.Selection([
('no', 'Once all products are processed'),
('yes', 'Once a minimum number of products is processed')], string='Next Operation',
help="Set 'no' to schedule the next work order after the previous one. Set 'yes' to produce after the quantity set in 'Quantity To Process' has been produced.",
default='no', required=True)
batch_size = fields.Float('Quantity to Process', default=1.0)
workorder_ids = fields.One2many('mrp.workorder', 'operation_id', string="Work Orders")
@api.multi
@api.depends('time_cycle_manual', 'time_mode', 'workorder_ids')
def _compute_time_cycle(self):
manual_ops = self.filtered(lambda operation: operation.time_mode == 'manual')
for operation in manual_ops:
operation.time_cycle = operation.time_cycle_manual
for operation in self - manual_ops:
data = self.env['mrp.workorder'].read_group([
('operation_id', '=', operation.id),
('state', '=', 'done')], ['operation_id', 'duration', 'qty_produced'], ['operation_id'],
limit=operation.time_mode_batch)
count_data = dict((item['operation_id'][0], (item['duration'], item['qty_produced'])) for item in data)
if count_data.get(operation.id) and count_data[operation.id][1]:
operation.time_cycle = (count_data[operation.id][0] / count_data[operation.id][1]) * (operation.workcenter_id.capacity or 1.0)
else:
operation.time_cycle = operation.time_cycle_manual
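    # Worked example (illustrative figures): if the sampled done work
    # orders for an operation total 120 minutes for 4 produced units on a
    # work center with capacity 2, time_cycle = (120 / 4) * 2 = 60 minutes.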
@api.multi
def _compute_workorder_count(self):
data = self.env['mrp.workorder'].read_group([
('operation_id', 'in', self.ids),
('state', '=', 'done')], ['operation_id'], ['operation_id'])
count_data = dict((item['operation_id'][0], item['operation_id_count']) for item in data)
for operation in self:
operation.workorder_count = count_data.get(operation.id, 0)
| agpl-3.0 | 1,015,476,141,267,269,400 | 49.960396 | 168 | 0.63979 | false |
cts-admin/cts | cts/members/test_views.py | 1 | 5788 | from datetime import date, timedelta
from django.test import TestCase
from django.urls import reverse
from .models import CorporateMember, IndividualMember, Team
from .utils import get_temporary_image
class IndividualMemberListViewTests(TestCase):
url = reverse('members:individual-members')
@classmethod
def setUpTestData(cls):
IndividualMember.objects.create(
name='CTS Conservationist',
email='[email protected]'
)
def test_view_render(self):
response = self.client.get(self.url)
self.assertContains(response, 'Individual Members')
self.assertContains(response, 'CTS Conservationist')
def test_view_should_only_render_former_members_once(self):
IndividualMember.objects.create(
name='Former CTS Conservationist',
email='[email protected]',
member_since=date(2015, 7, 26),
member_until=date(2015, 7, 27),
)
response = self.client.get(self.url)
self.assertContains(response, 'Former CTS Conservationist', count=1)
class CorporateMemberListViewTests(TestCase):
url = reverse('members:corporate-members')
@classmethod
def setUpTestData(cls):
cls.today = today = date.today()
cls.member = CorporateMember.objects.create(
display_name='Corporation',
contact_email='[email protected]',
membership_level=2,
)
cls.member.invoice_set.create(
sent_date=today,
amount=500,
paid_date=today,
expiration_date=today + timedelta(days=1),
)
def test_view_render(self):
response = self.client.get(self.url)
self.assertContains(response, 'Corporate Members')
self.assertContains(response, 'Corporation')
def test_view_should_not_render_unapproved(self):
CorporateMember.objects.create(
display_name='Corporation unapproved',
contact_email='[email protected]',
membership_level=2,
)
response = self.client.get(self.url)
self.assertNotContains(response, 'Corporation unapproved')
def test_view_renders_orgs_by_tier(self):
member = CorporateMember.objects.create(
display_name='AAA',
contact_email='[email protected]',
membership_level=2,
)
member.invoice_set.create(
sent_date=self.today,
# shouldn't sort by amount
amount=self.member.invoice_set.first().amount - 1,
paid_date=self.today,
expiration_date=self.today + timedelta(days=1),
)
response = self.client.get(self.url)
members = response.context['members']
self.assertEqual(
sorted(members.keys()),
['bronze', 'diamond', 'gold', 'platinum', 'silver']
)
self.assertQuerysetEqual(
members['silver'],
['<CorporateMember: Corporation>', '<CorporateMember: AAA>']
)
class CorporateMemberJoinViewTests(TestCase):
def test_get(self):
response = self.client.get(reverse('members:corporate-members-join'))
self.assertContains(response, "Become a CTS Corporate Member")
def test_submit_success(self):
data = {
'display_name': 'Foo Widgets',
'billing_name': 'Foo Widgets, Inc.',
'logo': get_temporary_image(),
'url': 'http://example.com',
'contact_name': 'Joe Conservationist',
'contact_email': '[email protected]',
'billing_email': '',
'membership_level': 2,
'address': 'USA',
'description': 'We make widgets!',
'cts_usage': 'fun',
'amount': 2000,
}
response = self.client.post(reverse('members:corporate-members-join'), data)
self.assertRedirects(response, reverse('members:corporate-members-join-thanks'))
member = CorporateMember.objects.latest('id')
self.assertEqual(member.display_name, data['display_name'])
self.assertEqual(member.invoice_set.get().amount, data['amount'])
class CorporateMemberRenewalViewTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.member = CorporateMember.objects.create(
display_name='Corporation',
contact_email='[email protected]',
membership_level=2,
)
def test_get(self):
response = self.client.get(self.member.get_renewal_link())
self.assertContains(response, 'Become a CTS Corporate Member')
self.assertEqual(response.context['form'].instance, self.member)
def test_invalid_token(self):
url = reverse('members:corporate-members-renew', kwargs={'token': 'aaaaa'})
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
class TeamListViewTests(TestCase):
url = reverse('members:teams')
@classmethod
def setUpTestData(cls):
dev = IndividualMember.objects.create(
name='CTS Conservationist',
email='[email protected]',
)
cls.security_team = Team.objects.create(name='Security team')
cls.ops_team = Team.objects.create(name='Ops team', slug='ops', description='Ops stuff.')
cls.ops_team.members.add(dev)
def test_get(self):
response = self.client.get(self.url)
# Sorted by name
self.assertSequenceEqual(response.context['teams'], [self.ops_team, self.security_team])
self.assertContains(response, '<h3 id="ops-team">Ops team</h3>')
self.assertContains(response, '<p>Ops stuff.</p>')
self.assertContains(response, '<ul><li>CTS Conservationist</li></ul>', html=True)
| gpl-3.0 | 4,546,342,254,215,766,000 | 35.402516 | 97 | 0.621113 | false |
Xilinx/hopper | hopper/utils/git/watcher.py | 1 | 5949 | # Copyright (c) 2015 Xilinx Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import urlparse
import shutil
import datetime
from hopper.utils.logger import *
import hopper.utils.git.tasks
import hopper.utils.git.repo
import hopper.source.meta
import threading
class Watcher:
class GroupState:
def __init__(self, layers):
self.layers = layers
self.refstate = {}
def getRefPairs(self):
pairs = []
for i in self.layers:
if i.source and isinstance(i.source, hopper.source.meta.GitSource):
if i.source.canFetch():
# TODO: handle ref/tag/commit properly below
pairs.append((i.source.remote, "refs/heads/" + i.source.ref))
return pairs
def filterPairs(self, remoterefs):
filteredrefs = {}
for p in self.getRefPairs():
if p[0] in remoterefs:
for i in remoterefs[p[0]].iteritems():
if i[0] == p[1]:
if p[0] not in filteredrefs:
filteredrefs[p[0]] = {}
filteredrefs[p[0]][i[0]] = i[1]
return filteredrefs
def update(self, remoterefs, trigger = False):
rrefs = self.filterPairs(remoterefs)
pairs = self.getRefPairs()
changed = False
oldrefstate = self.refstate
newrefstate = {}
for i in pairs:
if i[0] in rrefs:
if i[1] in rrefs[i[0]]:
newcommit = rrefs[i[0]][i[1]]
if i[0] not in newrefstate:
newrefstate[i[0]] = {}
newrefstate[i[0]][i[1]] = newcommit
log("remote: %s, ref: %s, value = %s" % (i[0], i[1], newcommit))
if trigger:
changed = True
if oldrefstate != None:
if i[0] in oldrefstate and i[1] in oldrefstate[i[0]]:
if newrefstate[i[0]][i[1]] != oldrefstate[i[0]][i[1]]:
changed = True
self.refstate = newrefstate
return changed
def cloneRefPin(self, remoterefs):
filtered = self.filterPairs(remoterefs)
# create layers that match the layers object, fill in pinned refs
pinnedlayers = hopper.source.meta.LayerCollection(self.layers.defaultversion)
for i in self.layers:
if isinstance(i.source, hopper.source.meta.GitSource):
					# TODO: fix up picking of the ref name
refname = "refs/heads/" + i.source.ref
refpin = None
if i.source.remote in filtered:
refs = filtered[i.source.remote]
if refname in refs:
refpin = refs[refname]
newsource = hopper.source.meta.GitSource(i.source.remote, refpin)
else:
newsource = i.source
pinnedlayers.add(i.getFullName(), newsource)
return pinnedlayers
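	# GroupState above tracks, for one group of layers, the remote refs being
	# watched and the commits they last pointed at; Watcher polls every group
	# and queues a ref-pinned snapshot of the layers whenever a watched ref moves.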
def __init__(self, environment):
self.environment = environment
		self.stopevent = threading.Event()
self.thread = None
self.interval = 0
self.lock = threading.RLock()
self.groups = []
self.changeevent = threading.Condition()
self.changequeue = []
def addLayers(self, layers):
group = Watcher.GroupState(layers)
self.groups.append(group)
def start(self, interval = 30):
if self.thread and self.thread.isAlive():
return
self.interval = interval
self.thread = threading.Thread(target = self.__worker__)
		self.thread.daemon = True
self.thread.start()
	def stop(self):
		if self.thread and self.thread.isAlive():
			self.stopevent.set()
			self.thread.join()
def alive(self):
if self.thread and self.thread.isAlive():
return True
return False
def trigger(self):
self.__check__(True)
def __check__(self, trigger = False):
with self.lock:
haschanges = False
remotes = []
for i in self.groups:
for p in i.getRefPairs():
if p[0] not in remotes:
remotes.append(p[0])
self.environment.debug("need to update for the following remotes -> %s" % remotes)
refstate = {}
for i in remotes:
self.environment.log("Grabbing refs from remote for %s" % i)
result = hopper.utils.git.tasks.GitTask.run(["ls-remote", i], environment = self.environment)
if result[0] == 0:
refstate[i] = {}
for r in result[1].splitlines():
parts = r.split()
refstate[i][parts[1]] = parts[0]
self.environment.debug("got refs -> %s" % repr(refstate[i]))
else:
self.environment.error("Failed to get remote state for '%s' error message = %s" % (i, result[1]))
return
haschanges = False
for i in self.groups:
if i.update(refstate, trigger):
self.environment.log("Changes have happened since last check, pinning")
changes = i.cloneRefPin(refstate)
self.changequeue.append((i.layers, changes, datetime.datetime.utcnow()))
haschanges = True
if haschanges:
with self.changeevent:
self.changeevent.notifyAll()
def __worker__(self):
		while not self.stopevent.wait(self.interval):
self.__check__()
def wait(self):
if self.alive():
if self.hasnext():
return
with self.changeevent:
self.changeevent.wait()
def hasnext(self):
with self.lock:
if len(self.changequeue) != 0:
return True
return False
def getnext(self):
with self.lock:
if len(self.changequeue) != 0:
return self.changequeue.pop()
return None
| mit | 497,451,319,322,704,600 | 28.161765 | 102 | 0.675408 | false |
SLongofono/448_Project4 | testMath.py | 1 | 5536 | import os,sys,inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
import Variance
import math
def testVariance():
print ("1. Testing Variance")
weighting = [2,2,2,2,2,2,2,2,2,2]
test1 = [['artist1', 'artist2', 'artist3'],['genre1', 'genre2', 'genre3'],0,0,0,0,0,0,0,0]
test2 = [['artist1'],['genre1', 'genre2'],1,2,3,4,5,6,7,8]
test3 = [['artist1'],['genre1','genre2'],6,7,8,9,2,3,4,5]
test4 = []
emptylist = -1
diffList1 = []
diffList2 = []
knownVal1 = [0,0,1,2,3,4,5,6,7,8]
knownVal2 = [0,0,5,5,5,5,3,3,3,3]
print "\t A. Variance between a populated list and a list of zeros ..."
for i in range(len(test1)):
diffList1.append(Variance.getVariance(test1,test2)[i] -knownVal1[i])
print "\t B. Variance between 2 populated lists ..."
for i in range(len(test2)):
diffList2.append(Variance.getVariance(test3,test2)[i] - knownVal2[i])
print "\t C. Variance calculated on an empty List ..."
emptylistValue = Variance.getVariance(test3,test4)
if emptylistValue == emptylist:
for i in range (len(diffList1)):
if ((diffList1[i] or diffList2[i]) > .0000001):
return False
return True
def testWeightedDifference():
print "2. Testing Weighted Difference"
weighting = [2,2,2,2,2,2,2,2,2,2]
badWeighting = []
test1 = [['artist1', 'artist2', 'artist3'],['genre1', 'genre2', 'genre3'],0,0,0,0,0,0,0,0]
test2 = [['artist1'],['genre1', 'genre2'],1,2,3,4,5,6,7,8]
test3 = [['artist1'],['genre1', 'genre2'],6,7,8,9,2,3,4,5]
test4 = []
diffList1 = []
diffList2 = []
diffList3 = []
knownVal1 = [0,0,2,4,6,8,10,12,14,16]
knownVal2 = [0,0,10,10,10,10,6,6,6,6]
emptylistValue = -1
print "\t A. Weighted Difference between a populated list and a list of zeros ..."
for i in range(len(test1)):
diffList1.append(Variance.getWeightedDifference(test2, test1, weighting)[i] - knownVal1[i])
print "\t B. Weighted Difference between 2 populated lists ..."
for i in range(len(test1)):
diffList2.append(Variance.getWeightedDifference(test3, test2, weighting)[i] - knownVal2[i])
print "\t C. Testing when Weighting is an empty list ..."
diffList3 = Variance.getWeightedDifference(test3,test2,badWeighting)
print "\t D.Testing when one of the lists is an empty list ..."
emptylist = Variance.getWeightedDifference(test4,test2,weighting)
if emptylist == emptylistValue:
for i in range(len(diffList1)):
if((diffList1[i] or diffList2[i])> .0000001):
return False
return True
def testgetNewWeight():
print "3. Testing getNewWeight"
badstddevs = []
stddevs = [1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0]
knownVal1 = [1, 1, 1, 0.5, 0.333, 0.25, 0.2, 0.167, 0.143, 0.125]
emptylistValue = -1
diffList = []
print "\t A. getNewWeight when stddevs is empty ..."
emptylist = Variance.getNewWeight(badstddevs)
print "\t B. getNewWeight when stddevs is populated ..."
for i in range(len(knownVal1)):
diffList.append(Variance.getNewWeight(stddevs)[i] - knownVal1[i])
if emptylist == emptylistValue:
for i in range(len(diffList)):
if(diffList[i] > .0000001):
return False
return True
def filter2sigmaTest():
print("4. Testing Filter2Sigma")
averages = [[],[],10.0,10.0,10.0,10.0,10.0,10.0,10.0,10.0]
stddevs = [2.0,2.0,2.0,2.0,2.0,2.0,2.0,2.0]
knownVal = [1, 1, 1, 0, 0, 0, 0]
testSongs = [
[[],[], 10.0,10.0,10.0,10.0,10.0,10.0,10.0,10.0],
[[],[], 6.0,10.0,10.0,10.0,10.0,10.0,10.0,10.0],
[[],[], 10.0,10.0,10.0,10.0,10.0,10.0,10.0,14.0],
[[],[], 5.0,10.0,10.0,10.0,10.0,10.0,10.0,10.0],
[[],[], 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0],
[[],[], 15.0,10.0,10.0,10.0,10.0,10.0,10.0,10.0],
[[],[], 10.0,10.0,10.0,10.0,10.0,10.0,10.0,15.0],
]
val = Variance.filter2Sigma(testSongs, averages, stddevs)
return val == knownVal
def teststdDev():
print("5. Testing Standard Deviation")
stdDev = []
diffList = []
listWithRowsAsColumns = [[1,2,3,4,5,6,7,8],
[6,1,9,0,5,7,3,4],
[5,5,5,5,5,5,5,5],
[23,100,1,0,8,9,5,6],
[7,5,4,3,2,1,9,6]
]
listofCalculatedStdDevs = [2.449,3.0,0.0,33.481,2.645]
for column in listWithRowsAsColumns:
vals = [x for x in column]
Nval = len(vals)
		mean = sum(vals) / float(Nval)  # float division: the rows hold ints and the true mean is fractional
stdDev.append((sum([(x-mean)**2 for x in vals])/(Nval-1))**0.5)
for i in range(len(listofCalculatedStdDevs)):
diffList.append(stdDev[i] - listofCalculatedStdDevs[i])
for i in range(len(diffList)):
if(diffList[i] > .001):
return False
return True
def go():
numTests = 0
numPassed = 0
print "**************************************"
print "********MATH FUNCTION TESTING*********"
print "**************************************"
numTests +=1
if testVariance():
print "\t Variance test passed! \n\n"
numPassed += 1
numTests +=1
if testWeightedDifference():
print "\tWeightedDifference test passed!\n\n"
numPassed +=1
numTests +=1
if testgetNewWeight():
print "\t getNewWeight test passed!\n\n"
numPassed +=1
numTests +=1
if (filter2sigmaTest()):
print "\t f2sigma test passed!\n\n"
numPassed+=1
numTests +=1
if(teststdDev()):
print "\t Standard Deviation Test Passed!"
numPassed +=1
print "Tests: %d\nTests passed: %d\nPercentage: %f\n\n" % (numTests,numPassed, (float(numPassed)/numTests)*100)
return numTests,numPassed
if __name__ == "__main__":
x,y = go()
print "Tests: %d\nTests passed: %d\nPercentage: %f\n\n" % (x,y, (float(y)/x)*100)
| mit | 5,970,112,297,490,376,000 | 28.763441 | 112 | 0.621929 | false |
alissonbf/easy-estoque | docs/conf.py | 1 | 6983 | # -*- coding: utf-8 -*-
#
# coopervap documentation build configuration file, created by
# sphinx-quickstart on Wed Oct 6 18:00:51 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'coopervap'
copyright = u'2010, TI Livre'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'coopervapdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'coopervap.tex', u'coopervap Documentation',
u'TI Livre', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'coopervap', u'coopervap Documentation',
[u'TI Livre'], 1)
]
| gpl-2.0 | 836,110,182,731,122,000 | 31.328704 | 80 | 0.707719 | false |
delftrobotics/keras-retinanet | tests/layers/test_filter_detections.py | 1 | 6618 | """
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from tensorflow import keras
import keras_retinanet.backend
import keras_retinanet.layers
import numpy as np
class TestFilterDetections(object):
def test_simple(self):
# create simple FilterDetections layer
filter_detections_layer = keras_retinanet.layers.FilterDetections()
# create simple input
boxes = np.array([[
[0, 0, 10, 10],
[0, 0, 10, 10], # this will be suppressed
]], dtype=keras.backend.floatx())
boxes = keras.backend.constant(boxes)
classification = np.array([[
[0, 0.9], # this will be suppressed
[0, 1],
]], dtype=keras.backend.floatx())
classification = keras.backend.constant(classification)
# compute output
actual_boxes, actual_scores, actual_labels = filter_detections_layer.call([boxes, classification])
actual_boxes = keras.backend.eval(actual_boxes)
actual_scores = keras.backend.eval(actual_scores)
actual_labels = keras.backend.eval(actual_labels)
# define expected output
expected_boxes = -1 * np.ones((1, 300, 4), dtype=keras.backend.floatx())
expected_boxes[0, 0, :] = [0, 0, 10, 10]
expected_scores = -1 * np.ones((1, 300), dtype=keras.backend.floatx())
expected_scores[0, 0] = 1
expected_labels = -1 * np.ones((1, 300), dtype=keras.backend.floatx())
expected_labels[0, 0] = 1
# assert actual and expected are equal
np.testing.assert_array_equal(actual_boxes, expected_boxes)
np.testing.assert_array_equal(actual_scores, expected_scores)
np.testing.assert_array_equal(actual_labels, expected_labels)
def test_simple_with_other(self):
# create simple FilterDetections layer
filter_detections_layer = keras_retinanet.layers.FilterDetections()
# create simple input
boxes = np.array([[
[0, 0, 10, 10],
[0, 0, 10, 10], # this will be suppressed
]], dtype=keras.backend.floatx())
boxes = keras.backend.constant(boxes)
classification = np.array([[
[0, 0.9], # this will be suppressed
[0, 1],
]], dtype=keras.backend.floatx())
classification = keras.backend.constant(classification)
other = []
other.append(np.array([[
[0, 1234], # this will be suppressed
[0, 5678],
]], dtype=keras.backend.floatx()))
other.append(np.array([[
5678, # this will be suppressed
1234,
]], dtype=keras.backend.floatx()))
other = [keras.backend.constant(o) for o in other]
# compute output
actual = filter_detections_layer.call([boxes, classification] + other)
actual_boxes = keras.backend.eval(actual[0])
actual_scores = keras.backend.eval(actual[1])
actual_labels = keras.backend.eval(actual[2])
actual_other = [keras.backend.eval(a) for a in actual[3:]]
# define expected output
expected_boxes = -1 * np.ones((1, 300, 4), dtype=keras.backend.floatx())
expected_boxes[0, 0, :] = [0, 0, 10, 10]
expected_scores = -1 * np.ones((1, 300), dtype=keras.backend.floatx())
expected_scores[0, 0] = 1
expected_labels = -1 * np.ones((1, 300), dtype=keras.backend.floatx())
expected_labels[0, 0] = 1
expected_other = []
expected_other.append(-1 * np.ones((1, 300, 2), dtype=keras.backend.floatx()))
expected_other[-1][0, 0, :] = [0, 5678]
expected_other.append(-1 * np.ones((1, 300), dtype=keras.backend.floatx()))
expected_other[-1][0, 0] = 1234
# assert actual and expected are equal
np.testing.assert_array_equal(actual_boxes, expected_boxes)
np.testing.assert_array_equal(actual_scores, expected_scores)
np.testing.assert_array_equal(actual_labels, expected_labels)
for a, e in zip(actual_other, expected_other):
np.testing.assert_array_equal(a, e)
def test_mini_batch(self):
# create simple FilterDetections layer
filter_detections_layer = keras_retinanet.layers.FilterDetections()
# create input with batch_size=2
boxes = np.array([
[
[0, 0, 10, 10], # this will be suppressed
[0, 0, 10, 10],
],
[
[100, 100, 150, 150],
[100, 100, 150, 150], # this will be suppressed
],
], dtype=keras.backend.floatx())
boxes = keras.backend.constant(boxes)
classification = np.array([
[
[0, 0.9], # this will be suppressed
[0, 1],
],
[
[1, 0],
[0.9, 0], # this will be suppressed
],
], dtype=keras.backend.floatx())
classification = keras.backend.constant(classification)
# compute output
actual_boxes, actual_scores, actual_labels = filter_detections_layer.call([boxes, classification])
actual_boxes = keras.backend.eval(actual_boxes)
actual_scores = keras.backend.eval(actual_scores)
actual_labels = keras.backend.eval(actual_labels)
# define expected output
expected_boxes = -1 * np.ones((2, 300, 4), dtype=keras.backend.floatx())
expected_boxes[0, 0, :] = [0, 0, 10, 10]
expected_boxes[1, 0, :] = [100, 100, 150, 150]
expected_scores = -1 * np.ones((2, 300), dtype=keras.backend.floatx())
expected_scores[0, 0] = 1
expected_scores[1, 0] = 1
expected_labels = -1 * np.ones((2, 300), dtype=keras.backend.floatx())
expected_labels[0, 0] = 1
expected_labels[1, 0] = 0
# assert actual and expected are equal
np.testing.assert_array_equal(actual_boxes, expected_boxes)
np.testing.assert_array_equal(actual_scores, expected_scores)
np.testing.assert_array_equal(actual_labels, expected_labels)
| apache-2.0 | -4,984,361,034,989,192,000 | 37.254335 | 106 | 0.601239 | false |
ww-Kenya/rainbow6 | motion_detection.py | 1 | 5448 | import argparse
import datetime
import imutils
import time
import cv2
import RPi.GPIO as GPIO
import os
import smtplib
from servo import Servo
RESIZE_WIDTH = 500
RESIZE_HEIGHT = 375
THRESHOLD = 30
MAXPIXELVAL = 255
MORNINGTIME = 7
NIGHTTIME = 19
MIN_RECTANGLE = 2000
MAX_RECTANGLE = 90000
HARDDRIVE_LOCATION = "/media/pi/Seagate\ Expansion\ Drive/videos/"
HOME_LOCATION = "/home/pi/test/rainbow6/"
TITLE = ""
server = smtplib.SMTP('smtp.gmail.com',587)
server.starttls()
#server.login("","")
msg = "intruder"
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="path to the video file")
ap.add_argument("-a", "--min-area", type=int, default = MIN_RECTANGLE, help="minimum area size")
ap.add_argument("-m", "--max-area", type=int, default = MAX_RECTANGLE,help="maximum area size")
args = vars(ap.parse_args())
if time.gmtime().tm_hour <= MORNINGTIME or time.gmtime().tm_hour >= NIGHTTIME:
print("Using Pi Camera")
camera = cv2.VideoCapture(1)
time.sleep(0.25)
else:
print("Using regular camera")
camera = cv2.VideoCapture(0)
time.sleep(0.25)
motor = Servo(12, 16, 18, 1.8)
timecount = time.gmtime().tm_sec
firstFrame = None
moved = False
motionDetected = False
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
out = None
resetTimer = (time.gmtime().tm_sec + 30) % 60
settling = False
time.sleep(1)
emailed = False
while True:
(grabbed, frame) = camera.read()
text = "Unoccupied"
if not grabbed:
break
frame = imutils.resize(frame, width=RESIZE_WIDTH)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (21, 21), 0)
if firstFrame is None:
firstFrame = gray
continue
    if resetTimer == time.gmtime().tm_sec:
firstFrame = None
frame = None
thresh = None
frameDelta = None
resetTimer = (time.gmtime().tm_sec + 30) % 60
print("Reseting")
continue
    if settling and settletime == time.gmtime().tm_sec:
settling = False
firstFrame = None
frame = None
thresh = None
frameDelta = None
continue
frameDelta = cv2.absdiff(firstFrame, gray)
thresh = cv2.threshold(frameDelta, THRESHOLD, MAXPIXELVAL, cv2.THRESH_BINARY)[1]
thresh = cv2.dilate(thresh, None, iterations=2)
_, cnts, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
count = 0
for c in cnts:
if cv2.contourArea(c) < args["min_area"]:
continue
if cv2.contourArea(c) > args["max_area"]:
continue
motionTimerMinute = time.gmtime().tm_min
motionDetected = True
if out is None:
TITLE = str(time.gmtime().tm_year) + "-" + str(time.gmtime().tm_mon) + "-" + str(time.gmtime().tm_mday) + "-" + str(time.gmtime().tm_hour) + "-" + str(time.gmtime().tm_min) + '.avi'
out = cv2.VideoWriter(TITLE,fourcc, 20.0,(RESIZE_WIDTH,RESIZE_HEIGHT))
if not emailed:
#server.sendmail("","",msg)
emailed = True
(x, y, w, h) = cv2.boundingRect(c)
        if count == 0:
maxx = x
maxw = w
maxh = h
maxy = y
else:
maxarea = maxw*maxh
if maxarea < w*h:
maxx = x
maxw = w
maxh = h
maxy = y
count = count + 1
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
text = "Occupied"
if motionDetected:
out.write(frame)
cv2.putText(frame, "Room Status: {}".format(text), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
cv2.putText(frame, datetime.datetime.now().strftime("%A %d %B %Y %I %M:%S%p"), (10, frame.shape[0] - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.25, (0, 0, 255), 1)
if count > 0 and not settling:
cv2.rectangle(frame, (maxx, maxy), (maxx + maxw, maxy + maxh), (0, 255, 0), 2)
centerRect = maxx + maxw / 2
if time.gmtime().tm_sec != timecount:
if (centerRect > (RESIZE_WIDTH / 2 + int(RESIZE_WIDTH*0.05))):
motor.moveDegrees(36,0.02)
moved = True
elif (centerRect < (RESIZE_WIDTH / 2 - int(RESIZE_WIDTH*0.05))):
motor.moveDegrees(-36,0.02)
moved = True
timecount = time.gmtime().tm_sec
elif out is not None:
minutes = time.gmtime().tm_min
minuteCheck = (motionTimerMinute + 1) % 60
        if minutes == minuteCheck:
motionDetected = False
print("Releasing out stream")
out.release()
time.sleep(1)
print(HOME_LOCATION+TITLE)
print(HARDDRIVE_LOCATION+TITLE)
#os.rename(HOME_LOCATION+TITLE , HARDDRIVE_LOCATION+TITLE)
out = None
emailed = False
#cv2.imshow("First Frame", firstFrame)
cv2.imshow("Security Feed", frame)
cv2.imshow("Thresh", thresh)
cv2.imshow("Frame Delta", frameDelta)
key = cv2.waitKey(1) & 0xFF
if key == ord("q"):
break
if moved:
moved = False
while motor.ismoving:
pass
settletime = (time.gmtime().tm_sec +2) % 60
settling = True
firstFrame = None
frame = None
thresh = None
frameDelta = None
camera.release()
cv2.destroyAllWindows()
if out is not None:
out.release()
time.sleep(1)
server.quit()
| mit | 8,836,440,019,275,679,000 | 27.978723 | 193 | 0.581131 | false |
architecture-building-systems/CEAforArcGIS | cea/datamanagement/terrain_helper.py | 1 | 6135 | """
This script extracts terrain elevation from NASA - SRTM
https://www2.jpl.nasa.gov/srtm/
"""
import os
import gdal
import numpy as np
import pandas as pd
import requests
from geopandas import GeoDataFrame as Gdf
from osgeo import ogr
from osgeo import osr
from shapely.geometry import Polygon
import cea.config
import cea.inputlocator
from cea.utilities.standardize_coordinates import get_projected_coordinate_system, get_geographic_coordinate_system
__author__ = "Jimeno Fonseca"
__copyright__ = "Copyright 2018, Architecture and Building Systems - ETH Zurich"
__credits__ = ["Jimeno Fonseca"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Daren Thomas"
__email__ = "[email protected]"
__status__ = "Production"
def request_elevation(lon, lat):
# script for returning elevation from lat, long, based on open elevation data
# which in turn is based on SRTM
query = ('https://api.open-elevation.com/api/v1/lookup?locations=' + str(lat) + ',' + str(lon))
r = requests.get(query).json() # json object, various ways you can extract value
# one approach is to use pandas json functionality:
elevation = pd.io.json.json_normalize(r, 'results')['elevation'].values[0]
return elevation
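# Illustrative call (the coordinates are an assumption, not used elsewhere in
# this module; the request needs internet access to api.open-elevation.com):
#
#   elevation_m = request_elevation(8.5417, 47.3769)  # lon, lat near Zurich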
def calc_bounding_box_projected_coordinates(shapefile_zone, shapefile_surroundings):
# connect both files and avoid repetition
data_zone = Gdf.from_file(shapefile_zone)
data_dis = Gdf.from_file(shapefile_surroundings)
data_dis = data_dis.loc[~data_dis["Name"].isin(data_zone["Name"])]
data = data_dis.append(data_zone, ignore_index = True, sort=True)
data = data.to_crs(get_geographic_coordinate_system())
lon = data.geometry[0].centroid.coords.xy[0][0]
lat = data.geometry[0].centroid.coords.xy[1][0]
crs = get_projected_coordinate_system(float(lat), float(lon))
data = data.to_crs(get_projected_coordinate_system(float(lat), float(lon)))
result = data.total_bounds
result = [np.float32(x) for x in result] # in float32 so the raster works
return result, crs, lon, lat
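# The return value unpacks at the call site as
#   bounds, crs, lon, lat = calc_bounding_box_projected_coordinates(path_a, path_b)
# where bounds is [x_min, y_min, x_max, y_max] in the projected coordinate system.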
def terrain_elevation_extractor(locator, config):
"""this is where the action happens if it is more than a few lines in ``main``.
NOTE: ADD YOUR SCRIPT'S DOCUMENATION HERE (how)
NOTE: RENAME THIS FUNCTION (SHOULD PROBABLY BE THE SAME NAME AS THE MODULE)
"""
# local variables:
elevation = config.terrain_helper.elevation
grid_size = config.terrain_helper.grid_size
    extra_border = np.float32(30)  # add a 30 m border to avoid nodata errors at the edges
raster_path = locator.get_terrain()
locator.ensure_parent_folder_exists(raster_path)
# get the bounding box coordinates
    assert os.path.exists(
        locator.get_surroundings_geometry()), 'Create the surroundings geometry file first, or provide the coordinates ' \
                                              'of the area to extract the terrain from as: lon_min, lat_min, lon_max, lat_max'
print("generating terrain from Surroundings area")
bounding_box_surroundings_file, crs, lon, lat = calc_bounding_box_projected_coordinates(locator.get_surroundings_geometry(), locator.get_zone_geometry())
x_min = bounding_box_surroundings_file[0] - extra_border
y_min = bounding_box_surroundings_file[1] - extra_border
x_max = bounding_box_surroundings_file[2] + extra_border
y_max = bounding_box_surroundings_file[3] + extra_border
# make sure output is a whole number when min-max is divided by grid size
x_extra = grid_size - ((x_max - x_min) % grid_size)/2
y_extra = grid_size - ((y_max - y_min) % grid_size)/2
x_min -= x_extra
y_min -= y_extra
x_max += x_extra
y_max += y_extra
##TODO: get the elevation from satellite data. Open-elevation was working, but the project is dying.
# if elevation is None:
# print('extracting elevation from satellite data, this needs connection to the internet')
# elevation = request_elevation(lon, lat)
# print("Proceeding to calculate terrain file with fixed elevation in m of ", elevation)
# else:
# print("Proceeding to calculate terrain file with fixed elevation in m of ",elevation)
print("Proceeding to calculate terrain file with fixed elevation in m of ", elevation)
# now calculate the raster with the fixed elevation
calc_raster_terrain_fixed_elevation(crs, elevation, grid_size, raster_path, locator,
x_max, x_min, y_max, y_min)
def calc_raster_terrain_fixed_elevation(crs, elevation, grid_size, raster_path, locator, x_max, x_min, y_max,
y_min):
# local variables:
temp_shapefile = locator.get_temporary_file("terrain.shp")
cols = int((x_max - x_min) / grid_size)
rows = int((y_max - y_min) / grid_size)
shapes = Polygon([[x_min, y_min], [x_max, y_min], [x_max, y_max], [x_min, y_max], [x_min, y_min]])
geodataframe = Gdf(index=[0], crs=crs, geometry=[shapes])
geodataframe.to_file(temp_shapefile)
    # open the temporary shapefile and grab its polygon layer
    source_ds = ogr.Open(temp_shapefile)
    source_layer = source_ds.GetLayer()
    target_ds = gdal.GetDriverByName('GTiff').Create(raster_path, cols, rows, 1, gdal.GDT_Float32)  # create the output GeoTIFF (one float band)
    target_ds.SetGeoTransform((x_min, grid_size, 0, y_max, 0, -grid_size))  # top-left origin, cell size, north-up (negative y pixel size)
    # add a spatial reference to the raster
target_dsSRS = osr.SpatialReference()
target_dsSRS.ImportFromProj4(crs)
target_ds.SetProjection(target_dsSRS.ExportToWkt())
band = target_ds.GetRasterBand(1)
    band.SetNoDataValue(-9999)  # mark -9999 as the nodata value
    gdal.RasterizeLayer(target_ds, [1], source_layer, burn_values=[elevation])  # burn the fixed elevation into band 1
target_ds = None # closing the file
def main(config):
"""
Create the terrain.tif file
:param config:
:type config: cea.config.Configuration
:return:
"""
assert os.path.exists(config.scenario), 'Scenario not found: %s' % config.scenario
locator = cea.inputlocator.InputLocator(config.scenario)
terrain_elevation_extractor(locator, config)
if __name__ == '__main__':
main(cea.config.Configuration())
| mit | -4,838,354,374,274,120,000 | 40.734694 | 157 | 0.682152 | false |
babble/babble | include/jython/Lib/test/test_support.py | 1 | 19785 | """Supporting definitions for the Python regression tests."""
if __name__ != 'test.test_support':
raise ImportError, 'test_support must be imported from the test package'
import sys
import time
class Error(Exception):
"""Base class for regression test exceptions."""
class TestFailed(Error):
"""Test failed."""
class TestSkipped(Error):
"""Test skipped.
This can be raised to indicate that a test was deliberatly
skipped, but not because a feature wasn't available. For
example, if some resource can't be used, such as the network
appears to be unavailable, this should be raised instead of
TestFailed.
"""
class ResourceDenied(TestSkipped):
"""Test skipped because it requested a disallowed resource.
This is raised when a test calls requires() for a resource that
has not be enabled. It is used to distinguish between expected
and unexpected skips.
"""
verbose = 1 # Flag set to 0 by regrtest.py
use_resources = None # Flag set to [] by regrtest.py
junit_xml_dir = None # Option set by regrtest.py
max_memuse = 0 # Disable bigmem tests (they will still be run with
# small sizes, to make sure they work.)
# _original_stdout is meant to hold stdout at the time regrtest began.
# This may be "the real" stdout, or IDLE's emulation of stdout, or whatever.
# The point is to have some flavor of stdout the user can actually see.
_original_stdout = None
def record_original_stdout(stdout):
global _original_stdout
_original_stdout = stdout
def get_original_stdout():
return _original_stdout or sys.stdout
def unload(name):
try:
del sys.modules[name]
except KeyError:
pass
def unlink(filename):
import os
try:
os.unlink(filename)
except OSError:
pass
def forget(modname):
'''"Forget" a module was ever imported by removing it from sys.modules and
deleting any .pyc and .pyo files.'''
unload(modname)
import os
for dirname in sys.path:
unlink(os.path.join(dirname, modname + os.extsep + 'pyc'))
# Deleting the .pyo file cannot be within the 'try' for the .pyc since
# the chance exists that there is no .pyc (and thus the 'try' statement
# is exited) but there is a .pyo file.
unlink(os.path.join(dirname, modname + os.extsep + 'pyo'))
def is_resource_enabled(resource):
"""Test whether a resource is enabled. Known resources are set by
regrtest.py."""
return use_resources is not None and resource in use_resources
def requires(resource, msg=None):
"""Raise ResourceDenied if the specified resource is not available.
If the caller's module is __main__ then automatically return True. The
possibility of False being returned occurs when regrtest.py is executing."""
# see if the caller's module is __main__ - if so, treat as if
# the resource was set
if sys._getframe().f_back.f_globals.get("__name__") == "__main__":
return
if not is_resource_enabled(resource):
if msg is None:
msg = "Use of the `%s' resource not enabled" % resource
raise ResourceDenied(msg)
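# Sketch of the intended pattern (the test class name is illustrative): a test
# that needs a guarded resource asks for it up front and is skipped cleanly
# when the resource was not enabled on the regrtest command line.
#
#   def test_main():
#       requires('network')
#       run_unittest(NetworkedTests)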
def bind_port(sock, host='', preferred_port=54321):
"""Try to bind the sock to a port. If we are running multiple
tests and we don't try multiple ports, the test can fails. This
makes the test more robust."""
import socket, errno
# Find some random ports that hopefully no one is listening on.
# Ideally each test would clean up after itself and not continue listening
# on any ports. However, this isn't the case. The last port (0) is
# a stop-gap that asks the O/S to assign a port. Whenever the warning
# message below is printed, the test that is listening on the port should
# be fixed to close the socket at the end of the test.
# Another reason why we can't use a port is another process (possibly
# another instance of the test suite) is using the same port.
for port in [preferred_port, 9907, 10243, 32999, 0]:
try:
sock.bind((host, port))
if port == 0:
port = sock.getsockname()[1]
return port
except socket.error, (err, msg):
if err != errno.EADDRINUSE:
raise
print >>sys.__stderr__, \
' WARNING: failed to listen on port %d, trying another' % port
raise TestFailed, 'unable to find port to listen on'
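# Minimal usage sketch: let the helper pick a workable port before listening.
#
#   import socket
#   sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   port = bind_port(sock, 'localhost')
#   sock.listen(1)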
FUZZ = 1e-6
def fcmp(x, y): # fuzzy comparison function
if type(x) == type(0.0) or type(y) == type(0.0):
try:
x, y = coerce(x, y)
fuzz = (abs(x) + abs(y)) * FUZZ
if abs(x-y) <= fuzz:
return 0
except:
pass
elif type(x) == type(y) and type(x) in (type(()), type([])):
for i in range(min(len(x), len(y))):
outcome = fcmp(x[i], y[i])
if outcome != 0:
return outcome
return cmp(len(x), len(y))
return cmp(x, y)
try:
unicode
have_unicode = 1
except NameError:
have_unicode = 0
is_jython = sys.platform.startswith('java')
import os
# Filename used for testing
if os.name == 'java':
# Jython disallows @ in module names
TESTFN = '$test'
elif os.name == 'riscos':
TESTFN = 'testfile'
else:
TESTFN = '@test'
# Unicode name only used if TEST_FN_ENCODING exists for the platform.
if have_unicode:
# Assuming sys.getfilesystemencoding()!=sys.getdefaultencoding()
# TESTFN_UNICODE is a filename that can be encoded using the
# file system encoding, but *not* with the default (ascii) encoding
if isinstance('', unicode):
# python -U
# XXX perhaps unicode() should accept Unicode strings?
TESTFN_UNICODE = "@test-\xe0\xf2"
else:
# 2 latin characters.
TESTFN_UNICODE = unicode("@test-\xe0\xf2", "latin-1")
TESTFN_ENCODING = sys.getfilesystemencoding()
# TESTFN_UNICODE_UNENCODEABLE is a filename that should *not* be
# able to be encoded by *either* the default or filesystem encoding.
# This test really only makes sense on Windows NT platforms
# which have special Unicode support in posixmodule.
if (not hasattr(sys, "getwindowsversion") or
sys.getwindowsversion()[3] < 2): # 0=win32s or 1=9x/ME
TESTFN_UNICODE_UNENCODEABLE = None
else:
# Japanese characters (I think - from bug 846133)
TESTFN_UNICODE_UNENCODEABLE = eval('u"@test-\u5171\u6709\u3055\u308c\u308b"')
try:
# XXX - Note - should be using TESTFN_ENCODING here - but for
# Windows, "mbcs" currently always operates as if in
# errors=ignore' mode - hence we get '?' characters rather than
# the exception. 'Latin1' operates as we expect - ie, fails.
# See [ 850997 ] mbcs encoding ignores errors
TESTFN_UNICODE_UNENCODEABLE.encode("Latin1")
except UnicodeEncodeError:
pass
else:
print \
'WARNING: The filename %r CAN be encoded by the filesystem. ' \
'Unicode filename tests may not be effective' \
% TESTFN_UNICODE_UNENCODEABLE
# Make sure we can write to TESTFN, try in /tmp if we can't
fp = None
try:
fp = open(TESTFN, 'w+')
except IOError:
TMP_TESTFN = os.path.join('/tmp', TESTFN)
try:
fp = open(TMP_TESTFN, 'w+')
TESTFN = TMP_TESTFN
del TMP_TESTFN
except IOError:
print ('WARNING: tests will fail, unable to write to: %s or %s' %
(TESTFN, TMP_TESTFN))
if fp is not None:
fp.close()
unlink(TESTFN)
del os, fp
def findfile(file, here=__file__):
"""Try to find a file on sys.path and the working directory. If it is not
found the argument passed to the function is returned (this does not
necessarily signal failure; could still be the legitimate path)."""
import os
if os.path.isabs(file):
return file
path = sys.path
path = [os.path.dirname(here)] + path
for dn in path:
fn = os.path.join(dn, file)
if os.path.exists(fn): return fn
return file
def verify(condition, reason='test failed'):
"""Verify that condition is true. If not, raise TestFailed.
The optional argument reason can be given to provide
a better error text.
"""
if not condition:
raise TestFailed(reason)
def vereq(a, b):
"""Raise TestFailed if a == b is false.
This is better than verify(a == b) because, in case of failure, the
error message incorporates repr(a) and repr(b) so you can see the
inputs.
Note that "not (a == b)" isn't necessarily the same as "a != b"; the
former is tested.
"""
if not (a == b):
raise TestFailed, "%r == %r" % (a, b)
def sortdict(dict):
"Like repr(dict), but in sorted order."
items = dict.items()
items.sort()
reprpairs = ["%r: %r" % pair for pair in items]
withcommas = ", ".join(reprpairs)
return "{%s}" % withcommas
def check_syntax(statement):
try:
compile(statement, '<string>', 'exec')
except SyntaxError:
pass
else:
print 'Missing SyntaxError: "%s"' % statement
def open_urlresource(url):
import urllib, urlparse
import os.path
filename = urlparse.urlparse(url)[2].split('/')[-1] # '/': it's URL!
for path in [os.path.curdir, os.path.pardir]:
fn = os.path.join(path, filename)
if os.path.exists(fn):
return open(fn)
requires('urlfetch')
print >> get_original_stdout(), '\tfetching %s ...' % url
fn, _ = urllib.urlretrieve(url, filename)
return open(fn)
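# Example (the URL is an assumption modelled on the Unicode test data; the
# first call needs the 'urlfetch' resource, after which the file is cached):
#
#   fp = open_urlresource('http://www.unicode.org/Public/4.1.0/ucd/NormalizationTest.txt')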
#=======================================================================
# Decorator for running a function in a different locale, correctly resetting
# it afterwards.
def run_with_locale(catstr, *locales):
def decorator(func):
def inner(*args, **kwds):
try:
import locale
category = getattr(locale, catstr)
orig_locale = locale.setlocale(category)
except AttributeError:
# if the test author gives us an invalid category string
raise
except:
# cannot retrieve original locale, so do nothing
locale = orig_locale = None
else:
for loc in locales:
try:
locale.setlocale(category, loc)
break
except:
pass
# now run the function, resetting the locale on exceptions
try:
return func(*args, **kwds)
finally:
if locale and orig_locale:
locale.setlocale(category, orig_locale)
inner.func_name = func.func_name
inner.__doc__ = func.__doc__
return inner
return decorator
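# Example application (hypothetical test method; the locale names are
# platform-dependent and the first one that can be set is used):
#
#   @run_with_locale('LC_NUMERIC', 'de_DE', 'German_Germany.1252', '')
#   def test_decimal_point(self):
#       import locale
#       self.assertEqual(locale.localeconv()['decimal_point'], ',')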
#=======================================================================
# Big-memory-test support. Separate from 'resources' because memory use should be configurable.
# Some handy shorthands. Note that these are used for byte-limits as well
# as size-limits, in the various bigmem tests
_1M = 1024*1024
_1G = 1024 * _1M
_2G = 2 * _1G
# Hack to get at the maximum value an internal index can take.
class _Dummy:
def __getslice__(self, i, j):
return j
MAX_Py_ssize_t = _Dummy()[:]
def set_memlimit(limit):
import re
global max_memuse
sizes = {
'k': 1024,
'm': _1M,
'g': _1G,
't': 1024*_1G,
}
m = re.match(r'(\d+(\.\d+)?) (K|M|G|T)b?$', limit,
re.IGNORECASE | re.VERBOSE)
if m is None:
raise ValueError('Invalid memory limit %r' % (limit,))
memlimit = int(float(m.group(1)) * sizes[m.group(3).lower()])
if memlimit > MAX_Py_ssize_t:
memlimit = MAX_Py_ssize_t
if memlimit < _2G - 1:
raise ValueError('Memory limit %r too low to be useful' % (limit,))
max_memuse = memlimit
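# For example, a limit of two and a half gigabytes is expressed as:
#
#   set_memlimit('2.5G')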
def bigmemtest(minsize, memuse, overhead=5*_1M):
"""Decorator for bigmem tests.
'minsize' is the minimum useful size for the test (in arbitrary,
test-interpreted units.) 'memuse' is the number of 'bytes per size' for
the test, or a good estimate of it. 'overhead' specifies fixed overhead,
independant of the testsize, and defaults to 5Mb.
The decorator tries to guess a good value for 'size' and passes it to
the decorated test function. If minsize * memuse is more than the
allowed memory use (as defined by max_memuse), the test is skipped.
Otherwise, minsize is adjusted upward to use up to max_memuse.
"""
def decorator(f):
def wrapper(self):
if not max_memuse:
# If max_memuse is 0 (the default),
# we still want to run the tests with size set to a few kb,
# to make sure they work. We still want to avoid using
# too much memory, though, but we do that noisily.
maxsize = 5147
self.failIf(maxsize * memuse + overhead > 20 * _1M)
else:
maxsize = int((max_memuse - overhead) / memuse)
if maxsize < minsize:
# Really ought to print 'test skipped' or something
if verbose:
sys.stderr.write("Skipping %s because of memory "
"constraint\n" % (f.__name__,))
return
# Try to keep some breathing room in memory use
maxsize = max(maxsize - 50 * _1M, minsize)
return f(self, maxsize)
wrapper.minsize = minsize
wrapper.memuse = memuse
wrapper.overhead = overhead
return wrapper
return decorator
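# Declaration sketch (hypothetical test case): the wrapper picks a size
# between minsize and what max_memuse allows, and passes it to the test.
# memuse=3 estimates three bytes of peak memory per unit of size below.
#
#   @bigmemtest(minsize=_2G, memuse=3)
#   def test_concat(self, size):
#       s = '.' * size
#       self.failUnless(len(s + s) == 2 * size)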
def bigaddrspacetest(f):
"""Decorator for tests that fill the address space."""
def wrapper(self):
if max_memuse < MAX_Py_ssize_t:
if verbose:
sys.stderr.write("Skipping %s because of memory "
"constraint\n" % (f.__name__,))
else:
return f(self)
return wrapper
#=======================================================================
# Preliminary PyUNIT integration.
import unittest
class BasicTestRunner:
def run(self, test):
result = unittest.TestResult()
test(result)
return result
def run_suite(suite, testclass=None):
"""Run all TestCases in their own individual TestSuite"""
if not junit_xml_dir:
# Splitting tests apart slightly changes the handling of the
# TestFailed message
return _run_suite(suite, testclass)
failed = False
for test in suite:
suite = unittest.TestSuite()
suite.addTest(test)
try:
_run_suite(suite, testclass)
except TestFailed, e:
if not failed:
failed = e
if failed:
raise failed
def _run_suite(suite, testclass=None):
"""Run tests from a unittest.TestSuite-derived class."""
if junit_xml_dir:
from junit_xml import JUnitXMLTestRunner
runner = JUnitXMLTestRunner(junit_xml_dir)
elif verbose:
runner = unittest.TextTestRunner(sys.stdout, verbosity=2)
else:
runner = BasicTestRunner()
result = runner.run(suite)
if not result.wasSuccessful():
if len(result.errors) == 1 and not result.failures:
err = result.errors[0][1]
elif len(result.failures) == 1 and not result.errors:
err = result.failures[0][1]
else:
if testclass is None:
msg = "errors occurred; run in verbose mode for details"
else:
msg = "errors occurred in %s.%s" \
% (testclass.__module__, testclass.__name__)
raise TestFailed(msg)
raise TestFailed(err)
def run_unittest(*classes):
"""Run tests from unittest.TestCase-derived classes."""
suite = unittest.TestSuite()
for cls in classes:
if isinstance(cls, (unittest.TestSuite, unittest.TestCase)):
suite.addTest(cls)
else:
suite.addTest(unittest.makeSuite(cls))
if len(classes)==1:
testclass = classes[0]
else:
testclass = None
run_suite(suite, testclass)
#=======================================================================
# doctest driver.
def run_doctest(module, verbosity=None):
"""Run doctest on the given module. Return (#failures, #tests).
If optional argument verbosity is not specified (or is None), pass
test_support's belief about verbosity on to doctest. Else doctest's
usual behavior is used (it searches sys.argv for -v).
"""
import doctest
if verbosity is None:
verbosity = verbose
else:
verbosity = None
# Direct doctest output (normally just errors) to real stdout; doctest
# output shouldn't be compared by regrtest.
save_stdout = sys.stdout
sys.stdout = get_original_stdout()
if junit_xml_dir:
from junit_xml import Tee, write_doctest
save_stderr = sys.stderr
sys.stdout = stdout = Tee(sys.stdout)
sys.stderr = stderr = Tee(sys.stderr)
try:
start = time.time()
try:
f, t = doctest.testmod(module, verbose=verbosity)
except:
took = time.time() - start
if junit_xml_dir:
write_doctest(junit_xml_dir, module.__name__, took, 'error',
sys.exc_info(), stdout.getvalue(),
stderr.getvalue())
raise
took = time.time() - start
if f:
if junit_xml_dir:
write_doctest(junit_xml_dir, module.__name__, took, 'failure',
stdout=stdout.getvalue(),
stderr=stderr.getvalue())
raise TestFailed("%d of %d doctests failed" % (f, t))
finally:
sys.stdout = save_stdout
if junit_xml_dir:
write_doctest(junit_xml_dir, module.__name__, took,
stdout=stdout.getvalue(), stderr=stderr.getvalue())
if verbose:
print 'doctest (%s) ... %d tests with zero failures' % (module.__name__, t)
return f, t
#=======================================================================
# Threading support to prevent reporting refleaks when running regrtest.py -R
def threading_setup():
import threading
return len(threading._active), 0
def threading_cleanup(num_active, num_limbo):
import threading
import time
_MAX_COUNT = 10
count = 0
while len(threading._active) != num_active and count < _MAX_COUNT:
count += 1
time.sleep(0.1)
def reap_children():
"""Use this function at the end of test_main() whenever sub-processes
are started. This will help ensure that no extra children (zombies)
stick around to hog resources and create problems when looking
for refleaks.
"""
# Reap all our dead child processes so we don't leave zombies around.
# These hog resources and might be causing some of the buildbots to die.
import os
if hasattr(os, 'waitpid'):
any_process = -1
while True:
try:
# This will raise an exception on Windows. That's ok.
pid, status = os.waitpid(any_process, os.WNOHANG)
if pid == 0:
break
except:
break
| apache-2.0 | -4,226,911,653,028,837,000 | 33.710526 | 95 | 0.584837 | false |
markgw/jazzparser | src/jazzparser/formalisms/music_halfspan/songtools.py | 1 | 20939 | """Interactive shell tools for the Halfspan formalism.
These tools concern song recognition and allow utilities for recognising
songs to be called from the shell.
"""
"""
============================== License ========================================
Copyright (C) 2008, 2010-12 University of Edinburgh, Mark Granroth-Wilding
This file is part of The Jazz Parser.
The Jazz Parser is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
The Jazz Parser is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with The Jazz Parser. If not, see <http://www.gnu.org/licenses/>.
============================ End license ======================================
"""
__author__ = "Mark Granroth-Wilding <[email protected]>"
from jazzparser.shell.tools import Tool
from jazzparser.shell import ShellError
from jazzparser.utils.options import ModuleOption, options_help_text
from jazzparser.utils.strings import str_to_bool
class LoadCorpusTool(Tool):
"""
Tool to load a corpus of tonal space analyses of songs. These may then
be used for song recognition. This must be called before other song
recognition tools will work.
A corpus may be created from the chord corpus using the bin/data/parsegs.py
to parse the chord corpus and store the analyses in a file.
"""
name = "Load analysis set"
commands = ['loadsongs']
usage = ('loadsongs <name>', "load the named tonal space analysis corpus")
help = """\
Loads a tonal space analysis corpus by name. This corpus may then be used by
other tools which require a song corpus.
These corpora are built using the script bin/data/parsegs.py.
"""
def run(self, args, state):
from jazzparser.data.tonalspace import TonalSpaceAnalysisSet
if len(args) != 1:
raise ShellError, "Please give the name of a tonal space analysis "\
"set. Available sets are: %s" % \
", ".join(TonalSpaceAnalysisSet.list())
try:
# Try loading the named set
songset = TonalSpaceAnalysisSet.load(args[0])
except Exception, err:
raise ShellError, "Error loading tonal space analysis set: %s" % \
err
print "Loaded tonal space analysis set '%s'" % args[0]
# Store this in the state so other tools can use it
state.data['songset'] = songset
class ListSongsTool(Tool):
name = "List songs"
commands = ['songs']
usage = ('songs', "list songs in loaded songset")
help = """\
List all the song names in the loaded tonal space analysis songset.
"""
def run(self, args, state):
# Try getting song data
songset = state.get_data("songset",
help_msg="Use command 'loadsongs' to load a songset")
print "\n".join(["%d. %s" % (num,name) for (num,name) in \
enumerate(songset.songs)])
class PrintAnalysisTool(Tool):
name = "Print analysis"
commands = ['songanal']
usage = ('songanal <songnum>', "display the tonal space analysis for song "\
"number <songnum> in the loaded songset")
help = """\
Prints the tonal space path that is the analysis of a song from a loaded
songset.
"""
def run(self, args, state):
from jazzparser.formalisms.music_halfspan.semantics import semantics_to_coordinates
if len(args) == 0:
raise ShellError, "Give a song number"
# Get the song from the dataset
song = get_song(int(args[0]), state)
print "Analysis of '%s'" % song[0]
print "\nSemantics"
# Display the semantics
print song[1]
print "\nTonal space path"
# Also display the TS coordinates
print semantics_to_coordinates(song[1])
class ResultSongTSEditDistanceTool(Tool):
name = "Compare result"
commands = ['songcomparets', 'songcompts']
usage = ('songcomparets <result-num> <song-num>', "compare a parse result "\
"to a song in the database using the tonal space edit distance metric")
help = """\
Compares a parse result to a specific song in the database using the tonal
space edit distance metric and outputs the alignment distance.
See also:
songcomparedep: to compare a result to a song in terms of dependency
recovery.
"""
tool_options = Tool.tool_options + [
ModuleOption('local', filter=str_to_bool,
usage="local=B, where B is true or false",
default=False,
help_text="Use local alignment to score the similarity "\
"of the tonal space paths instead of global"),
ModuleOption('song', filter=str_to_bool,
usage="tosong=B, where B is true or false",
default=False,
help_text="Compare the numbered song in the corpus to the "\
"second song, instead of comparing the numbered result "\
"to the song"),
ModuleOption('alignment', filter=str_to_bool,
usage="alignment=B, where B is true or false",
default=False,
help_text="Output the full alignment, with the two step "\
"lists above one another"),
]
def run(self, args, state):
from jazzparser.formalisms.music_halfspan.evaluation import \
tonal_space_local_alignment, tonal_space_alignment, \
arrange_alignment
if len(args) < 2:
raise ShellError, "Give a result number and a song number"
resnum = int(args[0])
songnum = int(args[1])
song = get_song(songnum, state)
songsem = song[1]
if self.options['song']:
# Compare a song instead of a result
compsong = get_song(resnum, state)
resultsem = compsong[1]
print "Comparing '%s' to '%s'" % (compsong[0], song[0])
else:
# Normal behaviour: compare a result to a song
if resnum >= len(state.results):
raise ShellError, "No result number %d" % resnum
result = state.results[resnum]
resultsem = result.semantics
print "Comparing result %d to '%s'" % (resnum, song[0])
# Do the comparison
if self.options['local']:
ops, song_steps, result_steps, distance = \
tonal_space_local_alignment(songsem.lf, resultsem.lf)
else:
ops, song_steps, result_steps, distance = \
tonal_space_alignment(songsem.lf, resultsem.lf, distance=True)
print "Steps in '%s':" % song[0]
print song_steps
if self.options['song']:
print "Steps in '%s'" % compsong[0]
else:
print "Steps in result path:"
print result_steps
print "Alignment operations:"
print ops
if self.options['alignment']:
print "Full alignment:"
# Print the alignment in three rows
WRAP_TO = 70
wrapped_rows = []
current_row = []
current_width = 0
# Wrap the rows
for cells in arrange_alignment(song_steps, result_steps, ops):
if len(cells[0]) + current_width > WRAP_TO:
# Start a new row
wrapped_rows.append(current_row)
current_row = []
current_width = 0
current_row.append(cells)
current_width += len(cells[0])
# Add the incomplete last row
wrapped_rows.append(current_row)
for row in wrapped_rows:
lefts, rights, opses = zip(*row)
print " ".join(lefts)
print " ".join(rights)
print " ".join(opses)
print
print "Distance: %s" % distance
class ResultSongDependencyRecoveryTool(Tool):
name = "Compare result"
commands = ['songcomparedep', 'songdep']
usage = ('songcomparedep <result-num> <song-num>', "compare a parse result "\
"to a song in the database using the tonal space edit distance metric")
help = """\
Compares a parse result to a specific song in the database in terms of
dependency recovery and outputs the recall, precision and f-score.
See also:
songcomparets: to compare a result to a song in terms of tonal space path
edit distance.
"""
tool_options = Tool.tool_options + [
ModuleOption('song', filter=str_to_bool,
usage="tosong=B, where B is true or false",
default=False,
help_text="Compare the numbered song in the corpus to the "\
"second song, instead of comparing the numbered result "\
"to the song"),
]
def run(self, args, state):
from jazzparser.formalisms.music_halfspan.semantics.distance import \
MaximalDependencyAlignment
if len(args) < 2:
raise ShellError, "Give a result number and a song number"
resnum = int(args[0])
songnum = int(args[1])
song = get_song(songnum, state)
songsem = song[1]
if self.options['song']:
# Compare a song instead of a result
compsong = get_song(resnum, state)
resultsem = compsong[1]
print "Comparing '%s' to '%s'" % (compsong[0], song[0])
else:
# Normal behaviour: compare a result to a song
if resnum >= len(state.results):
raise ShellError, "No result number %d" % resnum
result = state.results[resnum]
resultsem = result.semantics
print "Comparing result %d to '%s'" % (resnum, song[0])
# Compare the two logical forms on the basis of overlapping dependencies
options = {
'output' : 'recall',
}
recall_metric = MaximalDependencyAlignment(options=options)
options = {
'output' : 'precision',
}
precision_metric = MaximalDependencyAlignment(options=options)
recall = recall_metric.distance(resultsem, songsem)
precision = precision_metric.distance(resultsem, songsem)
# Print out each comparison
print "Recall: %s" % recall
print "Precision: %s" % precision
print "F-score: %s" % (2.0*recall*precision / (recall+precision))
class RecogniseSongTool(Tool):
name = "Recognise song"
commands = ['findsong', 'song']
usage = ('findsong [<result-num>]', "find the closest matching song "\
"in the loaded songset")
help = """\
Compares a parse result (the top probability one by default) to all the songs
in the loaded songset and finds the closest matches by tonal space path
similarity. Outputs a list of the closest matches.
"""
tool_options = Tool.tool_options + [
ModuleOption('average', filter=int,
usage="average=N, where B is an integer",
help_text="Average the distance measure over that given "\
"by the top N results (starting at the result given "\
"in the first argument, if given)"),
ModuleOption('metric',
usage="metric=M, where M is the name of an available metric",
help_text="Select a metric to make the comparison with. "\
"Call with metric=help to get a list of metrics"),
ModuleOption('mopts',
usage="mopts=OPT=VAL:OPT=VAL:...",
help_text="Options to pass to the metric. Use mopts=help "\
"to see a list of options"),
]
def run(self, args, state):
from jazzparser.formalisms.music_halfspan.evaluation import \
tonal_space_local_alignment, tonal_space_distance
from jazzparser.formalisms.music_halfspan import Formalism
metric_name = self.options['metric']
if metric_name == "help":
# Print a list of available metrics
print ", ".join([metric.name for metric in Formalism.semantics_distance_metrics])
return
if len(args) == 0:
resnum = 0
else:
resnum = int(args[0])
if self.options['average'] and self.options['average'] > 1:
# Average the distance over several results
resnums = range(resnum, resnum+self.options['average'])
else:
# Just a single result
resnums = [resnum]
resultsems = []
for resnum in resnums:
# Get the result semantics that we're going to try to match
if resnum >= len(state.results):
raise ShellError, "No result number %d" % resnum
result = state.results[resnum]
resultsems.append(result.semantics)
# Get the loaded songset containing the song corpus
songset = state.get_data("songset",
help_msg="Use command 'loadsongs' to load a songset")
# Load the appropriate metric
if metric_name is None:
# Use the first in the list as default
metric_cls = Formalism.semantics_distance_metrics[0]
else:
for m in Formalism.semantics_distance_metrics:
if m.name == metric_name:
metric_cls = m
break
else:
# No metric found matching this name
print "No metric '%s'" % metric_name
sys.exit(1)
print "Using distance metric: %s\n" % metric_cls.name
# Now process the metric options
moptstr = self.options['mopts']
if moptstr is not None:
if moptstr == "help":
# Output this metric's option help
print options_help_text(metric_cls.OPTIONS,
intro="Available options for metric '%s'" % metric_cls.name)
return
else:
moptstr = ""
mopts = ModuleOption.process_option_string(moptstr)
# Instantiate the metric with these options
metric = metric_cls(options=mopts)
song_distances = {}
# Try matching against each song
for resultsem in resultsems:
for name,song in songset.analyses:
distance = metric.distance(resultsem, song)
song_distances.setdefault(name, []).append(distance)
# Average the scores
distances = []
for name,costs in song_distances.items():
ave_cost = sum(costs)/float(len(costs))
distances.append((ave_cost,name))
# Sort so the closest ones come first
distances.sort(key=lambda x:x[0])
# Output all the songs, ordered by similarity, with their distance
for i,(distance,name) in enumerate(distances):
print "%d> %s (%s)" % (i, name, distance)
class SongSelfSimilarityTool(Tool):
"""
For fooling around with comparing songs to themselves to see what happens.
"""
name = "Self similarity"
commands = ['selfsim']
usage = ('selfsim <song-num>', "")
help = ""
tool_options = Tool.tool_options + [
ModuleOption('local', filter=str_to_bool,
usage="local=B, where B is true or false",
default=False,
help_text="Sort results by local alignment score, not "\
"global"),
]
def run(self, args, state):
from jazzparser.formalisms.music_halfspan.evaluation import \
tonal_space_local_alignment, tonal_space_distance
songnum = int(args[0])
name,song = get_song(songnum, state)
songset = state.get_data("songset")
distances = []
# Try comparing this song to each song in the set
for other_name,other_song in songset.analyses:
# Align locally and globally
ops,steps1,steps2,local_distance = \
tonal_space_local_alignment(other_song.lf, song.lf)
global_distance = \
tonal_space_distance(other_song.lf, song.lf)
distances.append((other_name, local_distance, global_distance))
# Sort the results
if self.options['local']:
distances.sort(key=lambda x:x[1])
else:
distances.sort(key=lambda x:x[2])
# Print out each one
print "Aligned %s with:" % name
for other_name, local_distance, global_distance in distances:
print "%s: local: %s, global: %s" % \
(other_name,local_distance,global_distance)
class SongTreeTool(Tool):
"""
Converts a song's semantics to a tree. Mainly just for debugging.
"""
name = "Song tree"
commands = ['tree']
usage = ('tree <song-num>', "converts the semantics of the song to a tree "\
"representation")
tool_options = Tool.tool_options + [
ModuleOption('res', filter=str_to_bool,
usage="res=B, where B is true or false",
default=False,
help_text="Show a result, instead of a corpus song"),
]
help = """\
Converts the semantics of the numbered song to its tree representation that
will be used for comparison to other logical forms. This is mainly for
debugging and has no use in itself.
"""
def run(self, args, state):
from jazzparser.formalisms.music_halfspan.harmstruct import \
semantics_to_dependency_trees
if self.options['res']:
resnum = int(args[0])
res = state.results[resnum]
song = res.semantics
print "Dependency tree for result %d\n" % resnum
else:
songnum = int(args[0])
name,song = get_song(songnum, state)
print "Dependency tree for '%s'\n" % name
print "Semantics:"
print song
print "\nTrees:"
for t in semantics_to_dependency_trees(song):
print t
class SongDependencyGraphTool(Tool):
"""
    Converts a song's semantics to a dependency graph. Mainly just for debugging.
"""
name = "Song dependency graph"
commands = ['depgraph', 'dep']
usage = ('depgraph <song-num>', "converts the semantics of the song to a "\
"dependency graph representation")
tool_options = Tool.tool_options + [
ModuleOption('res', filter=str_to_bool,
usage="res=B, where B is true or false",
default=False,
help_text="Show a result, instead of a corpus song"),
]
help = """\
Converts the semantics of the numbered song to its dependency graph
representation that will be used for comparison to other logical forms. This is
mainly for debugging and has no use in itself.
"""
def run(self, args, state):
from jazzparser.formalisms.music_halfspan.harmstruct import \
semantics_to_dependency_graph
if self.options['res']:
resnum = int(args[0])
res = state.results[resnum]
song = res.semantics
print "Dependency graph for result %d\n" % resnum
else:
songnum = int(args[0])
name,song = get_song(songnum, state)
print "Dependency graph for '%s'\n" % name
print "Semantics:"
print song
print
graph, times = semantics_to_dependency_graph(song)
print graph
def get_song(num, state):
"""
    Retrieve a song from the loaded songset by number. Utility function used
by tools above.
"""
songset = state.get_data("songset",
help_msg="Use command 'loadsongs' to load a songset")
if num >= len(songset):
raise ShellError, "There is no song %d. Use the 'songs' command to "\
"see a list of songs" % num
else:
return songset.analyses[num]
| gpl-3.0 | -6,989,043,982,802,696,000 | 38.433145 | 93 | 0.570562 | false |
tobiz/OGN-Flight-Logger_V2 | settings.py | 1 | 9941 |
#-------------------------------------
# OGN-Flight-Logger Settings
#-------------------------------------
# Python APRS/OGN program to log flight times, durations, maximum heights achieved and tracks
#
# This Python program creates an SQLite db of flights from a given location and aircraft list
# (the latter two parameters are to be developed into a more generalised format).
#
# At the moment this is very much 'in development'.
#
# To install OGN Flight Logger the following prerequisites are required
# - python-tz
# - sqlite3
# - libfap
#
# If installing on an arm based system this can be achieved by:
#
# sudo apt-get install python-tz sqlite3
# wget http://www.pakettiradio.net/downloads/libfap/1.5/libfap6_1.5_armhf.deb
# sudo dpkg -i libfap*.deb
#
#-------------------------------------
# Setting values
#
# The values APRS_SERVER_HOST and APRS_SERVER_PORT are FIXED
# All other values should be set for a specific location and USER/PASSCODE
# Failure to change USER/PASSCODE results in an error
#-------------------------------------
#
# APRS_SERVER_HOST = 'rotate.aprs2.net'
# APRS_SERVER_PORT = 14580
APRS_SERVER_HOST = 'aprs.glidernet.org'
APRS_SERVER_PORT = 14580
#
# Please get your own Username and Passcode from http://www.george-smart.co.uk/wiki/APRS_Callpass
# DO NOT USE THE VALUES IN THIS FILE AS IT WILL STOP A PREVIOUS INVOCATION WORKING CORRECTLY
#
APRS_USER = 'PythonEx' # Username
APRS_PASSCODE = 1234 # Passcode. See http://www.george-smart.co.uk/wiki/APRS_Callpass
#
# Check that APRS_USER and APRS_PASSCODE are set
#
assert len(APRS_USER) > 3 and len(str(APRS_PASSCODE)) > 0, 'Please set APRS_USER and APRS_PASSCODE in settings.py.'
#
# User defined configuration values
#
#
# This value for base Directory for relative files, ie:
# - flogger_schema-1.0.4.sql
# - logs
# - tracks
import sys, os
file = sys.argv[0]
pathname = os.path.dirname(file)
#FLOGGER_BS = "/home/pjr/git_neon/OGN-Flight-Logger_V2/"
FLOGGER_BS = pathname + "/"
#FLOGGER_BS = "/home/pi/workspace/OGN-Flight-Logger_V2.1/"
FLOGGER_MODE = "test" # Test or live mode
FLOGGER_DB_SCHEMA = FLOGGER_BS + "flogger_schema-1.0.4.sql" # File holding SQLite3 database schema
#FLOGGER_QNH = 340 # QNH ie ASL in metres for airfield at lat/logitude, if set to 0, elevation is automatically looked up. This is Sutton Bank
FLOGGER_QNH = 0 # QNH ie ASL in metres for airfield at lat/logitude, if set to 0, elevation is automatically looked up. This is Sutton Bank
FLOGGER_LATITUDE, FLOGGER_LONGITUDE = '+54.228833', '-1.209639' # Latitude, longitude of named OGN receiver airfield
#FLOGGER_AIRFIELD_DETAILS = "" # Location details for use by geocoder. If blank, "" use LAT, LONG etc
FLOGGER_AIRFIELD_DETAILS = "Yorkshire Gliding Club UK" # Location details for use by geocoder. If blank, "" use LAT, LONG etc
FLOGGER_MIN_FLIGHT_TIME = "0:4:0" # Minimum time for duration to be considered a flight, hh:mm:ss
FLOGGER_KEEPALIVE_TIME = 900 # Interval in seconds for sending tcp/ip keep alive on socket connection
FLOGGER_DB_NAME = "flogger.sql3.2" # Name of file for flogger SQLite3 database
FLOGGER_FLARMNET_DB_URL = "http://www.flarmnet.org/files/data.fln" # URL of Flarmnet database
#FLOGGER_OGN_DB_URL = "http://ddb.glidernet.org/download" # URL of OGN Flarm database or blank for don't use
FLOGGER_OGN_DB_URL = "http://ddb.glidernet.org/download/?t=1" # URL of OGN Flarm database or blank for don't use
#FLOGGER_OGN_DB_URL = "" # URL of OGN Flarm to registration mapping database
#FLOGGER_AIRFIELD_NAME = "SuttonBnk" # Name of Flarm base station for airfield. NOTE MUST BE PROVIDED
FLOGGER_AIRFIELD_NAME = "SUTTON BANK" # Name of Flarm base station for airfield. NOTE MUST BE PROVIDED AS in flarmdb record
# If blank, "" then all aircraft in db are included in logs & tracks
#FLOGGER_FLEET_CHECK = "Y" # Checks Flarm ID is for aircraft fleet of FLOGGER_AIRFIELD_NAME if "Y"
FLOGGER_FLEET_CHECK = "N" # Checks Flarm ID is for aircraft fleet of FLOGGER_AIRFIELD_NAME if "Y"
FLOGGER_QFE_MIN = 100 # Minimum altitude in metres attained for inclusion as a flight, ie ~300 ft
FLOGGER_LOG_PATH = FLOGGER_BS + "logs" # Path where log files are stored
FLOGGER_TRACKS = "Y" # If Y flight tracks are recorded. Default is N, ie No tracks logged
FLOGGER_TRACKS_FOLDER = FLOGGER_BS + "tracks" # Folder for .gpx files for flight tracks
FLOGGER_V_SMALL = 10.0 # Lowest moving speed to be considered as zero kph
FLOGGER_NAME = "OGN_Flogger" # Name to be displayed on APRS
FLOGGER_VER = "0.2.3" # Flogger version number
FLOGGER_RAD = "50" # APRS radius in km from base station in AIRFIELD_DETAILS
FLOGGER_FLIGHTS_LOG = FLOGGER_BS + "" # Folder for csv file of daily flights record
FLOGGER_DATA_RETENTION = 3 # Number of days to keep .csv files, ie delete, if "0" keep all files
FLOGGER_LOG_TUGS = "Y" # Don't log tug flights if "N"
FLOGGER_TRACKS_IGC = "N" # Dump flight tracks in IGC format if "Y" else no
FLOGGER_LOG_TIME_DELTA = -1 # Number of hours before sunset to start processing flight log
FLOGGER_SMTP_SERVER_URL = '' # URL of smtp server for sending email
FLOGGER_SMTP_SERVER_PORT = 25 # smtp server port number, normally 25
FLOGGER_SMTP_TX = "" # Flight log sender email addrs
FLOGGER_SMTP_RX = "" # Flight log receiver email addrs
FLOGGER_AIRFIELD_LIMIT = 2000 # Distance from airfield centre considered a 'Land Out' in metres
FLOGGER_LANDOUT_MODE = "email" # Send land out msg by "email", "SMS", or "" don't send
FLOGGER_TAKEOFF_EMAIL = "Y" # Send email for each take off if Yes else no
FLOGGER_LANDING_EMAIL = "Y" # Send email for each landing if Yes else no
FLOGGER_LOG_LAUNCH_FAILURES = "N" # Log launch failures, ie below min time & min height
FLOGGER_LOCATION_HORIZON = '-0:34' # Adjustments for angle to horizon for sunset
FLOGGER_V_TAKEOFF_MIN = 10                              # Min ground speed considered as taken off. ogn-live is (55Km/h)
FLOGGER_V_LANDING_MIN = 10 # Min ground speed considered as landed. ogn-live is (40Km/h)
FLOGGER_DT_TUG_LAUNCH = 20 # Delta t(sec) between glider and tug takeoff times to be tug launched
FLOGGER_DUPLICATE_FLIGHT_DELTA_T = "0:1:00" # Delta between two landing & takeoff times of same aircraft to be different flights
FLOGGER_DUPLICATE_FLIGHT_DELTA = 90 # Delta time (secs) for duplicate flights
#
# The following fields are used to determine if data from APRS is a position packet from any 1 of up to 4 OGN receivers base stations.
# The OGN receiver areas can overlap and if more then 1 is supplied it will increase the accuracy of both the data and track results
# The list of OGN receivers can be found at http://wiki.glidernet.org/list-of-receivers. The field values are strings for any
# APRS AIRFIELDS code value. One or more must be specified.
# If a value is not needed use a null string, ie "". Coordinates for the primary OGN receiver station are either supplied
# by FLOGGER_LATITUDE, FLOGGER_LONGITUDE values or if these are not supplied then those returned by a geolocator
# service using FLOGGER_AIRFIELD_DETAILS. The primary OGN receiver base station coordinates together with the value
# of FLOGGER_RAD are used to filter the data received from APRS.
#
#FLOGGER_APRS_BASE_1 = "SuttonBnk"
#FLOGGER_APRS_BASE_2 = "UKPOC"
#FLOGGER_APRS_BASE_3 = "UKRUF"
#FLOGGER_APRS_BASE_4 = "Linton"
FLOGGER_APRS_BASES = ["SuttonBnk", "UKPOC", "UKRUF", "Linton", "Riponhill"]
# Coded 001-099: Gliders,
# 101-199: Tugs,
# 201-299: Motor Gliders,
# 301-399: Other
# Note. No reason for coding these values other than, 'why not!'
FLOGGER_FLEET_LIST = {"G-CHEF":1, "G-CHVR":2, "G-CKFN":3, "G-CKJH":4,
"G-CKLW":5, "G-CJVZ":6, "G-DDKC":7, "G-DDPO":8,
"G-BETM":101, "G-CIOF":102, "G-MOYR":103, "G-BJIV": 104,
"G-OSUT":201,
}
#
# Aircraft types in OGN Database, see https://github.com/glidernet/ogn-ddb/blob/master/index.php#L87
#
FLOGGER_AIRCRAFT_CAT = [
    'None',                 # 0 = Blank
'Gliders/motoGliders', # 1
'Planes', # 2
'Ultralights', # 3
    'Helicopters',          # 4
'Drones/UAV', # 5
'Others', # 6
]
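# e.g. FLOGGER_AIRCRAFT_CAT[1] -> 'Gliders/motoGliders'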
| gpl-3.0 | 1,513,637,528,252,753,400 | 62.318471 | 192 | 0.574489 | false |
StephanII/accelerator-toolkit | magnets.py | 1 | 3901 | from base import Device
import math
class SectorBendingMagnet(Device):
def __init__(self, nomenclature="", width=0., height=0., length=0., angle=0.):
Device.__init__(self, nomenclature, width, height, length)
self.angle = angle
def __repr__(self):
r = str(self) + "("
r += "width=" + str(self.width) + "m, "
r += "height=" + str(self.height) + "m, "
r += "length=" + str(self.length) + "m, "
r += "angle=" + str(self.angle) + "rad)"
return r
def transport(self, ion):
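        # Horizontal-plane sector-dipole transfer map, with bending radius
        # rho = length/angle and theta = angle:
        #   x_out  =  cos(theta)*x    + rho*sin(theta)*dx + rho*(1-cos(theta))*dp
        #   dx_out = -sin(theta)/rho*x + cos(theta)*dx    + sin(theta)*dp
        # The vertical plane is treated as a field-free drift of the same
        # length; dl accumulates the path-length difference.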
if self.angle == 0:
ion.x += self.length * ion.dx
ion.y += self.length * ion.dy
else:
radius = self.length / self.angle
cos_angle = math.cos(self.angle)
sin_angle = math.sin(self.angle)
x = cos_angle * ion.x
x += radius * sin_angle * ion.dx
x += radius * (1. - cos_angle) * ion.dp
dx = -(1. / radius) * sin_angle * ion.x
dx += cos_angle * ion.dx + sin_angle * ion.dp
y = ion.y + self.length * ion.dy
dl = -sin_angle * ion.x
dl -= radius * (1. - cos_angle) * ion.dx
dl -= radius * (self.length - radius * sin_angle) * ion.dp
ion.x = x
ion.dx = dx
ion.y = y
ion.dl = dl
self.forward_if_not_lost(ion)
class RectangleBendingMagnet(Device):
pass
class HorizontalKickerMagnet(Device):
def __init__(self, nomenclature="", width=0., height=0., angle=0.):
Device.__init__(self, nomenclature, width, height)
self.angle = angle
def __repr__(self):
r = str(self) + "("
r += "width=" + str(self.width) + "m, "
r += "height=" + str(self.height) + "m, "
r += "length=" + str(self.length) + "m, "
r += "angle=" + str(self.angle) + "rad)"
return r
def transport(self, ion):
ion.dx += self.angle
self.forward_if_not_lost(ion)
# -----------------------------------------------------------------------------------------
#
#
class QuadrupoleMagnet(Device):
def __init__(self, nomenclature="", width=0., height=0., length=0., strength=0.):
Device.__init__(self, nomenclature, width, height, length)
self.strength = strength
def __repr__(self):
r = str(self) + "("
r += "width=" + str(self.width) + "m, "
r += "height=" + str(self.height) + "m, "
r += "length=" + str(self.length) + "m, "
r += "strength=" + str(self.strength) + "rad)"
return r
def transport(self, ion):
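        # Thick-quadrupole transfer maps with omega = length * sqrt(|k|):
        # the focusing plane uses cos/sin, the defocusing plane cosh/sinh.
        # The sign of `strength` selects which transverse plane focuses;
        # strength == 0 degenerates to a plain drift of the same length.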
sqrts = math.sqrt(abs(self.strength))
omega = self.length * sqrts
cosomega = math.cos(omega)
coshomega = math.cosh(omega)
sinomega = math.sin(omega)
sinhomega = math.sinh(omega)
if self.strength < 0:
x = cosomega * ion.x + (sinomega / sqrts) * ion.dx
dx = -sinomega * sqrts * ion.x + cosomega * ion.dx
y = coshomega * ion.y + (sinhomega / sqrts) * ion.dy
dy = sinhomega * sqrts * ion.y + coshomega * ion.dy
ion.x = x
ion.dx = dx
ion.y = y
ion.dy = dy
elif self.strength > 0:
x = coshomega * ion.x + (sinhomega / sqrts) * ion.dx
dx = sinhomega * sqrts * ion.x + coshomega * ion.dx
y = cosomega * ion.y + (sinomega / sqrts) * ion.dy
dy = -sinomega * sqrts * ion.y + cosomega * ion.dy
ion.x = x
ion.dx = dx
ion.y = y
ion.dy = dy
else:
ion.x += self.length * ion.dx
ion.y += self.length * ion.dy
self.forward_if_not_lost(ion)
# -----------------------------------------------------------------------------------------
#
class SixtupoleMagnet(Device):
pass
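

# Hedged usage sketch (added for illustration; not part of the original
# module). It only exercises the constructors and __repr__ defined above --
# the Ion type and Device.forward_if_not_lost() live in `base` and are not
# shown here, so transport() is deliberately not called.
if __name__ == "__main__":
    bend = SectorBendingMagnet("BEND1", width=0.1, height=0.05,
                               length=2.0, angle=math.pi / 16.)
    quad = QuadrupoleMagnet("QUAD1", width=0.1, height=0.05,
                            length=0.5, strength=1.2)
    print(repr(bend))
    print(repr(quad))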
| mit | -7,058,308,463,578,992,000 | 28.55303 | 91 | 0.471161 | false |
anortef/calico | calico/felix/test/test_frules.py | 1 | 11864 | # -*- coding: utf-8 -*-
# Copyright 2015 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
felix.test.test_frules
~~~~~~~~~~~~~~~~~~~~~~~~~
Tests of iptables rules generation function.
"""
import logging
from mock import Mock, patch, call, ANY
from calico.felix import frules
from calico.felix.config import Config
from calico.felix.fiptables import IptablesUpdater
from calico.felix.frules import (
profile_to_chain_name, rules_to_chain_rewrite_lines, UnsupportedICMPType,
_rule_to_iptables_fragment
)
from calico.felix.test.base import BaseTestCase
_log = logging.getLogger(__name__)
DEFAULT_MARK = ('--append chain-foo --match comment '
'--comment "Mark as not matched" --jump MARK --set-mark 1')
RULES_TESTS = [
([{"src_net": "10.0.0.0/8"},], 4,
["--append chain-foo --source 10.0.0.0/8 --jump RETURN",
DEFAULT_MARK]),
([{"protocol": "icmp",
"src_net": "10.0.0.0/8",
"icmp_type": 7,
"icmp_code": 123},], 4,
["--append chain-foo --protocol icmp --source 10.0.0.0/8 "
"--match icmp --icmp-type 7/123 "
"--jump RETURN",
DEFAULT_MARK]),
([{"protocol": "icmp",
"src_net": "10.0.0.0/8",
"icmp_type": 7},], 4,
["--append chain-foo --protocol icmp --source 10.0.0.0/8 "
"--match icmp --icmp-type 7 "
"--jump RETURN",
DEFAULT_MARK]),
([{"protocol": "icmpv6",
"src_net": "1234::beef",
"icmp_type": 7},], 6,
["--append chain-foo --protocol icmpv6 --source 1234::beef "
"--match icmp6 --icmpv6-type 7 "
"--jump RETURN",
DEFAULT_MARK]),
([{"protocol": "tcp",
"src_tag": "tag-foo",
"src_ports": ["0:12", 13]}], 4,
["--append chain-foo --protocol tcp "
"--match set --match-set ipset-foo src "
"--match multiport --source-ports 0:12,13 --jump RETURN",
DEFAULT_MARK]),
([{"protocol": "tcp",
"src_ports": [0, "2:3", 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17]}], 4,
["--append chain-foo --protocol tcp "
"--match multiport --source-ports 0,2:3,4,5,6,7,8,9,10,11,12,13,14,15 "
"--jump RETURN",
"--append chain-foo --protocol tcp "
"--match multiport --source-ports 16,17 "
"--jump RETURN",
DEFAULT_MARK]),
]
IP_SET_MAPPING = {
"tag-foo": "ipset-foo",
"tag-bar": "ipset-bar",
}
class TestRules(BaseTestCase):
def test_profile_to_chain_name(self):
self.assertEqual(profile_to_chain_name("inbound", "prof1"),
"felix-p-prof1-i")
self.assertEqual(profile_to_chain_name("outbound", "prof1"),
"felix-p-prof1-o")
def test_split_port_lists(self):
self.assertEqual(
frules._split_port_lists([1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15]),
[['1', '2', '3', '4', '5', '6', '7', '8', '9',
'10', '11', '12', '13', '14', '15']]
)
self.assertEqual(
frules._split_port_lists([1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16]),
[['1', '2', '3', '4', '5', '6', '7', '8', '9',
'10', '11', '12', '13', '14', '15'],
['16']]
)
self.assertEqual(
frules._split_port_lists([1, "2:3", 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17]),
[['1', '2:3', '4', '5', '6', '7', '8', '9',
'10', '11', '12', '13', '14', '15'],
['16', '17']]
)
def test_rules_generation(self):
for rules, ip_version, expected_output in RULES_TESTS:
fragments = rules_to_chain_rewrite_lines(
"chain-foo",
rules,
ip_version,
IP_SET_MAPPING,
on_allow="RETURN",
)
self.assertEqual(fragments, expected_output)
def test_bad_icmp_type(self):
with self.assertRaises(UnsupportedICMPType):
_rule_to_iptables_fragment("foo", {"icmp_type": 255}, 4, {})
def test_bad_protocol_with_ports(self):
with self.assertRaises(AssertionError):
_rule_to_iptables_fragment("foo", {"protocol": "10",
"src_ports": [1]}, 4, {})
def test_build_input_chain(self):
chain, deps = frules._build_input_chain("tap+",
"123.0.0.1",
1234,
546, 547,
False,
"DROP")
self.assertEqual(chain, [
'--append felix-INPUT ! --in-interface tap+ --jump RETURN',
'--append felix-INPUT --match conntrack --ctstate INVALID --jump DROP',
'--append felix-INPUT --match conntrack --ctstate RELATED,ESTABLISHED --jump ACCEPT',
'--append felix-INPUT --protocol tcp --destination 123.0.0.1 --dport 1234 --jump ACCEPT',
'--append felix-INPUT --protocol udp --sport 546 --dport 547 --jump ACCEPT',
'--append felix-INPUT --protocol udp --dport 53 --jump ACCEPT',
'--append felix-INPUT --jump DROP',
])
self.assertEqual(deps, set())
def test_build_input_chain_ipip(self):
chain, deps = frules._build_input_chain("tap+",
"123.0.0.1",
1234,
546, 547,
False,
"DROP",
"felix-hosts")
self.assertEqual(chain, [
'--append felix-INPUT --protocol ipencap --match set ! --match-set felix-hosts src --jump DROP',
'--append felix-INPUT ! --in-interface tap+ --jump RETURN',
'--append felix-INPUT --match conntrack --ctstate INVALID --jump DROP',
'--append felix-INPUT --match conntrack --ctstate RELATED,ESTABLISHED --jump ACCEPT',
'--append felix-INPUT --protocol tcp --destination 123.0.0.1 --dport 1234 --jump ACCEPT',
'--append felix-INPUT --protocol udp --sport 546 --dport 547 --jump ACCEPT',
'--append felix-INPUT --protocol udp --dport 53 --jump ACCEPT',
'--append felix-INPUT --jump DROP',
])
self.assertEqual(deps, set())
def test_build_input_chain_return(self):
chain, deps = frules._build_input_chain("tap+",
None,
None,
546, 547,
True,
"RETURN")
self.assertEqual(chain, [
'--append felix-INPUT ! --in-interface tap+ --jump RETURN',
'--append felix-INPUT --match conntrack --ctstate INVALID --jump DROP',
'--append felix-INPUT --match conntrack --ctstate RELATED,ESTABLISHED --jump ACCEPT',
'--append felix-INPUT --jump ACCEPT --protocol ipv6-icmp --icmpv6-type 130',
'--append felix-INPUT --jump ACCEPT --protocol ipv6-icmp --icmpv6-type 131',
'--append felix-INPUT --jump ACCEPT --protocol ipv6-icmp --icmpv6-type 132',
'--append felix-INPUT --jump ACCEPT --protocol ipv6-icmp --icmpv6-type 133',
'--append felix-INPUT --jump ACCEPT --protocol ipv6-icmp --icmpv6-type 135',
'--append felix-INPUT --jump ACCEPT --protocol ipv6-icmp --icmpv6-type 136',
'--append felix-INPUT --protocol udp --sport 546 --dport 547 --jump ACCEPT',
'--append felix-INPUT --protocol udp --dport 53 --jump ACCEPT',
'--append felix-INPUT --jump felix-FROM-ENDPOINT',
])
self.assertEqual(deps, set(["felix-FROM-ENDPOINT"]))
@patch("calico.felix.futils.check_call", autospec=True)
@patch("calico.felix.frules.devices", autospec=True)
@patch("calico.felix.frules.HOSTS_IPSET_V4", autospec=True)
def test_install_global_rules(self, m_ipset, m_devices, m_check_call):
m_devices.interface_exists.return_value = False
m_devices.interface_up.return_value = False
m_config = Mock(spec=Config)
m_config.IP_IN_IP_ENABLED = True
m_config.METADATA_IP = "123.0.0.1"
m_config.METADATA_PORT = 1234
m_config.DEFAULT_INPUT_CHAIN_ACTION = "RETURN"
m_config.IFACE_PREFIX = "tap"
m_v4_upd = Mock(spec=IptablesUpdater)
m_v6_upd = Mock(spec=IptablesUpdater)
m_v4_nat_upd = Mock(spec=IptablesUpdater)
frules.install_global_rules(m_config, m_v4_upd, m_v6_upd, m_v4_nat_upd)
m_ipset.ensure_exists.assert_called_once_with()
self.assertEqual(
m_check_call.mock_calls,
[
call(["ip", "tunnel", "add", "tunl0", "mode", "ipip"]),
call(["ip", "link", "set", "tunl0", "up"]),
]
)
expected_chains = {
'felix-INPUT': [
'--append felix-INPUT ! --in-interface tap+ --jump RETURN',
'--append felix-INPUT --match conntrack --ctstate INVALID --jump DROP',
'--append felix-INPUT --match conntrack --ctstate RELATED,ESTABLISHED --jump ACCEPT',
'--append felix-INPUT --protocol tcp --destination 123.0.0.1 --dport 1234 --jump ACCEPT',
'--append felix-INPUT --protocol udp --sport 68 --dport 67 --jump ACCEPT',
'--append felix-INPUT --protocol udp --dport 53 --jump ACCEPT',
'--append felix-INPUT --jump felix-FROM-ENDPOINT'
],
'felix-FORWARD': [
'--append felix-FORWARD --in-interface tap+ --match conntrack --ctstate INVALID --jump DROP',
'--append felix-FORWARD --out-interface tap+ --match conntrack --ctstate INVALID --jump DROP',
'--append felix-FORWARD --in-interface tap+ --match conntrack --ctstate RELATED,ESTABLISHED --jump RETURN',
'--append felix-FORWARD --out-interface tap+ --match conntrack --ctstate RELATED,ESTABLISHED --jump RETURN',
'--append felix-FORWARD --jump felix-FROM-ENDPOINT --in-interface tap+',
'--append felix-FORWARD --jump felix-TO-ENDPOINT --out-interface tap+',
'--append felix-FORWARD --jump ACCEPT --in-interface tap+',
'--append felix-FORWARD --jump ACCEPT --out-interface tap+'
]
}
m_v4_upd.rewrite_chains.assert_called_once_with(
expected_chains,
{
'felix-INPUT': set(['felix-FROM-ENDPOINT']),
'felix-FORWARD': set([
'felix-FROM-ENDPOINT',
'felix-TO-ENDPOINT'
])
},
async=False
)
self.assertEqual(
m_v4_upd.ensure_rule_inserted.mock_calls,
[
call("INPUT --jump felix-INPUT", async=False),
call("FORWARD --jump felix-FORWARD", async=False),
]
) | apache-2.0 | 8,944,739,970,673,061,000 | 42.944444 | 124 | 0.517701 | false |
dstufft/converge | converge/worker.py | 1 | 2830 | # Copyright 2014 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import time
from converge import marconi as queue
from converge import tasks
def main(args):
# Get our configuration
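    # (matching is done on k.upper(), so any capitalisation of the
    # "CONVERGE_" prefix is accepted; k[9:] strips those 9 prefix characters)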
config = {
k[9:]: v
for k, v in os.environ.items()
if k.upper().startswith("CONVERGE_")
}
task = None
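    # Expected task message shape, inferred from the handling below (the
    # authoritative schema lives with whatever enqueues the work):
    #   {"body": {"event": "revision.process", "revision": <revision>}, ...}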
try:
# Do Our Busy Loop
while True:
# grab a Task from the queue
task = queue.claim(
config["RACKSPACE_USER"],
config["RACKSPACE_APIKEY"],
config["QUEUE"],
region=config["RACKSPACE_REGION"],
)
if task is not None:
# Do Our Task
if task["body"]["event"] == "revision.process":
tasks.process_revision(config, task["body"]["revision"])
else:
raise ValueError(
"Unknown event '{}'".format(task["body"]["event"])
)
# Delete the task now that it's been processed
queue.delete(
config["RACKSPACE_USER"],
config["RACKSPACE_APIKEY"],
config["QUEUE"],
task,
region=config["RACKSPACE_REGION"],
)
task = None
else:
# If there were no tasks, wait for 5 seconds and try again
time.sleep(5)
except KeyboardInterrupt:
print("Exiting converge.worker...")
# Release any claims we have as we are shutting down
if task is not None:
queue.unclaim(
config["RACKSPACE_USER"],
config["RACKSPACE_APIKEY"],
config["QUEUE"],
task,
region=config["RACKSPACE_REGION"],
)
return
except:
# Release any claims we have as we hit an error
if task is not None:
queue.unclaim(
config["RACKSPACE_USER"],
config["RACKSPACE_APIKEY"],
config["QUEUE"],
task,
region=config["RACKSPACE_REGION"],
)
raise
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
| apache-2.0 | 7,776,162,057,832,432,000 | 29.106383 | 76 | 0.525088 | false |
seanpue/al340 | lessons/textanalysis/Untitled0.py | 1 | 1100 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
import nltk
%matplotlib inline
# <codecell>
import os
from nltk.corpus.reader.plaintext import PlaintextCorpusReader
corpusdir = 'data/texts/' # Directory of corpus.
corpus0 = PlaintextCorpusReader(corpusdir, '.*')
corpus = nltk.Text(corpus0.words())
# <codecell>
corpus.concordance('girls')
# <codecell>
corpus.concordance("'", lines=all)
# <codecell>
len(set(corpus))
# <codecell>
len(corpus)
# <codecell>
corpus.common_contexts(['general'])
# <codecell>
from nltk.corpus import stopwords
stopwords = stopwords.words('english')
# <codecell>
corpus.dispersion_plot(["women","girls","fire"])
# <codecell>
import mpld3
# <codecell>
mpld3.enable_notebook()
# <codecell>
corpus.dispersion_plot(["women","girls","fire"], )
# <codecell>
len(corpus)
# <codecell>
len(set(corpus)) / float(len(corpus))
# <codecell>
corpus[0:100]
# <codecell>
fdist1 = nltk.FreqDist(corpus)
# <codecell>
fdist1.most_common(50)
# <codecell>
fdist1.plot(50, cumulative=True)
# <codecell>
[w.upper() for w in corpus]
| mit | -1,315,354,380,263,225,000 | 11.454545 | 62 | 0.686131 | false |
karrtikr/ete | ete3/tools/phylobuild_lib/interface.py | 1 | 16365 | # #START_LICENSE###########################################################
#
#
# This file is part of the Environment for Tree Exploration program
# (ETE). http://etetoolkit.org
#
# ETE is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ETE is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ETE. If not, see <http://www.gnu.org/licenses/>.
#
#
# ABOUT THE ETE PACKAGE
# =====================
#
# ETE is distributed under the GPL copyleft license (2008-2015).
#
# If you make use of ETE in published work, please cite:
#
# Jaime Huerta-Cepas, Joaquin Dopazo and Toni Gabaldon.
# ETE: a python Environment for Tree Exploration. Jaime BMC
# Bioinformatics 2010,:24doi:10.1186/1471-2105-11-24
#
# Note that extra references to the specific methods implemented in
# the toolkit may be available in the documentation.
#
# More info at http://etetoolkit.org. Contact: [email protected]
#
#
# #END_LICENSE#############################################################
from __future__ import absolute_import
from __future__ import print_function
import sys
import os
import re
import time
from signal import signal, SIGWINCH, SIGKILL, SIGTERM
from collections import deque
from textwrap import TextWrapper
import six.moves.queue
import threading
from .logger import get_main_log
from .utils import GLOBALS, clear_tempdir, terminate_job_launcher, pjoin, pexist
from .errors import *
import six
from six import StringIO
MAIN_LOG = False
# try:
# import curses
# except ImportError:
# NCURSES = False
# else:
# NCURSES = True
NCURSES = False
# CONVERT shell colors to the same curses palette
SHELL_COLORS = {
"10": '\033[1;37;41m', # white on red
"11": '\033[1;37;43m', # white on orange
"12": '\033[1;37;45m', # white on magenta
"16": '\033[1;37;46m', # white on blue
"13": '\033[1;37;40m', # black on white
"06": '\033[1;34m', # light blue
"05": '\033[1;31m', # light red
"03": '\033[1;32m', # light green
"8": '\033[1;33m', # yellow
"7": '\033[36m', # cyan
"6": '\033[34m', # blue
"3": '\033[32m', # green
"4": '\033[33m', # orange
"5": '\033[31m', # red
"2": "\033[35m", # magenta
"1": "\033[0m", # white
"0": "\033[0m", # end
}
def safe_int(x):
try:
return int(x)
except TypeError:
return x
def shell_colorify_match(match):
return SHELL_COLORS[match.groups()[2]]
class ExcThread(threading.Thread):
def __init__(self, bucket, *args, **kargs):
threading.Thread.__init__(self, *args, **kargs)
self.bucket = bucket
def run(self):
try:
threading.Thread.run(self)
except Exception:
self.bucket.put(sys.exc_info())
raise
class Screen(StringIO):
# tags used to control color of strings and select buffer
TAG = re.compile("@@((\d+),)?(\d+):", re.MULTILINE)
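    # e.g. "@@2,5:some text" routes "some text" to window 2 using color
    # pair 5; "@@3:" gives only a color/buffer index, keeping the current
    # window selection.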
def __init__(self, windows):
StringIO.__init__(self)
self.windows = windows
self.autoscroll = {}
self.pos = {}
self.lines = {}
self.maxsize = {}
self.stdout = None
self.logfile = None
self.wrapper = TextWrapper(width=80, initial_indent="",
subsequent_indent=" ",
replace_whitespace=False)
if NCURSES:
for windex in windows:
h, w = windows[windex][0].getmaxyx()
self.maxsize[windex] = (h, w)
self.pos[windex] = [0, 0]
self.autoscroll[windex] = True
self.lines[windex] = 0
def scroll(self, win, vt, hz=0, refresh=True):
line, col = self.pos[win]
hz_pos = col + hz
if hz_pos < 0:
hz_pos = 0
elif hz_pos >= 1000:
hz_pos = 999
vt_pos = line + vt
if vt_pos < 0:
vt_pos = 0
elif vt_pos >= 1000:
vt_pos = 1000 - 1
if line != vt_pos or col != hz_pos:
self.pos[win] = [vt_pos, hz_pos]
if refresh:
self.refresh()
def scroll_to(self, win, vt, hz=0, refresh=True):
line, col = self.pos[win]
hz_pos = hz
if hz_pos < 0:
hz_pos = 0
elif hz_pos >= 1000:
hz_pos = 999
vt_pos = vt
if vt_pos < 0:
vt_pos = 0
elif vt_pos >= 1000:
vt_pos = 1000 - 1
if line != vt_pos or col != hz_pos:
self.pos[win] = [vt_pos, hz_pos]
if refresh:
self.refresh()
def refresh(self):
for windex, (win, dim) in six.iteritems(self.windows):
h, w, sy, sx = dim
line, col = self.pos[windex]
if h is not None:
win.touchwin()
win.noutrefresh(line, col, sy+1, sx+1, sy+h-2, sx+w-2)
else:
win.noutrefresh()
curses.doupdate()
def write(self, text):
if six.PY3:
text = str(text)
else:
if isinstance(text, six.text_type):
#text = text.encode(self.stdout.encoding)
text = text.encode("UTF-8")
if NCURSES:
self.write_curses(text)
if self.logfile:
text = re.sub(self.TAG, "", text)
self.write_log(text)
else:
if GLOBALS["color_shell"]:
text = re.sub(self.TAG, shell_colorify_match, text)
else:
text = re.sub(self.TAG, "", text)
self.write_normal(text)
if self.logfile:
self.write_log(text)
def write_log(self, text):
self.logfile.write(text)
self.logfile.flush()
def write_normal(self, text):
#_text = '\n'.join(self.wrapper.wrap(text))
#self.stdout.write(_text+"\n")
self.stdout.write(text)
def write_curses(self, text):
formatstr = deque()
for m in re.finditer(self.TAG, text):
x1, x2 = m.span()
cindex = safe_int(m.groups()[2])
windex = safe_int(m.groups()[1])
formatstr.append([x1, x2, cindex, windex])
if not formatstr:
formatstr.append([None, 0, 1, 1])
if formatstr[0][1] == 0:
stop, start, cindex, windex = formatstr.popleft()
if windex is None:
windex = 1
else:
stop, start, cindex, windex = None, 0, 1, 1
while start is not None:
if formatstr:
next_stop, next_start, next_cindex, next_windex = formatstr.popleft()
else:
next_stop, next_start, next_cindex, next_windex = None, None, cindex, windex
face = curses.color_pair(cindex)
win, (h, w, sy, sx) = self.windows[windex]
ln, cn = self.pos[windex]
# Is this too inefficient?
new_lines = text[start:next_stop].count("\n")
self.lines[windex] += new_lines
            if self.lines[windex] > self.maxsize[windex][0]:
                _y, _x = win.getyx()
                for _i in range(self.lines[windex] - self.maxsize[windex][0]):
win.move(0,0)
win.deleteln()
win.move(_y, _x)
# Visual scroll
if self.autoscroll[windex]:
scroll = self.lines[windex] - ln - h
if scroll > 0:
self.scroll(windex, scroll, refresh=False)
try:
win.addstr(text[start:next_stop], face)
except curses.error:
win.addstr("???")
start = next_start
stop = next_stop
cindex = next_cindex
if next_windex is not None:
windex = next_windex
self.refresh()
def resize_screen(self, s, frame):
import sys,fcntl,termios,struct
data = fcntl.ioctl(self.stdout.fileno(), termios.TIOCGWINSZ, '1234')
h, w = struct.unpack('hh', data)
win = self.windows
#main = curses.initscr()
#h, w = main.getmaxyx()
#win[0] = (main, (None, None, 0, 0))
#curses.resizeterm(h, w)
win[0][0].resize(h, w)
win[0][0].clear()
info_win, error_win, debug_win = setup_layout(h, w)
win[1][1] = info_win
win[2][1] = error_win
win[3][1] = debug_win
self.refresh()
def init_curses(main_scr):
if not NCURSES or not main_scr:
# curses disabled, no multi windows
return None
# Colors
curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLACK)
curses.init_pair(2, curses.COLOR_MAGENTA, curses.COLOR_BLACK)
curses.init_pair(3, curses.COLOR_GREEN, curses.COLOR_BLACK)
curses.init_pair(4, curses.COLOR_YELLOW, curses.COLOR_BLACK)
curses.init_pair(5, curses.COLOR_RED, curses.COLOR_BLACK)
curses.init_pair(6, curses.COLOR_RED, curses.COLOR_BLACK)
curses.init_pair(10, curses.COLOR_WHITE, curses.COLOR_RED)
curses.init_pair(11, curses.COLOR_WHITE, curses.COLOR_YELLOW)
curses.init_pair(12, curses.COLOR_WHITE, curses.COLOR_MAGENTA)
WIN = {}
main = main_scr
h, w = main.getmaxyx()
WIN[0] = (main, (None, None, 0, 0))
# Creates layout
info_win, error_win, debug_win = setup_layout(h, w)
WIN[1] = [curses.newpad(5000, 1000), info_win]
WIN[2] = [curses.newpad(5000, 1000), error_win]
WIN[3] = [curses.newpad(5000, 1000), debug_win]
#WIN[1], WIN[11] = newwin(h-1, w/2, 1,1)
#WIN[2], WIN[12] = newwin(h-dbg_h-1, (w/2)-1, 1, (w/2)+2)
#WIN[3], WIN[13] = newwin(dbg_h-1, (w/2)-1, h-dbg_h+1, (w/2)+2)
for windex, (w, dim) in six.iteritems(WIN):
#w = WIN[i]
#w.bkgd(str(windex))
w.bkgd(" ")
w.keypad(1)
w.idlok(True)
w.scrollok(True)
return WIN
def clear_env():
try:
terminate_job_launcher()
except:
pass
base_dir = GLOBALS["basedir"]
lock_file = pjoin(base_dir, "alive")
try:
os.remove(lock_file)
except Exception:
print("could not remove lock file %s" %lock_file, file=sys.stderr)
clear_tempdir()
def app_wrapper(func, args):
global NCURSES
base_dir = GLOBALS.get("scratch_dir", GLOBALS["basedir"])
lock_file = pjoin(base_dir, "alive")
if not args.enable_ui:
NCURSES = False
if not pexist(lock_file) or args.clearall:
open(lock_file, "w").write(time.ctime())
else:
clear_env()
print('\nThe same process seems to be running. Use --clearall or remove the lock file "alive" within the output dir', file=sys.stderr)
sys.exit(-1)
try:
if NCURSES:
curses.wrapper(main, func, args)
else:
main(None, func, args)
except ConfigError as e:
if GLOBALS.get('_background_scheduler', None):
GLOBALS['_background_scheduler'].terminate()
print("\nConfiguration Error:", e, file=sys.stderr)
clear_env()
sys.exit(-1)
except DataError as e:
if GLOBALS.get('_background_scheduler', None):
GLOBALS['_background_scheduler'].terminate()
print("\nData Error:", e, file=sys.stderr)
clear_env()
sys.exit(-1)
except KeyboardInterrupt:
# Control-C is also grabbed by the back_launcher, so it is no necessary
# to terminate from here
print("\nProgram was interrupted.", file=sys.stderr)
if args.monitor:
print(("VERY IMPORTANT !!!: Note that launched"
" jobs will keep running as you provided the --monitor flag"), file=sys.stderr)
clear_env()
sys.exit(-1)
except:
if GLOBALS.get('_background_scheduler', None):
GLOBALS['_background_scheduler'].terminate()
clear_env()
raise
else:
if GLOBALS.get('_background_scheduler', None):
GLOBALS['_background_scheduler'].terminate()
clear_env()
def main(main_screen, func, args):
""" Init logging and Screen. Then call main function """
global MAIN_LOG
# Do I use ncurses or basic terminal interface?
screen = Screen(init_curses(main_screen))
# prints are handled by my Screen object
screen.stdout = sys.stdout
if args.logfile:
screen.logfile = open(os.path.join(GLOBALS["basedir"], "npr.log"), "w")
sys.stdout = screen
sys.stderr = screen
# Start logger, pointing to the selected screen
if not MAIN_LOG:
MAIN_LOG = True
log = get_main_log(screen, [28,26,24,22,20,10][args.verbosity])
# Call main function as lower thread
if NCURSES:
screen.refresh()
exceptions = six.moves.queue.Queue()
t = ExcThread(bucket=exceptions, target=func, args=[args])
t.daemon = True
t.start()
ln = 0
chars = "\\|/-\\|/-"
cbuff = 1
try:
while 1:
try:
exc = exceptions.get(block=False)
except six.moves.queue.Empty:
pass
else:
exc_type, exc_obj, exc_trace = exc
# deal with the exception
#print exc_trace, exc_type, exc_obj
raise exc_obj
mwin = screen.windows[0][0]
key = mwin.getch()
mwin.addstr(0, 0, "%s (%s) (%s) (%s)" %(key, screen.pos, ["%s %s" %(i,w[1]) for i,w in list(screen.windows.items())], screen.lines) + " "*50)
mwin.refresh()
if key == 113:
# Fixes the problem of prints without newline char
raise KeyboardInterrupt("Q Pressed")
if key == 9:
cbuff += 1
if cbuff>3:
cbuff = 1
elif key == curses.KEY_UP:
screen.scroll(cbuff, -1)
elif key == curses.KEY_DOWN:
screen.scroll(cbuff, 1)
elif key == curses.KEY_LEFT:
screen.scroll(cbuff, 0, -1)
elif key == curses.KEY_RIGHT:
screen.scroll(cbuff, 0, 1)
elif key == curses.KEY_NPAGE:
screen.scroll(cbuff, 10)
elif key == curses.KEY_PPAGE:
screen.scroll(cbuff, -10)
elif key == curses.KEY_END:
screen.scroll_to(cbuff, 999, 0)
elif key == curses.KEY_HOME:
screen.scroll_to(cbuff, 0, 0)
elif key == curses.KEY_RESIZE:
screen.resize_screen(None, None)
else:
pass
except:
# fixes the problem of restoring screen when last print
# did not contain a newline char. WTF!
print("\n")
raise
#while 1:
# if ln >= len(chars):
# ln = 0
# #screen.windows[0].addstr(0,0, chars[ln])
# #screen.windows[0].refresh()
# time.sleep(0.2)
# ln += 1
else:
func(args)
def setup_layout(h, w):
# Creates layout
header = 4
start_x = 0
start_y = header
h -= start_y
w -= start_x
    h1 = h//2 + h%2
    h2 = h//2
if w > 160:
# _______
# | |___|
# |___|___|
        w1 = w//2 + w%2
        w2 = w//2
info_win = [h, w1, start_y, start_x]
error_win = [h1, w2, start_y, w1]
debug_win = [h2, w2, h1, w1]
else:
# ___
# |___|
# |___|
# |___|
        h2a = h2//2 + h2%2
        h2b = h2//2
info_win = [h1, w, start_y, start_x]
error_win = [h2a, w, h1, start_x]
debug_win = [h2b, w, h1+h2a, start_x]
return info_win, error_win, debug_win
| gpl-3.0 | 3,725,017,766,903,037,000 | 29.935728 | 157 | 0.524962 | false |
phenoxim/nova | nova/api/openstack/placement/handler.py | 1 | 9631 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handlers for placement API.
Individual handlers are associated with URL paths in the
ROUTE_DECLARATIONS dictionary. At the top level each key is a Routes
compliant path. The value of that key is a dictionary mapping
individual HTTP request methods to a Python function representing a
simple WSGI application for satisfying that request.
The ``make_map`` method processes ROUTE_DECLARATIONS to create a
Routes.Mapper, including automatic handlers to respond with a
405 when a request is made against a valid URL with an invalid
method.
"""
import routes
import webob
from oslo_log import log as logging
from nova.api.openstack.placement import exception
from nova.api.openstack.placement.handlers import aggregate
from nova.api.openstack.placement.handlers import allocation
from nova.api.openstack.placement.handlers import allocation_candidate
from nova.api.openstack.placement.handlers import inventory
from nova.api.openstack.placement.handlers import resource_class
from nova.api.openstack.placement.handlers import resource_provider
from nova.api.openstack.placement.handlers import root
from nova.api.openstack.placement.handlers import trait
from nova.api.openstack.placement.handlers import usage
from nova.api.openstack.placement import policy
from nova.api.openstack.placement import util
from nova.i18n import _
LOG = logging.getLogger(__name__)
# URLs and Handlers
# NOTE(cdent): When adding URLs here, do not use regex patterns in
# the path parameters (e.g. {uuid:[0-9a-zA-Z-]+}) as that will lead
# to 404s that are controlled outside of the individual resources
# and thus do not include specific information on the why of the 404.
ROUTE_DECLARATIONS = {
'/': {
'GET': root.home,
},
# NOTE(cdent): This allows '/placement/' and '/placement' to
# both work as the root of the service, which we probably want
# for those situations where the service is mounted under a
# prefix (as it is in devstack). While weird, an empty string is
# a legit key in a dictionary and matches as desired in Routes.
'': {
'GET': root.home,
},
'/resource_classes': {
'GET': resource_class.list_resource_classes,
'POST': resource_class.create_resource_class
},
'/resource_classes/{name}': {
'GET': resource_class.get_resource_class,
'PUT': resource_class.update_resource_class,
'DELETE': resource_class.delete_resource_class,
},
'/resource_providers': {
'GET': resource_provider.list_resource_providers,
'POST': resource_provider.create_resource_provider
},
'/resource_providers/{uuid}': {
'GET': resource_provider.get_resource_provider,
'DELETE': resource_provider.delete_resource_provider,
'PUT': resource_provider.update_resource_provider
},
'/resource_providers/{uuid}/inventories': {
'GET': inventory.get_inventories,
'POST': inventory.create_inventory,
'PUT': inventory.set_inventories,
'DELETE': inventory.delete_inventories
},
'/resource_providers/{uuid}/inventories/{resource_class}': {
'GET': inventory.get_inventory,
'PUT': inventory.update_inventory,
'DELETE': inventory.delete_inventory
},
'/resource_providers/{uuid}/usages': {
'GET': usage.list_usages
},
'/resource_providers/{uuid}/aggregates': {
'GET': aggregate.get_aggregates,
'PUT': aggregate.set_aggregates
},
'/resource_providers/{uuid}/allocations': {
'GET': allocation.list_for_resource_provider,
},
'/allocations': {
'POST': allocation.set_allocations,
},
'/allocations/{consumer_uuid}': {
'GET': allocation.list_for_consumer,
'PUT': allocation.set_allocations_for_consumer,
'DELETE': allocation.delete_allocations,
},
'/allocation_candidates': {
'GET': allocation_candidate.list_allocation_candidates,
},
'/traits': {
'GET': trait.list_traits,
},
'/traits/{name}': {
'GET': trait.get_trait,
'PUT': trait.put_trait,
'DELETE': trait.delete_trait,
},
'/resource_providers/{uuid}/traits': {
'GET': trait.list_traits_for_resource_provider,
'PUT': trait.update_traits_for_resource_provider,
'DELETE': trait.delete_traits_for_resource_provider
},
'/usages': {
'GET': usage.get_total_usages,
},
}
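# To add a new URL, extend ROUTE_DECLARATIONS above. Illustrative sketch only
# (this route and handler do not exist in the placement API):
#
#     '/resource_providers/{uuid}/example': {
#         'GET': example_handler.get_example,
#     },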
def dispatch(environ, start_response, mapper):
"""Find a matching route for the current request.
If no match is found, raise a 404 response.
If there is a matching route, but no matching handler
for the given method, raise a 405.
"""
result = mapper.match(environ=environ)
if result is None:
raise webob.exc.HTTPNotFound(
json_formatter=util.json_error_formatter)
# We can't reach this code without action being present.
handler = result.pop('action')
environ['wsgiorg.routing_args'] = ((), result)
return handler(environ, start_response)
def handle_405(environ, start_response):
"""Return a 405 response when method is not allowed.
If _methods are in routing_args, send an allow header listing
the methods that are possible on the provided URL.
"""
_methods = util.wsgi_path_item(environ, '_methods')
headers = {}
if _methods:
# Ensure allow header is a python 2 or 3 native string (thus
# not unicode in python 2 but stay a string in python 3)
# In the process done by Routes to save the allowed methods
# to its routing table they become unicode in py2.
headers['allow'] = str(_methods)
# Use Exception class as WSGI Application. We don't want to raise here.
response = webob.exc.HTTPMethodNotAllowed(
_('The method specified is not allowed for this resource.'),
headers=headers, json_formatter=util.json_error_formatter)
return response(environ, start_response)
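# Response sketch (hypothetical request): ROUTE_DECLARATIONS below only
# declares GET for '/', so a PUT against '/' falls through to the
# handle_405 route with _methods='GET' and produces
#
#   HTTP/1.1 405 Method Not Allowed
#   allow: GET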
def make_map(declarations):
"""Process route declarations to create a Route Mapper."""
mapper = routes.Mapper()
for route, targets in declarations.items():
allowed_methods = []
for method in targets:
mapper.connect(route, action=targets[method],
conditions=dict(method=[method]))
allowed_methods.append(method)
allowed_methods = ', '.join(allowed_methods)
mapper.connect(route, action=handle_405, _methods=allowed_methods)
return mapper
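# Usage sketch (hypothetical handler, not part of the placement API): the
# mapper built here is the one dispatch() consults, e.g.
#
#   def ping(environ, start_response):
#       start_response('200 OK', [('Content-Type', 'text/plain')])
#       return [b'pong']
#
#   mapper = make_map({'/ping': {'GET': ping}})
#   # dispatch(environ, start_response, mapper) now routes GET /ping to
#   # ping() and any other method on /ping to handle_405 with allow: GET.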
class PlacementHandler(object):
"""Serve Placement API.
Dispatch to handlers defined in ROUTE_DECLARATIONS.
"""
def __init__(self, **local_config):
# NOTE(cdent): Local config currently unused.
self._map = make_map(ROUTE_DECLARATIONS)
def __call__(self, environ, start_response):
# All requests but '/' require admin.
if environ['PATH_INFO'] != '/':
context = environ['placement.context']
# TODO(cdent): Using is_admin everywhere (except /) is
# insufficiently flexible for future use case but is
# convenient for initial exploration.
if not policy.placement_authorize(context, 'placement'):
raise webob.exc.HTTPForbidden(
_('admin required'),
json_formatter=util.json_error_formatter)
        # Check that an incoming request with a non-empty content-length
        # header (an integer > 0) also carries a non-empty content-type
        # header. If not, raise a 400.
clen = environ.get('CONTENT_LENGTH')
try:
if clen and (int(clen) > 0) and not environ.get('CONTENT_TYPE'):
raise webob.exc.HTTPBadRequest(
_('content-type header required when content-length > 0'),
json_formatter=util.json_error_formatter)
except ValueError as exc:
raise webob.exc.HTTPBadRequest(
_('content-length header must be an integer'),
json_formatter=util.json_error_formatter)
try:
return dispatch(environ, start_response, self._map)
# Trap the NotFound exceptions raised by the objects used
# with the API and transform them into webob.exc.HTTPNotFound.
except exception.NotFound as exc:
raise webob.exc.HTTPNotFound(
exc, json_formatter=util.json_error_formatter)
# Remaining uncaught exceptions will rise first to the Microversion
# middleware, where any WebOb generated exceptions will be caught and
# transformed into legit HTTP error responses (with microversion
# headers added), and then to the FaultWrapper middleware which will
# catch anything else and transform them into 500 responses.
# NOTE(cdent): There should be very few uncaught exceptions which are
# not WebOb exceptions at this stage as the handlers are contained by
# the wsgify decorator which will transform those exceptions to
# responses itself.
| apache-2.0 | 7,792,931,749,543,994,000 | 40.15812 | 78 | 0.668986 | false |
christianbrodbeck/nipype | nipype/interfaces/tests/test_utility.py | 1 | 2130 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import os
import shutil
from tempfile import mkdtemp
from nipype.testing import assert_equal, assert_true
from nipype.interfaces import utility
import nipype.pipeline.engine as pe
def test_rename():
tempdir = os.path.realpath(mkdtemp())
origdir = os.getcwd()
os.chdir(tempdir)
# Test very simple rename
_ = open("file.txt", "w").close()
rn = utility.Rename(in_file="file.txt", format_string="test_file1.txt")
res = rn.run()
outfile = os.path.join(tempdir, "test_file1.txt")
yield assert_equal, res.outputs.out_file, outfile
yield assert_true, os.path.exists(outfile)
# Now a string-formatting version
rn = utility.Rename(in_file="file.txt", format_string="%(field1)s_file%(field2)d", keep_ext=True)
# Test .input field creation
yield assert_true, hasattr(rn.inputs, "field1")
yield assert_true, hasattr(rn.inputs, "field2")
# Set the inputs
rn.inputs.field1 = "test"
rn.inputs.field2 = 2
res = rn.run()
outfile = os.path.join(tempdir, "test_file2.txt")
yield assert_equal, res.outputs.out_file, outfile
yield assert_true, os.path.exists(outfile)
# Clean up
os.chdir(origdir)
shutil.rmtree(tempdir)
def test_function():
tempdir = os.path.realpath(mkdtemp())
origdir = os.getcwd()
os.chdir(tempdir)
def gen_random_array(size):
import numpy as np
return np.random.rand(size, size)
f1 = pe.MapNode(utility.Function(input_names=['size'], output_names=['random_array'], function=gen_random_array), name='random_array', iterfield=['size'])
f1.inputs.size = [2, 3, 5]
wf = pe.Workflow(name="test_workflow")
def increment_array(in_array):
return in_array + 1
f2 = pe.MapNode(utility.Function(input_names=['in_array'], output_names=['out_array'], function=increment_array), name='increment_array', iterfield=['in_array'])
wf.connect(f1, 'random_array', f2, 'in_array')
wf.run()
# Clean up
os.chdir(origdir)
shutil.rmtree(tempdir)
| bsd-3-clause | -4,245,517,544,868,425,700 | 30.323529 | 165 | 0.664319 | false |
babble/babble | include/jython/Lib/test/test_javashell.py | 1 | 5503 | import unittest
from test import test_support
from org.python.core import PyFile
import re
import os
import javashell
# testCmds is a list of (command, expectedOutput)
# each command is executed twice, once in an uninitialized environment and
# once in an initialized environment
# smaller set of commands for simple test
testCmds = [
("echo hello world", "hello world"),
]
# turn off output from javashell.__warn
javashell.__warn = lambda *args: None
def dprint( *args ):
#print args
pass
# can instead set testCmds = fullTestCmds
# Note that the validation is incomplete for several of these
# - they should validate depending on platform and pre-post, but
# they don't.
# can assign testCmds = fullTestCmds for more extensive tests
key, value = "testKey", "testValue"
fullTestCmds = [
# no quotes, should output both words
("echo hello world", "hello world"),
# should print PATH (on NT)
("echo PATH=%PATH%", "(PATH=.*;.*)|(PATH=%PATH%)"),
# should print 'testKey=%testKey%' on NT before initialization,
# should print 'testKey=' on 95 before initialization,
# and 'testKey=testValue' after
("echo %s=%%%s%%" % (key,key),
"(%s=)" % (key,)),
# should print PATH (on Unix)
( "echo PATH=$PATH", "PATH=.*" ),
# should print 'testKey=testValue' on Unix after initialization
( "echo %s=$%s" % (key,key),
"(%s=$%s)|(%s=)|(%s=%s)" % (key, key, key, key, value ) ),
# should output quotes on NT but not on Unix
( 'echo "hello there"', '"?hello there"?' ),
# should print 'why' to stdout.
( r'''jython -c "import sys;sys.stdout.write( 'why\n' )"''', "why" ),
# should print 'why' to stderr.
# doesn't work on NT because of quoting issues.
# Have to add the print to give some output to stdout...
# empty string matches everything...
( r'''jython -c "import sys;sys.stderr.write('why\n');print " ''',
'' )
]
class JavaShellTest(unittest.TestCase):
"""This test validates the subshell functionality (javashell, os.environ, popen*).
Does some white box as well as black box testing.
"""
def _testCmds( self, _shellEnv, testCmds, whichEnv ):
"""test commands (key) and compare output to expected output (value).
this actually executes all the commands twice, testing the return
code by calling system(), and testing some of the output by calling
execute()
"""
for cmd, pattern in testCmds:
dprint( "\nExecuting '%s' with %s environment" % (cmd, whichEnv))
p = javashell.shellexecute(cmd)
line = PyFile( p.getInputStream() ).readlines()[0]
assert re.match( pattern, line ), \
"expected match for %s, got %s" % ( pattern, line )
dprint( "waiting for", cmd, "to complete")
assert not p.waitFor(), \
"%s failed with %s environment" % (cmd, whichEnv)
def testSystem( self ):
"""test system and environment functionality"""
org = os.environ
self._testCmds( javashell._shellEnv, testCmds, "default" )
# trigger initialization of environment
os.environ[ key ] = value
assert org.get( key, None ) == value, \
"expected stub to have %s set" % key
assert os.environ.get( key, None ) == value, \
"expected real os.environment to have %s set" % key
# if environment is initialized and jython gets ARGS=-i, it thinks
# it is running in interactive mode, and fails to exit until
# process.getOutputStream().close()
try:
del os.environ[ "ARGS" ]
except KeyError:
pass
# test system using the non-default environment
self._testCmds( javashell._shellEnv, testCmds, "initialized" )
assert os.environ.has_key( "PATH" ), \
"expected environment to have PATH attribute " \
"(this may not apply to all platforms!)"
def testPutEnv( self ):
"Put an environment variable and ensure that spawned processes see the change"
value = "Value we set"
os.putenv( "NEWVARIABLE", value )
newValue = os.popen( "echo $NEWVARIABLE" ).read().strip()
if newValue == "$NEWVARIABLE":
newValue = os.popen( "echo %NEWVARIABLE%" ).read().strip()
if newValue == "%NEWVARIABLE%":
raise test_support.TestSkipped( "Unable to find a subshell to execute echo" )
assert newValue == value, (
"Expected (%s) to equal value we set (%s)" % (
newValue, value
))
def testFormatUnicodeCommand(self):
shell = javashell._ShellEnv(cmd=['runner'])
self.assertEqual(shell._formatCmd('echo hello'), ['runner', 'echo hello'])
self.assertEqual(shell._formatCmd(u'echo world'), ['runner', u'echo world'])
def testExecuteUnicodeCommandWithRedirection(self):
process = javashell.shellexecute(u'nonexcmd 2>&1')
stdout = process.getOutputStream().toString()
process.waitFor()
self.assertNotEqual(stdout, "", "Redirecting 2>&1 failed with unicode cmd")
def test_main():
test_support.run_unittest(JavaShellTest)
if __name__ == "__main__":
test_main()
| apache-2.0 | -7,787,827,928,684,043,000 | 38.028369 | 89 | 0.590224 | false |
fallen/artiq | artiq/language/scan.py | 1 | 4762 | """
Implementation and management of scan objects.
A scan object (e.g. :class:`artiq.language.scan.LinearScan`) represents a
one-dimensional sweep of a numerical range. Multi-dimensional scans are
constructed by combining several scan objects.
Iterate on a scan object to scan it, e.g. ::
for variable in self.scan:
do_something(variable)
Iterating multiple times on the same scan object is possible, with the scan
restarting at the minimum value each time. Iterating concurrently on the
same scan object (e.g. via nested loops) is also supported, and the
iterators are independent from each other.
Scan objects are supported both on the host and the core device.
"""
from random import Random, shuffle
import inspect
from artiq.language.core import *
from artiq.language.environment import NoDefault, DefaultMissing
__all__ = ["ScanObject",
"NoScan", "LinearScan", "RandomScan", "ExplicitScan",
"Scannable"]
class ScanObject:
pass
class NoScan(ScanObject):
"""A scan object that yields a single value."""
def __init__(self, value):
self.value = value
@portable
def _gen(self):
yield self.value
@portable
def __iter__(self):
return self._gen()
def describe(self):
return {"ty": "NoScan", "value": self.value}
class LinearScan(ScanObject):
"""A scan object that yields a fixed number of increasing evenly
spaced values in a range."""
def __init__(self, min, max, npoints):
self.min = min
self.max = max
self.npoints = npoints
@portable
def _gen(self):
r = self.max - self.min
d = self.npoints - 1
for i in range(self.npoints):
yield r*i/d + self.min
@portable
def __iter__(self):
return self._gen()
def describe(self):
return {"ty": "LinearScan",
"min": self.min, "max": self.max, "npoints": self.npoints}
class RandomScan(ScanObject):
"""A scan object that yields a fixed number of randomly ordered evenly
spaced values in a range."""
    def __init__(self, min, max, npoints, seed=0):
        # keep the parameters so that describe() can report them
        self.min = min
        self.max = max
        self.npoints = npoints
        self.sequence = list(LinearScan(min, max, npoints))
        shuffle(self.sequence, Random(seed).random)
@portable
def __iter__(self):
return iter(self.sequence)
def describe(self):
return {"ty": "RandomScan",
"min": self.min, "max": self.max, "npoints": self.npoints}
class ExplicitScan(ScanObject):
"""A scan object that yields values from an explicitly defined sequence."""
def __init__(self, sequence):
self.sequence = sequence
@portable
def __iter__(self):
return iter(self.sequence)
def describe(self):
return {"ty": "ExplicitScan", "sequence": self.sequence}
_ty_to_scan = {
"NoScan": NoScan,
"LinearScan": LinearScan,
"RandomScan": RandomScan,
"ExplicitScan": ExplicitScan
}
class Scannable:
"""An argument (as defined in :class:`artiq.language.environment`) that
takes a scan object.
:param global_min: The minimum value taken by the scanned variable, common
to all scan modes. The user interface takes this value to set the
range of its input widgets.
:param global_max: Same as global_min, but for the maximum value.
:param global_step: The step with which the value should be modified by
up/down buttons in a user interface.
:param unit: A string representing the unit of the scanned variable, for user
interface (UI) purposes.
:param ndecimals: The number of decimals a UI should use.
"""
def __init__(self, default=NoDefault, unit="",
global_step=1.0, global_min=None, global_max=None,
ndecimals=2):
if default is not NoDefault:
self.default_value = default
self.unit = unit
self.global_step = global_step
self.global_min = global_min
self.global_max = global_max
self.ndecimals = ndecimals
def default(self):
if not hasattr(self, "default_value"):
raise DefaultMissing
return self.default_value
def process(self, x):
cls = _ty_to_scan[x["ty"]]
args = dict()
for arg in inspect.getargspec(cls).args[1:]:
if arg in x:
args[arg] = x[arg]
return cls(**args)
def describe(self):
d = {"ty": "Scannable"}
if hasattr(self, "default_value"):
d["default"] = self.default_value.describe()
d["unit"] = self.unit
d["global_step"] = self.global_step
d["global_min"] = self.global_min
d["global_max"] = self.global_max
d["ndecimals"] = self.ndecimals
return d
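# Demonstration sketch (not part of the original module): building a
# Scannable argument around a LinearScan default and inspecting it.
if __name__ == "__main__":
    arg = Scannable(default=LinearScan(0.0, 10.0, 11), unit="us")
    print(arg.describe())       # includes the default scan's description
    print(list(arg.default()))  # 0.0, 1.0, ..., 10.0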
| gpl-3.0 | 1,219,659,166,398,376,400 | 28.395062 | 81 | 0.625367 | false |
restful-open-annotation/oa-adapter | formats/json_format.py | 1 | 2113 | #!/usr/bin/env python
"""JSON content-type support for Open Annotation."""
__author__ = 'Sampo Pyysalo'
__license__ = 'MIT'
import json
# Default values for rendering options
PRETTYPRINT_DEFAULT = True
KEEPCONTEXT_DEFAULT = False
# Short name for this format.
format_name = 'json'
# The MIME types associated with this format.
mimetypes = ['application/json']
def from_jsonld(data, options=None):
"""Render JSON-LD data into JSON string.
This is intended to be used as a mimerender render function
(see http://mimerender.readthedocs.org/en/latest/).
If options['prettyprint'] is True, renders the data so that it is
more easily readable by humans.
If options['keepcontext'] is True, includes the JSON-LD @context
in the JSON data if present.
Args:
data: dict containing JSON-LD data in expanded JSON-LD form
(see http://www.w3.org/TR/json-ld/#expanded-document-form).
options: dict of rendering options, or None for defaults.
Returns:
String representing the rendered data.
"""
if options is None:
options = {}
# @context is not considered part of the JSON format
keepcontext = options.get('keepcontext', KEEPCONTEXT_DEFAULT)
if not keepcontext and '@context' in data:
del data['@context']
prettyprint = options.get('prettyprint', PRETTYPRINT_DEFAULT)
if prettyprint:
return json.dumps(data, indent=2, separators=(',', ': '))+'\n'
else:
return json.dumps(data)
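# Example sketch (hypothetical annotation data): with default options the
# @context key is stripped and the rest is serialized.
#
#   doc = {'@context': 'http://www.w3.org/ns/oa.jsonld', 'body': 'comment'}
#   from_jsonld(doc, {'prettyprint': False})   # -> '{"body": "comment"}'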
def to_jsonld(data, options=None):
"""Parse JSON data into JSON-LD.
Args:
data: string in JSON format.
options: dict of parsing options, or None for defaults.
Returns:
dict containing JSON-LD data in expanded JSON-LD form
(see http://www.w3.org/TR/json-ld/#expanded-document-form).
"""
if options is None:
options = {}
encoding = options.get('encoding')
if encoding is None:
jsonld = json.loads(data)
else:
jsonld = json.loads(data, encoding=encoding)
# TODO: add context and expand
return jsonld
| mit | 113,083,809,037,946,270 | 27.173333 | 71 | 0.658779 | false |
seprich/py-bson-rpc | bsonrpc/concurrent.py | 1 | 2990 | # -*- coding: utf-8 -*-
'''
This module provides a collection of concurrency related
object generators. These generators will create either
native threading based or greenlet based objects depending
on which threading_model is selected.
'''
from bsonrpc.options import ThreadingModel
__license__ = 'http://mozilla.org/MPL/2.0/'
def _spawn_thread(fn, *args, **kwargs):
from threading import Thread
t = Thread(target=fn, args=args, kwargs=kwargs)
t.start()
return t
def _spawn_greenlet(fn, *args, **kwargs):
from gevent import Greenlet
g = Greenlet(fn, *args, **kwargs)
g.start()
return g
def spawn(threading_model, fn, *args, **kwargs):
if threading_model == ThreadingModel.GEVENT:
return _spawn_greenlet(fn, *args, **kwargs)
if threading_model == ThreadingModel.THREADS:
return _spawn_thread(fn, *args, **kwargs)
def _new_queue(*args, **kwargs):
from six.moves.queue import Queue
return Queue(*args, **kwargs)
def _new_gevent_queue(*args, **kwargs):
from gevent.queue import Queue
return Queue(*args, **kwargs)
def new_queue(threading_model, *args, **kwargs):
if threading_model == ThreadingModel.GEVENT:
return _new_gevent_queue(*args, **kwargs)
if threading_model == ThreadingModel.THREADS:
return _new_queue(*args, **kwargs)
def _new_thread_lock(*args, **kwargs):
from threading import Lock
return Lock(*args, **kwargs)
def _new_gevent_lock(*args, **kwargs):
from gevent.lock import Semaphore
return Semaphore(*args, **kwargs)
def new_lock(threading_model, *args, **kwargs):
if threading_model == ThreadingModel.GEVENT:
return _new_gevent_lock(*args, **kwargs)
if threading_model == ThreadingModel.THREADS:
return _new_thread_lock(*args, **kwargs)
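# Usage sketch (assuming the native-threads model is selected); the same
# calls work unchanged under ThreadingModel.GEVENT.
#
#   q = new_queue(ThreadingModel.THREADS)
#   worker = spawn(ThreadingModel.THREADS, q.put, 42)
#   worker.join()
#   assert q.get() == 42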
class Promise(object):
def __init__(self, event):
object.__setattr__(self, '_event', event)
object.__setattr__(self, '_value', None)
def __getattr__(self, name):
return getattr(self._event, name)
def __setattr__(self, name, value):
if hasattr(self._event, name):
object.__setattr__(self._event, name, value)
else:
object.__setattr__(self, name, value)
@property
def value(self):
return self._value
def set(self, value):
object.__setattr__(self, '_value', value)
self._event.set()
def wait(self, timeout=None):
if not self._event.wait(timeout):
raise RuntimeError(
u'Promise timeout after %.02f seconds.' % timeout)
return self._value
def _new_thread_event():
from threading import Event
return Event()
def _new_gevent_event():
from gevent.event import Event
return Event()
def new_promise(threading_model):
if threading_model == ThreadingModel.GEVENT:
return Promise(_new_gevent_event())
if threading_model == ThreadingModel.THREADS:
return Promise(_new_thread_event())
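# Demonstration sketch (not part of the original module): resolving a
# promise from a spawned worker, assuming the native-threads model.
if __name__ == '__main__':
    promise = new_promise(ThreadingModel.THREADS)
    spawn(ThreadingModel.THREADS, promise.set, 'result')
    print(promise.wait(timeout=5.0))  # -> 'result'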
| mpl-2.0 | 622,220,974,657,172,900 | 25.696429 | 66 | 0.644147 | false |
MaxTakahashi/hammr | hammr/utils/publish_utils.py | 1 | 14372 | # Copyright 2007-2015 UShareSoft SAS, All rights reserved
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ussclicore.utils import printer
from uforge.objects.uforge import *
def publish_vcd(pimage, builder):
# doing field verification
if not "orgName" in builder:
printer.out("orgName in vcd builder not found", printer.ERROR)
return
if not "catalogName" in builder:
printer.out("catalogName in vcd builder not found", printer.ERROR)
return
if not "imageName" in builder:
printer.out("imageName in vcd builder not found", printer.ERROR)
return
pimage.credAccount.organizationName = builder["orgName"]
pimage.credAccount.catalogId = builder["catalogName"]
pimage.credAccount.displayName = builder["imageName"]
return pimage
def publish_vcenter(builder):
pimage = PublishImageVSphere()
# doing field verification
if not "displayName" in builder:
printer.out("displayName in vcenter builder not found", printer.ERROR)
return
if not "esxHost" in builder:
printer.out("esxHost in vcenter builder not found", printer.ERROR)
return
if not "datastore" in builder:
printer.out("datastore in vcenter builder not found", printer.ERROR)
return
if "network" in builder:
pimage.network = builder["network"]
pimage.displayName = builder["displayName"]
pimage.esxHost = builder["esxHost"]
pimage.datastore = builder["datastore"]
return pimage
def publish_cloudstack(pimage, builder):
# doing field verification
if not "imageName" in builder:
printer.out("imageName in cloudstack builder not found", printer.ERROR)
return
if not "zone" in builder:
printer.out("zone in cloudstack builder not found", printer.ERROR)
return
if "publicImage" in builder:
pimage.credAccount.publicImage = True if (builder["publicImage"] == "true") else False
if "featured" in builder:
pimage.credAccount.featuredEnabled = True if (builder["featured"] == "true") else False
pimage.credAccount.displayName = builder["imageName"]
pimage.credAccount.zoneName = builder["zone"]
return pimage
def publish_cloudstack_qcow2(pimage, builder):
return publish_cloudstack(pimage, builder)
def publish_cloudstack_vhd(pimage, builder):
return publish_cloudstack(pimage, builder)
def publish_cloudstack_ova(pimage, builder):
return publish_cloudstack(pimage, builder)
def publish_susecloud(pimage, builder):
# doing field verification
if not "imageName" in builder:
printer.out("imageName in susecloud builder not found", printer.ERROR)
return
if not "tenant" in builder:
printer.out("tenant in susecloud builder not found", printer.ERROR)
return
if "description" in builder:
pimage.credAccount.description = builder["description"]
pimage.credAccount.displayName = builder["imageName"]
pimage.credAccount.tenantName = builder["tenant"]
if "publicImage" in builder:
pimage.credAccount.publicImage = True if (builder["publicImage"] == "true") else False
# if "paraVirtualMode" in builder:
# pimage.credAccount. = True if (builder["paraVirtualMode"]=="true") else False
return pimage
def publish_openstack(builder):
pimage = PublishImageOpenStack()
# doing field verification
if not "displayName" in builder:
printer.out("displayName in openstack builder not found", printer.ERROR)
return
if not "tenantName" in builder:
printer.out("TenantName in openstack builder not found", printer.ERROR)
return
pimage.displayName = builder["displayName"]
pimage.tenantName = builder["tenantName"]
if "publicImage" in builder:
pimage.credAccount.publicImage = True if (builder["publicImage"] == "true") else False
if "keystoneDomain" in builder:
pimage.keystoneDomain = builder["keystoneDomain"]
return
if "keystoneProject" in builder:
pimage.keystoneProject = builder["keystoneProject"]
return
return pimage
def publish_openstackqcow2(builder):
return publish_openstack(builder)
def publish_openstackvhd(pimage, builder):
return publish_openstack(pimage, builder)
def publish_openstackvmdk(pimage, builder):
return publish_openstack(pimage, builder)
def publish_openstackvdi(pimage, builder):
return publish_openstack(pimage, builder)
def publish_aws(builder):
pimage = PublishImageAws()
# doing field verification
if not "bucket" in builder:
printer.out("bucket in AWS builder not found", printer.ERROR)
return
if not "region" in builder:
printer.out("region in AMI builder not found", printer.ERROR)
return
pimage.bucket = builder["bucket"]
pimage.region = builder["region"]
return pimage
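# Builder sketch (hypothetical values): the two fields publish_aws requires.
#
#   publish_aws({"bucket": "my-bucket", "region": "eu-west-1"})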
def publish_azure(builder):
if "blob" in builder or "container" in builder:
printer.out("Azure Resource Manager publish")
return publish_azure_arm(builder)
else:
printer.out("Azure classic publish")
return publish_azure_classic(builder)
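# Dispatch sketch (hypothetical builders): the presence of "blob" or
# "container" selects the Resource Manager path, otherwise the classic one.
#
#   publish_azure({"storageAccount": "sa", "region": "westeurope"})  # classic
#   publish_azure({"storageAccount": "sa", "container": "c",
#                  "blob": "b", "displayName": "img"})               # ARM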
def publish_azure_classic(builder):
pimage = PublishImageAzure()
# doing field verification
if not "storageAccount" in builder:
printer.out("storageAccount in Microsoft Azure not found", printer.ERROR)
return
if not "region" in builder:
printer.out("region in Microsoft Azure not found", printer.ERROR)
return
pimage.storageAccount = builder["storageAccount"]
pimage.region = builder["region"]
return pimage
def publish_azure_arm(builder):
pimage = PublishImageAzureResourceManager()
if not "storageAccount" in builder:
printer.out("storageAccount not found", printer.ERROR)
return
if not "container" in builder:
printer.out("container not found", printer.ERROR)
return
if not "blob" in builder:
printer.out("blob not found", printer.ERROR)
return
if not "displayName" in builder:
printer.out("displayName not found", printer.ERROR)
return
if "resourceGroup" in builder:
pimage.resourceGroup = builder["resourceGroup"]
pimage.storageAccount = builder["storageAccount"]
pimage.container = builder["container"]
pimage.blob = builder["blob"]
pimage.displayName = builder["displayName"]
return pimage
def publish_flexiant(builder):
pimage = PublishImageFlexiant()
# doing field verification
if not "diskOffering" in builder:
printer.out("diskOffering in flexiant builder not found", printer.ERROR)
return
if not "virtualDatacenterName" in builder:
printer.out("virtualDatacenterName in flexiant builder not found", printer.ERROR)
return
if not "machineImageName" in builder:
printer.out("machineImageName in flexiant builder not found", printer.ERROR)
return
pimage.diskOffering = builder["diskOffering"]
pimage.virtualDatacenterName = builder["virtualDatacenterName"]
pimage.machineImageName = builder["machineImageName"]
return pimage
def publish_flexiant_kvm(pimage, builder):
return publish_flexiant(pimage, builder)
def publish_flexiant_ova(pimage, builder):
return publish_flexiant(pimage, builder)
def publish_flexiantraw(builder):
return publish_flexiant(builder)
def publish_abiquo(pimage, builder):
# doing field verification
if not "enterprise" in builder:
printer.out("enterprise in abiquo builder not found", printer.ERROR)
return
if not "datacenter" in builder:
printer.out("datacenter in abiquo builder not found", printer.ERROR)
return
if not "productName" in builder:
printer.out("productName in abiquo builder not found", printer.ERROR)
return
if not "category" in builder:
printer.out("category in abiquo builder not found", printer.ERROR)
return
if not "description" in builder:
printer.out("description in abiquo builder not found", printer.ERROR)
return
pimage.credAccount.datacenterName = builder["datacenter"]
pimage.credAccount.displayName = builder["productName"]
pimage.credAccount.category = builder["category"]
pimage.credAccount.organizationName = builder["enterprise"]
pimage.credAccount.description = builder["description"]
return pimage
def publish_nimbula(pimage, builder):
# doing field verification
if not "imageListName" in builder:
printer.out("imageListName in nimbula builder not found", printer.ERROR)
return
if not "imageVersion" in builder:
printer.out("imageVersion in nimbula builder not found", printer.ERROR)
return
if not "description" in builder:
printer.out("description in nimbula builder not found", printer.ERROR)
return
pimage.credAccount.imageVersion = builder["imageVersion"]
pimage.credAccount.description = builder["description"]
pimage.credAccount.listName = builder["imageListName"]
return pimage
def publish_nimbula_kvm(pimage, builder):
return publish_nimbula(pimage, builder)
def publish_nimbula_esx(pimage, builder):
return publish_nimbula(pimage, builder)
def publish_eucalyptus(pimage, builder):
# doing field verification
if not "imageName" in builder:
printer.out("imageName in Eucalyptus builder not found", printer.ERROR)
return
if not "description" in builder:
printer.out("description in Eucalyptus builder not found", printer.ERROR)
return
if not "bucket" in builder:
printer.out("bucket in Eucalyptus builder not found", printer.ERROR)
return
pimage.credAccount.displayName = builder["imageName"]
pimage.credAccount.bucket = builder["bucket"]
pimage.credAccount.description = builder["description"]
if "ramdisk" in builder and "kernelId" in builder:
pimage.credAccount.ramdiskId = builder["ramdisk"]
pimage.credAccount.kernelId = builder["kernelId"]
return pimage
def publish_eucalyptus_kvm(pimage, builder):
return publish_eucalyptus(pimage, builder)
def publish_eucalyptus_xen(pimage, builder):
return publish_eucalyptus(pimage, builder)
def publish_gce(pimage, builder):
# doing field verification
if not "computeZone" in builder:
printer.out("computeZone in GCE builder not found", printer.ERROR)
return
if not "bucketLocation" in builder:
printer.out("bucketLocation in GCE builder not found", printer.ERROR)
return
if not "bucket" in builder:
printer.out("bucket in GCE builder not found", printer.ERROR)
return
if not "projectId" in builder:
printer.out("projectId in GCE builder not found", printer.ERROR)
return
if not "storageClass" in builder:
printer.out("storageClass in GCE builder not found", printer.ERROR)
return
if not "diskNamePrefix" in builder:
printer.out("diskNamePrefix in AMI builder not found", printer.ERROR)
return
if "description" in builder:
pimage.credAccount.description = builder["description"]
pimage.credAccount.bucket = builder["bucket"]
pimage.credAccount.tenantName = builder["projectId"]
pimage.credAccount.category = builder["storageClass"]
pimage.credAccount.displayName = builder["diskNamePrefix"]
pimage.credAccount.zoneName = builder["computeZone"]
pimage.publishLocation = builder["bucketLocation"]
return pimage
def publish_outscale(pimage, builder):
# doing field verification
if not "zone" in builder:
printer.out("zone in outscale builder not found", printer.ERROR)
return
if not "description" in builder:
pimage.credAccount.description = builder["description"]
pimage.credAccount.zoneName = builder["zone"]
return pimage
def publish_k5vmdk(builder):
pimage = PublishImageK5()
# doing field verification
if not "displayName" in builder:
printer.out("displayName in k5 builder not found", printer.ERROR)
return
if not "domain" in builder:
printer.out("domain in k5 builder not found", printer.ERROR)
return
if not "project" in builder:
printer.out("project in k5 builder not found", printer.ERROR)
return
if not "region" in builder:
printer.out("region in k5 builder not found", printer.ERROR)
return
pimage.displayName = builder["displayName"]
pimage.keystoneDomain = builder["domain"]
pimage.keystoneProject = builder["project"]
pimage.publishLocation = builder["region"]
return pimage
def publish_docker(builder):
pimage = PublishImageDocker()
if not "namespace" in builder:
printer.out("namespace in Docker builder is missing", printer.ERROR)
return
if not "repositoryName" in builder:
printer.out("repositoryName in Docker builder is missing", printer.ERROR)
return
if not "tagName" in builder:
printer.out("tagName in Docker builder is missing", printer.ERROR)
return
pimage.namespace = builder["namespace"]
pimage.repositoryName = builder["repositoryName"]
pimage.tagName = builder["tagName"]
return pimage
def publish_oracleraw(builder):
pimage = PublishImageOracle()
if not "displayName" in builder:
printer.out("displayName in Oracle builder is missing", printer.ERROR)
return
if not "computeEndPoint" in builder:
printer.out("computeEndPoint in Oracle builder is missing", printer.ERROR)
return
pimage.displayName = builder["displayName"]
pimage.computeEndPoint = builder["computeEndPoint"]
return pimage
| apache-2.0 | 8,318,043,477,958,264,000 | 31.515837 | 95 | 0.695519 | false |