# Code-file dataset dump; per-record fields: repo_name, path, copies, size,
# content, license, hash, line_mean, line_max, alpha_frac, autogenerated.
# Each file below is introduced by a "# ====" header comment.
# ==== repo: wmttom/rehichao | file: rehichao.py | license: apache-2.0 ====
# -*- coding: utf-8 -*-
from intstr import IntStr
from redis import StrictRedis
redis_keyer = IntStr(
'!"#$&()+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ^_`abcdefghijklmnopqrstuvwxyz{|}~'
)
REDIS_KEY_ID = 'RedisKeyId'
REDIS_KEY = 'RedisKey'
REDIS_ID_KEY = 'RedisIdKey'
_EXIST = set()
class RedisKey(object):
def __init__(self, REDIS_CONFIG):
self.redis = StrictRedis(*REDIS_CONFIG)
def __getattr__(self, attr):
def _(name=''):
return self(attr, name)
return _
def __call__(self, attr, name=''):
key = attr+name
redis = self.redis
if key in _EXIST:
print 'REDIS KEY IS ALREADY DEFINED %s !!!'%key
_EXIST.add(key)
if redis:
_key = redis.hget(REDIS_KEY, key)
if _key is None:
id = redis.incr(REDIS_KEY_ID)
_key = redis_keyer.encode(id)
if name and "%" in name:
_key = _key+"'"+name
p = redis.pipeline()
p.hset(REDIS_KEY, key, _key)
p.hset(REDIS_ID_KEY, _key, key)
p.execute()
return self.redis, _key
if __name__ == "__main__":
import sys
if sys.getdefaultencoding() == 'ascii':
reload(sys)
sys.setdefaultencoding('utf-8')
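# Usage sketch (illustrative; not part of the original module). RedisKey
# shortens long logical key names into compact generated keys, with the
# mapping persisted in the REDIS_KEY / REDIS_ID_KEY hashes:
#
#     keyer = RedisKey(('localhost', 6379))
#     redis, short_key = keyer.user('profile')  # logical key "userprofile"
#     redis.hset(short_key, 'field', 'value')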
# ==== repo: terrai/rastercube | file: rastercube/jgrid/utils.py | license: mit ====
"""
Utility functions related to jGrid2
"""
import gdal
import numpy as np
import numpy.ma as ma
import rastercube.imutils as imutils
import rastercube.gdal_utils as gdal_utils
def fracs_for_poly_bbox_xy(header, polygon_xy):
"""
Returns fractions covered by the given polygon. This is based on the
polygon's bounding box.
"""
assert np.all([header.in_bounds_xy(p) for p in polygon_xy]), \
"Polygon not contained in jgrid"
xy_from, xy_to = polygon_xy.min(axis=0), polygon_xy.max(axis=0)
return header.fracs_for_rect_xy(xy_from, xy_to)
def fracs_for_poly_bbox_latlng(header, polygon_latlng):
poly_xy = np.array([header.latlng2xy(p) for p in polygon_latlng])
return fracs_for_poly_bbox_xy(header, poly_xy)
def load_poly_xy_from_jgrid(header, polygon_xy, **kwargs):
"""
Given a header and a polygon (*assumed* to be fully contained in the
jgrid), returns a masked array containing the jgrid data in the polygon.
The returned masked array has the shape of the polygon bounding box but
only pixels inside the polygon are unmasked
"""
assert np.all([header.in_bounds_xy(p) for p in polygon_xy]), \
"Polygon not contained in jgrid"
xy_from, xy_to = polygon_xy.min(axis=0), polygon_xy.max(axis=0)
ndvi_data = header.load_slice_xy(xy_from, xy_to, **kwargs)
poly_mask = imutils.rasterize_poly(polygon_xy - xy_from, ndvi_data.shape)
return ndvi_data, poly_mask, xy_from
def load_poly_latlng_from_jgrid(header, polygon_latlng, **kwargs):
"""
Like `load_poly_xy_from_jgrid`, but the polygon is given in latlng
"""
poly_xy = np.array([header.latlng2xy(p) for p in polygon_latlng])
return load_poly_xy_from_jgrid(header, poly_xy, **kwargs)
def load_poly_latlng_from_multi_jgrids(headers, polygon, **kwargs):
"""
    Given a set of jgrid headers, loads the given polygon from all grids
    and reprojects all of them onto the first one.
Returns:
xy_from: A single xy_from
        Followed by a list of data/mask pairs:
data0, mask0, data1, mask1, data2, mask2, ...
"""
header0 = headers[0]
data0, mask0, xy_from0 = load_poly_latlng_from_jgrid(header0, polygon,
**kwargs)
retval = [xy_from0, data0, mask0]
for _h in headers[1:]:
_data, _mask, _xy_from = load_poly_latlng_from_jgrid(_h, polygon,
**kwargs)
# only reproject if needed
if (not _h.spatialref.IsSame(header0.spatialref)) or \
(_h.geot != header0.geot):
_data, _mask = reproject_jgrid_on_jgrid(
header0, xy_from0, data0.shape,
_h, _xy_from, _data, _mask
)
retval.append(_data)
retval.append(_mask)
return retval
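# Example (illustrative only; the header and variable names are assumptions):
# for two grids, the flat return list unpacks as
#
#     xy_from, ndvi, ndvi_mask, rain, rain_mask = \
#         load_poly_latlng_from_multi_jgrids([ndvi_header, rain_header],
#                                            polygon)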
def poly_latlng_for_frac(header, frac_num):
"""
Returns the latlng polygon corresponding to a given fraction
"""
poly = [
header.xy2latlng((header.x_start(frac_num),
header.y_start(frac_num))),
header.xy2latlng((header.x_end(frac_num),
header.y_start(frac_num))),
header.xy2latlng((header.x_end(frac_num),
header.y_end(frac_num))),
header.xy2latlng((header.x_start(frac_num),
header.y_end(frac_num)))
]
return np.array(poly)
def headers_are_same_geogrid(header1, header2):
"""
Given two headers, verify that they are in the same projection with the
same geotransform and the same fraction sizes
"""
return header1.spatialref.IsSame(header2.spatialref) and \
(header1.geot == header2.geot) and \
header1.width == header2.width and \
header1.height == header2.height and \
header1.frac_width == header2.frac_width and \
header1.frac_height == header2.frac_height
def load_frac_from_multi_jgrids(headers, frac_num, **kwargs):
"""
Given a set of jgrid headers and a frac_num in headers[0], loads the
corresponding area from all headers
Returns:
xy_from: A single xy_from
        Followed by a list of data/mask pairs:
data0, mask0, data1, mask1, data2, mask2, ...
"""
header0 = headers[0]
xy_from0 = (header0.x_start(frac_num), header0.y_start(frac_num))
data0 = header0.load_frac_by_num(frac_num, **kwargs)
mask0 = np.ones((data0.shape[0], data0.shape[1]), dtype=np.bool)
frac_poly = poly_latlng_for_frac(header0, frac_num)
retval = [xy_from0, data0, mask0]
for _h in headers[1:]:
if headers_are_same_geogrid(header0, _h):
print 'Headers in same geogrid'
_data = _h.load_frac_by_num(frac_num, **kwargs)
_mask = np.ones((_data.shape[0], _data.shape[1]), dtype=np.bool)
else:
_data, _mask, _xy_from = load_poly_latlng_from_jgrid(
_h, frac_poly, **kwargs)
_data, _mask = reproject_jgrid_on_jgrid(
header0, xy_from0, data0.shape,
_h, _xy_from, _data, _mask
)
retval.append(_data)
retval.append(_mask)
return retval
def latlng_for_grid(header, xy_from, shape):
"""
For each point in the grid, computes its latlng coordinates, returning
a (shape[0], shape[1], 2) array
"""
yx = np.indices(shape)
yx[0] += xy_from[1]
yx[1] += xy_from[0]
latlng = [header.xy2latlng((x, y))
for y, x in zip(yx[0].reshape(-1), yx[1].reshape(-1))]
return np.array(latlng).reshape(shape[0], shape[1], 2)
def slice_and_reproject_to_grid(header, xy_from, grid_shape, src_ds,
interpolation='near'):
"""
    Helper function which takes a jgrid slice (Header, xy_from, grid_shape)
    and a GDAL dataset, and slices/reprojects the GDAL dataset onto the
    jgrid slice.
This is typically useful to reproject some arbitrary TIFF file on some
part of the NDVI worldgrid.
Args:
header: A jgrid3.Header
xy_from: the (x, y) at which the subgrid starts in the given header
grid_shape: the (height, width) of the subgrid
src_ds: The source GDAL dataset to reproject
        interpolation: The resampling mode: one of 'near', 'mode', 'average'
Returns:
A masked array containing the reprojected values
"""
# https://jgomezdans.github.io/gdal_notes/reprojection.html
# http://www.gdal.org/gdalwarper_8h.html#ad36462e8d5d34642df7f9ea1cfc2fec4
src_wkt = src_ds.GetProjectionRef()
nbands = src_ds.RasterCount
src_dtype = src_ds.GetRasterBand(1).DataType
# print 'src dtype : %s' % gdal.GetDataTypeName(src_dtype)
mem_drv = gdal.GetDriverByName('MEM')
dst_ds = mem_drv.Create('', grid_shape[1], grid_shape[0], nbands,
src_dtype)
dst_geo = header.geot_for_xyfrom(xy_from)
dst_ds.SetGeoTransform(dst_geo)
dst_ds.SetProjection(header.spatialref.ExportToWkt())
# NoData handling when using ReprojectImage with a MEM target ds is
# a bit tricky. See those discussions :
# https://trac.osgeo.org/gdal/ticket/6404
# http://gis.stackexchange.com/q/158503
# We have to fill each band with the nodata value before doing the
# reprojectimage because the bands are initialized with 0
ndv = None
for i in range(1, nbands + 1):
src_b = src_ds.GetRasterBand(i)
if ndv is not None and not np.isnan(ndv):
assert src_b.GetNoDataValue() == ndv, \
"All bands of the source dataset should have the same NODATA"
else:
ndv = src_b.GetNoDataValue()
dst_b = dst_ds.GetRasterBand(i)
if ndv is not None:
dst_b.SetNoDataValue(ndv)
dst_b.Fill(ndv)
if interpolation == 'near':
gdal_mode = gdal.GRA_NearestNeighbour
elif interpolation == 'mode':
gdal_mode = gdal.GRA_Mode
elif interpolation == 'average':
gdal_mode = gdal.GRA_Average
else:
raise ValueError("Invalid interpolation mode %s" % interpolation)
res = gdal.ReprojectImage(
src_ds,
dst_ds,
src_ds.GetProjectionRef(),
dst_ds.GetProjectionRef(),
gdal_mode
)
assert res == 0, 'Error reprojecting, res=%d' % res
dst_arr = dst_ds.ReadAsArray()
# GDAL ReadAsArray returns (bands, height, width) but we expect
# (height, width, bands)
if len(dst_arr.shape) == 3:
dst_arr = dst_arr.transpose(1, 2, 0)
# TODO: This assumes that the no data value is the same for all bands
if ndv is not None:
dst_arr = ma.masked_where(dst_arr == ndv, dst_arr)
else:
dst_arr = ma.asarray(dst_arr)
return dst_arr
def gdal_ds_from_jgrid_slice(header, xy_from, data):
"""
Returns a GDAL in-memory dataset that maps the provided jgrid slice.
Note that the dataset only keeps a reference to the data array.
"""
ds = gdal_utils.gdal_ds_from_array(data)
ds.SetGeoTransform(header.geot_for_xyfrom(xy_from))
ds.SetProjection(header.spatialref.ExportToWkt())
return ds
def reproject_jgrid_on_jgrid(target_header, target_xy_from, target_shape,
src_header, src_xy_from, src_data, src_mask):
"""
Reproject a source jgrid on a target jgrid
"""
data_ds = gdal_ds_from_jgrid_slice(src_header, src_xy_from, src_data)
# This requires a mask copy because GDAL doesn't support bool
# Also, GDAL ignores 0 during reproject, so add 1 to the mask here
src_mask = src_mask.astype(np.uint8) + 1
mask_ds = gdal_ds_from_jgrid_slice(src_header, src_xy_from, src_mask)
new_data = slice_and_reproject_to_grid(target_header, target_xy_from,
target_shape, data_ds)
new_mask = slice_and_reproject_to_grid(target_header, target_xy_from,
target_shape, mask_ds)
# recover the boolean mask
new_mask = new_mask > 1
return new_data, new_mask
# ==== repo: updownlife/multipleK | file: dependencies/biopython-1.65/Tests/test_Phylo_NeXML.py | license: gpl-2.0 ====
# Copyright (C) 2013 by Ben Morris ([email protected])
# based on code by Eric Talevich ([email protected])
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Unit tests for the NeXML and NeXMLIO modules.
"""
import os
import tempfile
import unittest
import Bio.Phylo as bp
from Bio.Phylo import NeXML, NeXMLIO
# Example NeXML files
nexml_files = (
'characters.xml',
'edgelabels.xml',
'meta_taxa.xml',
'meta_types.xml',
'nexml.xml',
'phenoscape.xml',
'sets.xml',
'taxa.xml',
'timetree.xml',
'tolweb.xml',
'treebase-record.xml',
'trees-uris.xml',
'trees.xml',
)
tree_counts = {
'taxa.xml': 0,
'timetree.xml': 38,
'phenoscape.xml': 0,
'nexml.xml': 0,
'meta_types.xml': 0,
'meta_taxa.xml': 0,
'trees.xml': 2,
'characters.xml': 0,
}
# Temporary file name for Writer tests below
DUMMY = tempfile.mktemp()
# ---------------------------------------------------------
# Parser tests
def _test_parse_factory(source):
"""Generate a test method for parse()ing the given source.
The generated function extracts each phylogenetic tree using the parse()
function and counts the total number of trees extracted.
"""
filename = os.path.join('NeXML/', source)
if source in tree_counts:
count = tree_counts[source]
else:
count = 1
def test_parse(self):
trees = list(bp._io.parse(filename, 'nexml'))
self.assertEqual(len(trees), count)
test_parse.__doc__ = "Parse the phylogenies in %s." % source
return test_parse
def _test_write_factory(source):
"""Tests for serialization of objects to NeXML format.
Modifies the globally defined filenames in order to run the other parser
tests on files (re)generated by NeXMLIO's own writer.
"""
filename = os.path.join('NeXML/', source)
if source in tree_counts:
count = tree_counts[source]
else:
count = 1
def test_write(self):
"""Parse, rewrite and retest an example file."""
with open(filename, 'rb') as infile:
t1 = next(NeXMLIO.Parser(infile).parse())
with open(DUMMY, 'w+b') as outfile:
NeXMLIO.write([t1], outfile)
with open(DUMMY, 'rb') as infile:
t2 = next(NeXMLIO.Parser(infile).parse())
def assert_property(prop_name):
p1 = sorted([getattr(n, prop_name) for n in t1.get_terminals() if getattr(n, prop_name)])
p2 = sorted([getattr(n, prop_name) for n in t2.get_terminals() if getattr(n, prop_name)])
self.assertEqual(p1, p2)
for prop_name in ('name', 'branch_length', 'confidence'):
assert_property(prop_name)
test_write.__doc__ = "Write and re-parse the phylogenies in %s." % source
return test_write
class ParseTests(unittest.TestCase):
"""Tests for proper parsing of example NeXML files."""
for n, ex in enumerate(nexml_files):
parse_test = _test_parse_factory(ex)
parse_test.__name__ = 'test_parse_%s' % n
setattr(ParseTests, parse_test.__name__, parse_test)
class WriterTests(unittest.TestCase):
pass
for n, ex in enumerate(nexml_files):
count = 1
if ex in tree_counts:
count = tree_counts[ex]
if count > 0:
write_test = _test_write_factory(ex)
write_test.__name__ = 'test_write_%s' % n
setattr(WriterTests, write_test.__name__, write_test)
if __name__ == '__main__':
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
# Clean up the temporary file
if os.path.exists(DUMMY):
os.remove(DUMMY)
# ==== repo: gfyoung/pandas | file: pandas/tests/strings/test_find_replace.py | license: bsd-3-clause ====
from datetime import datetime
import re
import numpy as np
import pytest
import pandas as pd
from pandas import Index, Series, _testing as tm
def test_contains():
values = np.array(
["foo", np.nan, "fooommm__foo", "mmm_", "foommm[_]+bar"], dtype=np.object_
)
values = Series(values)
pat = "mmm[_]+"
result = values.str.contains(pat)
expected = Series(np.array([False, np.nan, True, True, False], dtype=np.object_))
tm.assert_series_equal(result, expected)
result = values.str.contains(pat, regex=False)
expected = Series(np.array([False, np.nan, False, False, True], dtype=np.object_))
tm.assert_series_equal(result, expected)
values = Series(np.array(["foo", "xyz", "fooommm__foo", "mmm_"], dtype=object))
result = values.str.contains(pat)
expected = Series(np.array([False, False, True, True]))
assert result.dtype == np.bool_
tm.assert_series_equal(result, expected)
# case insensitive using regex
values = Series(np.array(["Foo", "xYz", "fOOomMm__fOo", "MMM_"], dtype=object))
result = values.str.contains("FOO|mmm", case=False)
expected = Series(np.array([True, False, True, True]))
tm.assert_series_equal(result, expected)
# case insensitive without regex
result = Series(values).str.contains("foo", regex=False, case=False)
expected = Series(np.array([True, False, True, False]))
tm.assert_series_equal(result, expected)
# mixed
mixed = Series(
np.array(
["a", np.nan, "b", True, datetime.today(), "foo", None, 1, 2.0],
dtype=object,
)
)
rs = mixed.str.contains("o")
xp = Series(
np.array(
[False, np.nan, False, np.nan, np.nan, True, np.nan, np.nan, np.nan],
dtype=np.object_,
)
)
tm.assert_series_equal(rs, xp)
rs = mixed.str.contains("o")
xp = Series([False, np.nan, False, np.nan, np.nan, True, np.nan, np.nan, np.nan])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series(np.array(["foo", np.nan, "fooommm__foo", "mmm_"], dtype=np.object_))
pat = "mmm[_]+"
result = values.str.contains(pat)
expected = Series(np.array([False, np.nan, True, True], dtype=np.object_))
tm.assert_series_equal(result, expected)
result = values.str.contains(pat, na=False)
expected = Series(np.array([False, False, True, True]))
tm.assert_series_equal(result, expected)
values = Series(np.array(["foo", "xyz", "fooommm__foo", "mmm_"], dtype=np.object_))
result = values.str.contains(pat)
expected = Series(np.array([False, False, True, True]))
assert result.dtype == np.bool_
tm.assert_series_equal(result, expected)
def test_contains_for_object_category():
# gh 22158
# na for category
values = Series(["a", "b", "c", "a", np.nan], dtype="category")
result = values.str.contains("a", na=True)
expected = Series([True, False, False, True, True])
tm.assert_series_equal(result, expected)
result = values.str.contains("a", na=False)
expected = Series([True, False, False, True, False])
tm.assert_series_equal(result, expected)
# na for objects
values = Series(["a", "b", "c", "a", np.nan])
result = values.str.contains("a", na=True)
expected = Series([True, False, False, True, True])
tm.assert_series_equal(result, expected)
result = values.str.contains("a", na=False)
expected = Series([True, False, False, True, False])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", [None, "category"])
@pytest.mark.parametrize("null_value", [None, np.nan, pd.NA])
@pytest.mark.parametrize("na", [True, False])
def test_startswith(dtype, null_value, na):
# add category dtype parametrizations for GH-36241
values = Series(
["om", null_value, "foo_nom", "nom", "bar_foo", null_value, "foo"],
dtype=dtype,
)
result = values.str.startswith("foo")
exp = Series([False, np.nan, True, False, False, np.nan, True])
tm.assert_series_equal(result, exp)
result = values.str.startswith("foo", na=na)
exp = Series([False, na, True, False, False, na, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = np.array(
["a", np.nan, "b", True, datetime.today(), "foo", None, 1, 2.0],
dtype=np.object_,
)
rs = Series(mixed).str.startswith("f")
xp = Series([False, np.nan, False, np.nan, np.nan, True, np.nan, np.nan, np.nan])
tm.assert_series_equal(rs, xp)
@pytest.mark.parametrize("dtype", [None, "category"])
@pytest.mark.parametrize("null_value", [None, np.nan, pd.NA])
@pytest.mark.parametrize("na", [True, False])
def test_endswith(dtype, null_value, na):
# add category dtype parametrizations for GH-36241
values = Series(
["om", null_value, "foo_nom", "nom", "bar_foo", null_value, "foo"],
dtype=dtype,
)
result = values.str.endswith("foo")
exp = Series([False, np.nan, False, False, True, np.nan, True])
tm.assert_series_equal(result, exp)
result = values.str.endswith("foo", na=na)
exp = Series([False, na, False, False, True, na, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = np.array(
["a", np.nan, "b", True, datetime.today(), "foo", None, 1, 2.0],
dtype=object,
)
rs = Series(mixed).str.endswith("f")
xp = Series([False, np.nan, False, np.nan, np.nan, False, np.nan, np.nan, np.nan])
tm.assert_series_equal(rs, xp)
def test_replace():
values = Series(["fooBAD__barBAD", np.nan])
result = values.str.replace("BAD[_]*", "", regex=True)
exp = Series(["foobar", np.nan])
tm.assert_series_equal(result, exp)
result = values.str.replace("BAD[_]*", "", n=1, regex=True)
exp = Series(["foobarBAD", np.nan])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(
["aBAD", np.nan, "bBAD", True, datetime.today(), "fooBAD", None, 1, 2.0]
)
rs = Series(mixed).str.replace("BAD[_]*", "", regex=True)
xp = Series(["a", np.nan, "b", np.nan, np.nan, "foo", np.nan, np.nan, np.nan])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
result = values.str.replace(r"(?<=\w),(?=\w)", ", ", flags=re.UNICODE, regex=True)
tm.assert_series_equal(result, exp)
# GH 13438
msg = "repl must be a string or callable"
for klass in (Series, Index):
for repl in (None, 3, {"a": "b"}):
for data in (["a", "b", None], ["a", "b", "c", "ad"]):
values = klass(data)
with pytest.raises(TypeError, match=msg):
values.str.replace("a", repl)
def test_replace_callable():
# GH 15055
values = Series(["fooBAD__barBAD", np.nan])
# test with callable
repl = lambda m: m.group(0).swapcase()
result = values.str.replace("[a-z][A-Z]{2}", repl, n=2, regex=True)
exp = Series(["foObaD__baRbaD", np.nan])
tm.assert_series_equal(result, exp)
# test with wrong number of arguments, raising an error
p_err = (
r"((takes)|(missing)) (?(2)from \d+ to )?\d+ "
r"(?(3)required )positional arguments?"
)
repl = lambda: None
with pytest.raises(TypeError, match=p_err):
values.str.replace("a", repl)
repl = lambda m, x: None
with pytest.raises(TypeError, match=p_err):
values.str.replace("a", repl)
repl = lambda m, x, y=None: None
with pytest.raises(TypeError, match=p_err):
values.str.replace("a", repl)
# test regex named groups
values = Series(["Foo Bar Baz", np.nan])
pat = r"(?P<first>\w+) (?P<middle>\w+) (?P<last>\w+)"
repl = lambda m: m.group("middle").swapcase()
result = values.str.replace(pat, repl, regex=True)
exp = Series(["bAR", np.nan])
tm.assert_series_equal(result, exp)
def test_replace_compiled_regex():
# GH 15446
values = Series(["fooBAD__barBAD", np.nan])
# test with compiled regex
pat = re.compile(r"BAD_*")
result = values.str.replace(pat, "", regex=True)
exp = Series(["foobar", np.nan])
tm.assert_series_equal(result, exp)
result = values.str.replace(pat, "", n=1, regex=True)
exp = Series(["foobarBAD", np.nan])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(
["aBAD", np.nan, "bBAD", True, datetime.today(), "fooBAD", None, 1, 2.0]
)
rs = Series(mixed).str.replace(pat, "", regex=True)
xp = Series(["a", np.nan, "b", np.nan, np.nan, "foo", np.nan, np.nan, np.nan])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
pat = re.compile(r"(?<=\w),(?=\w)", flags=re.UNICODE)
result = values.str.replace(pat, ", ")
tm.assert_series_equal(result, exp)
# case and flags provided to str.replace will have no effect
# and will produce warnings
values = Series(["fooBAD__barBAD__bad", np.nan])
pat = re.compile(r"BAD_*")
with pytest.raises(ValueError, match="case and flags cannot be"):
result = values.str.replace(pat, "", flags=re.IGNORECASE)
with pytest.raises(ValueError, match="case and flags cannot be"):
result = values.str.replace(pat, "", case=False)
with pytest.raises(ValueError, match="case and flags cannot be"):
result = values.str.replace(pat, "", case=True)
# test with callable
values = Series(["fooBAD__barBAD", np.nan])
repl = lambda m: m.group(0).swapcase()
pat = re.compile("[a-z][A-Z]{2}")
result = values.str.replace(pat, repl, n=2)
exp = Series(["foObaD__baRbaD", np.nan])
tm.assert_series_equal(result, exp)
def test_replace_literal():
# GH16808 literal replace (regex=False vs regex=True)
values = Series(["f.o", "foo", np.nan])
exp = Series(["bao", "bao", np.nan])
result = values.str.replace("f.", "ba", regex=True)
tm.assert_series_equal(result, exp)
exp = Series(["bao", "foo", np.nan])
result = values.str.replace("f.", "ba", regex=False)
tm.assert_series_equal(result, exp)
# Cannot do a literal replace if given a callable repl or compiled
# pattern
callable_repl = lambda m: m.group(0).swapcase()
compiled_pat = re.compile("[a-z][A-Z]{2}")
msg = "Cannot use a callable replacement when regex=False"
with pytest.raises(ValueError, match=msg):
values.str.replace("abc", callable_repl, regex=False)
msg = "Cannot use a compiled regex as replacement pattern with regex=False"
with pytest.raises(ValueError, match=msg):
values.str.replace(compiled_pat, "", regex=False)
def test_match():
# New match behavior introduced in 0.13
values = Series(["fooBAD__barBAD", np.nan, "foo"])
result = values.str.match(".*(BAD[_]+).*(BAD)")
exp = Series([True, np.nan, False])
tm.assert_series_equal(result, exp)
values = Series(["fooBAD__barBAD", "BAD_BADleroybrown", np.nan, "foo"])
result = values.str.match(".*BAD[_]+.*BAD")
exp = Series([True, True, np.nan, False])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(
[
"aBAD_BAD",
np.nan,
"BAD_b_BAD",
True,
datetime.today(),
"foo",
None,
1,
2.0,
]
)
rs = Series(mixed).str.match(".*(BAD[_]+).*(BAD)")
xp = Series([True, np.nan, True, np.nan, np.nan, False, np.nan, np.nan, np.nan])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# na GH #6609
res = Series(["a", 0, np.nan]).str.match("a", na=False)
exp = Series([True, False, False])
tm.assert_series_equal(exp, res)
res = Series(["a", 0, np.nan]).str.match("a")
exp = Series([True, np.nan, np.nan])
tm.assert_series_equal(exp, res)
values = Series(["ab", "AB", "abc", "ABC"])
result = values.str.match("ab", case=False)
expected = Series([True, True, True, True])
tm.assert_series_equal(result, expected)
def test_fullmatch():
# GH 32806
values = Series(["fooBAD__barBAD", "BAD_BADleroybrown", np.nan, "foo"])
result = values.str.fullmatch(".*BAD[_]+.*BAD")
exp = Series([True, False, np.nan, False])
tm.assert_series_equal(result, exp)
# Make sure that the new string arrays work
string_values = Series(
["fooBAD__barBAD", "BAD_BADleroybrown", np.nan, "foo"], dtype="string"
)
result = string_values.str.fullmatch(".*BAD[_]+.*BAD")
# Result is nullable boolean with StringDtype
string_exp = Series([True, False, np.nan, False], dtype="boolean")
tm.assert_series_equal(result, string_exp)
values = Series(["ab", "AB", "abc", "ABC"])
result = values.str.fullmatch("ab", case=False)
expected = Series([True, True, False, False])
tm.assert_series_equal(result, expected)
def test_findall():
values = Series(["fooBAD__barBAD", np.nan, "foo", "BAD"])
result = values.str.findall("BAD[_]*")
exp = Series([["BAD__", "BAD"], np.nan, [], ["BAD"]])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(
[
"fooBAD__barBAD",
np.nan,
"foo",
True,
datetime.today(),
"BAD",
None,
1,
2.0,
]
)
rs = Series(mixed).str.findall("BAD[_]*")
xp = Series(
[
["BAD__", "BAD"],
np.nan,
[],
np.nan,
np.nan,
["BAD"],
np.nan,
np.nan,
np.nan,
]
)
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
def test_find():
values = Series(["ABCDEFG", "BCDEFEF", "DEFGHIJEF", "EFGHEF", "XXXX"])
result = values.str.find("EF")
tm.assert_series_equal(result, Series([4, 3, 1, 0, -1]))
expected = np.array([v.find("EF") for v in values.values], dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind("EF")
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind("EF") for v in values.values], dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find("EF", 3)
tm.assert_series_equal(result, Series([4, 3, 7, 4, -1]))
expected = np.array([v.find("EF", 3) for v in values.values], dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind("EF", 3)
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind("EF", 3) for v in values.values], dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find("EF", 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.find("EF", 3, 6) for v in values.values], dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind("EF", 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.rfind("EF", 3, 6) for v in values.values], dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with pytest.raises(TypeError, match="expected a string object, not int"):
result = values.str.find(0)
with pytest.raises(TypeError, match="expected a string object, not int"):
result = values.str.rfind(0)
def test_find_nan():
values = Series(["ABCDEFG", np.nan, "DEFGHIJEF", np.nan, "XXXX"])
result = values.str.find("EF")
tm.assert_series_equal(result, Series([4, np.nan, 1, np.nan, -1]))
result = values.str.rfind("EF")
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find("EF", 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.rfind("EF", 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find("EF", 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
result = values.str.rfind("EF", 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
def test_translate():
def _check(result, expected):
if isinstance(result, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
for klass in [Series, Index]:
s = klass(["abcdefg", "abcc", "cdddfg", "cdefggg"])
table = str.maketrans("abc", "cde")
result = s.str.translate(table)
expected = klass(["cdedefg", "cdee", "edddfg", "edefggg"])
_check(result, expected)
# Series with non-string values
s = Series(["a", "b", "c", 1.2])
expected = Series(["c", "d", "e", np.nan])
result = s.str.translate(table)
tm.assert_series_equal(result, expected)
def test_contains_moar():
# PR #1179
s = Series(["A", "B", "C", "Aaba", "Baca", "", np.nan, "CABA", "dog", "cat"])
result = s.str.contains("a")
expected = Series(
[False, False, False, True, True, False, np.nan, False, False, True]
)
tm.assert_series_equal(result, expected)
result = s.str.contains("a", case=False)
expected = Series(
[True, False, False, True, True, False, np.nan, True, False, True]
)
tm.assert_series_equal(result, expected)
result = s.str.contains("Aa")
expected = Series(
[False, False, False, True, False, False, np.nan, False, False, False]
)
tm.assert_series_equal(result, expected)
result = s.str.contains("ba")
expected = Series(
[False, False, False, True, False, False, np.nan, False, False, False]
)
tm.assert_series_equal(result, expected)
result = s.str.contains("ba", case=False)
expected = Series(
[False, False, False, True, True, False, np.nan, True, False, False]
)
tm.assert_series_equal(result, expected)
def test_contains_nan():
# PR #14171
s = Series([np.nan, np.nan, np.nan], dtype=np.object_)
result = s.str.contains("foo", na=False)
expected = Series([False, False, False], dtype=np.bool_)
tm.assert_series_equal(result, expected)
result = s.str.contains("foo", na=True)
expected = Series([True, True, True], dtype=np.bool_)
tm.assert_series_equal(result, expected)
result = s.str.contains("foo", na="foo")
expected = Series(["foo", "foo", "foo"], dtype=np.object_)
tm.assert_series_equal(result, expected)
result = s.str.contains("foo")
expected = Series([np.nan, np.nan, np.nan], dtype=np.object_)
tm.assert_series_equal(result, expected)
def test_replace_moar():
# PR #1179
s = Series(["A", "B", "C", "Aaba", "Baca", "", np.nan, "CABA", "dog", "cat"])
result = s.str.replace("A", "YYY")
expected = Series(
["YYY", "B", "C", "YYYaba", "Baca", "", np.nan, "CYYYBYYY", "dog", "cat"]
)
tm.assert_series_equal(result, expected)
result = s.str.replace("A", "YYY", case=False)
expected = Series(
[
"YYY",
"B",
"C",
"YYYYYYbYYY",
"BYYYcYYY",
"",
np.nan,
"CYYYBYYY",
"dog",
"cYYYt",
]
)
tm.assert_series_equal(result, expected)
result = s.str.replace("^.a|dog", "XX-XX ", case=False, regex=True)
expected = Series(
[
"A",
"B",
"C",
"XX-XX ba",
"XX-XX ca",
"",
np.nan,
"XX-XX BA",
"XX-XX ",
"XX-XX t",
]
)
tm.assert_series_equal(result, expected)
def test_match_findall_flags():
data = {
"Dave": "[email protected]",
"Steve": "[email protected]",
"Rob": "[email protected]",
"Wes": np.nan,
}
data = Series(data)
pat = r"([A-Z0-9._%+-]+)@([A-Z0-9.-]+)\.([A-Z]{2,4})"
result = data.str.extract(pat, flags=re.IGNORECASE, expand=True)
assert result.iloc[0].tolist() == ["dave", "google", "com"]
result = data.str.match(pat, flags=re.IGNORECASE)
assert result[0]
result = data.str.fullmatch(pat, flags=re.IGNORECASE)
assert result[0]
result = data.str.findall(pat, flags=re.IGNORECASE)
assert result[0][0] == ("dave", "google", "com")
result = data.str.count(pat, flags=re.IGNORECASE)
assert result[0] == 1
with tm.assert_produces_warning(UserWarning):
result = data.str.contains(pat, flags=re.IGNORECASE)
assert result[0]
# ==== repo: Rosuav/shed | file: BL1_find_items.py | license: mit ====
import argparse
import os.path
import struct
import inspect
from dataclasses import dataclass # ImportError? Upgrade to Python 3.7 or pip install dataclasses
class FunctionArg:
def __init__(self, desc="keyword", other_args=0):
self.desc = desc
self.functions = {}
self.other_args = other_args # Number of args given to the function that aren't from the cmdline
def __repr__(self): return self.desc
def __call__(self, func_or_arg):
if isinstance(func_or_arg, str):
# We've been given a command-line argument (argparse mode).
fn, *args = func_or_arg.split(":")
if fn not in self.functions:
raise argparse.ArgumentTypeError("Unrecognized %r - valid: %s"
% (fn, ', '.join(sorted(self.functions))))
func = self.functions[fn]
max = func.__code__.co_argcount - self.other_args
min = max - len(func.__defaults__ or ())
if func.__code__.co_flags & inspect.CO_VARARGS:
max = float("inf")
if min == max != len(args):
# Special case some messages for readability
if min == 0:
raise argparse.ArgumentTypeError("%s does not take arguments" % fn)
raise argparse.ArgumentTypeError("%s%s requires exactly %d arg%s" %
(fn, ":X" * min, min, "s" * (min!=1)))
if len(args) < min:
raise argparse.ArgumentTypeError("%s requires at least %d arg%s" % (fn, min, "s" * (min!=1)))
if len(args) > max:
raise argparse.ArgumentTypeError("%s requires at most %d arg%s" % (fn, max, "s" * (max!=1)))
return func, args
# Else assume we've been given a function to retain (decorator mode)
self.functions[func_or_arg.__name__] = func_or_arg
return func_or_arg
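# Usage sketch for FunctionArg (illustrative): used as a decorator it
# registers a function by name; used as an argparse `type=` callable it
# parses "name:arg1:arg2" from the command line into (function, [args]).
# For example, "-l eq:1 type:Shield" yields [(eq, ['1']), (type, ['Shield'])],
# and each (filter, filterargs) pair is later applied as filter(item, *args).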
loot_filter = FunctionArg("filter", 1)
@loot_filter
def type(item, type): return type in item.type
del type # I want the filter to be called type, but not to override type()
@loot_filter
def eq(item, slot="any"):
if slot == "any": return item.slot > 0 # By default, show everything that's equipped.
return int(slot) == item.slot # Or say "eq:3" to select item in equip slot 3.
synthesizer = FunctionArg("synth", 1)
@synthesizer
def money(savefile): savefile.money += 5000000
@synthesizer
def burnammo(savefile):
for ammo in savefile.ammo:
if ammo.amount > 10: ammo.amount -= 1.0
@synthesizer
def fix_prison_jump(savefile):
"""Fix a weird glitch where you can't get into the Knoxx DLC prison
Marks the first goal (finding the spot to jump) as done. The glitch
had the first goal not done but the second done, and the mission
wouldn't progress.
"""
for block in savefile.missions:
for mission in block.missions:
if mission.progress == 1:
if mission.mission == "dlc3_MainMissions.MainMissions.M_dlc3_PrisonInfiltrate":
mission.goals[0] = ('None', 1)
print(mission)
@synthesizer
def create_shields(savefile):
"""Synthesize a bunch of similar shields to compare Quality values"""
for quality in range(6):
savefile.items.append(Item(
grade="gd_itemgrades.Gear.ItemGrade_Gear_Shield",
type='gd_shields.A_Item.Item_Shield',
pieces=[
"gd_shields.Body.body3b_power",
"gd_shields.LeftSide.leftside4",
"gd_shields.RightSide.rightside4",
"gd_shields.ManufacturerMaterials.Material_Torgue_3",
],
mfg='gd_manufacturers.Manufacturers.Torgue',
prefix="gd_shields.Prefix.Prefix_Max4_Impenetrable",
title="gd_shields.Title.Title_Torgue3_MachoShield",
unknown=1, quality=quality, level=0, slot=0, junk=0, locked=0,
))
@synthesizer
def create_class_mods(savefile, who):
# TODO: Deduplicate
if who.casefold() == "brick":
for quality in range(3, 6):
savefile.items.append(Item(
grade="gd_itemgrades.Gear.ItemGrade_Gear_ComDeck_Brick",
type='gd_CommandDecks.A_Item.Item_CommandDeck_Brick',
pieces=[
"gd_CommandDecks.Body_Brick.Brick_Warmonger",
"gd_CommandDecks.LeftSide.leftside6b",
"gd_CommandDecks.RightSide.rightside6",
"gd_CommandDecks.ManufacturerMaterials.Material_Torgue_2",
],
mfg='gd_manufacturers.Manufacturers.Torgue',
prefix="gd_CommandDecks.Prefix.Prefix_Brick_Warmonger",
title="gd_CommandDecks.Title.Title_ComDeckBrick",
unknown=1, quality=quality, level=0, slot=0, junk=0, locked=0,
))
elif who.casefold() == "lilith":
for quality in range(3, 6):
savefile.items.append(Item(
grade="gd_itemgrades.Gear.ItemGrade_Gear_ComDeck_Lilith",
type='gd_CommandDecks.A_Item.Item_CommandDeck_Lilith',
pieces=[
"gd_CommandDecks.Body_Lilith.Lilith_Mercenary",
"gd_CommandDecks.LeftSide.leftside6",
"gd_CommandDecks.RightSide.rightside6",
"gd_CommandDecks.ManufacturerMaterials.Material_Dahl_3",
],
mfg='gd_manufacturers.Manufacturers.Dahl',
prefix="gd_CommandDecks.Prefix.Prefix_Lilith_Mercenary",
title="gd_CommandDecks.Title.Title_ComDeckLilith",
unknown=1, quality=quality, level=0, slot=0, junk=0, locked=0,
))
else: raise ValueError("Dunno who you want class mods for - %r" % who)
@synthesizer
def create_cmod_variants(savefile):
import itertools
for quality, left, mfg, mat in itertools.product(range(3, 6), ["leftside6", "leftside6c"], ["Pangolin", "Maliwan"], "23"):
savefile.items.append(Item(
grade="gd_itemgrades.Gear.ItemGrade_Gear_ComDeck_Mordecai",
type='gd_CommandDecks.A_Item.Item_CommandDeck_Mordecai',
pieces=[
"gd_CommandDecks.Body_Mordecai.Mordecai_Survivor",
"gd_CommandDecks.LeftSide." + left,
"gd_CommandDecks.RightSide.rightside6",
"gd_CommandDecks.ManufacturerMaterials.Material_%s_%s" % (mfg, mat),
],
mfg='gd_manufacturers.Manufacturers.' + mfg,
prefix="gd_CommandDecks.Prefix.Prefix_Mordecai_Survivor",
title="gd_CommandDecks.Title.Title_ComDeckMordecai",
unknown=1, quality=quality, level=0, slot=0, junk=0, locked=0,
))
@synthesizer
def boost_weapons(savefile):
newweaps = []
for weapon in savefile.weapons:
if weapon.slot:
for quality in range(weapon.quality, 6):
newweap = Weapon(**vars(weapon))
newweap.quality = quality
newweap.slot = 0
# print(newweap)
newweaps.append(newweap)
savefile.weapons.extend(newweaps) # Don't change the list while we're iterating over it
class Consumable:
"""Like a bytes/str object but can be consumed a few bytes/chars at a time"""
def __init__(self, data):
self.data = data
self.eaten = 0
self.left = len(data)
def get(self, num):
"""Destructively read the next num bytes/chars of data"""
if num > self.left: raise ValueError("Out of data!")
ret = self.data[self.eaten : self.eaten + num]
self.eaten += num
self.left -= num
return ret
# Read integers, and some length-preceded string formats, assuming we have
# a collection of bytes here. Don't call these if the original data was text.
def int(self, size=4, order="little"): return int.from_bytes(self.get(size), order)
def hollerith(self, size=4, order="little"): return self.get(self.int(size, order))
def str(self): return self.hollerith().rstrip(b"\x00").decode("ascii")
def __len__(self): return self.left
def peek(self): return self.data[self.eaten:] # Doubles as "convert to bytes/str"
@classmethod
def from_bits(cls, data):
"""Create a bitfield consumable from packed eight-bit data"""
return cls(''.join(format(x, "08b") for x in data))
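# Quick illustration of Consumable (assumed bytes, not from a real save):
#
#     c = Consumable(b"\x03\x00\x00\x00abc")
#     c.hollerith()   # -> b"abc" (32-bit length prefix, then the payload)
#     len(c)          # -> 0, everything has been eaten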
class SaveFileFormatError(Exception): pass
def decode_dataclass(data, typ):
if hasattr(typ, "__dataclass_fields__"):
values = {}
for field in typ.__dataclass_fields__.values():
values[field.name] = decode_dataclass(data, field.type)
return typ(**values)
if isinstance(typ, list):
return [decode_dataclass(data, typ[0]) for _ in range(data.int())]
if isinstance(typ, tuple):
return tuple(decode_dataclass(data, t) for t in typ)
if isinstance(typ, int):
return data.get(typ)
if isinstance(typ, bytes):
ret = data.get(len(typ))
assert ret == typ
return ret
if typ is int:
return data.int()
if isinstance(typ, range):
# Bounded integer
l = len(typ)
ret = data.int(1 if l <= 256 else 2 if l <= 65536 else 4)
# TODO: Support signed numbers eg range(-128, 127)
assert ret in typ
return ret
if typ is bytes:
return data.hollerith()
if typ is str:
return data.str()
if typ is float:
return struct.unpack("f", data.get(4))[0]
if typ is print:
print(data.peek()[:16], len(data))
return None
raise TypeError("need to implement: %r %r" % (type(typ), typ))
def encode_dataclass(data, typ):
if hasattr(typ, "__dataclass_fields__"):
ret = []
for field in typ.__dataclass_fields__.values():
ret.append(encode_dataclass(getattr(data, field.name), field.type))
return b"".join(ret)
if isinstance(typ, list):
return encode_dataclass(len(data), int) + b"".join(encode_dataclass(val, typ[0]) for val in data)
if isinstance(typ, tuple):
return b"".join(encode_dataclass(val, t) for val, t in zip(data, typ))
if isinstance(typ, int):
assert len(data) == typ
return data
if isinstance(typ, bytes):
assert data == typ
return data
if typ is int:
return data.to_bytes(4, "little")
if isinstance(typ, range):
# Bounded integer
l = len(typ)
assert data in typ
# TODO as above, signed integers
return data.to_bytes(1 if l <= 256 else 2 if l <= 65536 else 4, "little")
if typ is bytes:
return encode_dataclass(len(data), int) + data
if typ is str:
return encode_dataclass(data.encode("ascii") + b"\x00", bytes)
if typ is float:
return struct.pack("f", data)
if typ is print:
return b""
    raise TypeError("need to implement: %r %r" % (type(typ), typ))
# For anyone reading this file to try to understand the save file format:
# Firstly, be sure to also read the WillowTree# source code, which is more
# comprehensive but less comprehensible than this - you can find it at
# http://willowtree.sourceforge.net. Everything in here came either from my
# own explorations with a hex editor or from reading the WillowTree# source.
# Secondly, these classes represent different structures within the file;
# fields are laid out sequentially with no padding.
# Annotation Meaning
# int 32-bit unsigned integer
# float 32-bit IEEE binary floating-point
# bytes Hollerith byte string consisting of a 32-bit length followed
# by that many bytes of raw data
# str Hollerith text string: 32-bit length, that many bytes of ASCII
# data, then b"\0" (included in the length)
# b"..." Exactly those bytes. Used for signatures etc.
# range(N) Integer within the given range, taking up the minimum space
# (so a range(65536) is a 16-bit integer)
# AnyClassName One instance of the named class (potentially recursive)
# (x,y,z) The given values in that exact order. Identical in the file to
# having the same three annotations separately identified.
# [x] Hollerith array: 32-bit length, then that many instances of
# whatever is in the list (so [int] would make an array of ints).
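# A small worked example of the annotation scheme (illustrative only, not a
# real structure from the save format):
#
#     @dataclass
#     class Example:
#         sig: b"XY"        # exactly these two bytes, checked on decode
#         count: int        # 32-bit little-endian unsigned integer
#         name: str         # Hollerith string including its NUL terminator
#         values: [float]   # 32-bit count, then that many 32-bit floats
#
# decode_dataclass(Consumable(raw), Example) reads the fields in order, and
# encode_dataclass(obj, Example) writes them back byte-for-byte.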
@dataclass
class BankString:
mask: 1 # Always seems to use a mask of 32 or 0 (b" " or b"\0")
# Segments are usually just normal strings (complete with their NUL
# termination included in the length), but can have a length of zero.
segments: (bytes,)*6
def __repr__(self):
return repr(".".join(s.rstrip(b"\x00").decode("ascii") for s in self.segments if s))
@dataclass
class Skill:
name: str
level: int
progress: int # Possibly progress to next level?? Applies only to proficiencies.
state: int # Always either -1 or 1
@dataclass
class Ammo:
cat: str
pool: str
amount: float # WHY??? Ammo regen maybe???
capacity: int # 0 = base capacity, 1 = first upgrade, etc
@dataclass
class Item: # Can't find item level
grade: str
type: str
pieces: (str,) * 4
mfg: str
prefix: str
title: str
unknown: int
quality: range(65536)
level: range(65536)
slot: int # 1 if equipped or 0 for backpack
junk: int
locked: int
@dataclass
class Weapon:
grade: str
mfg: str
type: str
pieces: (str,) * 8
material: str
prefix: str
title: str
ammo: int
quality: range(65536)
level: range(65536)
slot: int # 1-4 or 0 for backpack
junk: int
locked: int
@dataclass
class BankItem: # Bank items have things in a different order. Weird.
type: str
grade: str
mfg: str
pieces: (str,) * 4
prefix: str
title: str
@dataclass
class Mission:
mission: str
progress: int # 1 = active, 2 = complete, 4 = turned in. Not sure if bitwise or enumeration.
unknown: (int, int)
goals: [(str, int)] # Always 0 of these for done missions
@dataclass
class MissionBlock:
id: int # Sequentially numbered blocks
current_mission: str # I think? Maybe?
missions: [Mission]
@dataclass
class Challenges:
outer_length: b"\x43\x05\0\0" # Length of this entire structure (not counting itself)
id: b"\3\0\0\0"
inner_length: b"\x3b\x05\0\0" # Length of the rest of the structure. Yes, exactly 8 less than outer_length.
@dataclass
class Challenge:
id: range(65536)
type: range(256) # Either 1 or 5, usually 1
value: int
count: b"\xbf\0" # Number of entries - it's 16-bit but otherwise same as saying [Challenge]
challenges: (Challenge,) * 191
@dataclass
class Savefile:
sig: b"WSG" # If it's not, this might be an Xbox save file
ver: b"\2\0\0\0" # If it's not, this might be a big-endian PS3 save file
type: b"PLYR"
revision: int
cls: str
level: int
xp: int
zeroes1: bytes(8) # Unspent skill points?
money: int
finished_once: int # 1 if you've finished the first playthrough
skills: [Skill]
vehicle_info: (int,) * 4 # Vehicle info
ammo: [Ammo]
items: [Item]
backpacksize: int
weaponslots: int
weapons: [Weapon]
challenges: Challenges
fasttravels: [str] # Doesn't include DLCs that have yet to be tagged up
last_location: str # You'll spawn at this location
zeroes4: bytes(12)
unknown7: int
zeroes5: bytes(4)
savefile_index: int # Possibly needs to correspond to the file name??
unknown8: b"\x27\0\0\0"
unknown8a: int # Higher on more-experienced players, up to 45 on completion of main plot
missions: [MissionBlock]
playtime: int
timestamp: str # Last saved? I think?
name: str
colours: (int, int, int)
enhancedblock: 0x55 # ???
unknown10: int
promocodes: [int]
promocodes_new: [int]
echo_recordings: [(int, [(str, int, int)])] # No idea what the ints mean, probably flags about having heard them or something
dlc_block_len: int # Total length of all the DLC blocks (up to just before zeroes6)
bank_sig: b"\x34\x12\x21\x43"
bank_block_len: int # == 5 + len(encoded(bank_weapons))
unknown12: b"\x02"
bank_capacity: int
bank_weapons: [(1, BankString, BankString, BankString, int, (BankString,)*11, bytes(7), 5, int)]
unknown13: 42
dlc_items: [Item] # DLC-only items??
dlc_weapons: [Weapon] # Ditto
unknown99: (int,) * 6
zeroes6: bytes(80)
def parse_savefile(fn):
with open(fn, "rb") as f: data = Consumable(f.read())
savefile = decode_dataclass(data, Savefile)
assert savefile.last_location in savefile.fasttravels
print("%s (level %d %s, $%d)" % (savefile.name, savefile.level, savefile.cls.split("_")[-1], savefile.money))
if args.loot_filter is not None:
for weapon in sorted(savefile.weapons + savefile.dlc_weapons, key=lambda w: w.slot or 5):
for filter, filterargs in args.loot_filter:
if not filter(weapon, *filterargs): break
else:
print("%d: [%d-%d] %s %s" % (weapon.slot, weapon.level, weapon.quality, weapon.prefix.split(".")[-1], weapon.title.split(".")[-1]))
for item in sorted(savefile.items + savefile.dlc_items, key=lambda w: w.slot or 5):
for filter, filterargs in args.loot_filter:
if not filter(item, *filterargs): break
else:
print("%d: [%d-%d] %s %s" % (item.slot, item.level, item.quality, item.prefix.split(".")[-1], item.title.split(".")[-1]))
# print(", ".join(hex(x) for x in savefile.unknown13))
# print(*savefile.bank_weapons, sep="\n")
print(savefile.bank_block_len, savefile.unknown12, savefile.bank_capacity)
print(savefile.bank_weapons)
assert len(data) == 0
assert encode_dataclass(savefile, Savefile) == data.data
if args.synth is not None:
savefile.name = "PATCHED"
for synth, synthargs in args.synth: synth(savefile, *synthargs)
synthesized = encode_dataclass(savefile, Savefile)
with open(os.path.basename(fn), "wb") as f: f.write(synthesized)
return ""
def main(args):
# TODO: Support the non-GOTY version too?
	# TODO: Locate paths case insensitively in case there are differences
# GOTY non-enhanced: /steam/steamapps/compatdata/8980/pfx/drive_c/users/steamuser/My Documents/my games/borderlands/savedata
dir = os.path.expanduser(args.path + "/steam/steamapps/compatdata/729040/pfx/drive_c/users/steamuser/My Documents/My Games/Borderlands Game of the Year/Binaries/SaveData")
for fn in sorted(os.listdir(dir)):
if not fn.endswith(".sav"): continue
print(fn, end="... ")
try: print(parse_savefile(os.path.join(dir, fn)))
except SaveFileFormatError as e: print(e.args[0])
print()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Borderlands 1 save file reader")
parser.add_argument("--path", help="Set path to Steam", default="~/.steam")
# parser.add_argument("--pieces", help="Show the individual pieces inside weapons/items", action="store_true")
# parser.add_argument("--raw", help="Show the raw details of weapons/items (spammy - use loot filters)", action="store_true")
parser.add_argument("--synth", help="Synthesize a modified save file", type=synthesizer, nargs="*")
parser.add_argument("-l", "--loot-filter", help="Show loot, optionally filtered to only what's interesting", type=loot_filter, nargs="*")
# parser.add_argument("-f", "--file", help="Process only one save file")
args = parser.parse_args()
print(args)
main(args)
# ==== repo: j-be/laundry-dudes | file: server/laundrydude-server.py | license: mit ====
#!flask/bin/python
import datetime
import os
import time
import smtplib
from email.mime.text import MIMEText
import domain
from sqlobject import SQLObjectNotFound
from flask import Flask, request, jsonify, abort
LED_THRESHOLD = 600
data_types = None
app = Flask(__name__)
washer_state = None
# The original referenced an undefined `password` in sendMail(); reading the
# SMTP password from the environment here is an assumption.
password = os.environ.get('LAUNDRYDUDE_SMTP_PASSWORD', '')
def _getTimeOfDay(dt):
return "%02d.%02d, %02d:%02d" % (dt.day, dt.month, dt.hour, dt.minute)
def changeState(new_state):
global washer_state
if new_state == washer_state.value:
return;
washer_state = domain.State(value=new_state)
if new_state == 3:
user = getCurrentUser()
if user is not None:
sendMail(user.email, "[LaundryDude] Washer almost done...",
"Hi %s,\n\nthe machine is spin-drying - your laundry should be "
"done soon.\n\nKind regards,\nyour LaundryDudes" % user.name)
if new_state == 4:
user = getCurrentUser()
if user is not None:
sendMail(user.email, "[LaundryDude] Washer done!",
"Hi %s,\n\nyour laundry is done!\n\nKind regards,\n"
"your LaundryDudes" % user.name)
def sendMail(to,subject,text):
fromAddress = '[email protected]'
msg = MIMEText(text)
msg['Subject'] = subject
msg['From'] = fromAddress
msg['To'] = to
# Send the mail
server = smtplib.SMTP('smtp.gmail.com:587')
server.starttls()
server.login(fromAddress, password)
server.sendmail(fromAddress, to, msg.as_string())
server.quit()
def getNextReservation():
try:
now = time.time()
select_clause = 'start > %s' % now
reservation = domain.Reservation.select(select_clause).orderBy('start').limit(1)
reservation = reservation.getOne()
return reservation
except SQLObjectNotFound:
return None
@app.route('/laundrydude/')
def index():
return app.send_static_file('index.html')
@app.route('/laundrydude/<path:file_name>')
def static_html_proxy(file_name):
return app.send_static_file(file_name)
@app.route('/laundrydude/css/<path:path>')
def static_css_proxy(path):
return app.send_static_file('css/' + path)
@app.route('/laundrydude/js/<path:path>')
def static_js_proxy(path):
return app.send_static_file('js/' + path)
@app.route('/laundrydude/img/<path:path>')
def static_img_proxy(path):
print path
return app.send_static_file('img/' + path)
@app.route('/laundrydude/api/data')
def get_data():
values = {}
for data_type in data_types.keys():
domain_cls = data_types[data_type]
values[data_type] = [(_getTimeOfDay(row.timestamp), row.value)
for row in domain_cls.select()]
return jsonify(values), 200
def getCurrentUser():
try:
rfid_tag = domain.RfidCard.select().orderBy('-id').limit(1).getOne().value
return domain.User.select('rfid == "' + rfid_tag + '"').getOne()
except SQLObjectNotFound:
return None
@app.route('/laundrydude/api/last-data')
def get_last_data():
values = {}
for data_type in ['h', 's', 't']:
domain_cls = data_types[data_type]
last_row = domain_cls.select().orderBy('-id').limit(1).getOne()
values[data_type] = (_getTimeOfDay(last_row.timestamp), last_row.value)
user = getCurrentUser()
if user:
values['u'] = user.name
reservation = getNextReservation()
if reservation:
reservation_info = {}
reservation_info['user'] = reservation.user
reservation_info['start'] = _getTimeOfDay(
datetime.datetime.fromtimestamp(reservation.start))
reservation_info['startTs'] = reservation.start
values['r'] = reservation_info
return jsonify(values), 200
@app.route('/laundrydude/api/blocker')
def get_blocker_state():
return "b=0", 200
@app.route('/laundrydude/api/clear')
def clear_db():
print "Clearing DB..."
for cls in data_types.values():
cls.deleteMany('id=id')
return jsonify({'e': 0}), 200
def handleStateChange(key, value):
if key == "l":
if value > LED_THRESHOLD and washer_state.value == 0:
changeState(1)
if value <= LED_THRESHOLD and washer_state.value == 1:
changeState(2)
		if value > LED_THRESHOLD and washer_state.value > 1 and washer_state.value != 4:
changeState(4)
if value <= LED_THRESHOLD and washer_state.value == 4:
changeState(0)
elif key == "a":
if abs(value) - 1190 > 300 and washer_state.value == 2:
changeState(3)
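# Washer state machine as inferred from the transitions above (assumption):
# 0 = idle, 1 = LED on / cycle started, 2 = LED off / washing,
# 3 = spin-drying (large accelerometer swing), 4 = done (LED on again).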
@app.route('/laundrydude/api/data', methods=['POST'])
def save_data():
global washer_state
data_dict = request.form
if not data_dict:
abort(400)
for data_type in data_dict.keys():
if data_type == "r":
value = data_dict[data_type].strip()
else:
value = float(data_dict[data_type].strip())
data_types[data_type](value=value)
handleStateChange(data_type, value)
return jsonify({"e": 0}), 201
@app.route('/laundrydude/api/reservation', methods=['POST'])
def save_reservation():
start_time = time.mktime(time.strptime(
request.json['start'], "%Y-%m-%dT%H:%M:%S.000Z"))
print domain.Reservation(
user=request.json['title'],
start=start_time + 3600)
return jsonify({"e": 0}), 201
@app.route('/laundrydude/api/reservation', methods=['GET'])
def get_reservations():
return jsonify(domain.sqlresultToDictList(domain.Reservation.select())), 200
if __name__ == '__main__':
data_types = domain.createDb()
washer_state = domain.State(value=0)
sendMail('[email protected]', '[LaundryDudes] Started', '')
app.run(host='0.0.0.0', debug=True)
# ==== repo: jut-io/jut-python-tools | file: jut/api/integrations.py | license: mit ====
"""
jut integrations api
"""
from jut import defaults
from jut.api import deployments, data_engine
def get_webhook_url(deployment_name,
space='default',
data_source='webhook',
token_manager=None,
app_url=defaults.APP_URL,
**fields):
"""
return the webhook URL for posting webhook data to
"""
import_url = data_engine.get_import_data_url(deployment_name,
app_url=app_url,
token_manager=token_manager)
api_key = deployments.get_apikey(deployment_name,
token_manager=token_manager,
app_url=app_url)
fields_string = '&'.join(['%s=%s' % (key, value)
for (key, value) in fields.items()])
    return '%s/api/v1/import/webhook/?space=%s&data_source=%s&apikey=%s&%s' % \
(import_url, space, data_source, api_key, fields_string)
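# Example call (hypothetical deployment name and field values):
#
#     url = get_webhook_url('my-deployment',
#                           space='default',
#                           data_source='github',
#                           token_manager=tm,
#                           repo='jut-io/jut-python-tools')
#     # -> <import_url>/api/v1/import/webhook/?space=default
#     #    &data_source=github&apikey=<key>&repo=jut-io/jut-python-tools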
# ==== repo: simright/flask-security | file: flask_security/decorators.py | license: mit ====
# -*- coding: utf-8 -*-
"""
flask_security.decorators
~~~~~~~~~~~~~~~~~~~~~~~~~
Flask-Security decorators module
:copyright: (c) 2012 by Matt Wright.
:license: MIT, see LICENSE for more details.
"""
import re
from collections import namedtuple
from functools import wraps
from flask import (abort, current_app, Response, request,
url_for, redirect, _request_ctx_stack)
from flask_login import current_user, login_required # pragma: no flakes
from flask_principal import RoleNeed, Permission, Identity, identity_changed
from werkzeug.local import LocalProxy
from werkzeug.routing import BuildError
from . import utils
# Convenient references
_security = LocalProxy(lambda: current_app.extensions['security'])
_default_unauthorized_html = """
<h1>Unauthorized</h1>
<p>The server could not verify that you are authorized to access the URL
requested. You either supplied the wrong credentials (e.g. a bad password),
or your browser doesn't understand how to supply the credentials required.</p>
"""
BasicAuth = namedtuple('BasicAuth', 'username, password')
def _get_unauthorized_response(text=None, headers=None):
text = text or _default_unauthorized_html
headers = headers or {}
return Response(text, 401, headers)
def _get_unauthorized_view():
view = utils.get_url(utils.config_value('UNAUTHORIZED_VIEW'))
if view:
if callable(view):
view = view()
else:
try:
view = url_for(view)
except BuildError:
view = None
utils.do_flash(*utils.get_message('UNAUTHORIZED'))
return redirect(view or request.referrer or '/')
abort(403)
def _check_token():
header_key = _security.token_authentication_header
args_key = _security.token_authentication_key
header_token = request.headers.get(header_key, None)
token = request.args.get(args_key, header_token)
if request.get_json(silent=True):
if not isinstance(request.json, list):
token = request.json.get(args_key, token)
user = _security.login_manager.token_callback(token)
if user and user.is_authenticated:
app = current_app._get_current_object()
_request_ctx_stack.top.user = user
identity_changed.send(app, identity=Identity(user.id))
return True
return False
def _check_http_auth():
auth = request.authorization or BasicAuth(username=None, password=None)
user = _security.datastore.find_user(email=auth.username)
if user and utils.verify_and_update_password(auth.password, user):
_security.datastore.commit()
app = current_app._get_current_object()
_request_ctx_stack.top.user = user
identity_changed.send(app, identity=Identity(user.id))
return True
return False
def http_auth_required(realm):
"""Decorator that protects endpoints using Basic HTTP authentication.
The username should be set to the user's email address.
:param realm: optional realm name"""
def decorator(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if _check_http_auth():
return fn(*args, **kwargs)
if _security._unauthorized_callback:
return _security._unauthorized_callback()
else:
r = _security.default_http_auth_realm if callable(realm) else realm
h = {'WWW-Authenticate': 'Basic realm="%s"' % r}
return _get_unauthorized_response(headers=h)
return wrapper
if callable(realm):
return decorator(realm)
return decorator
def auth_token_required(fn):
"""Decorator that protects endpoints using token authentication. The token
should be added to the request by the client by using a query string
variable with a name equal to the configuration value of
`SECURITY_TOKEN_AUTHENTICATION_KEY` or in a request header named that of
the configuration value of `SECURITY_TOKEN_AUTHENTICATION_HEADER`
"""
@wraps(fn)
def decorated(*args, **kwargs):
if _check_token():
return fn(*args, **kwargs)
if _security._unauthorized_callback:
return _security._unauthorized_callback()
else:
return _get_unauthorized_response()
return decorated
def auth_required(*auth_methods):
"""
Decorator that protects enpoints through multiple mechanisms
Example::
@app.route('/dashboard')
@auth_required('token', 'session')
def dashboard():
return 'Dashboard'
:param auth_methods: Specified mechanisms.
"""
login_mechanisms = {
'token': lambda: _check_token(),
'basic': lambda: _check_http_auth(),
'session': lambda: current_user.is_authenticated
}
def wrapper(fn):
@wraps(fn)
def decorated_view(*args, **kwargs):
h = {}
mechanisms = [(method, login_mechanisms.get(method)) for method in auth_methods]
for method, mechanism in mechanisms:
if mechanism and mechanism():
return fn(*args, **kwargs)
elif method == 'basic':
r = _security.default_http_auth_realm
h['WWW-Authenticate'] = 'Basic realm="%s"' % r
if _security._unauthorized_callback:
return _security._unauthorized_callback()
else:
return _get_unauthorized_response(headers=h)
return decorated_view
return wrapper
def roles_required(*roles):
"""Decorator which specifies that a user must have all the specified roles.
Example::
@app.route('/dashboard')
@roles_required('admin', 'editor')
def dashboard():
return 'Dashboard'
The current user must have both the `admin` role and `editor` role in order
to view the page.
:param args: The required roles.
"""
def wrapper(fn):
@wraps(fn)
def decorated_view(*args, **kwargs):
perms = [Permission(RoleNeed(role)) for role in roles]
for perm in perms:
if not perm.can():
if _security._unauthorized_callback:
return _security._unauthorized_callback()
else:
return _get_unauthorized_view()
return fn(*args, **kwargs)
return decorated_view
return wrapper
def roles_accepted(*roles):
"""Decorator which specifies that a user must have at least one of the
specified roles. Example::
@app.route('/create_post')
@roles_accepted('editor', 'author')
def create_post():
return 'Create Post'
The current user must have either the `editor` role or `author` role in
order to view the page.
:param args: The possible roles.
"""
def wrapper(fn):
@wraps(fn)
def decorated_view(*args, **kwargs):
perm = Permission(*[RoleNeed(role) for role in roles])
if perm.can():
return fn(*args, **kwargs)
if _security._unauthorized_callback:
return _security._unauthorized_callback()
else:
return _get_unauthorized_view()
return decorated_view
return wrapper
def anonymous_user_required(f):
@wraps(f)
def wrapper(*args, **kwargs):
if current_user.is_authenticated:
return redirect(utils.get_url(_security.post_login_view))
return f(*args, **kwargs)
return wrapper
def verify_pag_source(func):
"""验证页面来源"""
@wraps(func)
def wrapper(*args, **kwargs):
regx = re.compile(r"https?://www.simright.(com|io)/phone_register")
refer = str(request.referrer)
rest = re.match(regx, refer)
if rest is not None:
return func(*args, **kwargs)
else:
raise Exception("Please request in a legal way")
return wrapper
| mit | 5,692,127,315,478,371,000 | 30.490196 | 92 | 0.615318 | false |
arantes555/oblivious-movie-gharial | config.py | 1 | 1105 | import os
from sys import maxsize
# Minimum relevance (in per cent of the total amount of documents) to accept a classifier
MIN_RELEVANCE = 0.001
# Max amount of reviews to retrieve
MAX_REVIEWS = maxsize
# Max amount of movies to analyze
MOVIES_TO_ANALYZE = 1500
# Movies to classify after the model is trained
MOVIES_TO_CLASSIFY = 100
READ_ALL_THEN_SHUFFLE = True
# Maximum amount of words in the dictionary
MAX_FEATURES = 1000
# Amount of topics to extract, keep it relatively low
N_TOPICS = 20
# Amount of words to display for each topic, doesn't affect anything except printing
N_TOP_WORDS = 15
# Parameter that controls spareness
BETA = 1e+2
AMAZON_REVIEWS_FILE = './resources/Movies_and_TV_5.json'
METADATA_FILE = './resources/meta_Movies_and_TV.json'
LANGUAGE_STOP_WORDS_PATH = './resources/stopwords/english'
PROJECT_STOP_WORDS_PATH = './resources/movies_stopwords'
NLTK_DATA_DIR = os.path.abspath('./resources/nltk_data/')
os.environ['NLTK_DATA'] = NLTK_DATA_DIR
# Amount of parallel jobs the computer can take (core amount x2 with hyper-threading)
N_JOBS = 8
FULL_TOPICS = True
| mit | 660,543,326,210,007,400 | 23.555556 | 89 | 0.751131 | false |
alexoneill/py3status | py3status/modules/github.py | 1 | 9985 | # -*- coding: utf-8 -*-
"""
Display Github notifications and issue/pull requests for a repo.
To check notifications a Github `username` and `personal access token` are
required. You can create a personal access token at
https://github.com/settings/tokens The only `scope` needed is `notifications`,
which provides readonly access to notifications.
The Github API is rate limited so setting `cache_timeout` too small may cause
issues see https://developer.github.com/v3/#rate-limiting for details
Configuration parameters:
auth_token: Github personal access token, needed to check notifications
see above.
(default None)
button_action: Button that when clicked opens the Github notification page
if notifications, else the project page for the repository if there is
one (otherwise the github home page). Setting to `None` disables.
(default 3)
button_refresh: Button that when clicked refreshes module.
Setting to `None` disables.
(default 2)
cache_timeout: How often we refresh this module in seconds
(default 60)
format: Format of output
*(default '{repo} {issues}/{pull_requests}{notifications}'
if username and auth_token provided else
'{repo} {issues}/{pull_requests}')*
format_notifications: Format of `{notification}` status placeholder.
(default ' N{notifications_count}')
notifications: Type of notifications can be `all` for all notifications or
`repo` to only get notifications for the repo specified. If repo is
not provided then all notifications will be checked.
(default 'all')
repo: Github repo to check
(default 'ultrabug/py3status')
url_api: Change only if using Enterprise Github, example https://github.domain.com/api/v3.
(default 'https://api.github.com')
url_base: Change only if using Enterprise Github, example https://github.domain.com.
(default 'https://github.com')
username: Github username, needed to check notifications.
(default None)
Format placeholders:
{issues} Number of open issues.
{notifications} Notifications. If no notifications this will be empty.
{notifications_count} Number of notifications. This is also the __Only__
placeholder available to `format_notifications`.
{pull_requests} Number of open pull requests
{repo} short name of the repository being checked. eg py3status
{repo_full} full name of the repository being checked. eg ultrabug/py3status
Examples:
```
# set github access credentials
github {
auth_token = '40_char_hex_access_token'
username = 'my_username'
}
# just check for any notifications
github {
auth_token = '40_char_hex_access_token'
username = 'my_username'
format = 'Github {notifications_count}'
}
```
@author tobes
SAMPLE OUTPUT
{'full_text': 'py3status 34/24'}
notification
{'full_text': 'py3status 34/24 N3', 'urgent': True}
"""
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
class Py3status:
"""
"""
# available configuration parameters
auth_token = None
button_action = 3
button_refresh = 2
cache_timeout = 60
format = None
format_notifications = ' N{notifications_count}'
notifications = 'all'
repo = 'ultrabug/py3status'
url_api = 'https://api.github.com'
url_base = 'https://github.com'
username = None
def post_config_hook(self):
self.first = True
self.notification_warning = False
self.repo_warning = False
self._issues = '?'
self._pulls = '?'
self._notify = '?'
# remove a trailing slash in the urls
self.url_api = self.url_api.strip('/')
self.url_base = self.url_base.strip('/')
def _init(self):
# Set format if user has not configured it.
if not self.format:
if self.username and self.auth_token:
# include notifications
self.format = '{repo} {issues}/{pull_requests}{notifications}'
else:
self.format = '{repo} {issues}/{pull_requests}'
def _github_count(self, url):
"""
Get counts for requests that return 'total_count' in the json response.
"""
if self.first:
return '?'
url = self.url_api + url + '&per_page=1'
# if we have authentication details use them as we get better
# rate-limiting.
if self.username and self.auth_token:
auth = (self.username, self.auth_token)
else:
auth = None
try:
info = self.py3.request(url, timeout=10, auth=auth)
except (self.py3.RequestException):
return
if info and info.status_code == 200:
return(int(info.json()['total_count']))
if info.status_code == 422:
if not self.repo_warning:
self.py3.notify_user('Github repo cannot be found.')
self.repo_warning = True
return '?'
def _notifications(self):
"""
Get the number of unread notifications.
"""
if not self.username or not self.auth_token:
if not self.notification_warning:
self.py3.notify_user('Github module needs username and '
'auth_token to check notifications.')
self.notification_warning = True
return '?'
if self.first:
return '?'
if self.notifications == 'all' or not self.repo:
url = self.url_api + '/notifications'
else:
url = self.url_api + '/repos/' + self.repo + '/notifications'
url += '?per_page=100'
try:
info = self.py3.request(url, timeout=10,
auth=(self.username, self.auth_token))
except (self.py3.RequestException):
return
if info.status_code == 200:
links = info.headers.get('Link')
if not links:
return len(info.json())
last_page = 1
for link in links.split(','):
if 'rel="last"' in link:
last_url = link[link.find('<') + 1:link.find('>')]
parsed = urlparse.urlparse(last_url)
last_page = int(urlparse.parse_qs(parsed.query)['page'][0])
if last_page == 1:
return len(info.json())
try:
last_page_info = self.py3.request(last_url, timeout=10,
auth=(self.username, self.auth_token))
except self.py3.RequestException:
return
return len(info.json()) * (last_page - 1) + len(last_page_info.json())
if info.status_code == 404:
if not self.repo_warning:
self.py3.notify_user('Github repo cannot be found.')
self.repo_warning = True
def github(self):
if self.first:
self._init()
status = {}
urgent = False
# issues
if self.repo and self.py3.format_contains(self.format, 'issues'):
url = '/search/issues?q=state:open+type:issue+repo:' + self.repo
self._issues = self._github_count(url) or self._issues
status['issues'] = self._issues
# pull requests
if self.repo and self.py3.format_contains(self.format, 'pull_requests'):
url = '/search/issues?q=state:open+type:pr+repo:' + self.repo
self._pulls = self._github_count(url) or self._pulls
status['pull_requests'] = self._pulls
# notifications
if self.py3.format_contains(self.format, 'notifications*'):
count = self._notifications()
# if we don't have a notification count, then use the last value
# that we did have.
if count is None:
count = self._notify
self._notify = count
if count and count != '?':
notify = self.py3.safe_format(
self.format_notifications,
{'notifications_count': count})
urgent = True
else:
notify = ''
status['notifications'] = notify
status['notifications_count'] = count
# repo
try:
status['repo'] = self.repo.split('/')[1]
except IndexError:
status['repo'] = 'Error'
status['repo_full'] = self.repo
if self.first:
cached_until = 0
self.first = False
else:
cached_until = self.py3.time_in(self.cache_timeout)
return {
'full_text': self.py3.safe_format(self.format, status),
'cached_until': cached_until,
'urgent': urgent
}
def on_click(self, event):
button = event['button']
if button == self.button_action:
# open github in browser
if self._notify and self._notify != '?':
# open github notifications page
url = self.url_base + '/notifications'
else:
if self.notifications == 'all' and not self.repo:
# open github.com if there are no unread notifications and no repo
url = self.url_base
else:
# open repo page if there are no unread notifications
url = self.url_base + '/' + self.repo
# open url in default browser
self.py3.command_run('xdg-open {}'.format(url))
self.py3.prevent_refresh()
elif button != self.button_refresh:
# only refresh the module if needed
self.py3.prevent_refresh()
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status)
| bsd-3-clause | 7,620,734,784,100,960,000 | 35.441606 | 94 | 0.576565 | false |
tartley/flyinghigh-opengl-from-python | flyinghigh/component/camera.py | 1 | 1205 | from __future__ import division
from math import sin, cos
from .. import gl, glu
class CameraBase(object):
def __init__(self):
self.item = None
def reset(self):
gl.glMatrixMode(gl.GL_MODELVIEW)
gl.glLoadIdentity()
class Camera(CameraBase):
def __init__(self):
super(Camera, self).__init__()
def look_at(self, lookat):
'''
lookat is a tuple (x, y, z), towards which the camera should point
'''
position = self.item.position
gl.glMatrixMode(gl.GL_MODELVIEW)
gl.glLoadIdentity()
glu.gluLookAt(
position.x, position.y, position.z,
lookat.x, lookat.y, lookat.z,
0, 1, 0)
class CameraOrtho(CameraBase):
def __init__(self):
super(CameraOrtho, self).__init__()
self.angle = 0.0
def look_at(self, lookat):
'''
lookat is a tuple (x, y), towards which the camera should point
'''
gl.glMatrixMode(gl.GL_MODELVIEW)
gl.glLoadIdentity()
glu.gluLookAt(
self.position.x, self.position.y, +1.0,
lookat.x, lookat.y, -1.0,
sin(self.angle), cos(self.angle), 0.0)
| bsd-3-clause | 3,079,669,999,636,923,400 | 22.173077 | 74 | 0.556846 | false |
slachiewicz/teryt2osm | teryt2osm/reporting.py | 1 | 8621 | # vi: encoding=utf-8
# teryt2osm - tool to merge TERYT data with OSM maps
# Copyright (C) 2009 Jacek Konieczny <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
Status and error reporting facilities.
"""
__version__ = "$Revision$"
import sys
import os
import codecs
import xml.etree.cElementTree as ElementTree
class Error(Exception):
pass
class ProgressError(Error):
pass
class ChannelError(Error):
pass
class Channel(object):
def __init__(self, name, location = None):
"""Create channel. `name` is base channel name, `location`
is a location data (list of one to three of [wojewodztwo, powiat,
gmina])"""
self.name = name
if location is None:
location = []
self.location = location
self.level = len(location)
if self.level > 3:
raise ChannelError, "Channel too deep. Maximum depth level is 3"
if self.level:
if "." in location or ".." in location:
raise ValueError, "Forbidden entries in channel location!"
location = [ l.replace("/", "_").replace("\\", "_") for l in location ]
self.directory = os.path.join("reports", os.path.join(*location))
else:
self.directory = "reports"
if not os.path.exists(self.directory):
os.makedirs(self.directory)
if name in (".", ".."):
raise ValueError, "Forbidden channel name!"
name = name.replace("/", "_").replace("\\", "_")
self.log_file = codecs.open( os.path.join(self.directory, name + ".txt"), "w", "utf-8" )
self.counter = 0
self.quiet = False
self.split_level = 0
self.subchannels = {}
self.map_file = None
def __del__(self):
self.close()
def close(self):
if self.log_file:
self.log_file.close()
self.log_file = None
if self.map_file:
self.close_map_file()
def close_map_file(self):
self.map_file.write("</osm>\n")
self.map_file.close()
self.map_file = None
def set_mapping(self, value):
if not self.map_file:
if value:
self.map_file = file( os.path.join(self.directory,
self.name + ".osm"), "w" )
self.map_file.write('<osm generator="teryt2osm" version="0.6">\n')
elif not value:
self.close_map_file()
for subch in self.subchannels.values():
subch.mapping = value
def get_mapping(self):
if self.map_file:
return True
else:
return False
mapping = property(get_mapping, set_mapping)
def emit(self, msg, location):
self.log_file.write(u"%s\n" % (msg,))
if self.map_file and isinstance(location, OSM_Place):
self.map_file.write(
ElementTree.tostring(location.element, "utf-8"))
if not self.split_level:
return
try:
if self.level == 0:
loc_obj = location.wojewodztwo
elif self.level == 1:
loc_obj = location.powiat
elif self.level == 2:
loc_obj = location.gmina
except AttributeError, KeyError:
loc_obj = None
if loc_obj:
loc_name = loc_obj.name
split_level = self.split_level - 1
else:
loc_name = u"_brak"
split_level = 0
if loc_name in self.subchannels:
subchannel = self.subchannels[loc_name]
else:
subchannel = Channel(self.name, self.location + [loc_name])
if self.mapping:
subchannel.mapping = True
self.subchannels[loc_name] = subchannel
subchannel.split_level = split_level
subchannel.emit(msg, location)
def __repr__(self):
return "<Channel %i %r quiet=%r>" % (id(self), self.name, self.quiet)
class Reporting(object):
instance = None
def _init(self, logging = True):
global OSM_Place
from teryt2osm.osm_places import OSM_Place
self.logging = logging
self.progress_total = None
self.progress_step = None
self.progress_value = None
self.need_eol = False
self.channels = {}
if not os.path.exists("reports"):
os.mkdir("reports")
self.log_file = codecs.open( os.path.join("reports", "log.txt"), "w", "utf-8" )
def __del__(self):
self.close()
def close(self):
if self.need_eol:
print >>sys.stderr
if self.log_file:
self.log_file.close()
if Reporting.instance is self:
Reporting.instance = None
for channel in self.channels.values():
channel.close()
self.channels = {}
def __new__(cls, logging = True):
if cls.instance is None:
cls.instance = object.__new__(cls)
cls.instance._init(logging = logging)
return cls.instance
def get_channel(self, name):
if name in self.channels:
return self.channels[name]
channel = Channel(name)
self.channels[name] = channel
return channel
def config_channel(self, name, quiet = None, mapping = None, split_level = None):
channel = self.get_channel(name)
if quiet is not None:
channel.quiet = quiet
if mapping is not None:
channel.mapping = mapping
if split_level is not None:
channel.split_level = split_level
def log(self, msg):
if self.logging:
print >> self.log_file, msg
def print_msg(self, msg):
if self.need_eol:
print >>sys.stderr, u"\n%s" % (msg,)
self.need_eol = False
else:
print >>sys.stderr, msg
def output_msg(self, channel_name, msg, location = None):
"""Output a single message via channel 'channel'."""
channel = self.get_channel(channel_name)
if not channel.quiet:
self.print_msg(msg)
self.log(msg)
if self.logging:
channel.emit(msg, location)
def progress_start(self, msg, total, step = 1):
"""Start progrss reporting.
:Parameters:
- `total`: total number of progrss point
- `step`: percentage step when progress counter should be updated
"""
if self.progress_total:
raise ProgressError, u"Progress reporting already started."
self.progress_total = total
self.progress_step = max(int(total * step / 100), 1)
self.progress_value = 0
self.progress_msg = msg
self.log(u"%s… rozpoczęte" % (msg,))
sys.stderr.write(u"\r%s… " % (msg,))
sys.stderr.flush()
self.need_eol = True
def progress(self, increment = None, value = None):
if self.progress_total is None:
raise ProgressError, u"Progress reporting not started."
if not self.progress_total:
return
if increment is not None:
self.progress_value += increment
elif value is not None:
self.progress_value = value
else:
self.progress_value += 1
if self.progress_value % self.progress_step:
return
sys.stderr.write(u"\r%s… %2i%% " % (self.progress_msg,
self.progress_value * 100 / self.progress_total))
sys.stderr.flush()
self.need_eol = True
def progress_stop(self):
if self.progress_total is None:
raise ProgressError, u"Progress reporting not started."
print >>sys.stderr, "\r%s 100%% " % (self.progress_msg,)
self.progress_total = None
self.progress_step = None
self.progress_value = None
self.need_eol = False
self.log(u"%s… zakończone" % self.progress_msg)
| gpl-2.0 | -6,645,002,260,093,666,000 | 33.035573 | 96 | 0.569272 | false |
openstack/vitrage | vitrage/tests/unit/datasources/aodh/test_aodh_driver.py | 1 | 35245 | # Copyright 2016 - ZTE, Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from vitrage.common.constants import DatasourceOpts as DSOpts
from vitrage.common.constants import DatasourceProperties as DSProps
from vitrage.common.constants import UpdateMethod
from vitrage.datasources.aodh import AODH_DATASOURCE
from vitrage.datasources.aodh.properties import AodhEventType
from vitrage.datasources.aodh.properties import AodhExtendedAlarmType as AType
from vitrage.datasources.aodh.properties import AodhProperties as AodhProps
from vitrage.tests import base
from vitrage.tests.mocks import mock_driver
from vitrage.tests.unit.datasources.aodh.mock_driver import MockAodhDriver
class AodhDriverTest(base.BaseTest):
OPTS = [
cfg.StrOpt(DSOpts.UPDATE_METHOD,
default=UpdateMethod.PUSH),
]
# noinspection PyPep8Naming
@classmethod
def setUpClass(cls):
super(AodhDriverTest, cls).setUpClass()
cls.conf = cfg.ConfigOpts()
cls.conf.register_opts(cls.OPTS, group=AODH_DATASOURCE)
def test_event_alarm_notifications(self):
aodh_driver = MockAodhDriver()
# 1. alarm creation with 'ok' state
# prepare data
detail_data = {"type": "creation",
AodhProps.DETAIL: self._create_alarm_data_type_event()}
generators = \
mock_driver.simple_aodh_alarm_notification_generators(
alarm_num=1,
update_events=1,
update_vals=detail_data)
alarm = mock_driver.generate_sequential_events_list(generators)[0]
alarm_info = alarm.copy()
# action
entity = aodh_driver.enrich_event(alarm, AodhEventType.CREATION)
# Test assertions
# alarm with status OK should not be handled
self.assertIsNone(entity)
# 2.alarm state transition from 'ok' to 'alarm'
detail_data = {"type": "state transition",
AodhProps.DETAIL: {AodhProps.STATE: "alarm"}}
alarm.update(detail_data)
entity = aodh_driver.enrich_event(alarm,
AodhEventType.STATE_TRANSITION)
# Test assertions
# alarm state change: ok->alarm, need to be added
self.assertIsNotNone(entity)
self._validate_aodh_entity_comm_props(entity, alarm_info)
self.assertEqual(entity[AodhProps.STATE],
alarm[AodhProps.DETAIL][AodhProps.STATE])
self.assertEqual(entity[AodhProps.SEVERITY],
alarm[AodhProps.SEVERITY])
self.assertEqual(entity[DSProps.EVENT_TYPE],
AodhEventType.STATE_TRANSITION)
# 3. delete alarm which is 'alarm' state
# prepare data
detail_data = {"type": "deletion"}
alarm.update(detail_data)
# action
entity = aodh_driver.enrich_event(alarm, AodhEventType.DELETION)
# Test assertions
self.assertIsNotNone(entity)
self._validate_aodh_entity_comm_props(entity, alarm_info)
self.assertEqual(entity[DSProps.EVENT_TYPE],
AodhEventType.DELETION)
# 4. alarm creation with 'alarm' state
# prepare data
detail_data = {"type": "creation",
AodhProps.DETAIL:
self._create_alarm_data_type_event(state="alarm")}
generators = \
mock_driver.simple_aodh_alarm_notification_generators(
alarm_num=1,
update_events=1,
update_vals=detail_data)
alarm = mock_driver.generate_sequential_events_list(generators)[0]
alarm_info = alarm.copy()
# action
entity = aodh_driver.enrich_event(alarm, AodhEventType.CREATION)
# Test assertions
# alarm with status 'alarm' need to be added
self.assertIsNotNone(entity)
self._validate_aodh_entity_comm_props(entity, alarm_info)
self.assertEqual(entity[AodhProps.STATE],
alarm[AodhProps.DETAIL][AodhProps.STATE])
self.assertEqual(entity[AodhProps.SEVERITY],
alarm[AodhProps.SEVERITY])
self.assertIsNone(entity[AodhProps.RESOURCE_ID])
self.assertEqual("*", entity[AodhProps.EVENT_TYPE])
self.assertEqual(entity[DSProps.EVENT_TYPE],
AodhEventType.CREATION)
# 5. alarm rule change
# prepare data
detail_data = {"type": "rule change",
AodhProps.DETAIL: {
"severity": "critical",
AodhProps.RULE:
{"query": [{"field": "traits.resource_id",
"type": "",
"value": "1",
"op": "eq"}],
"event_type": "instance.update"}}}
alarm.update(detail_data)
# action
entity = aodh_driver.enrich_event(alarm,
AodhEventType.RULE_CHANGE)
# Test assertions
# alarm rule change: need to be update
self.assertIsNotNone(entity)
self._validate_aodh_entity_comm_props(entity, alarm_info)
self.assertEqual(entity[AodhProps.SEVERITY],
alarm[AodhProps.DETAIL][AodhProps.SEVERITY])
self.assertEqual(
entity[AodhProps.EVENT_TYPE],
alarm[AodhProps.DETAIL][AodhProps.RULE][AodhProps.EVENT_TYPE])
self.assertEqual("1", entity[AodhProps.RESOURCE_ID])
self.assertEqual(entity[DSProps.EVENT_TYPE],
AodhEventType.RULE_CHANGE)
# 6. alarm state change from 'alarm' to 'ok'
# prepare data
detail_data = {"type": "state transition",
AodhProps.DETAIL: {AodhProps.STATE: "ok"}}
alarm.update(detail_data)
# action
entity = aodh_driver.enrich_event(alarm,
AodhEventType.STATE_TRANSITION)
# Test assertions
# alarm state change: alarm->OK, need to be deleted
self.assertIsNotNone(entity)
self._validate_aodh_entity_comm_props(entity, alarm_info)
self.assertEqual(entity[DSProps.EVENT_TYPE],
AodhEventType.STATE_TRANSITION)
# 7. delete alarm which is 'ok' state
# prepare data
detail_data = {"type": "deletion"}
alarm.update(detail_data)
# action
entity = aodh_driver.enrich_event(alarm, AodhEventType.DELETION)
# Test assertions
self.assertIsNone(entity)
def test_gnocchi_threshold_alarm_notifications(self):
aodh_driver = MockAodhDriver()
# 1. alarm creation with 'ok' state
# prepare data
detail_data = {"type": "gnocchi_resources_threshold",
AodhProps.DETAIL: self._create_alarm_data_gnocchi()}
generators = \
mock_driver.simple_aodh_alarm_notification_generators(
alarm_num=1,
update_events=1,
update_vals=detail_data)
alarm = mock_driver.generate_sequential_events_list(generators)[0]
alarm_info = alarm.copy()
# action
entity = aodh_driver.enrich_event(alarm, AodhEventType.CREATION)
# Test assertions
# alarm with status OK should not be handled
self.assertIsNone(entity)
# 2.alarm state transition from 'ok' to 'alarm'
detail_data = {"type": "state transition",
AodhProps.DETAIL: {AodhProps.STATE: "alarm"}}
alarm.update(detail_data)
entity = aodh_driver.enrich_event(alarm,
AodhEventType.STATE_TRANSITION)
# Test assertions
# alarm state change: ok->alarm, need to be added
self.assertIsNotNone(entity)
self._validate_aodh_entity_comm_props(entity, alarm_info)
self.assertEqual(entity[AodhProps.STATE],
alarm[AodhProps.DETAIL][AodhProps.STATE])
self.assertEqual(entity[AodhProps.SEVERITY],
alarm[AodhProps.SEVERITY])
# 3. delete alarm which is 'alarm' state
# prepare data
detail_data = {"type": "deletion"}
alarm.update(detail_data)
# action
entity = aodh_driver.enrich_event(alarm, AodhEventType.DELETION)
# Test assertions
self.assertIsNotNone(entity)
self._validate_aodh_entity_comm_props(entity, alarm_info)
self.assertEqual(entity[DSProps.EVENT_TYPE],
AodhEventType.DELETION)
# 4. alarm creation with 'alarm' state
# prepare data
detail_data = {"type": "gnocchi_resources_threshold",
AodhProps.DETAIL:
self._create_alarm_data_gnocchi(state="alarm")}
generators = \
mock_driver.simple_aodh_alarm_notification_generators(
alarm_num=1,
update_events=1,
update_vals=detail_data)
alarm = mock_driver.generate_sequential_events_list(generators)[0]
alarm_info = alarm.copy()
# action
entity = aodh_driver.enrich_event(alarm, AodhEventType.CREATION)
# Test assertions
# alarm with status 'alarm' need to be added
self.assertIsNotNone(entity)
self._validate_aodh_entity_comm_props(entity, alarm_info)
self.assertEqual(entity[AodhProps.STATE],
alarm[AodhProps.DETAIL][AodhProps.STATE])
self.assertEqual(entity[AodhProps.SEVERITY],
alarm[AodhProps.SEVERITY])
self.assertEqual(entity[DSProps.EVENT_TYPE],
AodhEventType.CREATION)
# 5. alarm rule change
# prepare data
detail_data = {"type": "rule change",
AodhProps.DETAIL: {
"severity": "critical",
AodhProps.RULE:
{"granularity": "300",
"threshold": "0.0123",
"comparison_operator": "eq"}}}
alarm.update(detail_data)
# action
entity = aodh_driver.enrich_event(alarm,
AodhEventType.RULE_CHANGE)
# Test assertions
# alarm rule change: need to be update
self.assertIsNotNone(entity)
self._validate_aodh_entity_comm_props(entity, alarm_info)
self.assertEqual(entity[AodhProps.SEVERITY],
alarm[AodhProps.DETAIL][AodhProps.SEVERITY])
self.assertEqual(entity[DSProps.EVENT_TYPE],
AodhEventType.RULE_CHANGE)
# 6. alarm state change from 'alarm' to 'ok'
# prepare data
detail_data = {"type": "state transition",
AodhProps.DETAIL: {AodhProps.STATE: "ok"}}
alarm.update(detail_data)
# action
entity = aodh_driver.enrich_event(alarm,
AodhEventType.STATE_TRANSITION)
# Test assertions
# alarm state change: alarm->OK, need to be deleted
self.assertIsNotNone(entity)
self._validate_aodh_entity_comm_props(entity, alarm_info)
self.assertEqual(entity[DSProps.EVENT_TYPE],
AodhEventType.STATE_TRANSITION)
# 7. delete alarm which is 'ok' state
# prepare data
detail_data = {"type": "deletion"}
alarm.update(detail_data)
# action
entity = aodh_driver.enrich_event(alarm, AodhEventType.DELETION)
# Test assertions
self.assertIsNone(entity)
def test_gnocchi_aggregation_by_metrics_alarm_notifications(self):
aodh_driver = MockAodhDriver()
# 1. alarm creation with 'ok' state
# prepare data
detail_data = {
"type": AType.GNOCCHI_AGGREGATION_BY_METRICS_THRESHOLD,
AodhProps.DETAIL: self._create_alarm_data_metrics()
}
generators = \
mock_driver.simple_aodh_alarm_notification_generators(
alarm_num=1,
update_events=1,
update_vals=detail_data)
alarm = mock_driver.generate_sequential_events_list(generators)[0]
alarm_info = alarm.copy()
# action
entity = aodh_driver.enrich_event(alarm, AodhEventType.CREATION)
# Test assertions
# alarm with status OK should not be handled
self.assertIsNone(entity)
# 2.alarm state transition from 'ok' to 'alarm'
detail_data = {"type": "state transition",
AodhProps.DETAIL: {AodhProps.STATE: "alarm"}}
alarm.update(detail_data)
entity = aodh_driver.enrich_event(alarm,
AodhEventType.STATE_TRANSITION)
# Test assertions
# alarm state change: ok->alarm, need to be added
self.assertIsNotNone(entity)
self._validate_aodh_entity_comm_props(entity, alarm_info)
self.assertEqual(entity[AodhProps.STATE],
alarm[AodhProps.DETAIL][AodhProps.STATE])
self.assertEqual(entity[AodhProps.SEVERITY],
alarm[AodhProps.SEVERITY])
# 3. delete alarm which is 'alarm' state
# prepare data
detail_data = {"type": "deletion"}
alarm.update(detail_data)
# action
entity = aodh_driver.enrich_event(alarm, AodhEventType.DELETION)
# Test assertions
self.assertIsNotNone(entity)
self._validate_aodh_entity_comm_props(entity, alarm_info)
self.assertEqual(entity[DSProps.EVENT_TYPE],
AodhEventType.DELETION)
# 4. alarm creation with 'alarm' state
# prepare data
detail_data = {
"type": AType.GNOCCHI_AGGREGATION_BY_METRICS_THRESHOLD,
AodhProps.DETAIL: self._create_alarm_data_metrics(state="alarm")
}
generators = \
mock_driver.simple_aodh_alarm_notification_generators(
alarm_num=1,
update_events=1,
update_vals=detail_data)
alarm = mock_driver.generate_sequential_events_list(generators)[0]
alarm_info = alarm.copy()
# action
entity = aodh_driver.enrich_event(alarm, AodhEventType.CREATION)
# Test assertions
# alarm with status 'alarm' need to be added
self.assertIsNotNone(entity)
self._validate_aodh_entity_comm_props(entity, alarm_info)
self.assertEqual(entity[AodhProps.STATE],
alarm[AodhProps.DETAIL][AodhProps.STATE])
self.assertEqual(entity[AodhProps.SEVERITY],
alarm[AodhProps.SEVERITY])
self.assertEqual(entity[DSProps.EVENT_TYPE],
AodhEventType.CREATION)
# 5. alarm rule change
# prepare data
detail_data = {"type": "rule change",
AodhProps.DETAIL: {
"severity": "critical",
AodhProps.RULE:
{"granularity": "300",
"threshold": "0.0123",
"comparison_operator": "eq"}}}
alarm.update(detail_data)
# action
entity = aodh_driver.enrich_event(alarm,
AodhEventType.RULE_CHANGE)
# Test assertions
# alarm rule change: need to be update
self.assertIsNotNone(entity)
self._validate_aodh_entity_comm_props(entity, alarm_info)
self.assertEqual(entity[AodhProps.SEVERITY],
alarm[AodhProps.DETAIL][AodhProps.SEVERITY])
self.assertEqual(entity[DSProps.EVENT_TYPE],
AodhEventType.RULE_CHANGE)
# 6. alarm state change from 'alarm' to 'ok'
# prepare data
detail_data = {"type": "state transition",
AodhProps.DETAIL: {AodhProps.STATE: "ok"}}
alarm.update(detail_data)
# action
entity = aodh_driver.enrich_event(alarm,
AodhEventType.STATE_TRANSITION)
# Test assertions
# alarm state change: alarm->OK, need to be deleted
self.assertIsNotNone(entity)
self._validate_aodh_entity_comm_props(entity, alarm_info)
self.assertEqual(entity[DSProps.EVENT_TYPE],
AodhEventType.STATE_TRANSITION)
# 7. delete alarm which is 'ok' state
# prepare data
detail_data = {"type": "deletion"}
alarm.update(detail_data)
# action
entity = aodh_driver.enrich_event(alarm, AodhEventType.DELETION)
# Test assertions
self.assertIsNone(entity)
def test_gnocchi_aggregation_by_resource_alarm_notifications(self):
aodh_driver = MockAodhDriver()
# 1. alarm creation with 'ok' state
# prepare data
detail_data = {
"type": AType.GNOCCHI_AGGREGATION_BY_RESOURCES_THRESHOLD,
AodhProps.DETAIL: self._create_alarm_data_resource()
}
generators = \
mock_driver.simple_aodh_alarm_notification_generators(
alarm_num=1,
update_events=1,
update_vals=detail_data)
alarm = mock_driver.generate_sequential_events_list(generators)[0]
alarm_info = alarm.copy()
# action
entity = aodh_driver.enrich_event(alarm, AodhEventType.CREATION)
# Test assertions
# alarm with status OK should not be handled
self.assertIsNone(entity)
# 2.alarm state transition from 'ok' to 'alarm'
detail_data = {"type": "state transition",
AodhProps.DETAIL: {AodhProps.STATE: "alarm"}}
alarm.update(detail_data)
entity = aodh_driver.enrich_event(alarm,
AodhEventType.STATE_TRANSITION)
# Test assertions
# alarm state change: ok->alarm, need to be added
self.assertIsNotNone(entity)
self._validate_aodh_entity_comm_props(entity, alarm_info)
self.assertEqual(entity[AodhProps.STATE],
alarm[AodhProps.DETAIL][AodhProps.STATE])
self.assertEqual(entity[AodhProps.SEVERITY],
alarm[AodhProps.SEVERITY])
# 3. delete alarm which is 'alarm' state
# prepare data
detail_data = {"type": "deletion"}
alarm.update(detail_data)
# action
entity = aodh_driver.enrich_event(alarm, AodhEventType.DELETION)
# Test assertions
self.assertIsNotNone(entity)
self._validate_aodh_entity_comm_props(entity, alarm_info)
self.assertEqual(entity[DSProps.EVENT_TYPE],
AodhEventType.DELETION)
# 4. alarm creation with 'alarm' state
# prepare data
detail_data = {
"type": AType.GNOCCHI_AGGREGATION_BY_RESOURCES_THRESHOLD,
AodhProps.DETAIL:
self._create_alarm_data_gnocchi(state="alarm")
}
generators = \
mock_driver.simple_aodh_alarm_notification_generators(
alarm_num=1,
update_events=1,
update_vals=detail_data)
alarm = mock_driver.generate_sequential_events_list(generators)[0]
alarm_info = alarm.copy()
# action
entity = aodh_driver.enrich_event(alarm, AodhEventType.CREATION)
# Test assertions
# alarm with status 'alarm' need to be added
self.assertIsNotNone(entity)
self._validate_aodh_entity_comm_props(entity, alarm_info)
self.assertEqual(entity[AodhProps.STATE],
alarm[AodhProps.DETAIL][AodhProps.STATE])
self.assertEqual(entity[AodhProps.SEVERITY],
alarm[AodhProps.SEVERITY])
self.assertEqual(entity[DSProps.EVENT_TYPE],
AodhEventType.CREATION)
# 5. alarm rule change
# prepare data
detail_data = {"type": "rule change",
AodhProps.DETAIL: {
"severity": "critical",
AodhProps.RULE:
{"granularity": "300",
"threshold": "0.0123",
"comparison_operator": "eq"}}}
alarm.update(detail_data)
# action
entity = aodh_driver.enrich_event(alarm,
AodhEventType.RULE_CHANGE)
# Test assertions
# alarm rule change: need to be update
self.assertIsNotNone(entity)
self._validate_aodh_entity_comm_props(entity, alarm_info)
self.assertEqual(entity[AodhProps.SEVERITY],
alarm[AodhProps.DETAIL][AodhProps.SEVERITY])
self.assertEqual(entity[DSProps.EVENT_TYPE],
AodhEventType.RULE_CHANGE)
# 6. alarm state change from 'alarm' to 'ok'
# prepare data
detail_data = {"type": "state transition",
AodhProps.DETAIL: {AodhProps.STATE: "ok"}}
alarm.update(detail_data)
# action
entity = aodh_driver.enrich_event(alarm,
AodhEventType.STATE_TRANSITION)
# Test assertions
# alarm state change: alarm->OK, need to be deleted
self.assertIsNotNone(entity)
self._validate_aodh_entity_comm_props(entity, alarm_info)
self.assertEqual(entity[DSProps.EVENT_TYPE],
AodhEventType.STATE_TRANSITION)
# 7. delete alarm which is 'ok' state
# prepare data
detail_data = {"type": "deletion"}
alarm.update(detail_data)
# action
entity = aodh_driver.enrich_event(alarm, AodhEventType.DELETION)
# Test assertions
self.assertIsNone(entity)
def test_composite_alarm_notifications(self):
aodh_driver = MockAodhDriver()
# 1. alarm creation with 'ok' state
# prepare data
detail_data = {"type": "composite",
AodhProps.DETAIL: self._create_alarm_data_composite()}
generators = \
mock_driver.simple_aodh_alarm_notification_generators(
alarm_num=1,
update_events=1,
update_vals=detail_data)
alarm = mock_driver.generate_sequential_events_list(generators)[0]
alarm_info = alarm.copy()
# action
entity = aodh_driver.enrich_event(alarm, AodhEventType.CREATION)
# Test assertions
# alarm with status OK should not be handled
self.assertIsNone(entity)
# 2.alarm state transition from 'ok' to 'alarm'
detail_data = {"type": "state transition",
AodhProps.DETAIL: {AodhProps.STATE: "alarm"}}
alarm.update(detail_data)
entity = aodh_driver.enrich_event(alarm,
AodhEventType.STATE_TRANSITION)
# Test assertions
# alarm state change: ok->alarm, need to be added
self.assertIsNotNone(entity)
self._validate_aodh_entity_comm_props(entity, alarm_info)
self.assertEqual(entity[AodhProps.STATE],
alarm[AodhProps.DETAIL][AodhProps.STATE])
self.assertEqual(entity[AodhProps.SEVERITY],
alarm[AodhProps.SEVERITY])
# 3. delete alarm which is 'alarm' state
# prepare data
detail_data = {"type": "deletion"}
alarm.update(detail_data)
# action
entity = aodh_driver.enrich_event(alarm, AodhEventType.DELETION)
# Test assertions
self.assertIsNotNone(entity)
self._validate_aodh_entity_comm_props(entity, alarm_info)
self.assertEqual(entity[DSProps.EVENT_TYPE],
AodhEventType.DELETION)
# 4. alarm creation with 'alarm' state
# prepare data
detail_data = {"type": "composite",
AodhProps.DETAIL:
self._create_alarm_data_composite(state="alarm")}
generators = \
mock_driver.simple_aodh_alarm_notification_generators(
alarm_num=1,
update_events=1,
update_vals=detail_data)
alarm = mock_driver.generate_sequential_events_list(generators)[0]
alarm_info = alarm.copy()
# action
entity = aodh_driver.enrich_event(alarm, AodhEventType.CREATION)
# Test assertions
# alarm with status 'alarm' need to be added
self.assertIsNotNone(entity)
self._validate_aodh_entity_comm_props(entity, alarm_info)
self.assertEqual(entity[AodhProps.STATE],
alarm[AodhProps.DETAIL][AodhProps.STATE])
self.assertEqual(entity[AodhProps.SEVERITY],
alarm[AodhProps.SEVERITY])
self.assertEqual(entity[DSProps.EVENT_TYPE],
AodhEventType.CREATION)
# 5. alarm rule change
# prepare data
detail_data = {"type": "rule change",
AodhProps.DETAIL: {
"severity": "critical",
AodhProps.RULE:
{"granularity": "300",
"threshold": "0.0123",
"comparison_operator": "eq"}}}
alarm.update(detail_data)
# action
entity = aodh_driver.enrich_event(alarm,
AodhEventType.RULE_CHANGE)
# Test assertions
# alarm rule change: need to be update
self.assertIsNotNone(entity)
self._validate_aodh_entity_comm_props(entity, alarm_info)
self.assertEqual(entity[AodhProps.SEVERITY],
alarm[AodhProps.DETAIL][AodhProps.SEVERITY])
self.assertEqual(entity[DSProps.EVENT_TYPE],
AodhEventType.RULE_CHANGE)
# 6. alarm state change from 'alarm' to 'ok'
# prepare data
detail_data = {"type": "state transition",
AodhProps.DETAIL: {AodhProps.STATE: "ok"}}
alarm.update(detail_data)
# action
entity = aodh_driver.enrich_event(alarm,
AodhEventType.STATE_TRANSITION)
# Test assertions
# alarm state change: alarm->OK, need to be deleted
self.assertIsNotNone(entity)
self._validate_aodh_entity_comm_props(entity, alarm_info)
self.assertEqual(entity[DSProps.EVENT_TYPE],
AodhEventType.STATE_TRANSITION)
# 7. delete alarm which is 'ok' state
# prepare data
detail_data = {"type": "deletion"}
alarm.update(detail_data)
# action
entity = aodh_driver.enrich_event(alarm, AodhEventType.DELETION)
# Test assertions
self.assertIsNone(entity)
def _create_alarm_data_composite(self,
state='ok',
type='composite',
rule=None):
if rule is None:
rule = {"or":
[{"evaluation_periods": 1,
"metrics": ["6ade05e5-f98b-4b7d-a0b3-9d330c4c3c41"],
"aggregation_method": "mean",
"granularity": 60,
"threshold": 100.0,
"type": "gnocchi_aggregation_by_metrics_threshold",
"comparison_operator": "lt"},
{"evaluation_periods": 3,
"metrics": ["89vde0e5-k3rb-4b7d-a0b3-9d330c4c3c41"],
"aggregation_method": "mean",
"granularity": 2,
"threshold": 80.0,
"type": "gnocchi_aggregation_by_metrics_threshold",
"comparison_operator": "ge"}
]}
return {AodhProps.DESCRIPTION: "test",
AodhProps.TIMESTAMP: "2016-11-09T01:39:13.839584",
AodhProps.ENABLED: True,
AodhProps.STATE_TIMESTAMP: "2016-11-09T01:39:13.839584",
AodhProps.ALARM_ID: "7e5c3754-e2eb-4782-ae00-7da5ded8568b",
AodhProps.REPEAT_ACTIONS: False,
AodhProps.PROJECT_ID: "c365d18fcc03493187016ae743f0cc4d",
AodhProps.NAME: "test",
AodhProps.SEVERITY: "low",
AodhProps.RESOURCE_ID: "88cd2d1d-8af4-4d00-9b5e-f82f8c8b0f8d",
AodhProps.TYPE: type,
AodhProps.STATE: state,
AodhProps.RULE: rule}
def _create_alarm_data_metrics(
self,
state='ok',
type=AType.GNOCCHI_AGGREGATION_BY_METRICS_THRESHOLD,
rule=None
):
if rule is None:
rule = {"threshold": '100',
"aggregation_method": "mean",
"comparison_operator": "lt"
}
return {AodhProps.DESCRIPTION: "metric test",
AodhProps.TIMESTAMP: "2017-04-03T01:39:13.839584",
AodhProps.ENABLED: True,
AodhProps.STATE_TIMESTAMP: "2017-04-03T01:39:13.839584",
AodhProps.ALARM_ID: "7e5c3754-e2eb-4782-ae00-7da5ded8568b",
AodhProps.REPEAT_ACTIONS: False,
AodhProps.PROJECT_ID: "c365d18fcc03493187016ae743f0cc4d",
AodhProps.NAME: "test",
AodhProps.SEVERITY: "low",
AodhProps.RESOURCE_ID: "88cd2d1d-8af4-4d00-9b5e-f82f8c8b0f8d",
AodhProps.METRICS: "6ade05e5-f98b-4b7d-a0b3-9d330c4c3c41",
AodhProps.TYPE: type,
AodhProps.STATE: state,
AodhProps.RULE: rule}
def _create_alarm_data_resource(
self,
state='ok',
type=AType.GNOCCHI_AGGREGATION_BY_RESOURCES_THRESHOLD,
rule=None):
if rule is None:
rule = {"evaluation_periods": 3,
"metric": "cpu_util",
"aggregation_method": "mean",
"granularity": 300,
"threshold": 50.0,
"query": [{"=":
{"resource_id":
"6df1747a-ef31-4897-854e-ffa2ae568e45"}}],
"comparison_operator": "ge",
"resource_type": "instance"
}
return {AodhProps.DESCRIPTION: "test",
AodhProps.TIMESTAMP: "2016-11-09T01:39:13.839584",
AodhProps.ENABLED: True,
AodhProps.STATE_TIMESTAMP: "2016-11-09T01:39:13.839584",
AodhProps.ALARM_ID: "7e5c3754-e2eb-4782-ae00-7da5ded8568b",
AodhProps.REPEAT_ACTIONS: False,
AodhProps.PROJECT_ID: "c365d18fcc03493187016ae743f0cc4d",
AodhProps.NAME: "test",
AodhProps.SEVERITY: "low",
AodhProps.RESOURCE_ID: "88cd2d1d-8af4-4d00-9b5e-f82f8c8b0f8d",
AodhProps.TYPE: type,
AodhProps.STATE: state,
AodhProps.RULE: rule}
def _create_alarm_data_gnocchi(self,
state="ok",
type="gnocchi_resources_threshold",
rule=None):
if rule is None:
rule = {"granularity": "300",
"threshold": "0.001",
"comparison_operator": "gt",
"resource_type": "instance",
AodhProps.RESOURCE_ID:
"88cd2d1d-8af4-4d00-9b5e-f82f8c8b0f8d"
}
return {AodhProps.DESCRIPTION: "test",
AodhProps.TIMESTAMP: "2016-11-09T01:39:13.839584",
AodhProps.ENABLED: True,
AodhProps.STATE_TIMESTAMP: "2016-11-09T01:39:13.839584",
AodhProps.ALARM_ID: "7e5c3754-e2eb-4782-ae00-7da5ded8568b",
AodhProps.REPEAT_ACTIONS: False,
AodhProps.PROJECT_ID: "c365d18fcc03493187016ae743f0cc4d",
AodhProps.NAME: "test",
AodhProps.SEVERITY: "low",
AodhProps.RESOURCE_ID: "88cd2d1d-8af4-4d00-9b5e-f82f8c8b0f8d",
AodhProps.TYPE: type,
AodhProps.STATE: state,
AodhProps.RULE: rule}
def _create_alarm_data_type_event(self,
state="ok",
type="event",
rule=None):
if rule is None:
rule = {"query": [], "event_type": "*"}
return {AodhProps.DESCRIPTION: "test",
AodhProps.TIMESTAMP: "2016-11-09T01:39:13.839584",
AodhProps.ENABLED: True,
AodhProps.STATE_TIMESTAMP: "2016-11-09T01:39:13.839584",
AodhProps.ALARM_ID: "7e5c3754-e2eb-4782-ae00-7da5ded8568b",
AodhProps.REPEAT_ACTIONS: False,
AodhProps.PROJECT_ID: "c365d18fcc03493187016ae743f0cc4d",
AodhProps.NAME: "test",
AodhProps.SEVERITY: "low",
AodhProps.TYPE: type,
AodhProps.STATE: state,
AodhProps.RULE: rule}
def _validate_aodh_entity_comm_props(self, entity, alarm):
self.assertEqual(entity[AodhProps.ALARM_ID],
alarm[AodhProps.ALARM_ID])
self.assertEqual(entity[AodhProps.PROJECT_ID],
alarm[AodhProps.PROJECT_ID])
self.assertEqual(entity[AodhProps.TIMESTAMP],
alarm[AodhProps.TIMESTAMP])
self.assertEqual(entity[AodhProps.DESCRIPTION],
alarm[AodhProps.DETAIL][AodhProps.DESCRIPTION])
self.assertEqual(entity[AodhProps.ENABLED],
alarm[AodhProps.DETAIL][AodhProps.ENABLED])
self.assertEqual(entity[AodhProps.NAME],
alarm[AodhProps.DETAIL][AodhProps.NAME])
self.assertEqual(entity[AodhProps.REPEAT_ACTIONS],
alarm[AodhProps.DETAIL][AodhProps.REPEAT_ACTIONS])
self.assertEqual(entity[AodhProps.TYPE],
alarm[AodhProps.DETAIL][AodhProps.TYPE])
| apache-2.0 | 2,648,268,639,343,504,000 | 39.326087 | 78 | 0.558632 | false |
rjschwei/azure-sdk-for-python | azure-mgmt-devtestlabs/azure/mgmt/devtestlabs/operations/schedule_operations.py | 1 | 21161 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
import uuid
from .. import models
class ScheduleOperations(object):
"""ScheduleOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
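    # Usage note: this operations class is not meant to be instantiated
    # directly; it is exposed as an attribute of the generated
    # DevTestLabsClient. A minimal, hypothetical access sketch follows
    # (the credential values and the ``schedule`` attribute name are
    # assumptions and may differ between SDK versions):
    #
    #   from azure.common.credentials import ServicePrincipalCredentials
    #   from azure.mgmt.devtestlabs import DevTestLabsClient
    #
    #   credentials = ServicePrincipalCredentials(
    #       client_id='...', secret='...', tenant='...')
    #   client = DevTestLabsClient(credentials, subscription_id='...')
    #   schedule_ops = client.schedule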
def list(
self, resource_group_name, lab_name, filter=None, top=None, order_by=None, custom_headers=None, raw=False, **operation_config):
"""List schedules in a given lab.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param lab_name: The name of the lab.
:type lab_name: str
:param filter: The filter to apply on the operation.
:type filter: str
:param top: The maximum number of resources to return from the
operation.
:type top: int
:param order_by: The ordering expression for the results, using OData
notation.
:type order_by: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`SchedulePaged
<azure.mgmt.devtestlabs.models.SchedulePaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/schedules'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'labName': self._serialize.url("lab_name", lab_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int')
if order_by is not None:
query_parameters['$orderBy'] = self._serialize.query("order_by", order_by, 'str')
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.SchedulePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.SchedulePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
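    # Example (a minimal, hypothetical sketch -- assumes the ``client``
    # object from the usage note above; the OData filter string is
    # illustrative only):
    #
    #   for schedule in client.schedule.list(
    #           'my-rg', 'my-lab',
    #           filter="contains(name,'shutdown')", top=10):
    #       print(schedule.name)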
def get_resource(
self, resource_group_name, lab_name, name, custom_headers=None, raw=False, **operation_config):
        """Get a schedule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param lab_name: The name of the lab.
:type lab_name: str
:param name: The name of the schedule.
:type name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
        :return: Schedule or ClientRawResponse if raw=true
        :rtype: :class:`Schedule <azure.mgmt.devtestlabs.models.Schedule>` or
         :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/schedules/{name}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'labName': self._serialize.url("lab_name", lab_name, 'str'),
'name': self._serialize.url("name", name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Schedule', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
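    # Example (hypothetical sketch -- assumes the ``client`` object from
    # the usage note above; the schedule name is illustrative):
    #
    #   schedule = client.schedule.get_resource('my-rg', 'my-lab',
    #                                           'LabVmsShutdown')
    #   print(schedule.id)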
def create_or_update_resource(
self, resource_group_name, lab_name, name, schedule, custom_headers=None, raw=False, **operation_config):
        """Create a schedule, or replace an existing one.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param lab_name: The name of the lab.
:type lab_name: str
:param name: The name of the schedule.
:type name: str
:param schedule:
:type schedule: :class:`Schedule
<azure.mgmt.devtestlabs.models.Schedule>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
        :return: Schedule or ClientRawResponse if raw=true
        :rtype: :class:`Schedule <azure.mgmt.devtestlabs.models.Schedule>` or
         :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/schedules/{name}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'labName': self._serialize.url("lab_name", lab_name, 'str'),
'name': self._serialize.url("name", name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(schedule, 'Schedule')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Schedule', response)
if response.status_code == 201:
deserialized = self._deserialize('Schedule', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
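    # Example (hypothetical sketch -- assumes the ``client`` object from
    # the usage note above; the Schedule field names shown are
    # assumptions about the model and may differ by API version):
    #
    #   from azure.mgmt.devtestlabs.models import Schedule
    #   created = client.schedule.create_or_update_resource(
    #       'my-rg', 'my-lab', 'LabVmsShutdown',
    #       Schedule(location='westus', task_type='LabVmsShutdownTask',
    #                status='Enabled'))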
def delete_resource(
self, resource_group_name, lab_name, name, custom_headers=None, raw=False, **operation_config):
        """Delete a schedule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param lab_name: The name of the lab.
:type lab_name: str
:param name: The name of the schedule.
:type name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/schedules/{name}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'labName': self._serialize.url("lab_name", lab_name, 'str'),
'name': self._serialize.url("name", name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def patch_resource(
self, resource_group_name, lab_name, name, schedule, custom_headers=None, raw=False, **operation_config):
"""Modify properties of schedules.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param lab_name: The name of the lab.
:type lab_name: str
:param name: The name of the schedule.
:type name: str
:param schedule:
:type schedule: :class:`Schedule
<azure.mgmt.devtestlabs.models.Schedule>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`Schedule <azure.mgmt.devtestlabs.models.Schedule>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/schedules/{name}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'labName': self._serialize.url("lab_name", lab_name, 'str'),
'name': self._serialize.url("name", name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(schedule, 'Schedule')
# Construct and send request
request = self._client.patch(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Schedule', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def execute(
self, resource_group_name, lab_name, name, custom_headers=None, raw=False, **operation_config):
"""Execute a schedule. This operation can take a while to complete.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param lab_name: The name of the lab.
:type lab_name: str
:param name: The name of the schedule.
:type name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/schedules/{name}/execute'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'labName': self._serialize.url("lab_name", lab_name, 'str'),
'name': self._serialize.url("name", name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
def long_running_send():
request = self._client.post(url, query_parameters)
return self._client.send(request, header_parameters, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
| mit | 5,449,993,796,146,375,000 | 44.704104 | 153 | 0.630972 | false |
zjuchenyuan/BioWeb | Lib/Bio/SVDSuperimposer/__init__.py | 1 | 5375 | # Copyright (C) 2002, Thomas Hamelryck ([email protected])
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Align on protein structure onto another using SVD alignment.
SVDSuperimposer finds the best rotation and translation to put
two point sets on top of each other (minimizing the RMSD). This is
eg. useful to superimpose crystal structures. SVD stands for singular
value decomposition, which is used in the algorithm.
"""
from __future__ import print_function
from numpy import dot, transpose, sqrt, array
from numpy.linalg import svd, det
class SVDSuperimposer(object):
"""Class to run SVD alignment,
SVDSuperimposer finds the best rotation and translation to put
two point sets on top of each other (minimizing the RMSD). This is
eg. useful to superimpose crystal structures.
SVD stands for Singular Value Decomposition, which is used to calculate
the superposition.
Reference:
Matrix computations, 2nd ed. Golub, G. & Van Loan, CF., The Johns
Hopkins University Press, Baltimore, 1989
"""
def __init__(self):
self._clear()
# Private methods
def _clear(self):
self.reference_coords = None
self.coords = None
self.transformed_coords = None
self.rot = None
self.tran = None
self.rms = None
self.init_rms = None
def _rms(self, coords1, coords2):
"""Return rms deviations between coords1 and coords2."""
diff = coords1 - coords2
        n_points = coords1.shape[0]
        return sqrt(sum(sum(diff * diff)) / n_points)
# Public methods
def set(self, reference_coords, coords):
"""Set the coordinates to be superimposed.
coords will be put on top of reference_coords.
- reference_coords: an NxDIM array
- coords: an NxDIM array
DIM is the dimension of the points, N is the number
of points to be superimposed.
"""
# clear everything from previous runs
self._clear()
        # store coordinates
self.reference_coords = reference_coords
self.coords = coords
n = reference_coords.shape
m = coords.shape
if n != m or not(n[1] == m[1] == 3):
raise Exception("Coordinate number/dimension mismatch.")
self.n = n[0]
def run(self):
"""Superimpose the coordinate sets."""
if self.coords is None or self.reference_coords is None:
raise Exception("No coordinates set.")
coords = self.coords
reference_coords = self.reference_coords
# center on centroid
av1 = sum(coords) / self.n
av2 = sum(reference_coords) / self.n
coords = coords - av1
reference_coords = reference_coords - av2
# correlation matrix
a = dot(transpose(coords), reference_coords)
u, d, vt = svd(a)
self.rot = transpose(dot(transpose(vt), transpose(u)))
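        # rot is built for right multiplication: transformed = dot(coords, rot) + tran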
# check if we have found a reflection
if det(self.rot) < 0:
vt[2] = -vt[2]
self.rot = transpose(dot(transpose(vt), transpose(u)))
self.tran = av2 - dot(av1, self.rot)
def get_transformed(self):
"""Get the transformed coordinate set."""
if self.coords is None or self.reference_coords is None:
raise Exception("No coordinates set.")
if self.rot is None:
raise Exception("Nothing superimposed yet.")
if self.transformed_coords is None:
self.transformed_coords = dot(self.coords, self.rot) + self.tran
return self.transformed_coords
def get_rotran(self):
"""Right multiplying rotation matrix and translation."""
if self.rot is None:
raise Exception("Nothing superimposed yet.")
return self.rot, self.tran
def get_init_rms(self):
"""Root mean square deviation of untransformed coordinates."""
if self.coords is None:
raise Exception("No coordinates set yet.")
if self.init_rms is None:
self.init_rms = self._rms(self.coords, self.reference_coords)
return self.init_rms
def get_rms(self):
"""Root mean square deviation of superimposed coordinates."""
if self.rms is None:
transformed_coords = self.get_transformed()
self.rms = self._rms(transformed_coords, self.reference_coords)
return self.rms
if __name__ == "__main__":
# start with two coordinate sets (Nx3 arrays - float)
x = array([[51.65, -1.90, 50.07],
[50.40, -1.23, 50.65],
[50.68, -0.04, 51.54],
[50.22, -0.02, 52.85]], 'f')
y = array([[51.30, -2.99, 46.54],
[51.09, -1.88, 47.58],
[52.36, -1.20, 48.03],
[52.71, -1.18, 49.38]], 'f')
# start!
sup = SVDSuperimposer()
# set the coords
# y will be rotated and translated on x
sup.set(x, y)
# do the lsq fit
sup.run()
# get the rmsd
rms = sup.get_rms()
# get rotation (right multiplying!) and the translation
rot, tran = sup.get_rotran()
# rotate y on x
y_on_x1 = dot(y, rot) + tran
# same thing
y_on_x2 = sup.get_transformed()
print(y_on_x1)
print("")
print(y_on_x2)
print("")
print("%.2f" % rms)
| mit | -6,332,052,390,746,206,000 | 30.432749 | 76 | 0.610977 | false |
UnitexGramLab/unitex-library | docs/fr/conf.py | 1 | 10744 | # -*- coding: utf-8 -*-
#
# unitex-library documentation build configuration file, created by
# sphinx-quickstart on Mon Jun 23 17:35:11 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# When RTD builds your project, it sets the READTHEDOCS environment variable
# to the string True.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# Documentation prolog
rst_prolog = u"""
.. note::
Cette documentation est en cours de rédaction. Elle peut contenir des erreurs
et des informations peuvent manquer. Si vous avez des commentaires ou questions,
merci de nous contacter sur la liste de diffusion unitex-devel ou de remplir
un rapport de bogue en utilisant notre `système de suivi des incidents`_.
.. _système de suivi des incidents: https://github.com/UnitexGramLab/unitex-library/issues
"""
# Documentation epilog
rst_epilog = """
.. |ugl| replace:: Unitex/GramLab
"""
# General information about the project.
project = u'Bibliothèque Unitex'
copyright = u'Université Paris-Est Marne-la-Vallée, 2001-2015'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '3.1'
# The full version, including alpha/beta/rc tags.
release = '3.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = "fr"
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'html', 'doctrees']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['../include/_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'unitex-library'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'unitex-library.tex', u'unitex-library Documentation',
u'The Unitex/GramLab devel team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'unitex-library', u'unitex-library Documentation',
[u'The Unitex/GramLab devel team'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'unitex-library', u'unitex-library Documentation',
u'The Unitex/GramLab devel team', 'unitex-library', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'unitex-library'
epub_author = u'The Unitex/GramLab devel team'
epub_publisher = u'The Unitex/GramLab devel team'
epub_copyright = copyright
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Setup function
def setup(app):
# Overrides CSS file theme
app.add_stylesheet("theme_overrides.css")
| lgpl-2.1 | -547,079,259,875,797,700 | 30.675516 | 92 | 0.709257 | false |
jiadaizhao/LeetCode | 0201-0300/0269-Alien Dictionary/0269-Alien Dictionary.py | 1 | 1091 | import collections
from typing import List


class Solution:
def alienOrder(self, words: List[str]) -> str:
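        # Topological sort (Kahn's algorithm): derive a precedence edge from the
        # first differing character of each adjacent word pair, then repeatedly
        # pop letters whose in-degree has dropped to zero.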
letters = set()
prev = ''
graph = collections.defaultdict(set)
for word in words:
for c in word:
letters.add(c)
for a, b in zip(prev, word):
if a != b:
graph[a].add(b)
break
else:
if len(prev) > len(word):
return ''
prev = word
degrees = collections.Counter()
for v in graph.values():
for c in v:
degrees[c] += 1
Q = collections.deque()
result = []
for c in letters:
if degrees[c] == 0:
Q.append(c)
result.append(c)
while Q:
c = Q.popleft()
for n in graph[c]:
degrees[n] -= 1
if degrees[n] == 0:
Q.append(n)
result.append(n)
return ''.join(result) if len(result) == len(letters) else ''
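

# Minimal usage sketch (hypothetical driver, not part of the LeetCode harness):
# Solution().alienOrder(["wrt", "wrf", "er", "ett", "rftt"]) -> "wertf"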
| mit | -724,017,005,088,899,200 | 25.609756 | 69 | 0.409716 | false |
fevxie/odoo-infrastructure | infrastructure/models/environment.py | 1 | 9464 | # -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
import string
from openerp import models, fields, api, _
from openerp.exceptions import Warning
from .server import custom_sudo as sudo
from fabric.contrib.files import exists
import os
class environment(models.Model):
""""""
_name = 'infrastructure.environment'
_description = 'environment'
_order = 'number'
_inherit = ['ir.needaction_mixin', 'mail.thread']
_states_ = [
# State machine: untitle
('draft', 'Draft'),
('active', 'Active'),
('inactive', 'Inactive'),
('cancel', 'Cancel'),
]
@api.model
def get_odoo_version(self):
return self.env['infrastructure.odoo_version'].search([], limit=1)
number = fields.Integer(
string='Number',
required=True,
readonly=True,
states={'draft': [('readonly', False)]},
)
name = fields.Char(
string='Name',
readonly=True,
required=True,
size=16,
states={'draft': [('readonly', False)]},
)
type = fields.Selection([
(u'docker', u'Docker'),
],
string='Type',
readonly=True,
required=True,
states={'draft': [('readonly', False)]},
default='docker'
)
description = fields.Char(
string='Description'
)
partner_id = fields.Many2one(
'res.partner',
string='Partner',
required=True,
readonly=True,
states={'draft': [('readonly', False)]},
)
odoo_version_id = fields.Many2one(
'infrastructure.odoo_version',
string='Odoo Version',
required=True,
readonly=True,
default=get_odoo_version,
states={'draft': [('readonly', False)]},
)
note = fields.Html(
string='Note'
)
color = fields.Integer(
string='Color Index',
compute='get_color',
)
state = fields.Selection(
_states_,
string="State",
default='draft',
)
server_id = fields.Many2one(
'infrastructure.server',
string='Server',
ondelete='cascade',
required=True,
readonly=True,
states={'draft': [('readonly', False)]},
)
instance_ids = fields.One2many(
'infrastructure.instance',
'environment_id',
string='Instances',
context={'from_environment': True},
domain=[('state', '!=', 'cancel')],
)
path = fields.Char(
string='Path',
readonly=True,
required=True,
states={'draft': [('readonly', False)]},
)
instance_count = fields.Integer(
string='# Instances',
compute='_get_instances'
)
database_ids = fields.One2many(
'infrastructure.database',
'environment_id',
string='Databases',
domain=[('state', '!=', 'cancel')],
)
database_count = fields.Integer(
string='# Databases',
compute='_get_databases'
)
@api.one
@api.depends('state')
def get_color(self):
color = 4
if self.state == 'draft':
color = 7
elif self.state == 'cancel':
color = 1
elif self.state == 'inactive':
color = 3
self.color = color
@api.one
@api.depends('database_ids')
def _get_databases(self):
self.database_count = len(self.database_ids)
@api.one
@api.depends('instance_ids')
def _get_instances(self):
self.instance_count = len(self.instance_ids)
@api.one
@api.constrains('number')
def _check_number(self):
if not self.number or self.number < 10 or self.number > 99:
raise Warning(_('Number should be between 10 and 99'))
@api.one
def unlink(self):
if self.state not in ('draft', 'cancel'):
            raise Warning(
                _('You cannot delete an environment which is not '
                  'draft or cancelled.'))
return super(environment, self).unlink()
@api.onchange('server_id')
def _get_number(self):
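        # Environment numbers are two digits (see _check_number); continue from
        # the highest number already used on the selected server, starting at 10.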
environments = self.search(
[('server_id', '=', self.server_id.id)],
order='number desc')
if self.server_id.server_use_type:
self.partner_id = self.server_id.used_by_id
self.number = environments and environments[0].number + 1 or 10
# TODO si no vamos a usar el sufijo entonces borrar lo comentado aca
@api.onchange('partner_id')
# @api.onchange('partner_id', 'odoo_version_id')
def _get_name(self):
name = False
if self.partner_id:
# if self.partner_id and self.odoo_version_id:
name = self.partner_id.commercial_partner_id.name
# partner_name = self.partner_id.commercial_partner_id.name
# sufix = self.odoo_version_id.sufix
# name = '%s-%s' % (partner_name, sufix)
valid_chars = "-_.() %s%s" % (string.ascii_letters, string.digits)
name = ''.join(c for c in name if c in valid_chars)
name = name.replace(' ', '').replace('.', '').lower()
self.name = name
@api.onchange('name', 'server_id')
def _get_path(self):
path = False
if self.server_id.base_path and self.name:
path = os.path.join(self.server_id.base_path, self.name)
self.path = path
@api.one
def make_env_paths(self):
self.server_id.get_env()
if exists(self.path, use_sudo=True):
raise Warning(_("Folder '%s' already exists") %
(self.path))
sudo('mkdir -p ' + self.path)
@api.multi
def create_environment(self):
self.make_env_paths()
self.signal_workflow('sgn_to_active')
@api.one
def check_to_inactive(self):
for instance in self.instance_ids:
if instance.service_type != 'no_service':
raise Warning(_(
                    'To set an environment as inactive you should set all '
'env instances with Service Type "No Service" and better'
' if you stop all of them'))
return True
@api.multi
def delete(self):
if self.instance_ids:
raise Warning(_(
                'You cannot delete an environment that has instances'))
self.server_id.get_env()
paths = [self.path]
for path in paths:
sudo('rm -f -r ' + path)
self.signal_workflow('sgn_cancel')
@api.multi
def action_wfk_set_draft(self):
self.write({'state': 'draft'})
self.delete_workflow()
self.create_workflow()
return True
_sql_constraints = [
('name_uniq', 'unique(name, server_id)',
'Name must be unique per server!'),
('path_uniq', 'unique(path, server_id)',
'Path must be unique per server!'),
('sources_number', 'unique(number, server_id)',
'Number must be unique per server!'),
]
@api.multi
def action_view_instances(self):
'''
        This function returns an action that displays a form or tree view
'''
self.ensure_one()
instances = self.instance_ids.search(
[('environment_id', 'in', self.ids)])
action = self.env['ir.model.data'].xmlid_to_object(
'infrastructure.action_infrastructure_instance_instances')
if not action:
return False
res = action.read()[0]
if len(self) == 1:
res['context'] = {
'default_environment_id': self.id,
'search_default_environment_id': self.id,
'search_default_not_cancel': 1,
}
if not len(instances.ids) > 1:
form_view_id = self.env['ir.model.data'].xmlid_to_res_id(
'infrastructure.view_infrastructure_instance_form')
res['views'] = [(form_view_id, 'form')]
# if 1 then we send res_id, if 0 open a new form view
res['res_id'] = instances and instances.ids[0] or False
return res
@api.multi
def action_view_databases(self):
'''
        This function returns an action that displays a form or tree view
'''
self.ensure_one()
databases = self.database_ids.search(
[('environment_id', 'in', self.ids)])
action = self.env['ir.model.data'].xmlid_to_object(
'infrastructure.action_infrastructure_database_databases')
if not action:
return False
res = action.read()[0]
if len(self) == 1:
res['context'] = {
                'default_environment_id': self.id,
'search_default_environment_id': self.id,
'search_default_not_cancel': 1,
}
if not len(databases.ids) > 1:
form_view_id = self.env['ir.model.data'].xmlid_to_res_id(
'infrastructure.view_infrastructure_database_form')
res['views'] = [(form_view_id, 'form')]
# if 1 then we send res_id, if 0 open a new form view
res['res_id'] = databases and databases.ids[0] or False
return res
| agpl-3.0 | -3,885,337,799,388,930,600 | 31.190476 | 78 | 0.533707 | false |
basmot/futsal_management | base/models/person_address.py | 1 | 1675 | ##############################################################################
#
# Copyright 2015-2016 Bastien Mottiaux
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##############################################################################
from django.db import models
from django.contrib import admin
from base.models import person
class PersonAddressAdmin(admin.ModelAdmin):
list_display = ('person', 'label', 'location', 'postal_code', 'city', 'country')
    fieldsets = ((None, {'fields': ('person', 'label', 'location', 'postal_code', 'city', 'country')}),)
class PersonAddress(models.Model):
person = models.ForeignKey('Person')
label = models.CharField(max_length=20)
location = models.CharField(max_length=255)
postal_code = models.CharField(max_length=20)
city = models.CharField(max_length=255)
country = models.CharField(max_length=255)
    @staticmethod
    def find_by_person(a_person):
        """ Return a queryset containing the addresses of a person.

        Returns an empty queryset if there is no address.
        :param a_person: An instance of the class base.models.Person
        """
        return PersonAddress.objects.filter(person=a_person)
| apache-2.0 | -7,179,946,004,434,599,000 | 38.880952 | 104 | 0.647761 | false |
Zuckonit/devent | devent/event.py | 1 | 1423 | #!/usr/bin/env python
# encoding: utf-8
"""
Use a global dict to store events and provide
wrappers for event-related operations.
~~~~~~~~
event.py
"""
from gevent.event import AsyncResult
from .errors import (
EventKeyAlreadyExisted,
EventKeyTypeError,
)
__all__ = [
'set_event',
'get_event',
'init_event',
'register_event'
]
EVENT_ITEMS = {}
def set_event(name, value):
"""
@name string, event name
@value any, event value
@return boolean the set status
"""
if not isinstance(name, basestring):
raise EventKeyTypeError
global EVENT_ITEMS
    if name in EVENT_ITEMS:
EVENT_ITEMS[name].set(value)
return True
return False
def register_event(name, value=None):
if not isinstance(name, basestring):
raise EventKeyTypeError
global EVENT_ITEMS
    if name in EVENT_ITEMS:
raise EventKeyAlreadyExisted
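    # gevent AsyncResult: greenlets blocking on get() wake up once set() stores a value.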
EVENT_ITEMS[name] = AsyncResult()
if value is not None:
set_event(name, value)
def get_event(name):
global EVENT_ITEMS
return EVENT_ITEMS.get(name, None)
def init_event(name):
"""
@name string event name
"""
if not isinstance(name, basestring):
raise EventKeyTypeError
global EVENT_ITEMS
EVENT_ITEMS[name] = AsyncResult()
if __name__ == '__main__':
register_event('What', 1)
print get_event('What')
| mit | 4,389,554,712,849,286,000 | 19.623188 | 49 | 0.624034 | false |
Azure/azure-sdk-for-python | sdk/storage/azure-storage-blob/azure/storage/blob/aio/_download_async.py | 1 | 22010 | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
# pylint: disable=invalid-overridden-method
import asyncio
import sys
from io import BytesIO
from itertools import islice
import warnings
from typing import AsyncIterator
from aiohttp import ClientPayloadError
from azure.core.exceptions import HttpResponseError, ServiceResponseError
from .._shared.encryption import decrypt_blob
from .._shared.request_handlers import validate_and_format_range_headers
from .._shared.response_handlers import process_storage_error, parse_length_from_content_range
from .._deserialize import get_page_ranges_result
from .._download import process_range_and_offset, _ChunkDownloader
async def process_content(data, start_offset, end_offset, encryption):
if data is None:
raise ValueError("Response cannot be None.")
content = data.response.body()
if encryption.get('key') is not None or encryption.get('resolver') is not None:
try:
return decrypt_blob(
encryption.get('required'),
encryption.get('key'),
encryption.get('resolver'),
content,
start_offset,
end_offset,
data.response.headers)
except Exception as error:
raise HttpResponseError(
message="Decryption failed.",
response=data.response,
error=error)
return content
class _AsyncChunkDownloader(_ChunkDownloader):
def __init__(self, **kwargs):
super(_AsyncChunkDownloader, self).__init__(**kwargs)
self.stream_lock = asyncio.Lock() if kwargs.get('parallel') else None
self.progress_lock = asyncio.Lock() if kwargs.get('parallel') else None
async def process_chunk(self, chunk_start):
chunk_start, chunk_end = self._calculate_range(chunk_start)
chunk_data = await self._download_chunk(chunk_start, chunk_end - 1)
length = chunk_end - chunk_start
if length > 0:
await self._write_to_stream(chunk_data, chunk_start)
await self._update_progress(length)
async def yield_chunk(self, chunk_start):
chunk_start, chunk_end = self._calculate_range(chunk_start)
return await self._download_chunk(chunk_start, chunk_end - 1)
async def _update_progress(self, length):
if self.progress_lock:
async with self.progress_lock: # pylint: disable=not-async-context-manager
self.progress_total += length
else:
self.progress_total += length
async def _write_to_stream(self, chunk_data, chunk_start):
if self.stream_lock:
async with self.stream_lock: # pylint: disable=not-async-context-manager
self.stream.seek(self.stream_start + (chunk_start - self.start_index))
self.stream.write(chunk_data)
else:
self.stream.write(chunk_data)
async def _download_chunk(self, chunk_start, chunk_end):
download_range, offset = process_range_and_offset(
chunk_start, chunk_end, chunk_end, self.encryption_options)
# No need to download the empty chunk from server if there's no data in the chunk to be downloaded.
# Do optimize and create empty chunk locally if condition is met.
if self._do_optimize(download_range[0], download_range[1]):
chunk_data = b"\x00" * self.chunk_size
else:
range_header, range_validation = validate_and_format_range_headers(
download_range[0],
download_range[1],
check_content_md5=self.validate_content
)
retry_active = True
retry_total = 3
while retry_active:
try:
_, response = await self.client.download(
range=range_header,
range_get_content_md5=range_validation,
validate_content=self.validate_content,
data_stream_total=self.total_size,
download_stream_current=self.progress_total,
**self.request_options
)
retry_active = False
except HttpResponseError as error:
process_storage_error(error)
except ClientPayloadError as error:
retry_total -= 1
if retry_total <= 0:
raise ServiceResponseError(error, error=error)
await asyncio.sleep(1)
chunk_data = await process_content(response, offset[0], offset[1], self.encryption_options)
# This makes sure that if_match is set so that we can validate
# that subsequent downloads are to an unmodified blob
if self.request_options.get('modified_access_conditions'):
self.request_options['modified_access_conditions'].if_match = response.properties.etag
return chunk_data
class _AsyncChunkIterator(object):
"""Async iterator for chunks in blob download stream."""
def __init__(self, size, content, downloader, chunk_size):
self.size = size
self._chunk_size = chunk_size
self._current_content = content
self._iter_downloader = downloader
self._iter_chunks = None
self._complete = (size == 0)
def __len__(self):
return self.size
def __iter__(self):
raise TypeError("Async stream must be iterated asynchronously.")
def __aiter__(self):
return self
async def __anext__(self):
"""Iterate through responses."""
if self._complete:
raise StopAsyncIteration("Download complete")
if not self._iter_downloader:
# cut the data obtained from initial GET into chunks
if len(self._current_content) > self._chunk_size:
return self._get_chunk_data()
self._complete = True
return self._current_content
if not self._iter_chunks:
self._iter_chunks = self._iter_downloader.get_chunk_offsets()
# initial GET result still has more than _chunk_size bytes of data
if len(self._current_content) >= self._chunk_size:
return self._get_chunk_data()
try:
chunk = next(self._iter_chunks)
self._current_content += await self._iter_downloader.yield_chunk(chunk)
except StopIteration:
self._complete = True
# it's likely that there some data left in self._current_content
if self._current_content:
return self._current_content
raise StopAsyncIteration("Download complete")
return self._get_chunk_data()
def _get_chunk_data(self):
chunk_data = self._current_content[: self._chunk_size]
self._current_content = self._current_content[self._chunk_size:]
return chunk_data
class StorageStreamDownloader(object): # pylint: disable=too-many-instance-attributes
"""A streaming object to download from Azure Storage.
:ivar str name:
The name of the blob being downloaded.
:ivar str container:
The name of the container where the blob is.
:ivar ~azure.storage.blob.BlobProperties properties:
The properties of the blob being downloaded. If only a range of the data is being
downloaded, this will be reflected in the properties.
:ivar int size:
        The size of the total data in the stream. This will be the byte range if specified,
otherwise the total size of the blob.
"""
def __init__(
self,
clients=None,
config=None,
start_range=None,
end_range=None,
validate_content=None,
encryption_options=None,
max_concurrency=1,
name=None,
container=None,
encoding=None,
**kwargs
):
self.name = name
self.container = container
self.properties = None
self.size = None
self._clients = clients
self._config = config
self._start_range = start_range
self._end_range = end_range
self._max_concurrency = max_concurrency
self._encoding = encoding
self._validate_content = validate_content
self._encryption_options = encryption_options or {}
self._request_options = kwargs
self._location_mode = None
self._download_complete = False
self._current_content = None
self._file_size = None
self._non_empty_ranges = None
self._response = None
# The service only provides transactional MD5s for chunks under 4MB.
# If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first
# chunk so a transactional MD5 can be retrieved.
self._first_get_size = self._config.max_single_get_size if not self._validate_content \
else self._config.max_chunk_get_size
initial_request_start = self._start_range if self._start_range is not None else 0
if self._end_range is not None and self._end_range - self._start_range < self._first_get_size:
initial_request_end = self._end_range
else:
initial_request_end = initial_request_start + self._first_get_size - 1
self._initial_range, self._initial_offset = process_range_and_offset(
initial_request_start, initial_request_end, self._end_range, self._encryption_options
)
def __len__(self):
return self.size
async def _setup(self):
self._response = await self._initial_request()
self.properties = self._response.properties
self.properties.name = self.name
self.properties.container = self.container
# Set the content length to the download size instead of the size of
# the last range
self.properties.size = self.size
# Overwrite the content range to the user requested range
self.properties.content_range = 'bytes {0}-{1}/{2}'.format(
self._start_range,
self._end_range,
self._file_size
)
# Overwrite the content MD5 as it is the MD5 for the last range instead
# of the stored MD5
# TODO: Set to the stored MD5 when the service returns this
self.properties.content_md5 = None
if self.size == 0:
self._current_content = b""
else:
self._current_content = await process_content(
self._response,
self._initial_offset[0],
self._initial_offset[1],
self._encryption_options
)
async def _initial_request(self):
range_header, range_validation = validate_and_format_range_headers(
self._initial_range[0],
self._initial_range[1],
start_range_required=False,
end_range_required=False,
check_content_md5=self._validate_content)
retry_active = True
retry_total = 3
while retry_active:
try:
location_mode, response = await self._clients.blob.download(
range=range_header,
range_get_content_md5=range_validation,
validate_content=self._validate_content,
data_stream_total=None,
download_stream_current=0,
**self._request_options)
# Check the location we read from to ensure we use the same one
# for subsequent requests.
self._location_mode = location_mode
# Parse the total file size and adjust the download size if ranges
# were specified
self._file_size = parse_length_from_content_range(response.properties.content_range)
if self._end_range is not None:
# Use the length unless it is over the end of the file
self.size = min(self._file_size, self._end_range - self._start_range + 1)
elif self._start_range is not None:
self.size = self._file_size - self._start_range
else:
self.size = self._file_size
retry_active = False
except HttpResponseError as error:
if self._start_range is None and error.response.status_code == 416:
# Get range will fail on an empty file. If the user did not
# request a range, do a regular get request in order to get
# any properties.
try:
_, response = await self._clients.blob.download(
validate_content=self._validate_content,
data_stream_total=0,
download_stream_current=0,
**self._request_options)
retry_active = False
except HttpResponseError as error:
process_storage_error(error)
# Set the download size to empty
self.size = 0
self._file_size = 0
else:
process_storage_error(error)
except ClientPayloadError as error:
retry_total -= 1
if retry_total <= 0:
raise ServiceResponseError(error, error=error)
await asyncio.sleep(1)
# get page ranges to optimize downloading sparse page blob
if response.properties.blob_type == 'PageBlob':
try:
page_ranges = await self._clients.page_blob.get_page_ranges()
self._non_empty_ranges = get_page_ranges_result(page_ranges)[0]
except HttpResponseError:
pass
# If the file is small, the download is complete at this point.
# If file size is large, download the rest of the file in chunks.
if response.properties.size != self.size:
if self._request_options.get('modified_access_conditions'):
self._request_options['modified_access_conditions'].if_match = response.properties.etag
else:
self._download_complete = True
return response
def chunks(self):
# type: () -> AsyncIterator[bytes]
"""Iterate over chunks in the download stream.
:rtype: AsyncIterator[bytes]
.. admonition:: Example:
.. literalinclude:: ../samples/blob_samples_hello_world_async.py
:start-after: [START download_a_blob_in_chunk]
:end-before: [END download_a_blob_in_chunk]
:language: python
:dedent: 16
:caption: Download a blob using chunks().
"""
if self.size == 0 or self._download_complete:
iter_downloader = None
else:
data_end = self._file_size
if self._end_range is not None:
# Use the length unless it is over the end of the file
data_end = min(self._file_size, self._end_range + 1)
iter_downloader = _AsyncChunkDownloader(
client=self._clients.blob,
non_empty_ranges=self._non_empty_ranges,
total_size=self.size,
chunk_size=self._config.max_chunk_get_size,
current_progress=self._first_get_size,
start_range=self._initial_range[1] + 1, # Start where the first download ended
end_range=data_end,
stream=None,
parallel=False,
validate_content=self._validate_content,
encryption_options=self._encryption_options,
use_location=self._location_mode,
**self._request_options)
return _AsyncChunkIterator(
size=self.size,
content=self._current_content,
downloader=iter_downloader,
chunk_size=self._config.max_chunk_get_size)
async def readall(self):
"""Download the contents of this blob.
This operation is blocking until all data is downloaded.
:rtype: bytes or str
"""
stream = BytesIO()
await self.readinto(stream)
data = stream.getvalue()
if self._encoding:
return data.decode(self._encoding)
return data
async def content_as_bytes(self, max_concurrency=1):
"""Download the contents of this file.
This operation is blocking until all data is downloaded.
:keyword int max_concurrency:
The number of parallel connections with which to download.
:rtype: bytes
"""
warnings.warn(
"content_as_bytes is deprecated, use readall instead",
DeprecationWarning
)
self._max_concurrency = max_concurrency
return await self.readall()
async def content_as_text(self, max_concurrency=1, encoding="UTF-8"):
"""Download the contents of this blob, and decode as text.
This operation is blocking until all data is downloaded.
:param int max_concurrency:
The number of parallel connections with which to download.
:param str encoding:
Test encoding to decode the downloaded bytes. Default is UTF-8.
:rtype: str
"""
warnings.warn(
"content_as_text is deprecated, use readall instead",
DeprecationWarning
)
self._max_concurrency = max_concurrency
self._encoding = encoding
return await self.readall()
async def readinto(self, stream):
"""Download the contents of this blob to a stream.
:param stream:
The stream to download to. This can be an open file-handle,
or any writable stream. The stream must be seekable if the download
uses more than one parallel connection.
:returns: The number of bytes read.
:rtype: int
"""
# the stream must be seekable if parallel download is required
parallel = self._max_concurrency > 1
if parallel:
error_message = "Target stream handle must be seekable."
if sys.version_info >= (3,) and not stream.seekable():
raise ValueError(error_message)
try:
stream.seek(stream.tell())
except (NotImplementedError, AttributeError):
raise ValueError(error_message)
# Write the content to the user stream
stream.write(self._current_content)
if self._download_complete:
return self.size
data_end = self._file_size
if self._end_range is not None:
# Use the length unless it is over the end of the file
data_end = min(self._file_size, self._end_range + 1)
downloader = _AsyncChunkDownloader(
client=self._clients.blob,
non_empty_ranges=self._non_empty_ranges,
total_size=self.size,
chunk_size=self._config.max_chunk_get_size,
current_progress=self._first_get_size,
start_range=self._initial_range[1] + 1, # start where the first download ended
end_range=data_end,
stream=stream,
parallel=parallel,
validate_content=self._validate_content,
encryption_options=self._encryption_options,
use_location=self._location_mode,
**self._request_options)
dl_tasks = downloader.get_chunk_offsets()
running_futures = [
asyncio.ensure_future(downloader.process_chunk(d))
for d in islice(dl_tasks, 0, self._max_concurrency)
]
while running_futures:
# Wait for some download to finish before adding a new one
done, running_futures = await asyncio.wait(
running_futures, return_when=asyncio.FIRST_COMPLETED)
try:
for task in done:
task.result()
except HttpResponseError as error:
process_storage_error(error)
try:
next_chunk = next(dl_tasks)
except StopIteration:
break
else:
running_futures.add(asyncio.ensure_future(downloader.process_chunk(next_chunk)))
if running_futures:
# Wait for the remaining downloads to finish
done, _running_futures = await asyncio.wait(running_futures)
try:
for task in done:
task.result()
except HttpResponseError as error:
process_storage_error(error)
return self.size
async def download_to_stream(self, stream, max_concurrency=1):
"""Download the contents of this blob to a stream.
:param stream:
The stream to download to. This can be an open file-handle,
or any writable stream. The stream must be seekable if the download
uses more than one parallel connection.
:param int max_concurrency:
The number of parallel connections with which to download.
:returns: The properties of the downloaded blob.
:rtype: Any
"""
warnings.warn(
"download_to_stream is deprecated, use readinto instead",
DeprecationWarning
)
self._max_concurrency = max_concurrency
await self.readinto(stream)
return self.properties
| mit | -8,986,826,406,296,550,000 | 39.23766 | 107 | 0.581645 | false |
Naereen/mazhe | phystricksExoXLVL.py | 1 | 1188 | from phystricks import *
def ExoXLVL():
pspict,fig = SinglePicture("ExoXLVL")
x=var('x')
dist=0.1
l=2.5
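    # Four squares of side l around the origin, offset by `dist`, one per labelled expression.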
C1=Rectangle( Point(-l,l),Point(-dist,dist) )
C2=Rectangle( Point(0,0),Point(l,l) )
C3=Rectangle( Point(0,0),Point(-l,-l) )
C4=Rectangle( Point(dist,-dist),Point(l,-l) )
C1.parameters.color="blue"
C2.parameters.color="red"
C3.parameters.color="cyan"
C4.parameters.color="green"
C1.parameters.style="dashed"
C2.parameters.style=C1.parameters.style
    C3.parameters.style=C1.parameters.style

C4.parameters.style=C1.parameters.style
a1=C1.center()
a1.parameters.symbol=""
a1.put_mark(0,0,"\( xy\)",automatic_place=pspict)
a2=C2.center()
a2.parameters.symbol=""
a2.put_mark(0,0,"\( x-y\)",automatic_place=pspict)
a3=C3.center()
a3.parameters.symbol=""
a3.put_mark(0,0,"\( x^2y\)",automatic_place=pspict)
a4=C4.center()
a4.parameters.symbol=""
a4.put_mark(0,0,"\( x+y\)",automatic_place=pspict)
pspict.axes.no_graduation()
pspict.DrawGraphs(C1,C2,C3,C4,a1,a2,a3,a4)
pspict.DrawDefaultAxes()
pspict.dilatation(1)
fig.conclude()
fig.write_the_file()
| gpl-3.0 | 8,272,567,932,617,199,000 | 26 | 55 | 0.637205 | false |
chaincoin/chaincoin | contrib/devtools/update-translations.py | 1 | 8664 | #!/usr/bin/env python3
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Run this script from the root of the repository to update all translations from
transifex.
It will do the following automatically:
- fetch all translations using the tx tool
- post-process them into valid and committable format
- remove invalid control characters
- remove location tags (makes diffs less noisy)
TODO:
- auto-add new translations to the build system according to the translation process
'''
import subprocess
import re
import sys
import os
import io
import xml.etree.ElementTree as ET
# Name of transifex tool
TX = 'tx'
# Name of source language file
SOURCE_LANG = 'chaincoin_en.ts'
# Directory with locale files
LOCALE_DIR = 'src/qt/locale'
# Minimum number of messages for translation to be considered at all
MIN_NUM_MESSAGES = 10
# Regexp to check for Chaincoin addresses
ADDRESS_REGEXP = re.compile('([C2]|chc)[a-zA-Z0-9]{30,}')
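# Flags translations containing strings that look like Chaincoin addresses:
# a 'C', '2', or 'chc' prefix followed by 30+ alphanumeric characters.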
def check_at_repository_root():
if not os.path.exists('.git'):
print('No .git directory found')
print('Execute this script at the root of the repository', file=sys.stderr)
sys.exit(1)
def fetch_all_translations():
if subprocess.call([TX, 'pull', '-f', '-a']):
print('Error while fetching translations', file=sys.stderr)
sys.exit(1)
def find_format_specifiers(s, errors):
'''Find all format specifiers in a string.'''
pos = 0
specifiers = []
while True:
percent = s.find('%', pos)
if percent < 0:
break
try:
specifiers.append(s[percent+1])
pos = percent+2
except IndexError:
errors.append("Failed to parse specifier: %s'" % (sanitize_string(s)))
# just jump over and move on
pos = percent+1
return specifiers
def split_format_specifiers(specifiers):
'''Split format specifiers between numeric (Qt) and others (strprintf)'''
numeric = []
other = []
for s in specifiers:
if s in {'1','2','3','4','5','6','7','8','9'}:
numeric.append(s)
else:
other.append(s)
# If both numeric format specifiers and "others" are used, assume we're dealing
# with a Qt-formatted message. In the case of Qt formatting (see https://doc.qt.io/qt-5/qstring.html#arg)
# only numeric formats are replaced at all. This means "(percentage: %1%)" is valid, without needing
# any kind of escaping that would be necessary for strprintf. Without this, this function
# would wrongly detect '%)' as a printf format specifier.
if numeric:
other = []
# numeric (Qt) can be present in any order, others (strprintf) must be in specified order
return set(numeric),other
def sanitize_string(s):
'''Sanitize string for printing'''
return s.replace('\n',' ')
def check_format_specifiers(source, translation, errors, numerus):
source_f = split_format_specifiers(find_format_specifiers(source, errors))
# assert that no source messages contain both Qt and strprintf format specifiers
# if this fails, go change the source as this is hacky and confusing!
assert(not(source_f[0] and source_f[1]))
try:
translation_f = split_format_specifiers(find_format_specifiers(translation, errors))
except IndexError:
errors.append("Parse error in translation for '%s': '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
else:
if source_f != translation_f:
if numerus and source_f == (set(), ['n']) and translation_f == (set(), []) and translation.find('%') == -1:
# Allow numerus translations to omit %n specifier (usually when it only has one possible value)
return True
errors.append("Mismatch between '%s' and '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
return True
def all_ts_files(suffix=''):
for filename in os.listdir(LOCALE_DIR):
# process only language files, and do not process source language
if not filename.endswith('.ts'+suffix) or filename == SOURCE_LANG+suffix:
continue
if suffix: # remove provided suffix
filename = filename[0:-len(suffix)]
filepath = os.path.join(LOCALE_DIR, filename)
yield(filename, filepath)
FIX_RE = re.compile(b'[\x00-\x09\x0b\x0c\x0e-\x1f]')
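# C0 control characters (including tab) except LF (0x0a) and CR (0x0d);
# stripped so the XML parser does not choke on them.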
def remove_invalid_characters(s):
'''Remove invalid characters from translation string'''
return FIX_RE.sub(b'', s)
# Override cdata escape function to make our output match Qt's (optional, just for cleaner diffs for
# comparison, disable by default)
_orig_escape_cdata = None
def escape_cdata(text):
text = _orig_escape_cdata(text)
text = text.replace("'", ''')
text = text.replace('"', '"')
return text
def contains_bitcoin_addr(text, errors):
if text is not None and ADDRESS_REGEXP.search(text) is not None:
errors.append('Translation "%s" contains a chaincoin address. This will be removed.' % (text))
return True
return False
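# Illustrative sketch (hypothetical strings, not part of the original script):
#   contains_bitcoin_addr('pay ' + 'C' + 'a' * 30, errs)  # -> True, error logged
#   contains_bitcoin_addr('no address here', errs)        # -> False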
def postprocess_translations(reduce_diff_hacks=False):
print('Checking and postprocessing...')
if reduce_diff_hacks:
global _orig_escape_cdata
_orig_escape_cdata = ET._escape_cdata
ET._escape_cdata = escape_cdata
for (filename,filepath) in all_ts_files():
os.rename(filepath, filepath+'.orig')
have_errors = False
for (filename,filepath) in all_ts_files('.orig'):
# pre-fixups to cope with transifex output
        parser = ET.XMLParser(encoding='utf-8') # need to override the encoding because 'utf8' is not understood, only 'utf-8'
with open(filepath + '.orig', 'rb') as f:
data = f.read()
# remove control characters; this must be done over the entire file otherwise the XML parser will fail
data = remove_invalid_characters(data)
tree = ET.parse(io.BytesIO(data), parser=parser)
# iterate over all messages in file
root = tree.getroot()
for context in root.findall('context'):
for message in context.findall('message'):
numerus = message.get('numerus') == 'yes'
source = message.find('source').text
translation_node = message.find('translation')
# pick all numerusforms
if numerus:
translations = [i.text for i in translation_node.findall('numerusform')]
else:
translations = [translation_node.text]
for translation in translations:
if translation is None:
continue
errors = []
valid = check_format_specifiers(source, translation, errors, numerus) and not contains_bitcoin_addr(translation, errors)
for error in errors:
print('%s: %s' % (filename, error))
if not valid: # set type to unfinished and clear string if invalid
translation_node.clear()
translation_node.set('type', 'unfinished')
have_errors = True
# Remove location tags
for location in message.findall('location'):
message.remove(location)
# Remove entire message if it is an unfinished translation
if translation_node.get('type') == 'unfinished':
context.remove(message)
# check if document is (virtually) empty, and remove it if so
num_messages = 0
for context in root.findall('context'):
for message in context.findall('message'):
num_messages += 1
if num_messages < MIN_NUM_MESSAGES:
print('Removing %s, as it contains only %i messages' % (filepath, num_messages))
continue
# write fixed-up tree
        # if diff reduction was requested, tweak the XML output to match Qt's formatting
if reduce_diff_hacks:
out = io.BytesIO()
tree.write(out, encoding='utf-8')
out = out.getvalue()
out = out.replace(b' />', b'/>')
with open(filepath, 'wb') as f:
f.write(out)
else:
tree.write(filepath, encoding='utf-8')
return have_errors
if __name__ == '__main__':
check_at_repository_root()
fetch_all_translations()
postprocess_translations()
| mit | 1,825,707,706,652,677,600 | 38.381818 | 140 | 0.622807 | false |
ZeitOnline/zeit.edit | src/zeit/edit/meta.py | 1 | 2283 | import gocept.lxml.interfaces
import grokcore.component
import martian
import zeit.edit.block
import zeit.edit.interfaces
import zope.component.zcml
import zope.interface
class NoneGuard(object):
"""An IRuleGlob must never return None, because then it would not show up
    in the getAdapters() result, so its name would be undefined, leading to
    eval errors.
XXX: using an inline function or lambda to do the wrapping didn't work, so
we use instances of this class instead.
"""
def __init__(self, func):
self.func = func
def __call__(self, *args, **kw):
result = self.func(*args, **kw)
if result is None:
result = '__NONE__'
return result
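# Illustrative sketch (not part of the original module): NoneGuard passes
# non-None results through unchanged and substitutes the '__NONE__' sentinel
# otherwise, so the glob still appears under its name in getAdapters().
#   NoneGuard(lambda context: None)('ctx')  # -> '__NONE__'
#   NoneGuard(len)('ab')                    # -> 2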
class GlobalRuleGlobsGrokker(martian.GlobalGrokker):
def grok(self, name, module, module_info, config, **kw):
globs = module_info.getAnnotation('zeit.edit.globs', [])
for func, adapts in globs:
zope.component.zcml.adapter(
config,
for_=(adapts,),
factory=(NoneGuard(func),),
provides=zeit.edit.interfaces.IRuleGlob,
name=unicode(func.func_name))
return True
class SimpleElementGrokker(martian.ClassGrokker):
martian.component(zeit.edit.block.SimpleElement)
def execute(self, context, config, **kw):
for_ = (context.area, gocept.lxml.interfaces.IObjectified)
provides = zope.interface.implementedBy(context).declared[0]
config.action(
discriminator=('adapter', for_, provides, context.type),
callable=zope.component.provideAdapter,
args=(context, for_, provides, context.type),
)
return True
class ElementFactoryGrokker(martian.ClassGrokker):
martian.component(zeit.edit.block.ElementFactory)
martian.directive(grokcore.component.context)
def execute(self, factory, config, context, **kw):
name = factory.element_type = factory.produces.type
provides = list(zope.interface.implementedBy(factory))[0]
config.action(
discriminator=('adapter', context, provides, name),
callable=zope.component.provideAdapter,
args=(factory, (context,), provides, name),
)
return True
| bsd-3-clause | 3,401,236,150,200,274,400 | 30.708333 | 78 | 0.645642 | false |
DavidPurcell/murano_temp | murano/tests/unit/dsl/test_dsl.py | 1 | 6877 | # Copyright (c) 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from yaql.language import expressions as yaql_expressions
from yaql.language import yaqltypes
from murano.dsl import dsl
from murano.dsl import dsl_types
from murano.dsl import helpers
from murano.tests.unit.dsl.foundation import test_case
class TestMuranoObjectParameter(test_case.DslTestCase):
def setUp(self):
super(TestMuranoObjectParameter, self).setUp()
self.mop = dsl.MuranoObjectParameter()
@mock.patch.object(yaqltypes.PythonType, 'check', return_value=False)
def test_check_fail_super_check(self, mock_parent_check):
val = dsl_types.MuranoObject()
is_check = self.mop.check(val, 'context', 123, foo='bar')
mock_parent_check.assert_called_with(val, 'context', 123, foo='bar')
self.assertFalse(is_check)
@mock.patch.object(yaqltypes.PythonType, 'check', return_value=True)
def test_check_val_none_or_yaql_expr(self, mock_parent_check):
is_check1 = self.mop.check(None, 'context')
is_check2 = self.mop.check(yaql_expressions.Expression(), 'context')
self.assertTrue(is_check1 and is_check2)
@mock.patch.object(yaqltypes.PythonType, 'check', return_value=True)
def test_check_val_not_murano_object(self, mock_parent_check):
is_check = self.mop.check('value', 'context')
self.assertFalse(is_check)
@mock.patch.object(yaqltypes.PythonType, 'check', return_value=True)
def test_check_val_murano_object(self, mock_parent_check):
val = dsl_types.MuranoObject()
is_check = self.mop.check(val, 'context')
self.assertTrue(is_check)
@mock.patch.object(yaqltypes.PythonType, 'check', return_value=True)
@mock.patch.object(helpers, 'get_type')
@mock.patch.object(helpers, 'is_instance_of', return_value=True)
def test_check_string_murano_class(self, mock_iio, mock_gt,
mock_parent_check):
val = dsl_types.MuranoObject()
self.mop = dsl.MuranoObjectParameter(murano_class='myclass')
is_check = self.mop.check(val, 'context')
self.assertTrue(is_check)
@mock.patch.object(yaqltypes.PythonType, 'check', return_value=True)
def test_check_murano_class(self, mock_parent_check):
val = dsl_types.MuranoObject()
murano_class = mock.MagicMock()
murano_class.is_compatible.return_value = False
self.mop = dsl.MuranoObjectParameter(murano_class=murano_class)
is_check = self.mop.check(val, 'context')
self.assertFalse(is_check)
@mock.patch.object(yaqltypes.PythonType, 'convert', return_value='result')
@mock.patch.object(dsl.MuranoObjectInterface, 'create', return_value='return')
def test_convert(self, mock_moi_create, mock_parent_convert):
ret = self.mop.convert('val', 'sender', 'context', 'fs', 'eng')
mock_parent_convert.assert_called_with('val', 'sender', 'context', 'fs', 'eng')
mock_moi_create.assert_called_with('result')
self.assertEqual('return', ret)
@mock.patch.object(yaqltypes.PythonType, 'convert')
def test_convert_no_decorate(self, mock_parent_convert):
self.mop = dsl.MuranoObjectParameter(decorate=False)
expected = dsl_types.MuranoObject()
mock_parent_convert.return_value = expected
ret = self.mop.convert('val', 'sender', 'context', 'fs', 'eng')
mock_parent_convert.assert_called_with('val', 'sender', 'context', 'fs', 'eng')
self.assertEqual(expected, ret)
@mock.patch.object(yaqltypes.PythonType, 'convert')
def test_convert_no_decorate_none(self, mock_parent_convert):
self.mop = dsl.MuranoObjectParameter(decorate=False)
mock_parent_convert.return_value = None
ret = self.mop.convert('val', 'sender', 'context', 'fs', 'eng')
mock_parent_convert.assert_called_with('val', 'sender', 'context', 'fs', 'eng')
self.assertIs(None, ret)
@mock.patch.object(yaqltypes.PythonType, 'convert')
def test_convert_no_decorate_non_murano_object(self, mock_parent_convert):
self.mop = dsl.MuranoObjectParameter(decorate=False)
mock_parent_convert.return_value = mock.MagicMock(object='myobject')
ret = self.mop.convert('val', 'sender', 'context', 'fs', 'eng')
mock_parent_convert.assert_called_with('val', 'sender', 'context', 'fs', 'eng')
self.assertIs('myobject', ret)
class TestThisParameter(test_case.DslTestCase):
def setUp(self):
super(TestThisParameter, self).setUp()
self.this_param = dsl.ThisParameter()
@mock.patch.object(dsl, 'get_this', return_value='return')
def test_convert(self, mock_get_this):
ret = self.this_param.convert('val', 'sender', 'ctxt', 'fs', 'eng')
mock_get_this.assert_called_with('ctxt')
self.assertEqual('return', ret)
class TestInterfacesParameter(test_case.DslTestCase):
def setUp(self):
super(TestInterfacesParameter, self).setUp()
self.ifs_param = dsl.InterfacesParameter()
@mock.patch.object(helpers, 'get_this', return_value='gotthis')
@mock.patch.object(dsl, 'Interfaces', return_value='interfaces')
def test_convert(self, mock_interfaces, mock_h_get_this):
ret = self.ifs_param.convert('val', 'sender', 'ctxt', 'fs', 'eng')
mock_interfaces.assert_called_with('gotthis')
self.assertEqual('interfaces', ret)
class TestMuranoTypeParameter(test_case.DslTestCase):
def setUp(self):
super(TestMuranoTypeParameter, self).setUp()
self.mtp = dsl.MuranoTypeParameter()
@mock.patch.object(yaqltypes.PythonType, 'check', return_value=False)
def test_check_fail_super_check(self, mock_parent_check):
val = dsl_types.MuranoTypeReference(mock.MagicMock())
is_check = self.mtp.check(val, 'context', 123, foo='bar')
mock_parent_check.assert_called_with(val, 'context', 123, foo='bar')
self.assertFalse(is_check)
@mock.patch.object(yaqltypes.PythonType, 'check', return_value=True)
def tests_check_value_not_string_not_resolve_strings(
self, mock_parent_check):
self.mtp = dsl.MuranoTypeParameter(resolve_strings=False)
val = 12345
is_check = self.mtp.check(val, 'context')
self.assertFalse(is_check)
| apache-2.0 | -4,086,422,080,617,058,000 | 44.846667 | 87 | 0.67544 | false |
rosalindfdt/huzzahbadge | huzzah/register/adafruit_register/i2c_bcd_alarm.py | 1 | 6391 | # The MIT License (MIT)
#
# Copyright (c) 2016 Scott Shawcroft for Adafruit Industries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import time
def _bcd2bin(value):
"""Convert binary coded decimal to Binary
Arguments:
value - the BCD value to convert to binary (required, no default)
"""
return value - 6 * (value >> 4)
def _bin2bcd(value):
"""Convert a binary value to binary coded decimal.
Arguments:
value - the binary value to convert to BCD. (required, no default)
"""
return value + 6 * (value // 10)
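# Worked example (illustrative, not part of the original driver): BCD 0x42
# encodes decimal 42, and the two helpers are inverses of each other.
#   _bcd2bin(0x42) == 42   # 66 - 6 * (0x42 >> 4) == 66 - 24
#   _bin2bcd(42) == 0x42   # 42 + 6 * (42 // 10) == 42 + 24 == 66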
ALARM_COMPONENT_DISABLED = 0x80
FREQUENCY = ["secondly", "minutely", "hourly", "daily", "weekly", "monthly"]
class BCDAlarmTimeRegister:
"""
Alarm date and time register using binary coded decimal structure.
The byte order of the registers must* be: [second], minute, hour, day,
weekday. Each byte must also have a high enable bit where 1 is disabled and
0 is enabled.
* If weekday_shared is True, then weekday and day share a register.
* If has_seconds is True, then there is a seconds register.
Values are a tuple of (`time.struct_time`, `str`) where the struct represents
a date and time that would alarm. The string is the frequency:
* "secondly", once a second (only if alarm has_seconds)
* "minutely", once a minute when seconds match (if alarm doesn't seconds then when seconds = 0)
* "hourly", once an hour when `tm_min` and `tm_sec` match
* "daily", once a day when `tm_hour`, `tm_min` and `tm_sec` match
* "weekly", once a week when `tm_wday`, `tm_hour`, `tm_min`, `tm_sec` match
* "monthly", once a month when `tm_mday`, `tm_hour`, `tm_min`, `tm_sec` match
:param int register_address: The register address to start the read
:param bool has_seconds: True if the alarm can happen minutely.
:param bool weekday_shared: True if weekday and day share the same register
:param int weekday_start: 0 or 1 depending on the RTC's representation of the first day of the week (Monday)
"""
# Defaults are based on alarm1 of the DS3231.
def __init__(self, register_address, has_seconds=True, weekday_shared=True, weekday_start=1):
buffer_size = 5
if weekday_shared:
buffer_size -= 1
if has_seconds:
buffer_size += 1
self.has_seconds = has_seconds
self.buffer = bytearray(buffer_size)
self.buffer[0] = register_address
self.weekday_shared = weekday_shared
self.weekday_start = weekday_start
def __get__(self, obj, objtype=None):
# Read the alarm register.
with obj.i2c_device:
obj.i2c_device.write(self.buffer, end=1, stop=False)
obj.i2c_device.readinto(self.buffer, start=1)
frequency = None
i = 1
seconds = 0
if self.has_seconds:
if (self.buffer[1] & 0x80) != 0:
frequency = "secondly"
else:
frequency = "minutely"
seconds = _bcd2bin(self.buffer[1] & 0x7f)
i = 2
minute = 0
if (self.buffer[i] & 0x80) == 0:
frequency = "hourly"
minute = _bcd2bin(self.buffer[i] & 0x7f)
hour = 0
if (self.buffer[i + 1] & 0x80) == 0:
frequency = "daily"
hour = _bcd2bin(self.buffer[i + 1] & 0x7f)
mday = None
wday = None
if (self.buffer[i + 2] & 0x80) == 0:
# day of the month
if not self.weekday_shared or (self.buffer[i + 2] & 0x40) == 0:
frequency = "monthly"
mday = _bcd2bin(self.buffer[i + 2] & 0x3f)
else: # weekday
frequency = "weekly"
wday = _bcd2bin(self.buffer[i + 2] & 0x3f) - self.weekday_start
# weekday
if not self.weekday_shared and (self.buffer[i + 3] & 0x80) == 0:
frequency = "monthly"
mday = _bcd2bin(self.buffer[i + 3] & 0x7f)
if mday is not None:
wday = (mday - 2) % 7
elif wday is not None:
mday = wday + 2
else:
# Jan 1, 2017 was a Sunday (6)
wday = 6
mday = 1
return (time.struct_time((2017, 1, mday, hour, minute, seconds, wday, mday, -1)), frequency)
def __set__(self, obj, value):
# Turn all components off by default.
for i in range(len(self.buffer) - 1):
self.buffer[i + 1] = ALARM_COMPONENT_DISABLED
frequency = FREQUENCY.index(value[1])
# i is the index of the minute byte
i = 2 if self.has_seconds else 1
if frequency > 0 and self.has_seconds: # minutely at least
self.buffer[1] = _bin2bcd(value[0].tm_sec)
if frequency > 1: # hourly at least
self.buffer[i] = _bin2bcd(value[0].tm_min)
if frequency > 2: # daily at least
self.buffer[i + 1] = _bin2bcd(value[0].tm_hour)
if value[1] == "weekly":
if self.weekday_shared:
self.buffer[i + 2] = _bin2bcd(value[0].tm_wday + self.weekday_start) | 0x40
else:
self.buffer[i + 3] = _bin2bcd(value[0].tm_wday + self.weekday_start)
elif value[1] == "monthly":
self.buffer[i + 2] = _bin2bcd(value[0].tm_mday)
with obj.i2c_device:
obj.i2c_device.write(self.buffer)
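# Usage sketch (hypothetical driver class, not part of this file): the
# descriptor is meant to live on an RTC driver that exposes an `i2c_device`
# attribute, e.g. a DS3231-style device:
#
#   class SomeRTC:
#       alarm = BCDAlarmTimeRegister(0x07)
#       def __init__(self, i2c_device):
#           self.i2c_device = i2c_device
#
#   rtc = SomeRTC(i2c_device)
#   rtc.alarm = (time.struct_time((2017, 1, 1, 7, 30, 0, 6, 1, -1)), "daily")
#   alarm_time, frequency = rtc.alarm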
| artistic-2.0 | 7,971,070,526,889,591,000 | 37.733333 | 112 | 0.610703 | false |
dimtruck/magnum | magnum/tests/unit/conductor/tasks/test_heat_tasks.py | 1 | 4943 | # Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from taskflow import engines
from taskflow.patterns import linear_flow

from magnum.conductor.tasks import heat_tasks
from magnum.tests import base
class HeatTasksTests(base.TestCase):
def setUp(self):
super(HeatTasksTests, self).setUp()
self.heat_client = mock.MagicMock(name='heat_client')
def _get_create_stack_flow(self, heat_client):
flow = linear_flow.Flow("create stack flow")
flow.add(
heat_tasks.CreateStack(
os_client=heat_client,
requires=('stack_name', 'parameters', 'template', 'files'),
provides='new_stack',
),
)
return flow
def _get_update_stack_flow(self, heat_client):
flow = linear_flow.Flow("update stack flow")
flow.add(
heat_tasks.UpdateStack(
os_client=heat_client,
requires=('stack_id', 'parameters', 'template', 'files'),
),
)
return flow
def _get_delete_stack_flow(self, heat_client):
flow = linear_flow.Flow("delete stack flow")
flow.add(
heat_tasks.DeleteStack(
os_client=heat_client,
                requires=('stack_id'),  # a bare string, not a one-element tuple; taskflow accepts either
),
)
return flow
def test_create_stack(self):
heat_client = mock.MagicMock(name='heat_client')
stack_id = 'stack_id'
stack_name = 'stack_name'
stack = {
'stack': {
'id': stack_id
}
}
heat_client.stacks.create.return_value = stack
flow_store = {
'stack_name': stack_name,
'parameters': 'parameters',
'template': 'template',
'files': 'files'
}
flow = self._get_create_stack_flow(heat_client)
result = engines.run(flow, store=flow_store)
heat_client.stacks.create.assert_called_once_with(**flow_store)
self.assertEqual(stack_id, result['new_stack']['stack']['id'])
def test_create_stack_with_error(self):
heat_client = mock.MagicMock(name='heat_client')
heat_client.stacks.create.side_effect = ValueError
stack_name = 'stack_name'
flow_store = {
'stack_name': stack_name,
'parameters': 'parameters',
'template': 'template',
'files': 'files'
}
flow = self._get_create_stack_flow(heat_client)
self.assertRaises(ValueError, engines.run, flow, store=flow_store)
def test_update_stack(self):
heat_client = mock.MagicMock(name='heat_client')
stack_id = 'stack_id'
flow_store = {
'stack_id': stack_id,
'parameters': 'parameters',
'template': 'template',
'files': 'files'
}
flow = self._get_update_stack_flow(heat_client)
expected_params = dict(flow_store)
del expected_params['stack_id']
engines.run(flow, store=flow_store)
heat_client.stacks.update.assert_called_once_with(stack_id,
**expected_params)
def test_update_stack_with_error(self):
heat_client = mock.MagicMock(name='heat_client')
heat_client.stacks.update.side_effect = ValueError
stack_id = 'stack_id'
flow_store = {
'stack_id': stack_id,
'parameters': 'parameters',
'template': 'template',
'files': 'files'
}
flow = self._get_update_stack_flow(heat_client)
self.assertRaises(ValueError, engines.run, flow, store=flow_store)
def test_delete_stack(self):
heat_client = mock.MagicMock(name='heat_client')
stack_id = 'stack_id'
flow_store = {'stack_id': stack_id}
flow = self._get_delete_stack_flow(heat_client)
engines.run(flow, store=flow_store)
heat_client.stacks.delete.assert_called_once_with(stack_id)
def test_delete_stack_with_error(self):
heat_client = mock.MagicMock(name='heat_client')
heat_client.stacks.delete.side_effect = ValueError
stack_id = 'stack_id'
flow_store = {'stack_id': stack_id}
flow = self._get_delete_stack_flow(heat_client)
self.assertRaises(ValueError, engines.run, flow, store=flow_store)
| apache-2.0 | 6,042,682,037,558,177,000 | 33.566434 | 76 | 0.588711 | false |
metno/gridpp | tests/neighbourhood_quantile_test.py | 1 | 1596 | from __future__ import print_function
import unittest
import gridpp
import numpy as np
lats = [60, 60, 60, 60, 60, 70]
lons = [10,10.1,10.2,10.3,10.4, 10]
"""Simple check
20 21 22 23 24
15 16 17 18 19
10 11 12 13 nan
5 6 7 nan 9
0 1 2 3 4
"""
values = np.reshape(range(25), [5, 5]).astype(float)
values[1, 3] = np.nan
values[2, 4] = np.nan
values = np.array(values)
class Test(unittest.TestCase):
def test_invalid_arguments(self):
field = np.ones([5, 5])
halfwidth = -1
quantiles = [-0.1, 1.1, np.nan]
for quantile in quantiles:
with self.assertRaises(ValueError) as e:
gridpp.neighbourhood_quantile(field, quantile, halfwidth)
def test_empty_argument(self):
halfwidth = 3
for quantile in [0, 0.5, 1]:
output = gridpp.neighbourhood_quantile([[]], quantile, halfwidth)
self.assertEqual(len(output.shape), 2)
self.assertEqual(output.shape[0], 0)
self.assertEqual(output.shape[1], 0)
def test_missing(self):
"""Checks that missing values are handled correctly"""
empty = np.zeros([5, 5])
empty[0:3, 0:3] = np.nan
output = gridpp.neighbourhood_quantile(empty, 0.5, 1)
self.assertTrue(np.isnan(np.array(output)[0:2,0:2]).all())
def test_quantile(self):
output = np.array(gridpp.neighbourhood_quantile(values, 0.5, 1))
self.assertEqual(output[2][2], 12.5)
self.assertEqual(output[2][3], 13)
self.assertEqual(output[0][4], 4)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | 3,947,422,046,611,754,500 | 28.018182 | 77 | 0.600877 | false |
RincewindWizzard/django_digisys | django_digisys/settings.py | 1 | 2561 | """
Django settings for django_digisys project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '-$1-^^%upj_+svh@k)tfns5z&dt*)2vnufrlee@347e8!k6j=5'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'suit',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'uebungen',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'django_digisys.urls'
WSGI_APPLICATION = 'django_digisys.wsgi.application'
# django suit
SUIT_CONFIG = {
'ADMIN_NAME': 'DigiSys Admin',
'MENU': (
# Keep original label and models
'sites',
{ 'app': 'auth' },
{ 'app': 'uebungen' },
{ 'label': 'Export', 'url': '/digisys/', 'icon': 'icon-hand-right' },
)
}
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
from django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS as TCP
TEMPLATE_CONTEXT_PROCESSORS = TCP + (
'django.core.context_processors.request',
)
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'de-de'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
DATE_FORMAT = 'j F Y'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
| gpl-2.0 | 5,638,799,035,028,847,000 | 23.625 | 77 | 0.696212 | false |
sbuss/TigerShark | tigershark/parsers/M278_4010_X094_27_A1.py | 1 | 129238 | #
# Generated by TigerShark.tools.convertPyX12 on 2012-07-10 16:29:58.981434
#
from tigershark.X12.parse import Message, Loop, Segment, Composite, Element, Properties
parsed_278_HEADER = Loop( u'HEADER', Properties(looptype=u'wrapper',repeat=u'1',pos=u'015',req_sit=u'R',desc=u'Table 1 - Header'),
Segment( u'BHT', Properties(syntax='',req_sit=u'R',repeat=u'1',pos=u'020',desc=u'Beginning of Hierarchical Transaction'),
Element( u'BHT01', Properties(desc=u'Hierarchical Structure Code', req_sit=u'R', data_type=(u'ID',u'4',u'4'), position=1,
codes=[u'0078'] ) ),
Element( u'BHT02', Properties(desc=u'Transaction Set Purpose Code', req_sit=u'R', data_type=(u'ID',u'2',u'2'), position=2,
codes=[u'11'] ) ),
Element( u'BHT03', Properties(desc=u'Reference Identification', req_sit=u'R', data_type=(u'AN',u'1',u'50'), position=3,
codes=[] ) ),
Element( u'BHT04', Properties(desc=u'Date', req_sit=u'R', data_type=(u'DT',u'8',u'8'), position=4,
codes=[] ) ),
Element( u'BHT05', Properties(desc=u'Time', req_sit=u'R', data_type=(u'TM',u'4',u'8'), position=5,
codes=[] ) ),
Element( u'BHT06', Properties(desc=u'Transaction Type Code', req_sit=u'S', data_type=(u'ID',u'2',u'2'), position=6,
codes=[u'18', u'19', u'AT'] ) ),
),
)
parsed_278_2010A = Loop( u'2010A', Properties(looptype='',repeat=u'1',pos=u'170',req_sit=u'R',desc=u'Utilization Management Organization (UMO) Name'),
Segment( u'NM1', Properties(syntax='',req_sit=u'R',repeat=u'1',pos=u'170',desc=u'Utilization Management Organization (UMO) Name'),
Element( u'NM101', Properties(desc=u'Entity Identifier Code', req_sit=u'R', data_type=(u'ID',u'2',u'3'), position=1,
codes=[u'X3'] ) ),
Element( u'NM102', Properties(desc=u'Entity Type Qualifier', req_sit=u'R', data_type=(u'ID',u'1',u'1'), position=2,
codes=[u'1', u'2'] ) ),
Element( u'NM103', Properties(desc=u'Name Last or Organization Name', req_sit=u'S', data_type=(u'AN',u'1',u'60'), position=3,
codes=[] ) ),
Element( u'NM104', Properties(desc=u'Name First', req_sit=u'S', data_type=(u'AN',u'1',u'35'), position=4,
codes=[] ) ),
Element( u'NM105', Properties(desc=u'Name Middle', req_sit=u'S', data_type=(u'AN',u'1',u'25'), position=5,
codes=[] ) ),
Element( u'NM106', Properties(desc=u'Name Prefix', req_sit=u'N', data_type=(u'AN',u'1',u'10'), position=6,
codes=[] ) ),
Element( u'NM107', Properties(desc=u'Name Suffix', req_sit=u'S', data_type=(u'AN',u'1',u'10'), position=7,
codes=[] ) ),
Element( u'NM108', Properties(desc=u'Identification Code Qualifier', req_sit=u'R', data_type=(u'ID',u'1',u'2'), position=8,
codes=[u'24', u'34', u'46', u'PI', u'XV', u'XX'] ) ),
Element( u'NM109', Properties(desc=u'Identification Code', req_sit=u'R', data_type=(u'AN',u'2',u'80'), position=9,
codes=[] ) ),
Element( u'NM110', Properties(desc=u'Entity Relationship Code', req_sit=u'N', data_type=(u'ID',u'2',u'2'), position=10,
codes=[] ) ),
Element( u'NM111', Properties(desc=u'Entity Identifier Code', req_sit=u'N', data_type=(u'ID',u'2',u'3'), position=11,
codes=[] ) ),
),
Segment( u'PER', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'220',desc=u'Utilization Management Organization (UMO) Contact Information'),
Element( u'PER01', Properties(desc=u'Contact Function Code', req_sit=u'R', data_type=(u'ID',u'2',u'2'), position=1,
codes=[u'IC'] ) ),
Element( u'PER02', Properties(desc=u'Name', req_sit=u'S', data_type=(u'AN',u'1',u'60'), position=2,
codes=[] ) ),
Element( u'PER03', Properties(desc=u'Communication Number Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'2'), position=3,
codes=[u'EM', u'FX', u'TE'] ) ),
Element( u'PER04', Properties(desc=u'Communication Number', req_sit=u'S', data_type=(u'AN',u'1',u'256'), position=4,
codes=[] ) ),
Element( u'PER05', Properties(desc=u'Communication Number Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'2'), position=5,
codes=[u'EM', u'EX', u'FX', u'TE'] ) ),
Element( u'PER06', Properties(desc=u'Communication Number', req_sit=u'S', data_type=(u'AN',u'1',u'256'), position=6,
codes=[] ) ),
Element( u'PER07', Properties(desc=u'Communication Number Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'2'), position=7,
codes=[u'EM', u'EX', u'FX', u'TE'] ) ),
Element( u'PER08', Properties(desc=u'Communication Number', req_sit=u'S', data_type=(u'AN',u'1',u'256'), position=8,
codes=[] ) ),
Element( u'PER09', Properties(desc=u'Contact Inquiry Reference', req_sit=u'N', data_type=(u'AN',u'1',u'20'), position=9,
codes=[] ) ),
),
Segment( u'AAA', Properties(syntax='',req_sit=u'S',repeat=u'9',pos=u'230',desc=u'Utilization Management Organization (UMO) Request Validation'),
Element( u'AAA01', Properties(desc=u'Yes/No Condition or Response Code', req_sit=u'R', data_type=(u'ID',u'1',u'1'), position=1,
codes=[u'N', u'Y'] ) ),
Element( u'AAA02', Properties(desc=u'Agency Qualifier Code', req_sit=u'N', data_type=(u'ID',u'2',u'2'), position=2,
codes=[] ) ),
Element( u'AAA03', Properties(desc=u'Reject Reason Code', req_sit=u'S', data_type=(u'ID',u'2',u'2'), position=3,
codes=[u'04', u'41', u'42', u'79', u'80', u'T4'] ) ),
Element( u'AAA04', Properties(desc=u'Follow-up Action Code', req_sit=u'S', data_type=(u'ID',u'1',u'1'), position=4,
codes=[u'N', u'P', u'Y'] ) ),
),
)
parsed_278_2010B = Loop( u'2010B', Properties(looptype='',repeat=u'1',pos=u'170',req_sit=u'R',desc=u'Requester Name'),
Segment( u'NM1', Properties(syntax='',req_sit=u'R',repeat=u'1',pos=u'170',desc=u'Requester Name'),
Element( u'NM101', Properties(desc=u'Entity Identifier Code', req_sit=u'R', data_type=(u'ID',u'2',u'3'), position=1,
codes=[u'1P', u'FA'] ) ),
Element( u'NM102', Properties(desc=u'Entity Type Qualifier', req_sit=u'R', data_type=(u'ID',u'1',u'1'), position=2,
codes=[u'1', u'2'] ) ),
Element( u'NM103', Properties(desc=u'Name Last or Organization Name', req_sit=u'S', data_type=(u'AN',u'1',u'60'), position=3,
codes=[] ) ),
Element( u'NM104', Properties(desc=u'Name First', req_sit=u'S', data_type=(u'AN',u'1',u'35'), position=4,
codes=[] ) ),
Element( u'NM105', Properties(desc=u'Name Middle', req_sit=u'S', data_type=(u'AN',u'1',u'25'), position=5,
codes=[] ) ),
Element( u'NM106', Properties(desc=u'Name Prefix', req_sit=u'N', data_type=(u'AN',u'1',u'10'), position=6,
codes=[] ) ),
Element( u'NM107', Properties(desc=u'Name Suffix', req_sit=u'S', data_type=(u'AN',u'1',u'10'), position=7,
codes=[] ) ),
Element( u'NM108', Properties(desc=u'Identification Code Qualifier', req_sit=u'R', data_type=(u'ID',u'1',u'2'), position=8,
codes=[u'24', u'34', u'46', u'XX'] ) ),
Element( u'NM109', Properties(desc=u'Identification Code', req_sit=u'R', data_type=(u'AN',u'2',u'80'), position=9,
codes=[] ) ),
Element( u'NM110', Properties(desc=u'Entity Relationship Code', req_sit=u'N', data_type=(u'ID',u'2',u'2'), position=10,
codes=[] ) ),
Element( u'NM111', Properties(desc=u'Entity Identifier Code', req_sit=u'N', data_type=(u'ID',u'2',u'3'), position=11,
codes=[] ) ),
),
Segment( u'REF', Properties(syntax='',req_sit=u'S',repeat=u'8',pos=u'180',desc=u'Requester Supplemental Identification'),
Element( u'REF01', Properties(desc=u'Reference Identification Qualifier', req_sit=u'R', data_type=(u'ID',u'2',u'3'), position=1,
codes=[u'1G', u'1J', u'CT', u'EI', u'N5', u'N7', u'SY', u'ZH'] ) ),
Element( u'REF02', Properties(desc=u'Reference Identification', req_sit=u'R', data_type=(u'AN',u'1',u'50'), position=2,
codes=[] ) ),
Element( u'REF03', Properties(desc=u'Description', req_sit=u'N', data_type=(u'AN',u'1',u'80'), position=3,
codes=[] ) ),
Composite( u'C040', Properties(req_sit=u'N',refdes='',seq=u'04',desc=u'Reference Identifier'),
),
),
Segment( u'AAA', Properties(syntax='',req_sit=u'S',repeat=u'9',pos=u'230',desc=u'Requester Request Validation'),
Element( u'AAA01', Properties(desc=u'Yes/No Condition or Response Code', req_sit=u'R', data_type=(u'ID',u'1',u'1'), position=1,
codes=[u'N', u'Y'] ) ),
Element( u'AAA02', Properties(desc=u'Agency Qualifier Code', req_sit=u'N', data_type=(u'ID',u'2',u'2'), position=2,
codes=[] ) ),
Element( u'AAA03', Properties(desc=u'Reject Reason Code', req_sit=u'S', data_type=(u'ID',u'2',u'2'), position=3,
codes=[u'35', u'41', u'43', u'44', u'45', u'46', u'47', u'49', u'50', u'51', u'79', u'97'] ) ),
Element( u'AAA04', Properties(desc=u'Follow-up Action Code', req_sit=u'S', data_type=(u'ID',u'1',u'1'), position=4,
codes=[u'C', u'N', u'R'] ) ),
),
Segment( u'PRV', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'240',desc=u'Requester Provider Information'),
Element( u'PRV01', Properties(desc=u'Provider Code', req_sit=u'R', data_type=(u'ID',u'1',u'3'), position=1,
codes=[u'AD', u'AS', u'AT', u'CO', u'CV', u'OP', u'OR', u'OT', u'PC', u'PE', u'RF'] ) ),
Element( u'PRV02', Properties(desc=u'Reference Identification Qualifier', req_sit=u'R', data_type=(u'ID',u'2',u'3'), position=2,
codes=[u'ZZ'] ) ),
Element( u'PRV03', Properties(desc=u'Reference Identification', req_sit=u'R', data_type=(u'AN',u'1',u'50'), position=3,
codes=[] ) ),
Element( u'PRV04', Properties(desc=u'State or Province Code', req_sit=u'N', data_type=(u'ID',u'2',u'2'), position=4,
codes=[] ) ),
Composite( u'C035', Properties(req_sit=u'N',refdes='',seq=u'05',desc=u'Provider Specialty Information'),
),
Element( u'PRV06', Properties(desc=u'Provider Organization Code', req_sit=u'N', data_type=(u'ID',u'3',u'3'), position=6,
codes=[] ) ),
),
)
parsed_278_2010CA = Loop( u'2010CA', Properties(looptype='',repeat=u'1',pos=u'170',req_sit=u'R',desc=u'Subscriber Name'),
Segment( u'NM1', Properties(syntax='',req_sit=u'R',repeat=u'1',pos=u'170',desc=u'Subscriber Name'),
Element( u'NM101', Properties(desc=u'Entity Identifier Code', req_sit=u'R', data_type=(u'ID',u'2',u'3'), position=1,
codes=[u'IL'] ) ),
Element( u'NM102', Properties(desc=u'Entity Type Qualifier', req_sit=u'R', data_type=(u'ID',u'1',u'1'), position=2,
codes=[u'1'] ) ),
Element( u'NM103', Properties(desc=u'Name Last or Organization Name', req_sit=u'S', data_type=(u'AN',u'1',u'60'), position=3,
codes=[] ) ),
Element( u'NM104', Properties(desc=u'Name First', req_sit=u'S', data_type=(u'AN',u'1',u'35'), position=4,
codes=[] ) ),
Element( u'NM105', Properties(desc=u'Name Middle', req_sit=u'S', data_type=(u'AN',u'1',u'25'), position=5,
codes=[] ) ),
Element( u'NM106', Properties(desc=u'Name Prefix', req_sit=u'N', data_type=(u'AN',u'1',u'10'), position=6,
codes=[] ) ),
Element( u'NM107', Properties(desc=u'Name Suffix', req_sit=u'S', data_type=(u'AN',u'1',u'10'), position=7,
codes=[] ) ),
Element( u'NM108', Properties(desc=u'Identification Code Qualifier', req_sit=u'R', data_type=(u'ID',u'1',u'2'), position=8,
codes=[u'MI', u'ZZ'] ) ),
Element( u'NM109', Properties(desc=u'Identification Code', req_sit=u'R', data_type=(u'AN',u'2',u'80'), position=9,
codes=[] ) ),
Element( u'NM110', Properties(desc=u'Entity Relationship Code', req_sit=u'N', data_type=(u'ID',u'2',u'2'), position=10,
codes=[] ) ),
Element( u'NM111', Properties(desc=u'Entity Identifier Code', req_sit=u'N', data_type=(u'ID',u'2',u'3'), position=11,
codes=[] ) ),
),
Segment( u'REF', Properties(syntax='',req_sit=u'S',repeat=u'9',pos=u'180',desc=u'Subscriber Supplemental Identification'),
Element( u'REF01', Properties(desc=u'Reference Identification Qualifier', req_sit=u'R', data_type=(u'ID',u'2',u'3'), position=1,
codes=[u'1L', u'1W', u'6P', u'A6', u'EJ', u'F6', u'HJ', u'IG', u'N6', u'NQ', u'SY'] ) ),
Element( u'REF02', Properties(desc=u'Reference Identification', req_sit=u'R', data_type=(u'AN',u'1',u'50'), position=2,
codes=[] ) ),
Element( u'REF03', Properties(desc=u'Description', req_sit=u'N', data_type=(u'AN',u'1',u'80'), position=3,
codes=[] ) ),
Composite( u'C040', Properties(req_sit=u'N',refdes='',seq=u'04',desc=u'Reference Identifier'),
),
),
Segment( u'AAA', Properties(syntax='',req_sit=u'S',repeat=u'9',pos=u'230',desc=u'Subscriber Request Validation'),
Element( u'AAA01', Properties(desc=u'Yes/No Condition or Response Code', req_sit=u'R', data_type=(u'ID',u'1',u'1'), position=1,
codes=[u'N', u'Y'] ) ),
Element( u'AAA02', Properties(desc=u'Agency Qualifier Code', req_sit=u'N', data_type=(u'ID',u'2',u'2'), position=2,
codes=[] ) ),
Element( u'AAA03', Properties(desc=u'Reject Reason Code', req_sit=u'S', data_type=(u'ID',u'2',u'2'), position=3,
codes=[u'15', u'58', u'64', u'65', u'66', u'67', u'68', u'71', u'72', u'73', u'74', u'75', u'76', u'77', u'78', u'79', u'95'] ) ),
Element( u'AAA04', Properties(desc=u'Follow-up Action Code', req_sit=u'S', data_type=(u'ID',u'1',u'1'), position=4,
codes=[u'C', u'N'] ) ),
),
Segment( u'DMG', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'250',desc=u'Subscriber Demographic Information'),
Element( u'DMG01', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'R', data_type=(u'ID',u'2',u'3'), position=1,
codes=[u'D8'] ) ),
Element( u'DMG02', Properties(desc=u'Date Time Period', req_sit=u'R', data_type=(u'AN',u'1',u'35'), position=2,
codes=[] ) ),
Element( u'DMG03', Properties(desc=u'Gender Code', req_sit=u'S', data_type=(u'ID',u'1',u'1'), position=3,
codes=[u'F', u'M', u'U'] ) ),
Element( u'DMG04', Properties(desc=u'Marital Status Code', req_sit=u'N', data_type=(u'ID',u'1',u'1'), position=4,
codes=[] ) ),
Element( u'DMG05', Properties(desc=u'Race or Ethnicity Code', req_sit=u'N', data_type=(u'ID',u'1',u'1'), position=5,
codes=[] ) ),
Element( u'DMG06', Properties(desc=u'Citizenship Status Code', req_sit=u'N', data_type=(u'ID',u'1',u'2'), position=6,
codes=[] ) ),
Element( u'DMG07', Properties(desc=u'Country Code', req_sit=u'N', data_type=(u'ID',u'2',u'3'), position=7,
codes=[] ) ),
Element( u'DMG08', Properties(desc=u'Basis of Verification Code', req_sit=u'N', data_type=(u'ID',u'1',u'2'), position=8,
codes=[] ) ),
Element( u'DMG09', Properties(desc=u'Quantity', req_sit=u'N', data_type=(u'R',u'1',u'15'), position=9,
codes=[] ) ),
),
)
parsed_278_2010CB = Loop( u'2010CB', Properties(looptype='',repeat=u'1',pos=u'170',req_sit=u'S',desc=u'Additional Patient Information Contact Name'),
Segment( u'NM1', Properties(syntax='',req_sit=u'R',repeat=u'1',pos=u'170',desc=u'Additional Patient Information Contact Name'),
Element( u'NM101', Properties(desc=u'Entity Identifier Code', req_sit=u'R', data_type=(u'ID',u'2',u'3'), position=1,
codes=[u'1P', u'2B', u'ABG', u'FA', u'PR', u'X3'] ) ),
Element( u'NM102', Properties(desc=u'Entity Type Qualifier', req_sit=u'R', data_type=(u'ID',u'1',u'1'), position=2,
codes=[u'1', u'2'] ) ),
Element( u'NM103', Properties(desc=u'Name Last or Organization Name', req_sit=u'S', data_type=(u'AN',u'1',u'60'), position=3,
codes=[] ) ),
Element( u'NM104', Properties(desc=u'Name First', req_sit=u'S', data_type=(u'AN',u'1',u'35'), position=4,
codes=[] ) ),
Element( u'NM105', Properties(desc=u'Name Middle', req_sit=u'S', data_type=(u'AN',u'1',u'25'), position=5,
codes=[] ) ),
Element( u'NM106', Properties(desc=u'Name Prefix', req_sit=u'N', data_type=(u'AN',u'1',u'10'), position=6,
codes=[] ) ),
Element( u'NM107', Properties(desc=u'Name Suffix', req_sit=u'S', data_type=(u'AN',u'1',u'10'), position=7,
codes=[] ) ),
Element( u'NM108', Properties(desc=u'Identification Code Qualifier', req_sit=u'S', data_type=(u'ID',u'1',u'2'), position=8,
codes=[u'24', u'34', u'46', u'PI', u'XV', u'XX'] ) ),
Element( u'NM109', Properties(desc=u'Identification Code', req_sit=u'S', data_type=(u'AN',u'2',u'80'), position=9,
codes=[] ) ),
Element( u'NM110', Properties(desc=u'Entity Relationship Code', req_sit=u'N', data_type=(u'ID',u'2',u'2'), position=10,
codes=[] ) ),
Element( u'NM111', Properties(desc=u'Entity Identifier Code', req_sit=u'N', data_type=(u'ID',u'2',u'3'), position=11,
codes=[] ) ),
),
Segment( u'N3', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'200',desc=u'Additional Patient Information Contact Address'),
Element( u'N301', Properties(desc=u'Address Information', req_sit=u'R', data_type=(u'AN',u'1',u'55'), position=1,
codes=[] ) ),
Element( u'N302', Properties(desc=u'Address Information', req_sit=u'S', data_type=(u'AN',u'1',u'55'), position=2,
codes=[] ) ),
),
Segment( u'N4', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'210',desc=u'Additional Patient Information Contact City/State/Zip Code'),
Element( u'N401', Properties(desc=u'City Name', req_sit=u'S', data_type=(u'AN',u'2',u'30'), position=1,
codes=[] ) ),
Element( u'N402', Properties(desc=u'State or Province Code', req_sit=u'S', data_type=(u'ID',u'2',u'2'), position=2,
codes=[] ) ),
Element( u'N403', Properties(desc=u'Postal Code', req_sit=u'S', data_type=(u'ID',u'3',u'15'), position=3,
codes=[] ) ),
Element( u'N404', Properties(desc=u'Country Code', req_sit=u'S', data_type=(u'ID',u'2',u'3'), position=4,
codes=[] ) ),
Element( u'N405', Properties(desc=u'Location Qualifier', req_sit=u'S', data_type=(u'ID',u'1',u'2'), position=5,
codes=[u'B1', u'DP'] ) ),
Element( u'N406', Properties(desc=u'Location Identifier', req_sit=u'S', data_type=(u'AN',u'1',u'30'), position=6,
codes=[] ) ),
),
Segment( u'PER', Properties(syntax='',req_sit=u'S',repeat=u'3',pos=u'220',desc=u'Additional Patient Information Contact Information'),
Element( u'PER01', Properties(desc=u'Contact Function Code', req_sit=u'R', data_type=(u'ID',u'2',u'2'), position=1,
codes=[u'IC'] ) ),
Element( u'PER02', Properties(desc=u'Name', req_sit=u'S', data_type=(u'AN',u'1',u'60'), position=2,
codes=[] ) ),
Element( u'PER03', Properties(desc=u'Communication Number Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'2'), position=3,
codes=[u'EM', u'FX', u'TE'] ) ),
Element( u'PER04', Properties(desc=u'Communication Number', req_sit=u'S', data_type=(u'AN',u'1',u'256'), position=4,
codes=[] ) ),
Element( u'PER05', Properties(desc=u'Communication Number Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'2'), position=5,
codes=[u'EM', u'EX', u'FX', u'TE'] ) ),
Element( u'PER06', Properties(desc=u'Communication Number', req_sit=u'S', data_type=(u'AN',u'1',u'256'), position=6,
codes=[] ) ),
Element( u'PER07', Properties(desc=u'Communication Number Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'2'), position=7,
codes=[u'EM', u'EX', u'FX', u'TE'] ) ),
Element( u'PER08', Properties(desc=u'Communication Number', req_sit=u'S', data_type=(u'AN',u'1',u'256'), position=8,
codes=[] ) ),
Element( u'PER09', Properties(desc=u'Contact Inquiry Reference', req_sit=u'N', data_type=(u'AN',u'1',u'20'), position=9,
codes=[] ) ),
),
)
parsed_278_2010DA = Loop( u'2010DA', Properties(looptype='',repeat=u'1',pos=u'170',req_sit=u'R',desc=u'Dependent Name'),
Segment( u'NM1', Properties(syntax='',req_sit=u'R',repeat=u'1',pos=u'170',desc=u'Dependent Name'),
Element( u'NM101', Properties(desc=u'Entity Identifier Code', req_sit=u'R', data_type=(u'ID',u'2',u'3'), position=1,
codes=[u'QC'] ) ),
Element( u'NM102', Properties(desc=u'Entity Type Qualifier', req_sit=u'R', data_type=(u'ID',u'1',u'1'), position=2,
codes=[u'1'] ) ),
Element( u'NM103', Properties(desc=u'Name Last or Organization Name', req_sit=u'S', data_type=(u'AN',u'1',u'60'), position=3,
codes=[] ) ),
Element( u'NM104', Properties(desc=u'Name First', req_sit=u'S', data_type=(u'AN',u'1',u'35'), position=4,
codes=[] ) ),
Element( u'NM105', Properties(desc=u'Name Middle', req_sit=u'S', data_type=(u'AN',u'1',u'25'), position=5,
codes=[] ) ),
Element( u'NM106', Properties(desc=u'Name Prefix', req_sit=u'N', data_type=(u'AN',u'1',u'10'), position=6,
codes=[] ) ),
Element( u'NM107', Properties(desc=u'Name Suffix', req_sit=u'S', data_type=(u'AN',u'1',u'10'), position=7,
codes=[] ) ),
Element( u'NM108', Properties(desc=u'Identification Code Qualifier', req_sit=u'S', data_type=(u'ID',u'1',u'2'), position=8,
codes=[u'MI', u'ZZ'] ) ),
Element( u'NM109', Properties(desc=u'Identification Code', req_sit=u'S', data_type=(u'AN',u'2',u'80'), position=9,
codes=[] ) ),
Element( u'NM110', Properties(desc=u'Entity Relationship Code', req_sit=u'N', data_type=(u'ID',u'2',u'2'), position=10,
codes=[] ) ),
Element( u'NM111', Properties(desc=u'Entity Identifier Code', req_sit=u'N', data_type=(u'ID',u'2',u'3'), position=11,
codes=[] ) ),
),
Segment( u'REF', Properties(syntax='',req_sit=u'S',repeat=u'3',pos=u'180',desc=u'Dependent Supplemental Identification'),
Element( u'REF01', Properties(desc=u'Reference Identification Qualifier', req_sit=u'R', data_type=(u'ID',u'2',u'3'), position=1,
codes=[u'A6', u'EJ', u'SY'] ) ),
Element( u'REF02', Properties(desc=u'Reference Identification', req_sit=u'R', data_type=(u'AN',u'1',u'50'), position=2,
codes=[] ) ),
Element( u'REF03', Properties(desc=u'Description', req_sit=u'N', data_type=(u'AN',u'1',u'80'), position=3,
codes=[] ) ),
Composite( u'C040', Properties(req_sit=u'N',refdes='',seq=u'04',desc=u'Reference Identifier'),
),
),
Segment( u'AAA', Properties(syntax='',req_sit=u'S',repeat=u'9',pos=u'230',desc=u'Dependent Request Validation'),
Element( u'AAA01', Properties(desc=u'Yes/No Condition or Response Code', req_sit=u'R', data_type=(u'ID',u'1',u'1'), position=1,
codes=[u'N', u'Y'] ) ),
Element( u'AAA02', Properties(desc=u'Agency Qualifier Code', req_sit=u'N', data_type=(u'ID',u'2',u'2'), position=2,
codes=[] ) ),
Element( u'AAA03', Properties(desc=u'Reject Reason Code', req_sit=u'S', data_type=(u'ID',u'2',u'2'), position=3,
codes=[u'15', u'33', u'58', u'64', u'65', u'66', u'67', u'68', u'71', u'77', u'95'] ) ),
Element( u'AAA04', Properties(desc=u'Follow-up Action Code', req_sit=u'S', data_type=(u'ID',u'1',u'1'), position=4,
codes=[u'C', u'N'] ) ),
),
Segment( u'DMG', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'250',desc=u'Dependent Demographic Information'),
Element( u'DMG01', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'R', data_type=(u'ID',u'2',u'3'), position=1,
codes=[u'D8'] ) ),
Element( u'DMG02', Properties(desc=u'Date Time Period', req_sit=u'R', data_type=(u'AN',u'1',u'35'), position=2,
codes=[] ) ),
Element( u'DMG03', Properties(desc=u'Gender Code', req_sit=u'S', data_type=(u'ID',u'1',u'1'), position=3,
codes=[u'F', u'M', u'U'] ) ),
Element( u'DMG04', Properties(desc=u'Marital Status Code', req_sit=u'N', data_type=(u'ID',u'1',u'1'), position=4,
codes=[] ) ),
Element( u'DMG05', Properties(desc=u'Race or Ethnicity Code', req_sit=u'N', data_type=(u'ID',u'1',u'1'), position=5,
codes=[] ) ),
Element( u'DMG06', Properties(desc=u'Citizenship Status Code', req_sit=u'N', data_type=(u'ID',u'1',u'2'), position=6,
codes=[] ) ),
Element( u'DMG07', Properties(desc=u'Country Code', req_sit=u'N', data_type=(u'ID',u'2',u'3'), position=7,
codes=[] ) ),
Element( u'DMG08', Properties(desc=u'Basis of Verification Code', req_sit=u'N', data_type=(u'ID',u'1',u'2'), position=8,
codes=[] ) ),
Element( u'DMG09', Properties(desc=u'Quantity', req_sit=u'N', data_type=(u'R',u'1',u'15'), position=9,
codes=[] ) ),
),
Segment( u'INS', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'260',desc=u'Dependent Relationship'),
Element( u'INS01', Properties(desc=u'Yes/No Condition or Response Code', req_sit=u'R', data_type=(u'ID',u'1',u'1'), position=1,
codes=[u'N'] ) ),
Element( u'INS02', Properties(desc=u'Individual Relationship Code', req_sit=u'R', data_type=(u'ID',u'2',u'2'), position=2,
codes=[u'01', u'04', u'05', u'07', u'09', u'10', u'15', u'17', u'19', u'20', u'21', u'22', u'23', u'24', u'29', u'32', u'33', u'34', u'39', u'40', u'41', u'43', u'53', u'G8'] ) ),
Element( u'INS03', Properties(desc=u'Maintenance Type Code', req_sit=u'N', data_type=(u'ID',u'3',u'3'), position=3,
codes=[] ) ),
Element( u'INS04', Properties(desc=u'Maintenance Reason Code', req_sit=u'N', data_type=(u'ID',u'2',u'3'), position=4,
codes=[] ) ),
Element( u'INS05', Properties(desc=u'Benefit Status Code', req_sit=u'N', data_type=(u'ID',u'1',u'1'), position=5,
codes=[] ) ),
Element( u'INS06', Properties(desc=u'Medicare Plan Code', req_sit=u'N', data_type=(u'ID',u'1',u'1'), position=6,
codes=[] ) ),
        Element( u'INS07', Properties(desc=u'Consolidated Omnibus Budget Reconciliation Act (COBRA) Qualifying Event Code', req_sit=u'N', data_type=(u'ID',u'1',u'2'), position=7,
codes=[] ) ),
Element( u'INS08', Properties(desc=u'Employment Status Code', req_sit=u'N', data_type=(u'ID',u'2',u'2'), position=8,
codes=[] ) ),
Element( u'INS09', Properties(desc=u'Student Status Code', req_sit=u'N', data_type=(u'ID',u'1',u'1'), position=9,
codes=[] ) ),
Element( u'INS10', Properties(desc=u'Yes/No Condition or Response Code', req_sit=u'N', data_type=(u'ID',u'1',u'1'), position=10,
codes=[] ) ),
Element( u'INS11', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'N', data_type=(u'ID',u'2',u'3'), position=11,
codes=[] ) ),
Element( u'INS12', Properties(desc=u'Date Time Period', req_sit=u'N', data_type=(u'AN',u'1',u'35'), position=12,
codes=[] ) ),
Element( u'INS13', Properties(desc=u'Confidentiality Code', req_sit=u'N', data_type=(u'ID',u'1',u'1'), position=13,
codes=[] ) ),
Element( u'INS14', Properties(desc=u'City Name', req_sit=u'N', data_type=(u'AN',u'2',u'30'), position=14,
codes=[] ) ),
Element( u'INS15', Properties(desc=u'State or Province Code', req_sit=u'N', data_type=(u'ID',u'2',u'2'), position=15,
codes=[] ) ),
Element( u'INS16', Properties(desc=u'Country Code', req_sit=u'N', data_type=(u'ID',u'2',u'3'), position=16,
codes=[] ) ),
Element( u'INS17', Properties(desc=u'Number', req_sit=u'S', data_type=(u'N0',u'1',u'9'), position=17,
codes=[] ) ),
),
)
parsed_278_2010DB = Loop( u'2010DB', Properties(looptype='',repeat=u'1',pos=u'170',req_sit=u'S',desc=u'Additional Patient Information Contact Name'),
Segment( u'NM1', Properties(syntax='',req_sit=u'R',repeat=u'1',pos=u'170',desc=u'Additional Patient Information Contact Name'),
Element( u'NM101', Properties(desc=u'Entity Identifier Code', req_sit=u'R', data_type=(u'ID',u'2',u'3'), position=1,
codes=[u'1P', u'2B', u'ABG', u'FA', u'PR', u'X3'] ) ),
Element( u'NM102', Properties(desc=u'Entity Type Qualifier', req_sit=u'R', data_type=(u'ID',u'1',u'1'), position=2,
codes=[u'1', u'2'] ) ),
Element( u'NM103', Properties(desc=u'Name Last or Organization Name', req_sit=u'S', data_type=(u'AN',u'1',u'60'), position=3,
codes=[] ) ),
Element( u'NM104', Properties(desc=u'Name First', req_sit=u'S', data_type=(u'AN',u'1',u'35'), position=4,
codes=[] ) ),
Element( u'NM105', Properties(desc=u'Name Middle', req_sit=u'S', data_type=(u'AN',u'1',u'25'), position=5,
codes=[] ) ),
Element( u'NM106', Properties(desc=u'Name Prefix', req_sit=u'N', data_type=(u'AN',u'1',u'10'), position=6,
codes=[] ) ),
Element( u'NM107', Properties(desc=u'Name Suffix', req_sit=u'S', data_type=(u'AN',u'1',u'10'), position=7,
codes=[] ) ),
Element( u'NM108', Properties(desc=u'Identification Code Qualifier', req_sit=u'S', data_type=(u'ID',u'1',u'2'), position=8,
codes=[u'24', u'34', u'46', u'PI', u'XV', u'XX'] ) ),
Element( u'NM109', Properties(desc=u'Identification Code', req_sit=u'S', data_type=(u'AN',u'2',u'80'), position=9,
codes=[] ) ),
Element( u'NM110', Properties(desc=u'Entity Relationship Code', req_sit=u'N', data_type=(u'ID',u'2',u'2'), position=10,
codes=[] ) ),
Element( u'NM111', Properties(desc=u'Entity Identifier Code', req_sit=u'N', data_type=(u'ID',u'2',u'3'), position=11,
codes=[] ) ),
),
Segment( u'N3', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'200',desc=u'Additional Patient Information Contact Address'),
Element( u'N301', Properties(desc=u'Address Information', req_sit=u'R', data_type=(u'AN',u'1',u'55'), position=1,
codes=[] ) ),
Element( u'N302', Properties(desc=u'Address Information', req_sit=u'S', data_type=(u'AN',u'1',u'55'), position=2,
codes=[] ) ),
),
Segment( u'N4', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'210',desc=u'Additional Patient Information Contact City/State/Zip Code'),
Element( u'N401', Properties(desc=u'City Name', req_sit=u'S', data_type=(u'AN',u'2',u'30'), position=1,
codes=[] ) ),
Element( u'N402', Properties(desc=u'State or Province Code', req_sit=u'S', data_type=(u'ID',u'2',u'2'), position=2,
codes=[] ) ),
Element( u'N403', Properties(desc=u'Postal Code', req_sit=u'S', data_type=(u'ID',u'3',u'15'), position=3,
codes=[] ) ),
Element( u'N404', Properties(desc=u'Country Code', req_sit=u'S', data_type=(u'ID',u'2',u'3'), position=4,
codes=[] ) ),
Element( u'N405', Properties(desc=u'Location Qualifier', req_sit=u'S', data_type=(u'ID',u'1',u'2'), position=5,
codes=[u'B1', u'DP'] ) ),
Element( u'N406', Properties(desc=u'Location Identifier', req_sit=u'S', data_type=(u'AN',u'1',u'30'), position=6,
codes=[] ) ),
),
Segment( u'PER', Properties(syntax='',req_sit=u'S',repeat=u'3',pos=u'220',desc=u'Additional Patient Information Contact Information'),
Element( u'PER01', Properties(desc=u'Contact Function Code', req_sit=u'R', data_type=(u'ID',u'2',u'2'), position=1,
codes=[u'IC'] ) ),
Element( u'PER02', Properties(desc=u'Name', req_sit=u'S', data_type=(u'AN',u'1',u'60'), position=2,
codes=[] ) ),
Element( u'PER03', Properties(desc=u'Communication Number Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'2'), position=3,
codes=[u'EM', u'FX', u'TE'] ) ),
Element( u'PER04', Properties(desc=u'Communication Number', req_sit=u'S', data_type=(u'AN',u'1',u'256'), position=4,
codes=[] ) ),
Element( u'PER05', Properties(desc=u'Communication Number Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'2'), position=5,
codes=[u'EM', u'EX', u'FX', u'TE'] ) ),
Element( u'PER06', Properties(desc=u'Communication Number', req_sit=u'S', data_type=(u'AN',u'1',u'256'), position=6,
codes=[] ) ),
Element( u'PER07', Properties(desc=u'Communication Number Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'2'), position=7,
codes=[u'EM', u'EX', u'FX', u'TE'] ) ),
Element( u'PER08', Properties(desc=u'Communication Number', req_sit=u'S', data_type=(u'AN',u'1',u'256'), position=8,
codes=[] ) ),
Element( u'PER09', Properties(desc=u'Contact Inquiry Reference', req_sit=u'N', data_type=(u'AN',u'1',u'20'), position=9,
codes=[] ) ),
),
)
parsed_278_2010E = Loop( u'2010E', Properties(looptype='',repeat=u'3',pos=u'170',req_sit=u'R',desc=u'Service Provider Name'),
Segment( u'NM1', Properties(syntax='',req_sit=u'R',repeat=u'1',pos=u'170',desc=u'Service Provider Name'),
Element( u'NM101', Properties(desc=u'Entity Identifier Code', req_sit=u'R', data_type=(u'ID',u'2',u'3'), position=1,
codes=[u'1T', u'FA', u'SJ'] ) ),
Element( u'NM102', Properties(desc=u'Entity Type Qualifier', req_sit=u'R', data_type=(u'ID',u'1',u'1'), position=2,
codes=[u'1', u'2'] ) ),
Element( u'NM103', Properties(desc=u'Name Last or Organization Name', req_sit=u'S', data_type=(u'AN',u'1',u'60'), position=3,
codes=[] ) ),
Element( u'NM104', Properties(desc=u'Name First', req_sit=u'S', data_type=(u'AN',u'1',u'35'), position=4,
codes=[] ) ),
Element( u'NM105', Properties(desc=u'Name Middle', req_sit=u'S', data_type=(u'AN',u'1',u'25'), position=5,
codes=[] ) ),
Element( u'NM106', Properties(desc=u'Name Prefix', req_sit=u'N', data_type=(u'AN',u'1',u'10'), position=6,
codes=[] ) ),
Element( u'NM107', Properties(desc=u'Name Suffix', req_sit=u'S', data_type=(u'AN',u'1',u'10'), position=7,
codes=[] ) ),
Element( u'NM108', Properties(desc=u'Identification Code Qualifier', req_sit=u'S', data_type=(u'ID',u'1',u'2'), position=8,
codes=[u'24', u'34', u'46', u'XX'] ) ),
Element( u'NM109', Properties(desc=u'Identification Code', req_sit=u'S', data_type=(u'AN',u'2',u'80'), position=9,
codes=[] ) ),
Element( u'NM110', Properties(desc=u'Entity Relationship Code', req_sit=u'N', data_type=(u'ID',u'2',u'2'), position=10,
codes=[] ) ),
Element( u'NM111', Properties(desc=u'Entity Identifier Code', req_sit=u'N', data_type=(u'ID',u'2',u'3'), position=11,
codes=[] ) ),
),
Segment( u'REF', Properties(syntax='',req_sit=u'S',repeat=u'7',pos=u'180',desc=u'Service Provider Supplemental Identification'),
Element( u'REF01', Properties(desc=u'Reference Identification Qualifier', req_sit=u'R', data_type=(u'ID',u'2',u'3'), position=1,
codes=[u'1G', u'1J', u'EI', u'N5', u'N7', u'SY', u'ZH'] ) ),
Element( u'REF02', Properties(desc=u'Reference Identification', req_sit=u'R', data_type=(u'AN',u'1',u'50'), position=2,
codes=[] ) ),
Element( u'REF03', Properties(desc=u'Description', req_sit=u'N', data_type=(u'AN',u'1',u'80'), position=3,
codes=[] ) ),
Composite( u'C040', Properties(req_sit=u'N',refdes='',seq=u'04',desc=u'Reference Identifier'),
),
),
Segment( u'N3', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'200',desc=u'Service Provider Address'),
Element( u'N301', Properties(desc=u'Address Information', req_sit=u'R', data_type=(u'AN',u'1',u'55'), position=1,
codes=[] ) ),
Element( u'N302', Properties(desc=u'Address Information', req_sit=u'S', data_type=(u'AN',u'1',u'55'), position=2,
codes=[] ) ),
),
Segment( u'N4', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'210',desc=u'Service Provider City/State/ZIP Code'),
Element( u'N401', Properties(desc=u'City Name', req_sit=u'S', data_type=(u'AN',u'2',u'30'), position=1,
codes=[] ) ),
Element( u'N402', Properties(desc=u'State or Province Code', req_sit=u'S', data_type=(u'ID',u'2',u'2'), position=2,
codes=[] ) ),
Element( u'N403', Properties(desc=u'Postal Code', req_sit=u'S', data_type=(u'ID',u'3',u'15'), position=3,
codes=[] ) ),
Element( u'N404', Properties(desc=u'Country Code', req_sit=u'S', data_type=(u'ID',u'2',u'3'), position=4,
codes=[] ) ),
Element( u'N405', Properties(desc=u'Location Qualifier', req_sit=u'N', data_type=(u'ID',u'1',u'2'), position=5,
codes=[] ) ),
Element( u'N406', Properties(desc=u'Location Identifier', req_sit=u'N', data_type=(u'AN',u'1',u'30'), position=6,
codes=[] ) ),
),
Segment( u'PER', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'220',desc=u'Service Provider Contact Information'),
Element( u'PER01', Properties(desc=u'Contact Function Code', req_sit=u'R', data_type=(u'ID',u'2',u'2'), position=1,
codes=[u'IC'] ) ),
Element( u'PER02', Properties(desc=u'Name', req_sit=u'S', data_type=(u'AN',u'1',u'60'), position=2,
codes=[] ) ),
Element( u'PER03', Properties(desc=u'Communication Number Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'2'), position=3,
codes=[u'EM', u'FX', u'TE'] ) ),
Element( u'PER04', Properties(desc=u'Communication Number', req_sit=u'S', data_type=(u'AN',u'1',u'256'), position=4,
codes=[] ) ),
Element( u'PER05', Properties(desc=u'Communication Number Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'2'), position=5,
codes=[u'EM', u'EX', u'FX', u'TE'] ) ),
Element( u'PER06', Properties(desc=u'Communication Number', req_sit=u'S', data_type=(u'AN',u'1',u'256'), position=6,
codes=[] ) ),
Element( u'PER07', Properties(desc=u'Communication Number Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'2'), position=7,
codes=[u'EM', u'EX', u'FX', u'TE'] ) ),
Element( u'PER08', Properties(desc=u'Communication Number', req_sit=u'S', data_type=(u'AN',u'1',u'256'), position=8,
codes=[] ) ),
Element( u'PER09', Properties(desc=u'Contact Inquiry Reference', req_sit=u'N', data_type=(u'AN',u'1',u'20'), position=9,
codes=[] ) ),
),
Segment( u'AAA', Properties(syntax='',req_sit=u'S',repeat=u'9',pos=u'230',desc=u'Service Provider Request Validation'),
Element( u'AAA01', Properties(desc=u'Yes/No Condition or Response Code', req_sit=u'R', data_type=(u'ID',u'1',u'1'), position=1,
codes=[u'N', u'Y'] ) ),
Element( u'AAA02', Properties(desc=u'Agency Qualifier Code', req_sit=u'N', data_type=(u'ID',u'2',u'2'), position=2,
codes=[] ) ),
Element( u'AAA03', Properties(desc=u'Reject Reason Code', req_sit=u'S', data_type=(u'ID',u'2',u'2'), position=3,
codes=[u'15', u'33', u'35', u'41', u'43', u'44', u'45', u'46', u'47', u'49', u'51', u'52', u'79', u'97'] ) ),
Element( u'AAA04', Properties(desc=u'Follow-up Action Code', req_sit=u'S', data_type=(u'ID',u'1',u'1'), position=4,
codes=[u'C', u'N'] ) ),
),
Segment( u'PRV', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'240',desc=u'Service Provider Information'),
Element( u'PRV01', Properties(desc=u'Provider Code', req_sit=u'R', data_type=(u'ID',u'1',u'3'), position=1,
codes=[u'AD', u'AS', u'AT', u'CO', u'CV', u'OP', u'OR', u'OT', u'PC', u'PE'] ) ),
Element( u'PRV02', Properties(desc=u'Reference Identification Qualifier', req_sit=u'R', data_type=(u'ID',u'2',u'3'), position=2,
codes=[u'ZZ'] ) ),
Element( u'PRV03', Properties(desc=u'Reference Identification', req_sit=u'R', data_type=(u'AN',u'1',u'50'), position=3,
codes=[] ) ),
Element( u'PRV04', Properties(desc=u'State or Province Code', req_sit=u'N', data_type=(u'ID',u'2',u'2'), position=4,
codes=[] ) ),
Composite( u'C035', Properties(req_sit=u'N',refdes='',seq=u'05',desc=u'Provider Specialty Information'),
),
Element( u'PRV06', Properties(desc=u'Provider Organization Code', req_sit=u'N', data_type=(u'ID',u'3',u'3'), position=6,
codes=[] ) ),
),
)
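# Loop 2010F -- Additional Service Information Contact Name: the party to
# contact (NM1) about paperwork referenced at the service level, with
# address (N3/N4) and communication numbers (PER).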
parsed_278_2010F = Loop( u'2010F', Properties(looptype='',repeat=u'1',pos=u'170',req_sit=u'S',desc=u'Additional Service Information Contact Name'),
Segment( u'NM1', Properties(syntax='',req_sit=u'S',repeat=u'>1',pos=u'170',desc=u'Additional Service Information Contact Name'),
Element( u'NM101', Properties(desc=u'Entity Identifier Code', req_sit=u'R', data_type=(u'ID',u'2',u'3'), position=1,
codes=[u'1P', u'2B', u'ABG', u'FA', u'PR', u'X3'] ) ),
Element( u'NM102', Properties(desc=u'Entity Type Qualifier', req_sit=u'R', data_type=(u'ID',u'1',u'1'), position=2,
codes=[u'1', u'2'] ) ),
Element( u'NM103', Properties(desc=u'Name Last or Organization Name', req_sit=u'S', data_type=(u'AN',u'1',u'60'), position=3,
codes=[] ) ),
Element( u'NM104', Properties(desc=u'Name First', req_sit=u'S', data_type=(u'AN',u'1',u'35'), position=4,
codes=[] ) ),
Element( u'NM105', Properties(desc=u'Name Middle', req_sit=u'S', data_type=(u'AN',u'1',u'25'), position=5,
codes=[] ) ),
Element( u'NM106', Properties(desc=u'Name Prefix', req_sit=u'N', data_type=(u'AN',u'1',u'10'), position=6,
codes=[] ) ),
Element( u'NM107', Properties(desc=u'Name Suffix', req_sit=u'S', data_type=(u'AN',u'1',u'10'), position=7,
codes=[] ) ),
Element( u'NM108', Properties(desc=u'Identification Code Qualifier', req_sit=u'S', data_type=(u'ID',u'1',u'2'), position=8,
codes=[u'24', u'34', u'46', u'PI', u'XV', u'XX'] ) ),
Element( u'NM109', Properties(desc=u'Identification Code', req_sit=u'S', data_type=(u'AN',u'2',u'80'), position=9,
codes=[] ) ),
Element( u'NM110', Properties(desc=u'Entity Relationship Code', req_sit=u'N', data_type=(u'ID',u'2',u'2'), position=10,
codes=[] ) ),
Element( u'NM111', Properties(desc=u'Entity Identifier Code', req_sit=u'N', data_type=(u'ID',u'2',u'3'), position=11,
codes=[] ) ),
),
Segment( u'N3', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'200',desc=u'Additional Service Information Contact Address'),
Element( u'N301', Properties(desc=u'Address Information', req_sit=u'R', data_type=(u'AN',u'1',u'55'), position=1,
codes=[] ) ),
Element( u'N302', Properties(desc=u'Address Information', req_sit=u'S', data_type=(u'AN',u'1',u'55'), position=2,
codes=[] ) ),
),
Segment( u'N4', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'210',desc=u'Additional Service Information Contact City/State/ZIP Code'),
Element( u'N401', Properties(desc=u'City Name', req_sit=u'S', data_type=(u'AN',u'2',u'30'), position=1,
codes=[] ) ),
Element( u'N402', Properties(desc=u'State or Province Code', req_sit=u'S', data_type=(u'ID',u'2',u'2'), position=2,
codes=[] ) ),
Element( u'N403', Properties(desc=u'Postal Code', req_sit=u'S', data_type=(u'ID',u'3',u'15'), position=3,
codes=[] ) ),
Element( u'N404', Properties(desc=u'Country Code', req_sit=u'S', data_type=(u'ID',u'2',u'3'), position=4,
codes=[] ) ),
Element( u'N405', Properties(desc=u'Location Qualifier', req_sit=u'S', data_type=(u'ID',u'1',u'2'), position=5,
codes=[u'B1', u'DP'] ) ),
Element( u'N406', Properties(desc=u'Location Identifier', req_sit=u'S', data_type=(u'AN',u'1',u'30'), position=6,
codes=[] ) ),
),
Segment( u'PER', Properties(syntax='',req_sit=u'S',repeat=u'3',pos=u'220',desc=u'Additional Service Information Contact Information'),
Element( u'PER01', Properties(desc=u'Contact Function Code', req_sit=u'R', data_type=(u'ID',u'2',u'2'), position=1,
codes=[u'IC'] ) ),
Element( u'PER02', Properties(desc=u'Name', req_sit=u'S', data_type=(u'AN',u'1',u'60'), position=2,
codes=[] ) ),
Element( u'PER03', Properties(desc=u'Communication Number Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'2'), position=3,
codes=[u'EM', u'FX', u'TE'] ) ),
Element( u'PER04', Properties(desc=u'Communication Number', req_sit=u'S', data_type=(u'AN',u'1',u'256'), position=4,
codes=[] ) ),
Element( u'PER05', Properties(desc=u'Communication Number Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'2'), position=5,
codes=[u'EM', u'EX', u'FX', u'TE'] ) ),
Element( u'PER06', Properties(desc=u'Communication Number', req_sit=u'S', data_type=(u'AN',u'1',u'256'), position=6,
codes=[] ) ),
Element( u'PER07', Properties(desc=u'Communication Number Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'2'), position=7,
codes=[u'EM', u'EX', u'FX', u'TE'] ) ),
Element( u'PER08', Properties(desc=u'Communication Number', req_sit=u'S', data_type=(u'AN',u'1',u'256'), position=8,
codes=[] ) ),
Element( u'PER09', Properties(desc=u'Contact Inquiry Reference', req_sit=u'N', data_type=(u'AN',u'1',u'20'), position=9,
codes=[] ) ),
),
)
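# Loop 2000F -- Service Level (HL03 u'SS', HL04 u'0': no child levels),
# repeatable per requested service.  Carries the certification request (UM),
# the review outcome (HCR), a run of qualified DTP dates, procedure codes
# (HI), the delivery pattern (HSD), specialty CR1/CR2/CR5/CR6 segments,
# paperwork (PWK), free-form text (MSG) and the nested 2010F contact loop.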
parsed_278_2000F = Loop( u'2000F', Properties(looptype='',repeat=u'>1',pos=u'180',req_sit=u'R',desc=u'Service Level'),
Segment( u'HL', Properties(syntax='',req_sit=u'R',repeat=u'1',pos=u'010',desc=u'Service Level'),
Element( u'HL01', Properties(desc=u'Hierarchical ID Number', req_sit=u'R', data_type=(u'AN',u'1',u'12'), position=1,
codes=[] ) ),
Element( u'HL02', Properties(desc=u'Hierarchical Parent ID Number', req_sit=u'R', data_type=(u'AN',u'1',u'12'), position=2,
codes=[] ) ),
Element( u'HL03', Properties(desc=u'Hierarchical Level Code', req_sit=u'R', data_type=(u'ID',u'1',u'2'), position=3,
codes=[u'SS'] ) ),
Element( u'HL04', Properties(desc=u'Hierarchical Child Code', req_sit=u'R', data_type=(u'ID',u'1',u'1'), position=4,
codes=[u'0'] ) ),
),
Segment( u'TRN', Properties(syntax='',req_sit=u'S',repeat=u'3',pos=u'020',desc=u'Service Trace Number'),
Element( u'TRN01', Properties(desc=u'Trace Type Code', req_sit=u'R', data_type=(u'ID',u'1',u'2'), position=1,
codes=[u'1', u'2'] ) ),
Element( u'TRN02', Properties(desc=u'Reference Identification', req_sit=u'R', data_type=(u'AN',u'1',u'50'), position=2,
codes=[] ) ),
Element( u'TRN03', Properties(desc=u'Originating Company Identifier', req_sit=u'R', data_type=(u'AN',u'10',u'10'), position=3,
codes=[] ) ),
Element( u'TRN04', Properties(desc=u'Reference Identification', req_sit=u'S', data_type=(u'AN',u'1',u'50'), position=4,
codes=[] ) ),
),
Segment( u'AAA', Properties(syntax='',req_sit=u'S',repeat=u'9',pos=u'030',desc=u'Service Request Validation'),
Element( u'AAA01', Properties(desc=u'Yes/No Condition or Response Code', req_sit=u'R', data_type=(u'ID',u'1',u'1'), position=1,
codes=[u'N', u'Y'] ) ),
Element( u'AAA02', Properties(desc=u'Agency Qualifier Code', req_sit=u'N', data_type=(u'ID',u'2',u'2'), position=2,
codes=[] ) ),
Element( u'AAA03', Properties(desc=u'Reject Reason Code', req_sit=u'S', data_type=(u'ID',u'2',u'2'), position=3,
codes=[u'15', u'33', u'52', u'57', u'60', u'61', u'62', u'T5'] ) ),
Element( u'AAA04', Properties(desc=u'Follow-up Action Code', req_sit=u'S', data_type=(u'ID',u'1',u'1'), position=4,
codes=[u'C', u'N'] ) ),
),
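# UM is the core of the service-level request: request category,
# certification type, service type and service location (C023).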
Segment( u'UM', Properties(syntax='',req_sit=u'R',repeat=u'1',pos=u'040',desc=u'Health Care Services Review Information'),
Element( u'UM01', Properties(desc=u'Request Category Code', req_sit=u'R', data_type=(u'ID',u'1',u'2'), position=1,
codes=[u'AR', u'HS', u'SC'] ) ),
Element( u'UM02', Properties(desc=u'Certification Type Code', req_sit=u'R', data_type=(u'ID',u'1',u'1'), position=2,
codes=[u'1', u'2', u'3', u'4', u'I', u'R', u'S'] ) ),
Element( u'UM03', Properties(desc=u'Service Type Code', req_sit=u'S', data_type=(u'ID',u'1',u'2'), position=3,
codes=[u'1', u'2', u'3', u'4', u'5', u'6', u'7', u'8', u'12', u'14', u'15', u'16', u'17', u'18', u'20', u'21', u'23', u'24', u'25', u'26', u'27', u'28', u'33', u'34', u'35', u'36', u'37', u'38', u'39', u'40', u'42', u'44', u'45', u'46', u'48', u'50', u'51', u'52', u'53', u'54', u'56', u'57', u'58', u'59', u'61', u'62', u'63', u'64', u'65', u'67', u'68', u'69', u'70', u'71', u'72', u'73', u'74', u'75', u'76', u'77', u'78', u'79', u'80', u'82', u'83', u'84', u'85', u'86', u'93', u'94', u'95', u'98', u'99', u'A0', u'A1', u'A2', u'A3', u'A4', u'A6', u'A7', u'A8', u'A9', u'AB', u'AC', u'AD', u'AE', u'AF', u'AG', u'AI', u'AJ', u'AK', u'AL', u'AR', u'BB', u'BC', u'BD', u'BE', u'BF', u'BG', u'BS'] ) ),
Composite( u'C023', Properties(req_sit=u'S',refdes='',seq=u'04',desc=u'Health Care Service Location Information'),
Element( u'UM04-01', Properties(desc=u'Facility Code Value', req_sit=u'R', data_type=(u'AN',u'1',u'2'), position=0,
codes=[] ) ),
Element( u'UM04-02', Properties(desc=u'Facility Code Qualifier', req_sit=u'R', data_type=(u'ID',u'1',u'2'), position=1,
codes=[u'A', u'B'] ) ),
Element( u'UM04-03', Properties(desc=u'Claim Frequency Type Code', req_sit=u'N', data_type=(u'ID',u'1',u'1'), position=2,
codes=[] ) ),
),
Composite( u'C024', Properties(req_sit=u'N',refdes='',seq=u'05',desc=u'Related Causes Information'),
),
Element( u'UM06', Properties(desc=u'Level of Service Code', req_sit=u'S', data_type=(u'ID',u'1',u'3'), position=6,
codes=[u'03', u'U'] ) ),
Element( u'UM07', Properties(desc=u'Current Health Condition Code', req_sit=u'N', data_type=(u'ID',u'1',u'1'), position=7,
codes=[] ) ),
Element( u'UM08', Properties(desc=u'Prognosis Code', req_sit=u'N', data_type=(u'ID',u'1',u'1'), position=8,
codes=[] ) ),
Element( u'UM09', Properties(desc=u'Release of Information Code', req_sit=u'N', data_type=(u'ID',u'1',u'1'), position=9,
codes=[] ) ),
Element( u'UM10', Properties(desc=u'Delay Reason Code', req_sit=u'N', data_type=(u'ID',u'1',u'2'), position=10,
codes=[] ) ),
),
Segment( u'HCR', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'050',desc=u'Health Care Services Review'),
Element( u'HCR01', Properties(desc=u'Action Code', req_sit=u'R', data_type=(u'ID',u'1',u'2'), position=1,
codes=[u'A1', u'A3', u'A4', u'A6', u'CT', u'NA'] ) ),
Element( u'HCR02', Properties(desc=u'Reference Identification', req_sit=u'S', data_type=(u'AN',u'1',u'50'), position=2,
codes=[] ) ),
Element( u'HCR03', Properties(desc=u'Reject Reason Code', req_sit=u'S', data_type=(u'ID',u'2',u'2'), position=3,
codes=[u'35', u'36', u'37', u'41', u'53', u'69', u'70', u'82', u'83', u'84', u'85', u'86', u'87', u'88', u'89', u'90', u'91', u'92', u'96', u'98', u'E8'] ) ),
Element( u'HCR04', Properties(desc=u'Yes/No Condition or Response Code', req_sit=u'S', data_type=(u'ID',u'1',u'1'), position=4,
codes=[u'N', u'Y'] ) ),
),
Segment( u'REF', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'060',desc=u'Previous Certification Identification'),
Element( u'REF01', Properties(desc=u'Reference Identification Qualifier', req_sit=u'R', data_type=(u'ID',u'2',u'3'), position=1,
codes=[u'BB'] ) ),
Element( u'REF02', Properties(desc=u'Reference Identification', req_sit=u'R', data_type=(u'AN',u'1',u'50'), position=2,
codes=[] ) ),
Element( u'REF03', Properties(desc=u'Description', req_sit=u'N', data_type=(u'AN',u'1',u'80'), position=3,
codes=[] ) ),
Composite( u'C040', Properties(req_sit=u'N',refdes='',seq=u'04',desc=u'Reference Identifier'),
),
),
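# The DTP segments below share pos u'070' and differ only by DTP01
# qualifier: 472 service, 435 admission, 096 discharge, 456 surgery,
# 102 certification issue, 036 expiration, 007 effective.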
Segment( u'DTP', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'070',desc=u'Service Date'),
Element( u'DTP01', Properties(desc=u'Date/Time Qualifier', req_sit=u'R', data_type=(u'ID',u'3',u'3'), position=1,
codes=[u'472'] ) ),
Element( u'DTP02', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'R', data_type=(u'ID',u'2',u'3'), position=2,
codes=[u'D8', u'RD8'] ) ),
Element( u'DTP03', Properties(desc=u'Date Time Period', req_sit=u'R', data_type=(u'AN',u'1',u'35'), position=3,
codes=[] ) ),
),
Segment( u'DTP', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'070',desc=u'Admission Date'),
Element( u'DTP01', Properties(desc=u'Date/Time Qualifier', req_sit=u'R', data_type=(u'ID',u'3',u'3'), position=1,
codes=[u'435'] ) ),
Element( u'DTP02', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'R', data_type=(u'ID',u'2',u'3'), position=2,
codes=[u'D8', u'RD8'] ) ),
Element( u'DTP03', Properties(desc=u'Date Time Period', req_sit=u'R', data_type=(u'AN',u'1',u'35'), position=3,
codes=[] ) ),
),
Segment( u'DTP', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'070',desc=u'Discharge Date'),
Element( u'DTP01', Properties(desc=u'Date/Time Qualifier', req_sit=u'R', data_type=(u'ID',u'3',u'3'), position=1,
codes=[u'096'] ) ),
Element( u'DTP02', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'R', data_type=(u'ID',u'2',u'3'), position=2,
codes=[u'D8'] ) ),
Element( u'DTP03', Properties(desc=u'Date Time Period', req_sit=u'R', data_type=(u'AN',u'1',u'35'), position=3,
codes=[] ) ),
),
Segment( u'DTP', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'070',desc=u'Surgery Date'),
Element( u'DTP01', Properties(desc=u'Date/Time Qualifier', req_sit=u'R', data_type=(u'ID',u'3',u'3'), position=1,
codes=[u'456'] ) ),
Element( u'DTP02', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'R', data_type=(u'ID',u'2',u'3'), position=2,
codes=[u'D8'] ) ),
Element( u'DTP03', Properties(desc=u'Date Time Period', req_sit=u'R', data_type=(u'AN',u'1',u'35'), position=3,
codes=[] ) ),
),
Segment( u'DTP', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'070',desc=u'Certification Issue Date'),
Element( u'DTP01', Properties(desc=u'Date/Time Qualifier', req_sit=u'R', data_type=(u'ID',u'3',u'3'), position=1,
codes=[u'102'] ) ),
Element( u'DTP02', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'R', data_type=(u'ID',u'2',u'3'), position=2,
codes=[u'D8'] ) ),
Element( u'DTP03', Properties(desc=u'Date Time Period', req_sit=u'R', data_type=(u'AN',u'1',u'35'), position=3,
codes=[] ) ),
),
Segment( u'DTP', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'070',desc=u'Certification Expiration Date'),
Element( u'DTP01', Properties(desc=u'Date/Time Qualifier', req_sit=u'R', data_type=(u'ID',u'3',u'3'), position=1,
codes=[u'036'] ) ),
Element( u'DTP02', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'R', data_type=(u'ID',u'2',u'3'), position=2,
codes=[u'D8'] ) ),
Element( u'DTP03', Properties(desc=u'Date Time Period', req_sit=u'R', data_type=(u'AN',u'1',u'35'), position=3,
codes=[] ) ),
),
Segment( u'DTP', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'070',desc=u'Certification Effective Date'),
Element( u'DTP01', Properties(desc=u'Date/Time Qualifier', req_sit=u'R', data_type=(u'ID',u'3',u'3'), position=1,
codes=[u'007'] ) ),
Element( u'DTP02', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'R', data_type=(u'ID',u'2',u'3'), position=2,
codes=[u'D8', u'RD8'] ) ),
Element( u'DTP03', Properties(desc=u'Date Time Period', req_sit=u'R', data_type=(u'AN',u'1',u'35'), position=3,
codes=[] ) ),
),
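# HI carries up to twelve C022 procedure-code composites; only
# Procedure Code 1 is required, the rest are situational.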
Segment( u'HI', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'080',desc=u'Procedures'),
Composite( u'C022', Properties(req_sit=u'R',refdes='',seq=u'01',desc=u'Procedure Code 1'),
Element( u'HI01-01', Properties(desc=u'Code List Qualifier Code', req_sit=u'R', data_type=(u'ID',u'1',u'3'), position=0,
codes=[u'ABR', u'BO', u'BQ', u'JP', u'LOI', u'NDC', u'ZZ'] ) ),
Element( u'HI01-02', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=1,
codes=[] ) ),
Element( u'HI01-03', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'3'), position=2,
codes=[u'D8', u'RD8'] ) ),
Element( u'HI01-04', Properties(desc=u'Date Time Period', req_sit=u'S', data_type=(u'AN',u'1',u'35'), position=3,
codes=[] ) ),
Element( u'HI01-05', Properties(desc=u'Monetary Amount', req_sit=u'S', data_type=(u'R',u'1',u'18'), position=4,
codes=[] ) ),
Element( u'HI01-06', Properties(desc=u'Quantity', req_sit=u'S', data_type=(u'R',u'1',u'15'), position=5,
codes=[] ) ),
Element( u'HI01-07', Properties(desc=u'Version Identifier', req_sit=u'S', data_type=(u'AN',u'1',u'30'), position=6,
codes=[] ) ),
),
Composite( u'C022', Properties(req_sit=u'S',refdes='',seq=u'02',desc=u'Procedure Code 2'),
Element( u'HI02-01', Properties(desc=u'Code List Qualifier Code', req_sit=u'R', data_type=(u'ID',u'1',u'3'), position=0,
codes=[u'ABR', u'BO', u'BQ', u'JP', u'LOI', u'NDC', u'ZZ'] ) ),
Element( u'HI02-02', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=1,
codes=[] ) ),
Element( u'HI02-03', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'3'), position=2,
codes=[u'D8', u'RD8'] ) ),
Element( u'HI02-04', Properties(desc=u'Date Time Period', req_sit=u'S', data_type=(u'AN',u'1',u'35'), position=3,
codes=[] ) ),
Element( u'HI02-05', Properties(desc=u'Monetary Amount', req_sit=u'S', data_type=(u'R',u'1',u'18'), position=4,
codes=[] ) ),
Element( u'HI02-06', Properties(desc=u'Quantity', req_sit=u'S', data_type=(u'R',u'1',u'15'), position=5,
codes=[] ) ),
Element( u'HI02-07', Properties(desc=u'Version Identifier', req_sit=u'S', data_type=(u'AN',u'1',u'30'), position=6,
codes=[] ) ),
),
Composite( u'C022', Properties(req_sit=u'S',refdes='',seq=u'03',desc=u'Procedure Code 3'),
Element( u'HI03-01', Properties(desc=u'Code List Qualifier Code', req_sit=u'R', data_type=(u'ID',u'1',u'3'), position=0,
codes=[u'ABR', u'BO', u'BQ', u'JP', u'LOI', u'NDC', u'ZZ'] ) ),
Element( u'HI03-02', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=1,
codes=[] ) ),
Element( u'HI03-03', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'3'), position=2,
codes=[u'D8', u'RD8'] ) ),
Element( u'HI03-04', Properties(desc=u'Date Time Period', req_sit=u'S', data_type=(u'AN',u'1',u'35'), position=3,
codes=[] ) ),
Element( u'HI03-05', Properties(desc=u'Monetary Amount', req_sit=u'S', data_type=(u'R',u'1',u'18'), position=4,
codes=[] ) ),
Element( u'HI03-06', Properties(desc=u'Quantity', req_sit=u'S', data_type=(u'R',u'1',u'15'), position=5,
codes=[] ) ),
Element( u'HI03-07', Properties(desc=u'Version Identifier', req_sit=u'S', data_type=(u'AN',u'1',u'30'), position=6,
codes=[] ) ),
),
Composite( u'C022', Properties(req_sit=u'S',refdes='',seq=u'04',desc=u'Procedure Code 4'),
Element( u'HI04-01', Properties(desc=u'Code List Qualifier Code', req_sit=u'R', data_type=(u'ID',u'1',u'3'), position=0,
codes=[u'ABR', u'BO', u'BQ', u'JP', u'LOI', u'NDC', u'ZZ'] ) ),
Element( u'HI04-02', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=1,
codes=[] ) ),
Element( u'HI04-03', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'3'), position=2,
codes=[u'D8', u'RD8'] ) ),
Element( u'HI04-04', Properties(desc=u'Date Time Period', req_sit=u'S', data_type=(u'AN',u'1',u'35'), position=3,
codes=[] ) ),
Element( u'HI04-05', Properties(desc=u'Monetary Amount', req_sit=u'S', data_type=(u'R',u'1',u'18'), position=4,
codes=[] ) ),
Element( u'HI04-06', Properties(desc=u'Quantity', req_sit=u'S', data_type=(u'R',u'1',u'15'), position=5,
codes=[] ) ),
Element( u'HI04-07', Properties(desc=u'Version Identifier', req_sit=u'S', data_type=(u'AN',u'1',u'30'), position=6,
codes=[] ) ),
),
Composite( u'C022', Properties(req_sit=u'S',refdes='',seq=u'05',desc=u'Procedure Code 5'),
Element( u'HI05-01', Properties(desc=u'Code List Qualifier Code', req_sit=u'R', data_type=(u'ID',u'1',u'3'), position=0,
codes=[u'ABR', u'BO', u'BQ', u'JP', u'LOI', u'NDC', u'ZZ'] ) ),
Element( u'HI05-02', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=1,
codes=[] ) ),
Element( u'HI05-03', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'3'), position=2,
codes=[u'D8', u'RD8'] ) ),
Element( u'HI05-04', Properties(desc=u'Date Time Period', req_sit=u'S', data_type=(u'AN',u'1',u'35'), position=3,
codes=[] ) ),
Element( u'HI05-05', Properties(desc=u'Monetary Amount', req_sit=u'S', data_type=(u'R',u'1',u'18'), position=4,
codes=[] ) ),
Element( u'HI05-06', Properties(desc=u'Quantity', req_sit=u'S', data_type=(u'R',u'1',u'15'), position=5,
codes=[] ) ),
Element( u'HI05-07', Properties(desc=u'Version Identifier', req_sit=u'S', data_type=(u'AN',u'1',u'30'), position=6,
codes=[] ) ),
),
Composite( u'C022', Properties(req_sit=u'S',refdes='',seq=u'06',desc=u'Procedure Code 6'),
Element( u'HI06-01', Properties(desc=u'Code List Qualifier Code', req_sit=u'R', data_type=(u'ID',u'1',u'3'), position=0,
codes=[u'ABR', u'BO', u'BQ', u'JP', u'LOI', u'NDC', u'ZZ'] ) ),
Element( u'HI06-02', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=1,
codes=[] ) ),
Element( u'HI06-03', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'3'), position=2,
codes=[u'D8', u'RD8'] ) ),
Element( u'HI06-04', Properties(desc=u'Date Time Period', req_sit=u'S', data_type=(u'AN',u'1',u'35'), position=3,
codes=[] ) ),
Element( u'HI06-05', Properties(desc=u'Monetary Amount', req_sit=u'S', data_type=(u'R',u'1',u'18'), position=4,
codes=[] ) ),
Element( u'HI06-06', Properties(desc=u'Quantity', req_sit=u'S', data_type=(u'R',u'1',u'15'), position=5,
codes=[] ) ),
Element( u'HI06-07', Properties(desc=u'Version Identifier', req_sit=u'S', data_type=(u'AN',u'1',u'30'), position=6,
codes=[] ) ),
),
Composite( u'C022', Properties(req_sit=u'S',refdes='',seq=u'07',desc=u'Procedure Code 7'),
Element( u'HI07-01', Properties(desc=u'Code List Qualifier Code', req_sit=u'R', data_type=(u'ID',u'1',u'3'), position=0,
codes=[u'ABR', u'BO', u'BQ', u'JP', u'LOI', u'NDC', u'ZZ'] ) ),
Element( u'HI07-02', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=1,
codes=[] ) ),
Element( u'HI07-03', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'3'), position=2,
codes=[u'D8', u'RD8'] ) ),
Element( u'HI07-04', Properties(desc=u'Date Time Period', req_sit=u'S', data_type=(u'AN',u'1',u'35'), position=3,
codes=[] ) ),
Element( u'HI07-05', Properties(desc=u'Monetary Amount', req_sit=u'S', data_type=(u'R',u'1',u'18'), position=4,
codes=[] ) ),
Element( u'HI07-06', Properties(desc=u'Quantity', req_sit=u'S', data_type=(u'R',u'1',u'15'), position=5,
codes=[] ) ),
Element( u'HI07-07', Properties(desc=u'Version Identifier', req_sit=u'S', data_type=(u'AN',u'1',u'30'), position=6,
codes=[] ) ),
),
Composite( u'C022', Properties(req_sit=u'S',refdes='',seq=u'08',desc=u'Procedure Code 8'),
Element( u'HI08-01', Properties(desc=u'Code List Qualifier Code', req_sit=u'R', data_type=(u'ID',u'1',u'3'), position=0,
codes=[u'ABR', u'BO', u'BQ', u'JP', u'LOI', u'NDC', u'ZZ'] ) ),
Element( u'HI08-02', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=1,
codes=[] ) ),
Element( u'HI08-03', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'3'), position=2,
codes=[u'D8', u'RD8'] ) ),
Element( u'HI08-04', Properties(desc=u'Date Time Period', req_sit=u'S', data_type=(u'AN',u'1',u'35'), position=3,
codes=[] ) ),
Element( u'HI08-05', Properties(desc=u'Monetary Amount', req_sit=u'S', data_type=(u'R',u'1',u'18'), position=4,
codes=[] ) ),
Element( u'HI08-06', Properties(desc=u'Quantity', req_sit=u'S', data_type=(u'R',u'1',u'15'), position=5,
codes=[] ) ),
Element( u'HI08-07', Properties(desc=u'Version Identifier', req_sit=u'S', data_type=(u'AN',u'1',u'30'), position=6,
codes=[] ) ),
),
Composite( u'C022', Properties(req_sit=u'S',refdes='',seq=u'09',desc=u'Procedure Code 9'),
Element( u'HI09-01', Properties(desc=u'Code List Qualifier Code', req_sit=u'R', data_type=(u'ID',u'1',u'3'), position=0,
codes=[u'ABR', u'BO', u'BQ', u'JP', u'LOI', u'NDC', u'ZZ'] ) ),
Element( u'HI09-02', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=1,
codes=[] ) ),
Element( u'HI09-03', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'3'), position=2,
codes=[u'D8', u'RD8'] ) ),
Element( u'HI09-04', Properties(desc=u'Date Time Period', req_sit=u'S', data_type=(u'AN',u'1',u'35'), position=3,
codes=[] ) ),
Element( u'HI09-05', Properties(desc=u'Monetary Amount', req_sit=u'S', data_type=(u'R',u'1',u'18'), position=4,
codes=[] ) ),
Element( u'HI09-06', Properties(desc=u'Quantity', req_sit=u'S', data_type=(u'R',u'1',u'15'), position=5,
codes=[] ) ),
Element( u'HI09-07', Properties(desc=u'Version Identifier', req_sit=u'S', data_type=(u'AN',u'1',u'30'), position=6,
codes=[] ) ),
),
Composite( u'C022', Properties(req_sit=u'S',refdes='',seq=u'10',desc=u'Procedure Code 10'),
Element( u'HI10-01', Properties(desc=u'Code List Qualifier Code', req_sit=u'R', data_type=(u'ID',u'1',u'3'), position=0,
codes=[u'ABR', u'BO', u'BQ', u'JP', u'LOI', u'NDC', u'ZZ'] ) ),
Element( u'HI10-02', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=1,
codes=[] ) ),
Element( u'HI10-03', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'3'), position=2,
codes=[u'D8', u'RD8'] ) ),
Element( u'HI10-04', Properties(desc=u'Date Time Period', req_sit=u'S', data_type=(u'AN',u'1',u'35'), position=3,
codes=[] ) ),
Element( u'HI10-05', Properties(desc=u'Monetary Amount', req_sit=u'S', data_type=(u'R',u'1',u'18'), position=4,
codes=[] ) ),
Element( u'HI10-06', Properties(desc=u'Quantity', req_sit=u'S', data_type=(u'R',u'1',u'15'), position=5,
codes=[] ) ),
Element( u'HI10-07', Properties(desc=u'Version Identifier', req_sit=u'S', data_type=(u'AN',u'1',u'30'), position=6,
codes=[] ) ),
),
Composite( u'C022', Properties(req_sit=u'S',refdes='',seq=u'11',desc=u'Procedure Code 11'),
Element( u'HI11-01', Properties(desc=u'Code List Qualifier Code', req_sit=u'R', data_type=(u'ID',u'1',u'3'), position=0,
codes=[u'ABR', u'BO', u'BQ', u'JP', u'LOI', u'NDC', u'ZZ'] ) ),
Element( u'HI11-02', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=1,
codes=[] ) ),
Element( u'HI11-03', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'3'), position=2,
codes=[u'D8', u'RD8'] ) ),
Element( u'HI11-04', Properties(desc=u'Date Time Period', req_sit=u'S', data_type=(u'AN',u'1',u'35'), position=3,
codes=[] ) ),
Element( u'HI11-05', Properties(desc=u'Monetary Amount', req_sit=u'S', data_type=(u'R',u'1',u'18'), position=4,
codes=[] ) ),
Element( u'HI11-06', Properties(desc=u'Quantity', req_sit=u'S', data_type=(u'R',u'1',u'15'), position=5,
codes=[] ) ),
Element( u'HI11-07', Properties(desc=u'Version Identifier', req_sit=u'S', data_type=(u'AN',u'1',u'30'), position=6,
codes=[] ) ),
),
Composite( u'C022', Properties(req_sit=u'S',refdes='',seq=u'12',desc=u'Procedure Code 12'),
Element( u'HI12-01', Properties(desc=u'Code List Qualifier Code', req_sit=u'R', data_type=(u'ID',u'1',u'3'), position=0,
codes=[u'ABR', u'BO', u'BQ', u'JP', u'LOI', u'NDC', u'ZZ'] ) ),
Element( u'HI12-02', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=1,
codes=[] ) ),
Element( u'HI12-03', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'3'), position=2,
codes=[u'D8', u'RD8'] ) ),
Element( u'HI12-04', Properties(desc=u'Date Time Period', req_sit=u'S', data_type=(u'AN',u'1',u'35'), position=3,
codes=[] ) ),
Element( u'HI12-05', Properties(desc=u'Monetary Amount', req_sit=u'S', data_type=(u'R',u'1',u'18'), position=4,
codes=[] ) ),
Element( u'HI12-06', Properties(desc=u'Quantity', req_sit=u'S', data_type=(u'R',u'1',u'15'), position=5,
codes=[] ) ),
Element( u'HI12-07', Properties(desc=u'Version Identifier', req_sit=u'S', data_type=(u'AN',u'1',u'30'), position=6,
codes=[] ) ),
),
),
Segment( u'HSD', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'090',desc=u'Health Care Services Delivery'),
Element( u'HSD01', Properties(desc=u'Quantity Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'2'), position=1,
codes=[u'DY', u'FL', u'HS', u'MN', u'VS'] ) ),
Element( u'HSD02', Properties(desc=u'Quantity', req_sit=u'S', data_type=(u'R',u'1',u'15'), position=2,
codes=[] ) ),
Element( u'HSD03', Properties(desc=u'Unit or Basis for Measurement Code', req_sit=u'S', data_type=(u'ID',u'2',u'2'), position=3,
codes=[u'DA', u'MO', u'WK'] ) ),
Element( u'HSD04', Properties(desc=u'Sample Selection Modulus', req_sit=u'S', data_type=(u'R',u'1',u'6'), position=4,
codes=[] ) ),
Element( u'HSD05', Properties(desc=u'Time Period Qualifier', req_sit=u'S', data_type=(u'ID',u'1',u'2'), position=5,
codes=[u'6', u'7', u'21', u'26', u'27', u'34', u'35'] ) ),
Element( u'HSD06', Properties(desc=u'Number of Periods', req_sit=u'S', data_type=(u'N0',u'1',u'3'), position=6,
codes=[] ) ),
Element( u'HSD07', Properties(desc=u'Ship/Delivery or Calendar Pattern Code', req_sit=u'S', data_type=(u'ID',u'1',u'2'), position=7,
codes=[u'1', u'2', u'3', u'4', u'5', u'6', u'7', u'8', u'9', u'A', u'B', u'C', u'D', u'E', u'F', u'G', u'H', u'J', u'K', u'L', u'M', u'N', u'O', u'P', u'Q', u'R', u'S', u'SA', u'SB', u'SC', u'SD', u'SG', u'SL', u'SP', u'SX', u'SY', u'SZ', u'T', u'U', u'V', u'W', u'X', u'Y'] ) ),
Element( u'HSD08', Properties(desc=u'Ship/Delivery Pattern Time Code', req_sit=u'S', data_type=(u'ID',u'1',u'1'), position=8,
codes=[u'A', u'B', u'C', u'D', u'E', u'F', u'G', u'Y'] ) ),
),
Segment( u'CL1', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'110',desc=u'Institutional Claim Code'),
Element( u'CL101', Properties(desc=u'Admission Type Code', req_sit=u'S', data_type=(u'ID',u'1',u'1'), position=1,
codes=[] ) ),
Element( u'CL102', Properties(desc=u'Admission Source Code', req_sit=u'S', data_type=(u'ID',u'1',u'1'), position=2,
codes=[] ) ),
Element( u'CL103', Properties(desc=u'Patient Status Code', req_sit=u'S', data_type=(u'ID',u'1',u'2'), position=3,
codes=[] ) ),
Element( u'CL104', Properties(desc=u'Nursing Home Residential Status Code', req_sit=u'S', data_type=(u'ID',u'1',u'1'), position=4,
codes=[u'1', u'2', u'3', u'4', u'5', u'6', u'7', u'8', u'9'] ) ),
),
Segment( u'CR1', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'120',desc=u'Ambulance Transport Information'),
Element( u'CR101', Properties(desc=u'Unit or Basis for Measurement Code', req_sit=u'N', data_type=(u'ID',u'2',u'2'), position=1,
codes=[] ) ),
Element( u'CR102', Properties(desc=u'Weight', req_sit=u'N', data_type=(u'R',u'1',u'10'), position=2,
codes=[] ) ),
Element( u'CR103', Properties(desc=u'Ambulance Transport Code', req_sit=u'R', data_type=(u'ID',u'1',u'1'), position=3,
codes=[u'I', u'R', u'T', u'X'] ) ),
Element( u'CR104', Properties(desc=u'Ambulance Transport Reason Code', req_sit=u'N', data_type=(u'ID',u'1',u'1'), position=4,
codes=[] ) ),
Element( u'CR105', Properties(desc=u'Unit or Basis for Measurement Code', req_sit=u'S', data_type=(u'ID',u'2',u'2'), position=5,
codes=[u'DH', u'DK'] ) ),
Element( u'CR106', Properties(desc=u'Quantity', req_sit=u'S', data_type=(u'R',u'1',u'15'), position=6,
codes=[] ) ),
Element( u'CR107', Properties(desc=u'Address Information', req_sit=u'S', data_type=(u'AN',u'1',u'55'), position=7,
codes=[] ) ),
Element( u'CR108', Properties(desc=u'Address Information', req_sit=u'S', data_type=(u'AN',u'1',u'55'), position=8,
codes=[] ) ),
Element( u'CR109', Properties(desc=u'Description', req_sit=u'N', data_type=(u'AN',u'1',u'80'), position=9,
codes=[] ) ),
Element( u'CR110', Properties(desc=u'Description', req_sit=u'N', data_type=(u'AN',u'1',u'80'), position=10,
codes=[] ) ),
),
Segment( u'CR2', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'130',desc=u'Spinal Manipulation Service Information'),
Element( u'CR201', Properties(desc=u'Count', req_sit=u'S', data_type=(u'N0',u'1',u'9'), position=1,
codes=[] ) ),
Element( u'CR202', Properties(desc=u'Quantity', req_sit=u'S', data_type=(u'R',u'1',u'15'), position=2,
codes=[] ) ),
Element( u'CR203', Properties(desc=u'Subluxation Level Code', req_sit=u'S', data_type=(u'ID',u'2',u'3'), position=3,
codes=[u'C1', u'C2', u'C3', u'C4', u'C5', u'C6', u'C7', u'CO', u'IL', u'L1', u'L2', u'L3', u'L4', u'L5', u'OC', u'SA', u'T1', u'T10', u'T11', u'T12', u'T2', u'T3', u'T4', u'T5', u'T6', u'T7', u'T8', u'T9'] ) ),
Element( u'CR204', Properties(desc=u'Subluxation Level Code', req_sit=u'S', data_type=(u'ID',u'2',u'3'), position=4,
codes=[u'C1', u'C2', u'C3', u'C4', u'C5', u'C6', u'C7', u'CO', u'IL', u'L1', u'L2', u'L3', u'L4', u'L5', u'OC', u'SA', u'T1', u'T10', u'T11', u'T12', u'T2', u'T3', u'T4', u'T5', u'T6', u'T7', u'T8', u'T9'] ) ),
Element( u'CR205', Properties(desc=u'Unit or Basis for Measurement Code', req_sit=u'S', data_type=(u'ID',u'2',u'2'), position=5,
codes=[u'DA', u'MO', u'WK', u'YR'] ) ),
Element( u'CR206', Properties(desc=u'Quantity', req_sit=u'S', data_type=(u'R',u'1',u'15'), position=6,
codes=[] ) ),
Element( u'CR207', Properties(desc=u'Quantity', req_sit=u'S', data_type=(u'R',u'1',u'15'), position=7,
codes=[] ) ),
Element( u'CR208', Properties(desc=u'Nature of Condition Code', req_sit=u'N', data_type=(u'ID',u'1',u'1'), position=8,
codes=[] ) ),
Element( u'CR209', Properties(desc=u'Yes/No Condition or Response Code', req_sit=u'N', data_type=(u'ID',u'1',u'1'), position=9,
codes=[] ) ),
Element( u'CR210', Properties(desc=u'Description', req_sit=u'N', data_type=(u'AN',u'1',u'80'), position=10,
codes=[] ) ),
Element( u'CR211', Properties(desc=u'Description', req_sit=u'N', data_type=(u'AN',u'1',u'80'), position=11,
codes=[] ) ),
Element( u'CR212', Properties(desc=u'Yes/No Condition or Response Code', req_sit=u'N', data_type=(u'ID',u'1',u'1'), position=12,
codes=[] ) ),
),
Segment( u'CR5', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'140',desc=u'Home Oxygen Therapy Information'),
Element( u'CR501', Properties(desc=u'Certification Type Code', req_sit=u'N', data_type=(u'ID',u'1',u'1'), position=1,
codes=[] ) ),
Element( u'CR502', Properties(desc=u'Quantity', req_sit=u'N', data_type=(u'R',u'1',u'15'), position=2,
codes=[] ) ),
Element( u'CR503', Properties(desc=u'Oxygen Equipment Type Code', req_sit=u'S', data_type=(u'ID',u'1',u'1'), position=3,
codes=[u'A', u'B', u'C', u'D', u'E', u'O'] ) ),
Element( u'CR504', Properties(desc=u'Oxygen Equipment Type Code', req_sit=u'S', data_type=(u'ID',u'1',u'1'), position=4,
codes=[u'A', u'B', u'C', u'D', u'E', u'O'] ) ),
Element( u'CR505', Properties(desc=u'Description', req_sit=u'S', data_type=(u'AN',u'1',u'80'), position=5,
codes=[] ) ),
Element( u'CR506', Properties(desc=u'Quantity', req_sit=u'R', data_type=(u'R',u'1',u'15'), position=6,
codes=[] ) ),
Element( u'CR507', Properties(desc=u'Quantity', req_sit=u'S', data_type=(u'R',u'1',u'15'), position=7,
codes=[] ) ),
Element( u'CR508', Properties(desc=u'Quantity', req_sit=u'S', data_type=(u'R',u'1',u'15'), position=8,
codes=[] ) ),
Element( u'CR509', Properties(desc=u'Description', req_sit=u'S', data_type=(u'AN',u'1',u'80'), position=9,
codes=[] ) ),
Element( u'CR510', Properties(desc=u'Quantity', req_sit=u'N', data_type=(u'R',u'1',u'15'), position=10,
codes=[] ) ),
Element( u'CR511', Properties(desc=u'Quantity', req_sit=u'N', data_type=(u'R',u'1',u'15'), position=11,
codes=[] ) ),
Element( u'CR512', Properties(desc=u'Oxygen Test Condition Code', req_sit=u'N', data_type=(u'ID',u'1',u'1'), position=12,
codes=[] ) ),
Element( u'CR513', Properties(desc=u'Oxygen Test Findings Code', req_sit=u'N', data_type=(u'ID',u'1',u'1'), position=13,
codes=[] ) ),
Element( u'CR514', Properties(desc=u'Oxygen Test Findings Code', req_sit=u'N', data_type=(u'ID',u'1',u'1'), position=14,
codes=[] ) ),
Element( u'CR515', Properties(desc=u'Oxygen Test Findings Code', req_sit=u'N', data_type=(u'ID',u'1',u'1'), position=15,
codes=[] ) ),
Element( u'CR516', Properties(desc=u'Quantity', req_sit=u'S', data_type=(u'R',u'1',u'15'), position=16,
codes=[] ) ),
Element( u'CR517', Properties(desc=u'Oxygen Delivery System Code', req_sit=u'R', data_type=(u'ID',u'1',u'1'), position=17,
codes=[u'A', u'B', u'C', u'D', u'E'] ) ),
Element( u'CR518', Properties(desc=u'Oxygen Equipment Type Code', req_sit=u'S', data_type=(u'ID',u'1',u'1'), position=18,
codes=[u'A', u'B', u'C', u'D', u'E', u'O'] ) ),
),
Segment( u'CR6', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'150',desc=u'Home Health Care Information'),
Element( u'CR601', Properties(desc=u'Prognosis Code', req_sit=u'R', data_type=(u'ID',u'1',u'1'), position=1,
codes=[u'1', u'2', u'3', u'4', u'5', u'6', u'7', u'8'] ) ),
Element( u'CR602', Properties(desc=u'Date', req_sit=u'R', data_type=(u'DT',u'8',u'8'), position=2,
codes=[] ) ),
Element( u'CR603', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'3'), position=3,
codes=[u'RD8'] ) ),
Element( u'CR604', Properties(desc=u'Date Time Period', req_sit=u'S', data_type=(u'AN',u'1',u'35'), position=4,
codes=[] ) ),
Element( u'CR605', Properties(desc=u'Date', req_sit=u'N', data_type=(u'DT',u'8',u'8'), position=5,
codes=[] ) ),
Element( u'CR606', Properties(desc=u'Yes/No Condition or Response Code', req_sit=u'N', data_type=(u'ID',u'1',u'1'), position=6,
codes=[] ) ),
Element( u'CR607', Properties(desc=u'Yes/No Condition or Response Code', req_sit=u'R', data_type=(u'ID',u'1',u'1'), position=7,
codes=[u'N', u'U', u'Y'] ) ),
Element( u'CR608', Properties(desc=u'Certification Type Code', req_sit=u'R', data_type=(u'ID',u'1',u'1'), position=8,
codes=[u'1', u'2', u'3', u'4', u'I', u'R', u'S'] ) ),
Element( u'CR609', Properties(desc=u'Date', req_sit=u'N', data_type=(u'DT',u'8',u'8'), position=9,
codes=[] ) ),
Element( u'CR610', Properties(desc=u'Product/Service ID Qualifier', req_sit=u'N', data_type=(u'ID',u'2',u'2'), position=10,
codes=[] ) ),
Element( u'CR611', Properties(desc=u'Medical Code Value', req_sit=u'N', data_type=(u'AN',u'1',u'15'), position=11,
codes=[] ) ),
Element( u'CR612', Properties(desc=u'Date', req_sit=u'N', data_type=(u'DT',u'8',u'8'), position=12,
codes=[] ) ),
Element( u'CR613', Properties(desc=u'Date', req_sit=u'N', data_type=(u'DT',u'8',u'8'), position=13,
codes=[] ) ),
Element( u'CR614', Properties(desc=u'Date', req_sit=u'N', data_type=(u'DT',u'8',u'8'), position=14,
codes=[] ) ),
Element( u'CR615', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'N', data_type=(u'ID',u'2',u'3'), position=15,
codes=[] ) ),
Element( u'CR616', Properties(desc=u'Date Time Period', req_sit=u'N', data_type=(u'AN',u'1',u'35'), position=16,
codes=[] ) ),
Element( u'CR617', Properties(desc=u'Patient Location Code', req_sit=u'N', data_type=(u'ID',u'1',u'1'), position=17,
codes=[] ) ),
Element( u'CR618', Properties(desc=u'Date', req_sit=u'N', data_type=(u'DT',u'8',u'8'), position=18,
codes=[] ) ),
Element( u'CR619', Properties(desc=u'Date', req_sit=u'N', data_type=(u'DT',u'8',u'8'), position=19,
codes=[] ) ),
Element( u'CR620', Properties(desc=u'Date', req_sit=u'N', data_type=(u'DT',u'8',u'8'), position=20,
codes=[] ) ),
Element( u'CR621', Properties(desc=u'Date', req_sit=u'N', data_type=(u'DT',u'8',u'8'), position=21,
codes=[] ) ),
),
Segment( u'PWK', Properties(syntax='',req_sit=u'S',repeat=u'10',pos=u'155',desc=u'Additional Service Information'),
Element( u'PWK01', Properties(desc=u'Report Type Code', req_sit=u'R', data_type=(u'ID',u'2',u'2'), position=1,
codes=[u'03', u'04', u'05', u'06', u'07', u'08', u'09', u'10', u'11', u'13', u'15', u'21', u'48', u'55', u'59', u'77', u'A3', u'A4', u'AM', u'AS', u'AT', u'B2', u'B3', u'BR', u'BS', u'BT', u'CB', u'CK', u'D2', u'DA', u'DB', u'DG', u'DJ', u'DS', u'FM', u'HC', u'HR', u'I5', u'IR', u'LA', u'M1', u'NN', u'OB', u'OC', u'OD', u'OE', u'OX', u'P4', u'P5', u'P6', u'P7', u'PE', u'PN', u'PO', u'PQ', u'PY', u'PZ', u'QC', u'QR', u'RB', u'RR', u'RT', u'RX', u'SG', u'V5', u'XP'] ) ),
Element( u'PWK02', Properties(desc=u'Report Transmission Code', req_sit=u'R', data_type=(u'ID',u'1',u'2'), position=2,
codes=[u'BM', u'EL', u'EM', u'FX', u'VO'] ) ),
Element( u'PWK03', Properties(desc=u'Report Copies Needed', req_sit=u'N', data_type=(u'N0',u'1',u'2'), position=3,
codes=[] ) ),
Element( u'PWK04', Properties(desc=u'Entity Identifier Code', req_sit=u'N', data_type=(u'ID',u'2',u'3'), position=4,
codes=[] ) ),
Element( u'PWK05', Properties(desc=u'Identification Code Qualifier', req_sit=u'S', data_type=(u'ID',u'1',u'2'), position=5,
codes=[u'AC'] ) ),
Element( u'PWK06', Properties(desc=u'Identification Code', req_sit=u'S', data_type=(u'AN',u'2',u'80'), position=6,
codes=[] ) ),
Element( u'PWK07', Properties(desc=u'Description', req_sit=u'S', data_type=(u'AN',u'1',u'80'), position=7,
codes=[] ) ),
Composite( u'C002', Properties(req_sit=u'N',refdes='',seq=u'08',desc=u'Actions Indicated'),
),
Element( u'PWK09', Properties(desc=u'Request Category Code', req_sit=u'N', data_type=(u'ID',u'1',u'2'), position=9,
codes=[] ) ),
),
Segment( u'MSG', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'160',desc=u'Message Text'),
Element( u'MSG01', Properties(desc=u'Free-form Message Text', req_sit=u'R', data_type=(u'AN',u'1',u'264'), position=1,
codes=[] ) ),
Element( u'MSG02', Properties(desc=u'Printer Carriage Control Code', req_sit=u'N', data_type=(u'ID',u'2',u'2'), position=2,
codes=[] ) ),
Element( u'MSG03', Properties(desc=u'Number', req_sit=u'N', data_type=(u'N0',u'1',u'9'), position=3,
codes=[] ) ),
),
parsed_278_2010F,
)
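# Loop 2000E -- Service Provider Level (HL03 u'19', HL04 u'1'), repeatable
# per provider in the request hierarchy.  Nests the 2010E provider-name
# loop and the repeating 2000F service levels.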
parsed_278_2000E = Loop( u'2000E', Properties(looptype='',repeat=u'>1',pos=u'180',req_sit=u'R',desc=u'Service Provider Level'),
Segment( u'HL', Properties(syntax='',req_sit=u'R',repeat=u'1',pos=u'010',desc=u'Service Provider Level'),
Element( u'HL01', Properties(desc=u'Hierarchical ID Number', req_sit=u'R', data_type=(u'AN',u'1',u'12'), position=1,
codes=[] ) ),
Element( u'HL02', Properties(desc=u'Hierarchical Parent ID Number', req_sit=u'R', data_type=(u'AN',u'1',u'12'), position=2,
codes=[] ) ),
Element( u'HL03', Properties(desc=u'Hierarchical Level Code', req_sit=u'R', data_type=(u'ID',u'1',u'2'), position=3,
codes=[u'19'] ) ),
Element( u'HL04', Properties(desc=u'Hierarchical Child Code', req_sit=u'R', data_type=(u'ID',u'1',u'1'), position=4,
codes=[u'1'] ) ),
),
Segment( u'MSG', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'160',desc=u'Message Text'),
Element( u'MSG01', Properties(desc=u'Free-form Message Text', req_sit=u'R', data_type=(u'AN',u'1',u'264'), position=1,
codes=[] ) ),
Element( u'MSG02', Properties(desc=u'Printer Carriage Control Code', req_sit=u'N', data_type=(u'ID',u'2',u'2'), position=2,
codes=[] ) ),
Element( u'MSG03', Properties(desc=u'Number', req_sit=u'N', data_type=(u'N0',u'1',u'9'), position=3,
codes=[] ) ),
),
parsed_278_2010E,
parsed_278_2000F,
)
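# Loop 2000D -- Dependent Level (HL03 u'23'), situational: patient-event
# detail when the patient is a dependent.  Carries tracking numbers (TRN),
# request validation (AAA), event-date DTPs and the dependent diagnosis
# HI segment.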
parsed_278_2000D = Loop( u'2000D', Properties(looptype='',repeat=u'1',pos=u'180',req_sit=u'S',desc=u'Dependent Level'),
Segment( u'HL', Properties(syntax='',req_sit=u'R',repeat=u'1',pos=u'010',desc=u'Dependent Level'),
Element( u'HL01', Properties(desc=u'Hierarchical ID Number', req_sit=u'R', data_type=(u'AN',u'1',u'12'), position=1,
codes=[] ) ),
Element( u'HL02', Properties(desc=u'Hierarchical Parent ID Number', req_sit=u'R', data_type=(u'AN',u'1',u'12'), position=2,
codes=[] ) ),
Element( u'HL03', Properties(desc=u'Hierarchical Level Code', req_sit=u'R', data_type=(u'ID',u'1',u'2'), position=3,
codes=[u'23'] ) ),
Element( u'HL04', Properties(desc=u'Hierarchical Child Code', req_sit=u'R', data_type=(u'ID',u'1',u'1'), position=4,
codes=[u'1'] ) ),
),
Segment( u'TRN', Properties(syntax='',req_sit=u'S',repeat=u'3',pos=u'020',desc=u'Patient Event Tracking Number'),
Element( u'TRN01', Properties(desc=u'Trace Type Code', req_sit=u'R', data_type=(u'ID',u'1',u'2'), position=1,
codes=[u'1', u'2'] ) ),
Element( u'TRN02', Properties(desc=u'Reference Identification', req_sit=u'R', data_type=(u'AN',u'1',u'50'), position=2,
codes=[] ) ),
Element( u'TRN03', Properties(desc=u'Originating Company Identifier', req_sit=u'R', data_type=(u'AN',u'10',u'10'), position=3,
codes=[] ) ),
Element( u'TRN04', Properties(desc=u'Reference Identification', req_sit=u'S', data_type=(u'AN',u'1',u'50'), position=4,
codes=[] ) ),
),
Segment( u'AAA', Properties(syntax='',req_sit=u'S',repeat=u'9',pos=u'030',desc=u'Dependent Request Validation'),
Element( u'AAA01', Properties(desc=u'Yes/No Condition or Response Code', req_sit=u'R', data_type=(u'ID',u'1',u'1'), position=1,
codes=[u'N', u'Y'] ) ),
Element( u'AAA02', Properties(desc=u'Agency Qualifier Code', req_sit=u'N', data_type=(u'ID',u'2',u'2'), position=2,
codes=[] ) ),
Element( u'AAA03', Properties(desc=u'Reject Reason Code', req_sit=u'S', data_type=(u'ID',u'2',u'2'), position=3,
codes=[u'15', u'33', u'56'] ) ),
Element( u'AAA04', Properties(desc=u'Follow-up Action Code', req_sit=u'S', data_type=(u'ID',u'1',u'1'), position=4,
codes=[u'C', u'N'] ) ),
),
Segment( u'DTP', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'070',desc=u'Accident Date'),
Element( u'DTP01', Properties(desc=u'Date/Time Qualifier', req_sit=u'R', data_type=(u'ID',u'3',u'3'), position=1,
codes=[u'439'] ) ),
Element( u'DTP02', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'R', data_type=(u'ID',u'2',u'3'), position=2,
codes=[u'D8'] ) ),
Element( u'DTP03', Properties(desc=u'Date Time Period', req_sit=u'R', data_type=(u'AN',u'1',u'35'), position=3,
codes=[] ) ),
),
Segment( u'DTP', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'070',desc=u'Last Menstrual Period Date'),
Element( u'DTP01', Properties(desc=u'Date/Time Qualifier', req_sit=u'R', data_type=(u'ID',u'3',u'3'), position=1,
codes=[u'484'] ) ),
Element( u'DTP02', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'R', data_type=(u'ID',u'2',u'3'), position=2,
codes=[u'D8'] ) ),
Element( u'DTP03', Properties(desc=u'Date Time Period', req_sit=u'R', data_type=(u'AN',u'1',u'35'), position=3,
codes=[] ) ),
),
Segment( u'DTP', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'070',desc=u'Estimated Date of Birth'),
Element( u'DTP01', Properties(desc=u'Date/Time Qualifier', req_sit=u'R', data_type=(u'ID',u'3',u'3'), position=1,
codes=[u'ABC'] ) ),
Element( u'DTP02', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'R', data_type=(u'ID',u'2',u'3'), position=2,
codes=[u'D8'] ) ),
Element( u'DTP03', Properties(desc=u'Date Time Period', req_sit=u'R', data_type=(u'AN',u'1',u'35'), position=3,
codes=[] ) ),
),
Segment( u'DTP', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'070',desc=u'Onset of Current Symptoms or Illness Date'),
Element( u'DTP01', Properties(desc=u'Date/Time Qualifier', req_sit=u'R', data_type=(u'ID',u'3',u'3'), position=1,
codes=[u'431'] ) ),
Element( u'DTP02', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'R', data_type=(u'ID',u'2',u'3'), position=2,
codes=[u'D8'] ) ),
Element( u'DTP03', Properties(desc=u'Date Time Period', req_sit=u'R', data_type=(u'AN',u'1',u'35'), position=3,
codes=[] ) ),
),
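# HI lists the dependent's diagnoses as C022 composites (qualifier codes
# BF/BJ/BK/LOI); only Diagnosis 1 is required.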
Segment( u'HI', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'080',desc=u'Dependent Diagnosis'),
Composite( u'C022', Properties(req_sit=u'R',refdes='',seq=u'01',desc=u'Diagnosis 1'),
Element( u'HI01-01', Properties(desc=u'Code List Qualifier Code', req_sit=u'R', data_type=(u'ID',u'1',u'3'), position=0,
codes=[u'BF', u'BJ', u'BK', u'LOI'] ) ),
Element( u'HI01-02', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=1,
codes=[] ) ),
Element( u'HI01-03', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'3'), position=2,
codes=[u'D8'] ) ),
Element( u'HI01-04', Properties(desc=u'Date Time Period', req_sit=u'S', data_type=(u'AN',u'1',u'35'), position=3,
codes=[] ) ),
Element( u'HI01-05', Properties(desc=u'Monetary Amount', req_sit=u'N', data_type=(u'R',u'1',u'18'), position=4,
codes=[] ) ),
Element( u'HI01-06', Properties(desc=u'Quantity', req_sit=u'N', data_type=(u'R',u'1',u'15'), position=5,
codes=[] ) ),
Element( u'HI01-07', Properties(desc=u'Version Identifier', req_sit=u'N', data_type=(u'AN',u'1',u'30'), position=6,
codes=[] ) ),
),
Composite( u'C022', Properties(req_sit=u'S',refdes='',seq=u'02',desc=u'Diagnosis 2'),
Element( u'HI02-01', Properties(desc=u'Code List Qualifier Code', req_sit=u'R', data_type=(u'ID',u'1',u'3'), position=0,
codes=[u'BF', u'BJ', u'LOI'] ) ),
Element( u'HI02-02', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=1,
codes=[] ) ),
Element( u'HI02-03', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'3'), position=2,
codes=[u'D8'] ) ),
Element( u'HI02-04', Properties(desc=u'Date Time Period', req_sit=u'S', data_type=(u'AN',u'1',u'35'), position=3,
codes=[] ) ),
Element( u'HI02-05', Properties(desc=u'Monetary Amount', req_sit=u'N', data_type=(u'R',u'1',u'18'), position=4,
codes=[] ) ),
Element( u'HI02-06', Properties(desc=u'Quantity', req_sit=u'N', data_type=(u'R',u'1',u'15'), position=5,
codes=[] ) ),
Element( u'HI02-07', Properties(desc=u'Version Identifier', req_sit=u'N', data_type=(u'AN',u'1',u'30'), position=6,
codes=[] ) ),
),
Composite( u'C022', Properties(req_sit=u'S',refdes='',seq=u'03',desc=u'Diagnosis 3'),
Element( u'HI03-01', Properties(desc=u'Code List Qualifier Code', req_sit=u'R', data_type=(u'ID',u'1',u'3'), position=0,
codes=[u'BF', u'LOI'] ) ),
Element( u'HI03-02', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=1,
codes=[] ) ),
Element( u'HI03-03', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'3'), position=2,
codes=[u'D8'] ) ),
Element( u'HI03-04', Properties(desc=u'Date Time Period', req_sit=u'S', data_type=(u'AN',u'1',u'35'), position=3,
codes=[] ) ),
Element( u'HI03-05', Properties(desc=u'Monetary Amount', req_sit=u'N', data_type=(u'R',u'1',u'18'), position=4,
codes=[] ) ),
Element( u'HI03-06', Properties(desc=u'Quantity', req_sit=u'N', data_type=(u'R',u'1',u'15'), position=5,
codes=[] ) ),
Element( u'HI03-07', Properties(desc=u'Version Identifier', req_sit=u'N', data_type=(u'AN',u'1',u'30'), position=6,
codes=[] ) ),
),
Composite( u'C022', Properties(req_sit=u'S',refdes='',seq=u'04',desc=u'Diagnosis 4'),
Element( u'HI04-01', Properties(desc=u'Code List Qualifier Code', req_sit=u'R', data_type=(u'ID',u'1',u'3'), position=0,
codes=[u'BF', u'LOI'] ) ),
Element( u'HI04-02', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=1,
codes=[] ) ),
Element( u'HI04-03', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'3'), position=2,
codes=[u'D8'] ) ),
Element( u'HI04-04', Properties(desc=u'Date Time Period', req_sit=u'S', data_type=(u'AN',u'1',u'35'), position=3,
codes=[] ) ),
Element( u'HI04-05', Properties(desc=u'Monetary Amount', req_sit=u'N', data_type=(u'R',u'1',u'18'), position=4,
codes=[] ) ),
Element( u'HI04-06', Properties(desc=u'Quantity', req_sit=u'N', data_type=(u'R',u'1',u'15'), position=5,
codes=[] ) ),
Element( u'HI04-07', Properties(desc=u'Version Identifier', req_sit=u'N', data_type=(u'AN',u'1',u'30'), position=6,
codes=[] ) ),
),
Composite( u'C022', Properties(req_sit=u'S',refdes='',seq=u'05',desc=u'Diagnosis 5'),
Element( u'HI05-01', Properties(desc=u'Code List Qualifier Code', req_sit=u'R', data_type=(u'ID',u'1',u'3'), position=0,
codes=[u'BF', u'LOI'] ) ),
Element( u'HI05-02', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=1,
codes=[] ) ),
Element( u'HI05-03', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'3'), position=2,
codes=[u'D8'] ) ),
Element( u'HI05-04', Properties(desc=u'Date Time Period', req_sit=u'S', data_type=(u'AN',u'1',u'35'), position=3,
codes=[] ) ),
Element( u'HI05-05', Properties(desc=u'Monetary Amount', req_sit=u'N', data_type=(u'R',u'1',u'18'), position=4,
codes=[] ) ),
Element( u'HI05-06', Properties(desc=u'Quantity', req_sit=u'N', data_type=(u'R',u'1',u'15'), position=5,
codes=[] ) ),
Element( u'HI05-07', Properties(desc=u'Version Identifier', req_sit=u'N', data_type=(u'AN',u'1',u'30'), position=6,
codes=[] ) ),
),
Composite( u'C022', Properties(req_sit=u'S',refdes='',seq=u'06',desc=u'Diagnosis 6'),
Element( u'HI06-01', Properties(desc=u'Code List Qualifier Code', req_sit=u'R', data_type=(u'ID',u'1',u'3'), position=0,
codes=[u'BF', u'LOI'] ) ),
Element( u'HI06-02', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=1,
codes=[] ) ),
Element( u'HI06-03', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'3'), position=2,
codes=[u'D8'] ) ),
Element( u'HI06-04', Properties(desc=u'Date Time Period', req_sit=u'S', data_type=(u'AN',u'1',u'35'), position=3,
codes=[] ) ),
Element( u'HI06-05', Properties(desc=u'Monetary Amount', req_sit=u'N', data_type=(u'R',u'1',u'18'), position=4,
codes=[] ) ),
Element( u'HI06-06', Properties(desc=u'Quantity', req_sit=u'N', data_type=(u'R',u'1',u'15'), position=5,
codes=[] ) ),
Element( u'HI06-07', Properties(desc=u'Version Identifier', req_sit=u'N', data_type=(u'AN',u'1',u'30'), position=6,
codes=[] ) ),
),
Composite( u'C022', Properties(req_sit=u'S',refdes='',seq=u'07',desc=u'Diagnosis 7'),
Element( u'HI07-01', Properties(desc=u'Code List Qualifier Code', req_sit=u'R', data_type=(u'ID',u'1',u'3'), position=0,
codes=[u'BF', u'LOI'] ) ),
Element( u'HI07-02', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=1,
codes=[] ) ),
Element( u'HI07-03', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'3'), position=2,
codes=[u'D8'] ) ),
Element( u'HI07-04', Properties(desc=u'Date Time Period', req_sit=u'S', data_type=(u'AN',u'1',u'35'), position=3,
codes=[] ) ),
Element( u'HI07-05', Properties(desc=u'Monetary Amount', req_sit=u'N', data_type=(u'R',u'1',u'18'), position=4,
codes=[] ) ),
Element( u'HI07-06', Properties(desc=u'Quantity', req_sit=u'N', data_type=(u'R',u'1',u'15'), position=5,
codes=[] ) ),
Element( u'HI07-07', Properties(desc=u'Version Identifier', req_sit=u'N', data_type=(u'AN',u'1',u'30'), position=6,
codes=[] ) ),
),
Composite( u'C022', Properties(req_sit=u'S',refdes='',seq=u'08',desc=u'Diagnosis 8'),
Element( u'HI08-01', Properties(desc=u'Code List Qualifier Code', req_sit=u'R', data_type=(u'ID',u'1',u'3'), position=0,
codes=[u'BF', u'LOI'] ) ),
Element( u'HI08-02', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=1,
codes=[] ) ),
Element( u'HI08-03', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'3'), position=2,
codes=[u'D8'] ) ),
Element( u'HI08-04', Properties(desc=u'Date Time Period', req_sit=u'S', data_type=(u'AN',u'1',u'35'), position=3,
codes=[] ) ),
Element( u'HI08-05', Properties(desc=u'Monetary Amount', req_sit=u'N', data_type=(u'R',u'1',u'18'), position=4,
codes=[] ) ),
Element( u'HI08-06', Properties(desc=u'Quantity', req_sit=u'N', data_type=(u'R',u'1',u'15'), position=5,
codes=[] ) ),
Element( u'HI08-07', Properties(desc=u'Version Identifier', req_sit=u'N', data_type=(u'AN',u'1',u'30'), position=6,
codes=[] ) ),
),
Composite( u'C022', Properties(req_sit=u'S',refdes='',seq=u'09',desc=u'Diagnosis 9'),
Element( u'HI09-01', Properties(desc=u'Code List Qualifier Code', req_sit=u'R', data_type=(u'ID',u'1',u'3'), position=0,
codes=[u'BF', u'LOI'] ) ),
Element( u'HI09-02', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=1,
codes=[] ) ),
Element( u'HI09-03', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'3'), position=2,
codes=[u'D8'] ) ),
Element( u'HI09-04', Properties(desc=u'Date Time Period', req_sit=u'S', data_type=(u'AN',u'1',u'35'), position=3,
codes=[] ) ),
Element( u'HI09-05', Properties(desc=u'Monetary Amount', req_sit=u'N', data_type=(u'R',u'1',u'18'), position=4,
codes=[] ) ),
Element( u'HI09-06', Properties(desc=u'Quantity', req_sit=u'N', data_type=(u'R',u'1',u'15'), position=5,
codes=[] ) ),
Element( u'HI09-07', Properties(desc=u'Version Identifier', req_sit=u'N', data_type=(u'AN',u'1',u'30'), position=6,
codes=[] ) ),
),
Composite( u'C022', Properties(req_sit=u'S',refdes='',seq=u'10',desc=u'Diagnosis 10'),
Element( u'HI10-01', Properties(desc=u'Code List Qualifier Code', req_sit=u'R', data_type=(u'ID',u'1',u'3'), position=0,
codes=[u'BF', u'LOI'] ) ),
Element( u'HI10-02', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=1,
codes=[] ) ),
Element( u'HI10-03', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'3'), position=2,
codes=[u'D8'] ) ),
Element( u'HI10-04', Properties(desc=u'Date Time Period', req_sit=u'S', data_type=(u'AN',u'1',u'35'), position=3,
codes=[] ) ),
Element( u'HI10-05', Properties(desc=u'Monetary Amount', req_sit=u'N', data_type=(u'R',u'1',u'18'), position=4,
codes=[] ) ),
Element( u'HI10-06', Properties(desc=u'Quantity', req_sit=u'N', data_type=(u'R',u'1',u'15'), position=5,
codes=[] ) ),
Element( u'HI10-07', Properties(desc=u'Version Identifier', req_sit=u'N', data_type=(u'AN',u'1',u'30'), position=6,
codes=[] ) ),
),
Composite( u'C022', Properties(req_sit=u'S',refdes='',seq=u'11',desc=u'Diagnosis 11'),
Element( u'HI11-01', Properties(desc=u'Code List Qualifier Code', req_sit=u'R', data_type=(u'ID',u'1',u'3'), position=0,
codes=[u'BF', u'LOI'] ) ),
Element( u'HI11-02', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=1,
codes=[] ) ),
Element( u'HI11-03', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'3'), position=2,
codes=[u'D8'] ) ),
Element( u'HI11-04', Properties(desc=u'Date Time Period', req_sit=u'S', data_type=(u'AN',u'1',u'35'), position=3,
codes=[] ) ),
Element( u'HI11-05', Properties(desc=u'Monetary Amount', req_sit=u'N', data_type=(u'R',u'1',u'18'), position=4,
codes=[] ) ),
Element( u'HI11-06', Properties(desc=u'Quantity', req_sit=u'N', data_type=(u'R',u'1',u'15'), position=5,
codes=[] ) ),
Element( u'HI11-07', Properties(desc=u'Version Identifier', req_sit=u'N', data_type=(u'AN',u'1',u'30'), position=6,
codes=[] ) ),
),
Composite( u'C022', Properties(req_sit=u'S',refdes='',seq=u'12',desc=u'Diagnosis 12'),
Element( u'HI12-01', Properties(desc=u'Code List Qualifier Code', req_sit=u'R', data_type=(u'ID',u'1',u'3'), position=0,
codes=[u'BF', u'LOI'] ) ),
Element( u'HI12-02', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=1,
codes=[] ) ),
Element( u'HI12-03', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'3'), position=2,
codes=[u'D8'] ) ),
Element( u'HI12-04', Properties(desc=u'Date Time Period', req_sit=u'S', data_type=(u'AN',u'1',u'35'), position=3,
codes=[] ) ),
Element( u'HI12-05', Properties(desc=u'Monetary Amount', req_sit=u'N', data_type=(u'R',u'1',u'18'), position=4,
codes=[] ) ),
Element( u'HI12-06', Properties(desc=u'Quantity', req_sit=u'N', data_type=(u'R',u'1',u'15'), position=5,
codes=[] ) ),
Element( u'HI12-07', Properties(desc=u'Version Identifier', req_sit=u'N', data_type=(u'AN',u'1',u'30'), position=6,
codes=[] ) ),
),
),
Segment( u'PWK', Properties(syntax='',req_sit=u'S',repeat=u'10',pos=u'155',desc=u'Additional Patient Information'),
Element( u'PWK01', Properties(desc=u'Report Type Code', req_sit=u'R', data_type=(u'ID',u'2',u'2'), position=1,
codes=[u'03', u'04', u'05', u'06', u'07', u'08', u'09', u'10', u'11', u'13', u'15', u'21', u'48', u'55', u'59', u'77', u'A3', u'A4', u'AM', u'AS', u'AT', u'B2', u'B3', u'BR', u'BS', u'BT', u'CB', u'CK', u'D2', u'DA', u'DB', u'DG', u'DJ', u'DS', u'FM', u'HC', u'HR', u'I5', u'IR', u'LA', u'M1', u'NN', u'OB', u'OC', u'OD', u'OE', u'OX', u'P4', u'P5', u'P6', u'P7', u'PE', u'PN', u'PO', u'PQ', u'PY', u'PZ', u'QC', u'QR', u'RB', u'RR', u'RT', u'RX', u'SG', u'V5', u'XP'] ) ),
Element( u'PWK02', Properties(desc=u'Report Transmission Code', req_sit=u'R', data_type=(u'ID',u'1',u'2'), position=2,
codes=[u'BM', u'EL', u'EM', u'FX', u'VO'] ) ),
Element( u'PWK03', Properties(desc=u'Report Copies Needed', req_sit=u'N', data_type=(u'N0',u'1',u'2'), position=3,
codes=[] ) ),
Element( u'PWK04', Properties(desc=u'Entity Identifier Code', req_sit=u'N', data_type=(u'ID',u'2',u'3'), position=4,
codes=[] ) ),
Element( u'PWK05', Properties(desc=u'Identification Code Qualifier', req_sit=u'S', data_type=(u'ID',u'1',u'2'), position=5,
codes=[u'AC'] ) ),
Element( u'PWK06', Properties(desc=u'Identification Code', req_sit=u'S', data_type=(u'AN',u'2',u'80'), position=6,
codes=[] ) ),
Element( u'PWK07', Properties(desc=u'Description', req_sit=u'S', data_type=(u'AN',u'1',u'80'), position=7,
codes=[] ) ),
Composite( u'C002', Properties(req_sit=u'N',refdes='',seq=u'08',desc=u'Actions Indicated'),
),
Element( u'PWK09', Properties(desc=u'Request Category Code', req_sit=u'N', data_type=(u'ID',u'1',u'2'), position=9,
codes=[] ) ),
),
parsed_278_2010DA,
parsed_278_2010DB,
parsed_278_2000E,
)
parsed_278_2000C = Loop( u'2000C', Properties(looptype='',repeat=u'1',pos=u'180',req_sit=u'R',desc=u'Subscriber Level'),
Segment( u'HL', Properties(syntax='',req_sit=u'R',repeat=u'1',pos=u'010',desc=u'Subscriber Level'),
Element( u'HL01', Properties(desc=u'Hierarchical ID Number', req_sit=u'R', data_type=(u'AN',u'1',u'12'), position=1,
codes=[] ) ),
Element( u'HL02', Properties(desc=u'Hierarchical Parent ID Number', req_sit=u'R', data_type=(u'AN',u'1',u'12'), position=2,
codes=[] ) ),
Element( u'HL03', Properties(desc=u'Hierarchical Level Code', req_sit=u'R', data_type=(u'ID',u'1',u'2'), position=3,
codes=[u'22'] ) ),
Element( u'HL04', Properties(desc=u'Hierarchical Child Code', req_sit=u'R', data_type=(u'ID',u'1',u'1'), position=4,
codes=[u'1'] ) ),
),
Segment( u'TRN', Properties(syntax='',req_sit=u'S',repeat=u'3',pos=u'020',desc=u'Patient Event Tracking Number'),
Element( u'TRN01', Properties(desc=u'Trace Type Code', req_sit=u'R', data_type=(u'ID',u'1',u'2'), position=1,
codes=[u'1', u'2'] ) ),
Element( u'TRN02', Properties(desc=u'Reference Identification', req_sit=u'R', data_type=(u'AN',u'1',u'50'), position=2,
codes=[] ) ),
Element( u'TRN03', Properties(desc=u'Originating Company Identifier', req_sit=u'R', data_type=(u'AN',u'10',u'10'), position=3,
codes=[] ) ),
Element( u'TRN04', Properties(desc=u'Reference Identification', req_sit=u'S', data_type=(u'AN',u'1',u'50'), position=4,
codes=[] ) ),
),
Segment( u'AAA', Properties(syntax='',req_sit=u'S',repeat=u'9',pos=u'030',desc=u'Subscriber Request Validation'),
Element( u'AAA01', Properties(desc=u'Yes/No Condition or Response Code', req_sit=u'R', data_type=(u'ID',u'1',u'1'), position=1,
codes=[u'N', u'Y'] ) ),
Element( u'AAA02', Properties(desc=u'Agency Qualifier Code', req_sit=u'N', data_type=(u'ID',u'2',u'2'), position=2,
codes=[] ) ),
Element( u'AAA03', Properties(desc=u'Reject Reason Code', req_sit=u'S', data_type=(u'ID',u'2',u'2'), position=3,
codes=[u'15', u'33', u'56'] ) ),
Element( u'AAA04', Properties(desc=u'Follow-up Action Code', req_sit=u'S', data_type=(u'ID',u'1',u'1'), position=4,
codes=[u'C', u'N'] ) ),
),
Segment( u'DTP', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'070',desc=u'Accident Date'),
Element( u'DTP01', Properties(desc=u'Date/Time Qualifier', req_sit=u'R', data_type=(u'ID',u'3',u'3'), position=1,
codes=[u'439'] ) ),
Element( u'DTP02', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'R', data_type=(u'ID',u'2',u'3'), position=2,
codes=[u'D8'] ) ),
Element( u'DTP03', Properties(desc=u'Date Time Period', req_sit=u'R', data_type=(u'AN',u'1',u'35'), position=3,
codes=[] ) ),
),
Segment( u'DTP', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'070',desc=u'Last Menstrual Period Date'),
Element( u'DTP01', Properties(desc=u'Date/Time Qualifier', req_sit=u'R', data_type=(u'ID',u'3',u'3'), position=1,
codes=[u'484'] ) ),
Element( u'DTP02', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'R', data_type=(u'ID',u'2',u'3'), position=2,
codes=[u'D8'] ) ),
Element( u'DTP03', Properties(desc=u'Date Time Period', req_sit=u'R', data_type=(u'AN',u'1',u'35'), position=3,
codes=[] ) ),
),
Segment( u'DTP', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'070',desc=u'Estimated Date of Birth'),
Element( u'DTP01', Properties(desc=u'Date/Time Qualifier', req_sit=u'R', data_type=(u'ID',u'3',u'3'), position=1,
codes=[u'ABC'] ) ),
Element( u'DTP02', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'R', data_type=(u'ID',u'2',u'3'), position=2,
codes=[u'D8'] ) ),
Element( u'DTP03', Properties(desc=u'Date Time Period', req_sit=u'R', data_type=(u'AN',u'1',u'35'), position=3,
codes=[] ) ),
),
Segment( u'DTP', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'070',desc=u'Onset of Current Symptoms or Illness Date'),
Element( u'DTP01', Properties(desc=u'Date/Time Qualifier', req_sit=u'R', data_type=(u'ID',u'3',u'3'), position=1,
codes=[u'431'] ) ),
Element( u'DTP02', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'R', data_type=(u'ID',u'2',u'3'), position=2,
codes=[u'D8'] ) ),
Element( u'DTP03', Properties(desc=u'Date Time Period', req_sit=u'R', data_type=(u'AN',u'1',u'35'), position=3,
codes=[] ) ),
),
Segment( u'HI', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'080',desc=u'Subscriber Diagnosis'),
Composite( u'C022', Properties(req_sit=u'R',refdes='',seq=u'01',desc=u'Diagnosis 1'),
Element( u'HI01-01', Properties(desc=u'Code List Qualifier Code', req_sit=u'R', data_type=(u'ID',u'1',u'3'), position=0,
codes=[u'BF', u'BJ', u'BK', u'LOI'] ) ),
Element( u'HI01-02', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=1,
codes=[] ) ),
Element( u'HI01-03', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'3'), position=2,
codes=[u'D8'] ) ),
Element( u'HI01-04', Properties(desc=u'Date Time Period', req_sit=u'S', data_type=(u'AN',u'1',u'35'), position=3,
codes=[] ) ),
Element( u'HI01-05', Properties(desc=u'Monetary Amount', req_sit=u'N', data_type=(u'R',u'1',u'18'), position=4,
codes=[] ) ),
Element( u'HI01-06', Properties(desc=u'Quantity', req_sit=u'N', data_type=(u'R',u'1',u'15'), position=5,
codes=[] ) ),
Element( u'HI01-07', Properties(desc=u'Version Identifier', req_sit=u'N', data_type=(u'AN',u'1',u'30'), position=6,
codes=[] ) ),
),
Composite( u'C022', Properties(req_sit=u'S',refdes='',seq=u'02',desc=u'Diagnosis 2'),
Element( u'HI02-01', Properties(desc=u'Code List Qualifier Code', req_sit=u'R', data_type=(u'ID',u'1',u'3'), position=0,
codes=[u'BF', u'BJ', u'LOI'] ) ),
Element( u'HI02-02', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=1,
codes=[] ) ),
Element( u'HI02-03', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'3'), position=2,
codes=[u'D8'] ) ),
Element( u'HI02-04', Properties(desc=u'Date Time Period', req_sit=u'S', data_type=(u'AN',u'1',u'35'), position=3,
codes=[] ) ),
Element( u'HI02-05', Properties(desc=u'Monetary Amount', req_sit=u'N', data_type=(u'R',u'1',u'18'), position=4,
codes=[] ) ),
Element( u'HI02-06', Properties(desc=u'Quantity', req_sit=u'N', data_type=(u'R',u'1',u'15'), position=5,
codes=[] ) ),
Element( u'HI02-07', Properties(desc=u'Version Identifier', req_sit=u'N', data_type=(u'AN',u'1',u'30'), position=6,
codes=[] ) ),
),
Composite( u'C022', Properties(req_sit=u'S',refdes='',seq=u'03',desc=u'Diagnosis 3'),
Element( u'HI03-01', Properties(desc=u'Code List Qualifier Code', req_sit=u'R', data_type=(u'ID',u'1',u'3'), position=0,
codes=[u'BF', u'LOI'] ) ),
Element( u'HI03-02', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=1,
codes=[] ) ),
Element( u'HI03-03', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'3'), position=2,
codes=[u'D8'] ) ),
Element( u'HI03-04', Properties(desc=u'Date Time Period', req_sit=u'S', data_type=(u'AN',u'1',u'35'), position=3,
codes=[] ) ),
Element( u'HI03-05', Properties(desc=u'Monetary Amount', req_sit=u'N', data_type=(u'R',u'1',u'18'), position=4,
codes=[] ) ),
Element( u'HI03-06', Properties(desc=u'Quantity', req_sit=u'N', data_type=(u'R',u'1',u'15'), position=5,
codes=[] ) ),
Element( u'HI03-07', Properties(desc=u'Version Identifier', req_sit=u'N', data_type=(u'AN',u'1',u'30'), position=6,
codes=[] ) ),
),
Composite( u'C022', Properties(req_sit=u'S',refdes='',seq=u'04',desc=u'Diagnosis 4'),
Element( u'HI04-01', Properties(desc=u'Code List Qualifier Code', req_sit=u'R', data_type=(u'ID',u'1',u'3'), position=0,
codes=[u'BF', u'LOI'] ) ),
Element( u'HI04-02', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=1,
codes=[] ) ),
Element( u'HI04-03', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'3'), position=2,
codes=[u'D8'] ) ),
Element( u'HI04-04', Properties(desc=u'Date Time Period', req_sit=u'S', data_type=(u'AN',u'1',u'35'), position=3,
codes=[] ) ),
Element( u'HI04-05', Properties(desc=u'Monetary Amount', req_sit=u'N', data_type=(u'R',u'1',u'18'), position=4,
codes=[] ) ),
Element( u'HI04-06', Properties(desc=u'Quantity', req_sit=u'N', data_type=(u'R',u'1',u'15'), position=5,
codes=[] ) ),
Element( u'HI04-07', Properties(desc=u'Version Identifier', req_sit=u'N', data_type=(u'AN',u'1',u'30'), position=6,
codes=[] ) ),
),
Composite( u'C022', Properties(req_sit=u'S',refdes='',seq=u'05',desc=u'Diagnosis 5'),
Element( u'HI05-01', Properties(desc=u'Code List Qualifier Code', req_sit=u'R', data_type=(u'ID',u'1',u'3'), position=0,
codes=[u'BF', u'LOI'] ) ),
Element( u'HI05-02', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=1,
codes=[] ) ),
Element( u'HI05-03', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'3'), position=2,
codes=[u'D8'] ) ),
Element( u'HI05-04', Properties(desc=u'Date Time Period', req_sit=u'S', data_type=(u'AN',u'1',u'35'), position=3,
codes=[] ) ),
Element( u'HI05-05', Properties(desc=u'Monetary Amount', req_sit=u'N', data_type=(u'R',u'1',u'18'), position=4,
codes=[] ) ),
Element( u'HI05-06', Properties(desc=u'Quantity', req_sit=u'N', data_type=(u'R',u'1',u'15'), position=5,
codes=[] ) ),
Element( u'HI05-07', Properties(desc=u'Version Identifier', req_sit=u'N', data_type=(u'AN',u'1',u'30'), position=6,
codes=[] ) ),
),
Composite( u'C022', Properties(req_sit=u'S',refdes='',seq=u'06',desc=u'Diagnosis 6'),
Element( u'HI06-01', Properties(desc=u'Code List Qualifier Code', req_sit=u'R', data_type=(u'ID',u'1',u'3'), position=0,
codes=[u'BF', u'LOI'] ) ),
Element( u'HI06-02', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=1,
codes=[] ) ),
Element( u'HI06-03', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'3'), position=2,
codes=[u'D8'] ) ),
Element( u'HI06-04', Properties(desc=u'Date Time Period', req_sit=u'S', data_type=(u'AN',u'1',u'35'), position=3,
codes=[] ) ),
Element( u'HI06-05', Properties(desc=u'Monetary Amount', req_sit=u'N', data_type=(u'R',u'1',u'18'), position=4,
codes=[] ) ),
Element( u'HI06-06', Properties(desc=u'Quantity', req_sit=u'N', data_type=(u'R',u'1',u'15'), position=5,
codes=[] ) ),
Element( u'HI06-07', Properties(desc=u'Version Identifier', req_sit=u'N', data_type=(u'AN',u'1',u'30'), position=6,
codes=[] ) ),
),
Composite( u'C022', Properties(req_sit=u'S',refdes='',seq=u'07',desc=u'Diagnosis 7'),
Element( u'HI07-01', Properties(desc=u'Code List Qualifier Code', req_sit=u'R', data_type=(u'ID',u'1',u'3'), position=0,
codes=[u'BF', u'LOI'] ) ),
Element( u'HI07-02', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=1,
codes=[] ) ),
Element( u'HI07-03', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'3'), position=2,
codes=[u'D8'] ) ),
Element( u'HI07-04', Properties(desc=u'Date Time Period', req_sit=u'S', data_type=(u'AN',u'1',u'35'), position=3,
codes=[] ) ),
Element( u'HI07-05', Properties(desc=u'Monetary Amount', req_sit=u'N', data_type=(u'R',u'1',u'18'), position=4,
codes=[] ) ),
Element( u'HI07-06', Properties(desc=u'Quantity', req_sit=u'N', data_type=(u'R',u'1',u'15'), position=5,
codes=[] ) ),
Element( u'HI07-07', Properties(desc=u'Version Identifier', req_sit=u'N', data_type=(u'AN',u'1',u'30'), position=6,
codes=[] ) ),
),
Composite( u'C022', Properties(req_sit=u'S',refdes='',seq=u'08',desc=u'Diagnosis 8'),
Element( u'HI08-01', Properties(desc=u'Code List Qualifier Code', req_sit=u'R', data_type=(u'ID',u'1',u'3'), position=0,
codes=[u'BF', u'LOI'] ) ),
Element( u'HI08-02', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=1,
codes=[] ) ),
Element( u'HI08-03', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'3'), position=2,
codes=[u'D8'] ) ),
Element( u'HI08-04', Properties(desc=u'Date Time Period', req_sit=u'S', data_type=(u'AN',u'1',u'35'), position=3,
codes=[] ) ),
Element( u'HI08-05', Properties(desc=u'Monetary Amount', req_sit=u'N', data_type=(u'R',u'1',u'18'), position=4,
codes=[] ) ),
Element( u'HI08-06', Properties(desc=u'Quantity', req_sit=u'N', data_type=(u'R',u'1',u'15'), position=5,
codes=[] ) ),
Element( u'HI08-07', Properties(desc=u'Version Identifier', req_sit=u'N', data_type=(u'AN',u'1',u'30'), position=6,
codes=[] ) ),
),
Composite( u'C022', Properties(req_sit=u'S',refdes='',seq=u'09',desc=u'Diagnosis 9'),
Element( u'HI09-01', Properties(desc=u'Code List Qualifier Code', req_sit=u'R', data_type=(u'ID',u'1',u'3'), position=0,
codes=[u'BF', u'LOI'] ) ),
Element( u'HI09-02', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=1,
codes=[] ) ),
Element( u'HI09-03', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'3'), position=2,
codes=[u'D8'] ) ),
Element( u'HI09-04', Properties(desc=u'Date Time Period', req_sit=u'S', data_type=(u'AN',u'1',u'35'), position=3,
codes=[] ) ),
Element( u'HI09-05', Properties(desc=u'Monetary Amount', req_sit=u'N', data_type=(u'R',u'1',u'18'), position=4,
codes=[] ) ),
Element( u'HI09-06', Properties(desc=u'Quantity', req_sit=u'N', data_type=(u'R',u'1',u'15'), position=5,
codes=[] ) ),
Element( u'HI09-07', Properties(desc=u'Version Identifier', req_sit=u'N', data_type=(u'AN',u'1',u'30'), position=6,
codes=[] ) ),
),
Composite( u'C022', Properties(req_sit=u'S',refdes='',seq=u'10',desc=u'Diagnosis 10'),
Element( u'HI10-01', Properties(desc=u'Code List Qualifier Code', req_sit=u'R', data_type=(u'ID',u'1',u'3'), position=0,
codes=[u'BF', u'LOI'] ) ),
Element( u'HI10-02', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=1,
codes=[] ) ),
Element( u'HI10-03', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'3'), position=2,
codes=[u'D8'] ) ),
Element( u'HI10-04', Properties(desc=u'Date Time Period', req_sit=u'S', data_type=(u'AN',u'1',u'35'), position=3,
codes=[] ) ),
Element( u'HI10-05', Properties(desc=u'Monetary Amount', req_sit=u'N', data_type=(u'R',u'1',u'18'), position=4,
codes=[] ) ),
Element( u'HI10-06', Properties(desc=u'Quantity', req_sit=u'N', data_type=(u'R',u'1',u'15'), position=5,
codes=[] ) ),
Element( u'HI10-07', Properties(desc=u'Version Identifier', req_sit=u'N', data_type=(u'AN',u'1',u'30'), position=6,
codes=[] ) ),
),
Composite( u'C022', Properties(req_sit=u'S',refdes='',seq=u'11',desc=u'Diagnosis 11'),
Element( u'HI11-01', Properties(desc=u'Code List Qualifier Code', req_sit=u'R', data_type=(u'ID',u'1',u'3'), position=0,
codes=[u'BF', u'LOI'] ) ),
Element( u'HI11-02', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=1,
codes=[] ) ),
Element( u'HI11-03', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'3'), position=2,
codes=[u'D8'] ) ),
Element( u'HI11-04', Properties(desc=u'Date Time Period', req_sit=u'S', data_type=(u'AN',u'1',u'35'), position=3,
codes=[] ) ),
Element( u'HI11-05', Properties(desc=u'Monetary Amount', req_sit=u'N', data_type=(u'R',u'1',u'18'), position=4,
codes=[] ) ),
Element( u'HI11-06', Properties(desc=u'Quantity', req_sit=u'N', data_type=(u'R',u'1',u'15'), position=5,
codes=[] ) ),
Element( u'HI11-07', Properties(desc=u'Version Identifier', req_sit=u'N', data_type=(u'AN',u'1',u'30'), position=6,
codes=[] ) ),
),
Composite( u'C022', Properties(req_sit=u'S',refdes='',seq=u'12',desc=u'Diagnosis 12'),
Element( u'HI12-01', Properties(desc=u'Code List Qualifier Code', req_sit=u'R', data_type=(u'ID',u'1',u'3'), position=0,
codes=[u'BF', u'LOI'] ) ),
Element( u'HI12-02', Properties(desc=u'Industry Code', req_sit=u'R', data_type=(u'AN',u'1',u'30'), position=1,
codes=[] ) ),
Element( u'HI12-03', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'3'), position=2,
codes=[u'D8'] ) ),
Element( u'HI12-04', Properties(desc=u'Date Time Period', req_sit=u'S', data_type=(u'AN',u'1',u'35'), position=3,
codes=[] ) ),
Element( u'HI12-05', Properties(desc=u'Monetary Amount', req_sit=u'N', data_type=(u'R',u'1',u'18'), position=4,
codes=[] ) ),
Element( u'HI12-06', Properties(desc=u'Quantity', req_sit=u'N', data_type=(u'R',u'1',u'15'), position=5,
codes=[] ) ),
Element( u'HI12-07', Properties(desc=u'Version Identifier', req_sit=u'N', data_type=(u'AN',u'1',u'30'), position=6,
codes=[] ) ),
),
),
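    # The HI segment above carries up to twelve C022 diagnosis composites;
    # only 'Diagnosis 1' is required (req_sit=u'R'), the remaining eleven
    # are situational (req_sit=u'S').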
Segment( u'PWK', Properties(syntax='',req_sit=u'S',repeat=u'10',pos=u'155',desc=u'Additional Patient Information'),
Element( u'PWK01', Properties(desc=u'Report Type Code', req_sit=u'R', data_type=(u'ID',u'2',u'2'), position=1,
codes=[u'03', u'04', u'05', u'06', u'07', u'08', u'09', u'10', u'11', u'13', u'15', u'21', u'48', u'55', u'59', u'77', u'A3', u'A4', u'AM', u'AS', u'AT', u'B2', u'B3', u'BR', u'BS', u'BT', u'CB', u'CK', u'D2', u'DA', u'DB', u'DG', u'DJ', u'DS', u'FM', u'HC', u'HR', u'I5', u'IR', u'LA', u'M1', u'NN', u'OB', u'OC', u'OD', u'OE', u'OX', u'P4', u'P5', u'P6', u'P7', u'PE', u'PN', u'PO', u'PQ', u'PY', u'PZ', u'QC', u'QR', u'RB', u'RR', u'RT', u'RX', u'SG', u'V5', u'XP'] ) ),
Element( u'PWK02', Properties(desc=u'Report Transmission Code', req_sit=u'R', data_type=(u'ID',u'1',u'2'), position=2,
codes=[u'BM', u'EL', u'EM', u'FX', u'VO'] ) ),
Element( u'PWK03', Properties(desc=u'Report Copies Needed', req_sit=u'N', data_type=(u'N0',u'1',u'2'), position=3,
codes=[] ) ),
Element( u'PWK04', Properties(desc=u'Entity Identifier Code', req_sit=u'N', data_type=(u'ID',u'2',u'3'), position=4,
codes=[] ) ),
Element( u'PWK05', Properties(desc=u'Identification Code Qualifier', req_sit=u'S', data_type=(u'ID',u'1',u'2'), position=5,
codes=[u'AC'] ) ),
Element( u'PWK06', Properties(desc=u'Identification Code', req_sit=u'S', data_type=(u'AN',u'2',u'80'), position=6,
codes=[] ) ),
Element( u'PWK07', Properties(desc=u'Description', req_sit=u'S', data_type=(u'AN',u'1',u'80'), position=7,
codes=[] ) ),
Composite( u'C002', Properties(req_sit=u'N',refdes='',seq=u'08',desc=u'Actions Indicated'),
),
Element( u'PWK09', Properties(desc=u'Request Category Code', req_sit=u'N', data_type=(u'ID',u'1',u'2'), position=9,
codes=[] ) ),
),
parsed_278_2010CA,
parsed_278_2010CB,
parsed_278_2000D,
)
parsed_278_2000B = Loop( u'2000B', Properties(looptype='',repeat=u'1',pos=u'180',req_sit=u'R',desc=u'Requester Level'),
Segment( u'HL', Properties(syntax='',req_sit=u'R',repeat=u'1',pos=u'010',desc=u'Requester Level'),
Element( u'HL01', Properties(desc=u'Hierarchical ID Number', req_sit=u'R', data_type=(u'AN',u'1',u'12'), position=1,
codes=[] ) ),
Element( u'HL02', Properties(desc=u'Hierarchical Parent ID Number', req_sit=u'R', data_type=(u'AN',u'1',u'12'), position=2,
codes=[] ) ),
Element( u'HL03', Properties(desc=u'Hierarchical Level Code', req_sit=u'R', data_type=(u'ID',u'1',u'2'), position=3,
codes=[u'21'] ) ),
Element( u'HL04', Properties(desc=u'Hierarchical Child Code', req_sit=u'R', data_type=(u'ID',u'1',u'1'), position=4,
codes=[u'1'] ) ),
),
parsed_278_2010B,
parsed_278_2000C,
)
parsed_278_2000A = Loop( u'2000A', Properties(looptype='',repeat=u'1',pos=u'010',req_sit=u'R',desc=u'Utilization Management Organization (UMO) Level'),
Segment( u'HL', Properties(syntax='',req_sit=u'R',repeat=u'1',pos=u'010',desc=u'Utilization Management Organization (UMO) Level'),
Element( u'HL01', Properties(desc=u'Hierarchical ID Number', req_sit=u'R', data_type=(u'AN',u'1',u'12'), position=1,
codes=[] ) ),
Element( u'HL02', Properties(desc=u'Hierarchical Parent ID Number', req_sit=u'N', data_type=(u'AN',u'1',u'12'), position=2,
codes=[] ) ),
Element( u'HL03', Properties(desc=u'Hierarchical Level Code', req_sit=u'R', data_type=(u'ID',u'1',u'2'), position=3,
codes=[u'20'] ) ),
Element( u'HL04', Properties(desc=u'Hierarchical Child Code', req_sit=u'R', data_type=(u'ID',u'1',u'1'), position=4,
codes=[u'1'] ) ),
),
Segment( u'AAA', Properties(syntax='',req_sit=u'S',repeat=u'9',pos=u'030',desc=u'Request Validation'),
Element( u'AAA01', Properties(desc=u'Yes/No Condition or Response Code', req_sit=u'R', data_type=(u'ID',u'1',u'1'), position=1,
codes=[u'N', u'Y'] ) ),
Element( u'AAA02', Properties(desc=u'Agency Qualifier Code', req_sit=u'N', data_type=(u'ID',u'2',u'2'), position=2,
codes=[] ) ),
Element( u'AAA03', Properties(desc=u'Reject Reason Code', req_sit=u'R', data_type=(u'ID',u'2',u'2'), position=3,
codes=[u'04', u'41', u'42', u'79'] ) ),
Element( u'AAA04', Properties(desc=u'Follow-up Action Code', req_sit=u'R', data_type=(u'ID',u'1',u'1'), position=4,
codes=[u'C', u'N', u'P', u'Y'] ) ),
),
parsed_278_2010A,
parsed_278_2000B,
)
parsed_278_DETAIL = Loop( u'DETAIL', Properties(looptype=u'wrapper',repeat=u'>1',pos=u'020',req_sit=u'S',desc=u'Table 2 - Detail'),
parsed_278_2000A,
)
parsed_278_ST_LOOP = Loop( u'ST_LOOP', Properties(looptype=u'explicit',repeat=u'>1',pos=u'020',req_sit=u'R',desc=u'Transaction Set Header'),
Segment( u'ST', Properties(syntax='',req_sit=u'R',repeat=u'1',pos=u'010',desc=u'Transaction Set Header'),
Element( u'ST01', Properties(desc=u'Transaction Set Identifier Code', req_sit=u'R', data_type=(u'ID',u'3',u'3'), position=1,
codes=[u'278'] ) ),
Element( u'ST02', Properties(desc=u'Transaction Set Control Number', req_sit=u'R', data_type=(u'AN',u'4',u'9'), position=2,
codes=[] ) ),
),
parsed_278_HEADER,
parsed_278_DETAIL,
Segment( u'SE', Properties(syntax='',req_sit=u'R',repeat=u'1',pos=u'280',desc=u'Transaction Set Trailer'),
Element( u'SE01', Properties(desc=u'Number of Included Segments', req_sit=u'R', data_type=(u'N0',u'1',u'10'), position=1,
codes=[] ) ),
Element( u'SE02', Properties(desc=u'Transaction Set Control Number', req_sit=u'R', data_type=(u'AN',u'4',u'9'), position=2,
codes=[] ) ),
),
)
parsed_278_GS_LOOP = Loop( u'GS_LOOP', Properties(looptype=u'explicit',repeat=u'>1',pos=u'020',req_sit=u'R',desc=u'Functional Group Header'),
Segment( u'GS', Properties(syntax='',req_sit=u'R',repeat=u'1',pos=u'010',desc=u'Functional Group Header'),
Element( u'GS01', Properties(desc=u'Functional Identifier Code', req_sit=u'R', data_type=(u'ID',u'2',u'2'), position=1,
codes=[u'HI'] ) ),
Element( u'GS02', Properties(desc=u'Application Senders Code', req_sit=u'R', data_type=(u'AN',u'2',u'15'), position=2,
codes=[] ) ),
        Element( u'GS03', Properties(desc=u'Application Receivers Code', req_sit=u'R', data_type=(u'AN',u'2',u'15'), position=3,
codes=[] ) ),
Element( u'GS04', Properties(desc=u'Date', req_sit=u'R', data_type=(u'DT',u'8',u'8'), position=4,
codes=[] ) ),
Element( u'GS05', Properties(desc=u'Time', req_sit=u'R', data_type=(u'TM',u'4',u'8'), position=5,
codes=[] ) ),
Element( u'GS06', Properties(desc=u'Group Control Number', req_sit=u'R', data_type=(u'N0',u'1',u'9'), position=6,
codes=[] ) ),
Element( u'GS07', Properties(desc=u'Responsible Agency Code', req_sit=u'R', data_type=(u'ID',u'1',u'2'), position=7,
codes=[u'X'] ) ),
Element( u'GS08', Properties(desc=u'Version / Release / Industry Identifier Code', req_sit=u'R', data_type=(u'AN',u'1',u'12'), position=8,
codes=[u'004010X094A1'] ) ),
),
parsed_278_ST_LOOP,
Segment( u'GE', Properties(syntax='',req_sit=u'R',repeat=u'1',pos=u'030',desc=u'Functional Group Trailer'),
        Element( u'GE01', Properties(desc=u'Number of Transaction Sets Included', req_sit=u'R', data_type=(u'N0',u'1',u'6'), position=1,
codes=[] ) ),
Element( u'GE02', Properties(desc=u'Group Control Number', req_sit=u'R', data_type=(u'N0',u'1',u'9'), position=2,
codes=[] ) ),
),
)
parsed_278_ISA_LOOP = Loop( u'ISA_LOOP', Properties(looptype=u'explicit',repeat=u'>1',pos=u'001',req_sit=u'R',desc=u'Interchange Control Header'),
Segment( u'ISA', Properties(syntax='',req_sit=u'R',repeat=u'1',pos=u'010',desc=u'Interchange Control Header'),
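        # desc values such as u'I01'..u'I16' in this segment (and u'I12',
        # u'I08', u'I09', u'I17', u'I18' in TA1/IEA below) are X12
        # interchange-level data element reference numbers, used here in
        # place of element names.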
Element( u'ISA01', Properties(desc=u'I01', req_sit=u'R', data_type=(u'ID',u'2',u'2'), position=1,
codes=[u'00', u'03'] ) ),
Element( u'ISA02', Properties(desc=u'I02', req_sit=u'R', data_type=(u'AN',u'10',u'10'), position=2,
codes=[] ) ),
Element( u'ISA03', Properties(desc=u'I03', req_sit=u'R', data_type=(u'ID',u'2',u'2'), position=3,
codes=[u'00', u'01'] ) ),
Element( u'ISA04', Properties(desc=u'I04', req_sit=u'R', data_type=(u'AN',u'10',u'10'), position=4,
codes=[] ) ),
Element( u'ISA05', Properties(desc=u'I05', req_sit=u'R', data_type=(u'ID',u'2',u'2'), position=5,
codes=[u'01', u'14', u'20', u'27', u'28', u'29', u'30', u'33', u'ZZ'] ) ),
Element( u'ISA06', Properties(desc=u'I06', req_sit=u'R', data_type=(u'AN',u'15',u'15'), position=6,
codes=[] ) ),
Element( u'ISA07', Properties(desc=u'I05', req_sit=u'R', data_type=(u'ID',u'2',u'2'), position=7,
codes=[u'01', u'14', u'20', u'27', u'28', u'29', u'30', u'33', u'ZZ'] ) ),
Element( u'ISA08', Properties(desc=u'I07', req_sit=u'R', data_type=(u'AN',u'15',u'15'), position=8,
codes=[] ) ),
Element( u'ISA09', Properties(desc=u'I08', req_sit=u'R', data_type=(u'DT',u'6',u'6'), position=9,
codes=[] ) ),
Element( u'ISA10', Properties(desc=u'I09', req_sit=u'R', data_type=(u'TM',u'4',u'4'), position=10,
codes=[] ) ),
Element( u'ISA11', Properties(desc=u'I10', req_sit=u'R', data_type=(u'ID',u'1',u'1'), position=11,
codes=[u'U'] ) ),
Element( u'ISA12', Properties(desc=u'I11', req_sit=u'R', data_type=(u'ID',u'5',u'5'), position=12,
codes=[u'00401'] ) ),
Element( u'ISA13', Properties(desc=u'I12', req_sit=u'R', data_type=(u'N0',u'9',u'9'), position=13,
codes=[] ) ),
Element( u'ISA14', Properties(desc=u'I13', req_sit=u'R', data_type=(u'ID',u'1',u'1'), position=14,
codes=[u'0', u'1'] ) ),
Element( u'ISA15', Properties(desc=u'I14', req_sit=u'R', data_type=(u'ID',u'1',u'1'), position=15,
codes=[u'P', u'T'] ) ),
Element( u'ISA16', Properties(desc=u'I15', req_sit=u'R', data_type=(u'AN',u'1',u'1'), position=16,
codes=[] ) ),
),
parsed_278_GS_LOOP,
Segment( u'TA1', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'020',desc=u'Interchange Acknowledgement'),
Element( u'TA101', Properties(desc=u'I12', req_sit=u'R', data_type=(u'N0',u'9',u'9'), position=1,
codes=[] ) ),
Element( u'TA102', Properties(desc=u'I08', req_sit=u'R', data_type=(u'DT',u'6',u'6'), position=2,
codes=[] ) ),
Element( u'TA103', Properties(desc=u'I09', req_sit=u'R', data_type=(u'TM',u'4',u'4'), position=3,
codes=[] ) ),
Element( u'TA104', Properties(desc=u'I17', req_sit=u'R', data_type=(u'ID',u'1',u'1'), position=4,
codes=[u'A', u'E', u'R'] ) ),
Element( u'TA105', Properties(desc=u'I18', req_sit=u'R', data_type=(u'ID',u'3',u'3'), position=5,
codes=[u'000', u'001', u'002', u'003', u'004', u'005', u'006', u'007', u'008', u'009', u'010', u'011', u'012', u'013', u'014', u'015', u'016', u'017', u'018', u'019', u'020', u'021', u'022', u'023', u'024', u'025', u'026', u'027', u'028', u'029', u'030', u'031'] ) ),
),
Segment( u'IEA', Properties(syntax='',req_sit=u'R',repeat=u'1',pos=u'030',desc=u'Interchange Control Trailer'),
Element( u'IEA01', Properties(desc=u'I16', req_sit=u'R', data_type=(u'N0',u'1',u'5'), position=1,
codes=[] ) ),
Element( u'IEA02', Properties(desc=u'I12', req_sit=u'R', data_type=(u'N0',u'9',u'9'), position=2,
codes=[] ) ),
),
)
parsed_278 = Message( u'278', Properties(desc=u'HIPAA Health Care Services Review: Response X094A1-278'),
parsed_278_ISA_LOOP,
)
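# Reading guide for the generated map above (comment only; no new API is
# assumed): each Element's Properties record encodes
#   req_sit   -- u'R' required, u'S' situational, u'N' not used
#   data_type -- (X12 type, min length, max length), e.g. (u'ID', u'1', u'2')
#   codes     -- permitted code values; an empty list means unrestricted
# Loops and Segments additionally carry 'repeat' counts and ordinal 'pos'
# values mirroring the 004010X094A1 implementation guide.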
| bsd-3-clause | 3,310,170,976,286,571,500 | 69.009751 | 707 | 0.610316 | false |
chrislit/abydos | abydos/distance/_yjhhr.py | 1 | 5217 | # Copyright 2019-2020 by Christopher C. Little.
# This file is part of Abydos.
#
# Abydos is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Abydos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Abydos. If not, see <http://www.gnu.org/licenses/>.
"""abydos.distance._yjhhr.
YJHHR distance
"""
from typing import Any, Counter as TCounter, Optional, Sequence, Set, Union
from ._token_distance import _TokenDistance
from ..tokenizer import _Tokenizer
__all__ = ['YJHHR']
class YJHHR(_TokenDistance):
r"""YJHHR distance.
For two sets X and Y and a parameter p, YJHHR distance
:cite:`Yang:2016` is
.. math::
dist_{YJHHR_p}(X, Y) =
\sqrt[p]{|X \setminus Y|^p + |Y \setminus X|^p}
In :ref:`2x2 confusion table terms <confusion_table>`, where a+b+c+d=n,
this is
.. math::
dist_{YJHHR} =
\sqrt[p]{b^p + c^p}
.. versionadded:: 0.4.0
"""
def __init__(
self,
pval: int = 1,
alphabet: Optional[
Union[TCounter[str], Sequence[str], Set[str], int]
] = None,
tokenizer: Optional[_Tokenizer] = None,
intersection_type: str = 'crisp',
**kwargs: Any
) -> None:
"""Initialize YJHHR instance.
Parameters
----------
pval : int
The :math:`p`-value of the :math:`L^p`-space
alphabet : Counter, collection, int, or None
This represents the alphabet of possible tokens.
See :ref:`alphabet <alphabet>` description in
:py:class:`_TokenDistance` for details.
tokenizer : _Tokenizer
A tokenizer instance from the :py:mod:`abydos.tokenizer` package
intersection_type : str
Specifies the intersection type, and set type as a result:
See :ref:`intersection_type <intersection_type>` description in
:py:class:`_TokenDistance` for details.
**kwargs
Arbitrary keyword arguments
Other Parameters
----------------
qval : int
The length of each q-gram. Using this parameter and tokenizer=None
will cause the instance to use the QGram tokenizer with this
q value.
metric : _Distance
A string distance measure class for use in the ``soft`` and
``fuzzy`` variants.
threshold : float
A threshold value, similarities above which are counted as
members of the intersection for the ``fuzzy`` variant.
.. versionadded:: 0.4.0
"""
super(YJHHR, self).__init__(
alphabet=alphabet,
tokenizer=tokenizer,
intersection_type=intersection_type,
**kwargs
)
self.set_params(pval=pval)
def dist_abs(self, src: str, tar: str) -> float:
"""Return the YJHHR distance of two strings.
Parameters
----------
src : str
Source string (or QGrams/Counter objects) for comparison
tar : str
Target string (or QGrams/Counter objects) for comparison
Returns
-------
float
YJHHR distance
Examples
--------
>>> cmp = YJHHR()
>>> cmp.dist_abs('cat', 'hat')
4.0
>>> cmp.dist_abs('Niall', 'Neil')
7.0
>>> cmp.dist_abs('aluminum', 'Catalan')
15.0
>>> cmp.dist_abs('ATCG', 'TAGC')
10.0
.. versionadded:: 0.4.0
"""
if src == tar:
return 0.0
self._tokenize(src, tar)
b = self._src_only_card() ** self.params['pval']
c = self._tar_only_card() ** self.params['pval']
return float(round((b + c) ** (1 / self.params['pval']), 14))
def dist(self, src: str, tar: str) -> float:
"""Return the normalized YJHHR distance of two strings.
Parameters
----------
src : str
Source string (or QGrams/Counter objects) for comparison
tar : str
Target string (or QGrams/Counter objects) for comparison
Returns
-------
float
normalized YJHHR distance
Examples
--------
>>> cmp = YJHHR()
>>> cmp.dist('cat', 'hat')
0.6666666666666666
>>> cmp.dist('Niall', 'Neil')
0.7777777777777778
>>> cmp.dist('aluminum', 'Catalan')
0.9375
>>> cmp.dist('ATCG', 'TAGC')
1.0
.. versionadded:: 0.4.0
"""
distance = self.dist_abs(src, tar)
union = self._union_card()
if union == 0:
return 0.0
return distance / union
if __name__ == '__main__':
import doctest
doctest.testmod()
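    # Quick smoke check beyond the doctests: with pval=2 the measure becomes
    # the Euclidean norm of the two set-difference cardinalities.
    cmp = YJHHR(pval=2)
    print(cmp.dist_abs('cat', 'hat'))  # (2**2 + 2**2)**0.5 ~= 2.82842712474619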
| gpl-3.0 | 6,945,651,139,748,284,000 | 26.75 | 78 | 0.553575 | false |
mistercrunch/panoramix | superset/queries/saved_queries/commands/importers/v1/__init__.py | 2 | 2947 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Any, Dict, Set
from marshmallow import Schema
from sqlalchemy.orm import Session
from superset.commands.importers.v1 import ImportModelsCommand
from superset.connectors.sqla.models import SqlaTable
from superset.databases.commands.importers.v1.utils import import_database
from superset.databases.schemas import ImportV1DatabaseSchema
from superset.queries.saved_queries.commands.exceptions import SavedQueryImportError
from superset.queries.saved_queries.commands.importers.v1.utils import (
import_saved_query,
)
from superset.queries.saved_queries.dao import SavedQueryDAO
from superset.queries.saved_queries.schemas import ImportV1SavedQuerySchema
class ImportSavedQueriesCommand(ImportModelsCommand):
"""Import Saved Queries"""
dao = SavedQueryDAO
model_name = "saved_queries"
prefix = "queries/"
schemas: Dict[str, Schema] = {
"databases/": ImportV1DatabaseSchema(),
"queries/": ImportV1SavedQuerySchema(),
}
import_error = SavedQueryImportError
@staticmethod
def _import(
session: Session, configs: Dict[str, Any], overwrite: bool = False
) -> None:
# discover databases associated with saved queries
database_uuids: Set[str] = set()
for file_name, config in configs.items():
if file_name.startswith("queries/"):
database_uuids.add(config["database_uuid"])
# import related databases
database_ids: Dict[str, int] = {}
for file_name, config in configs.items():
if file_name.startswith("databases/") and config["uuid"] in database_uuids:
database = import_database(session, config, overwrite=False)
database_ids[str(database.uuid)] = database.id
# import saved queries with the correct parent ref
for file_name, config in configs.items():
if (
file_name.startswith("queries/")
and config["database_uuid"] in database_ids
):
config["db_id"] = database_ids[config["database_uuid"]]
import_saved_query(session, config, overwrite=overwrite)
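# Hypothetical usage sketch (file names and YAML bodies are invented): the
# base ImportModelsCommand is constructed with a mapping of bundle file
# names to their raw contents and then executed via run().
#
#   contents = {
#       "metadata.yaml": "...",
#       "databases/examples.yaml": "...",
#       "queries/weekly_report.yaml": "...",
#   }
#   ImportSavedQueriesCommand(contents, overwrite=True).run()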
| apache-2.0 | -8,466,057,296,013,901,000 | 40.507042 | 87 | 0.707499 | false |
lovelysystems/pyjamas | examples/libtest/BuiltinTest.py | 1 | 4923 | from UnitTest import UnitTest
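# Probe whether 'builtin' is visible before any import statement runs;
# testImport() below asserts that builtin_value stayed None, i.e. the
# module was not loaded early.  The never-executed 'if False: import
# builtin' appears to exercise the compiler's handling of statically
# visible imports.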
try:
builtin_value = builtin.value
except:
builtin_value = None
if False:
import builtin
import builtin
class Foo:
pass
class BuiltinTest(UnitTest):
def testMinMax(self):
self.assertEqual(max(1,2,3,4), 4)
self.assertEqual(min(1,2,3,4), 1)
self.assertEqual(max([1,2,3,4]), 4)
self.assertEqual(min([1,2,3,4]), 1)
self.assertTrue(max([5,3,4],[6,1,2]) == [6,1,2] , "max([5,3,4],[6,1,2])")
self.assertTrue(min([5,3,4],[6,1,2]) == [5,3,4] , "min([5,3,4],[6,1,2])")
def testInt(self):
self.assertEqual(int("5"), 5)
self.assertEqual(int("09"), 9)
self.assertEqual(6, 6)
try:
int('not int')
self.fail("No int() argument error raised")
except ValueError, e:
self.assertEqual(e[0], "invalid literal for int() with base 10: 'not int'")
try:
int(1, 10)
self.fail("No int() argument error raised")
except TypeError, e:
self.assertEqual(e[0], "int() can't convert non-string with explicit base")
def testOrdChr(self):
for i in range(256):
self.assertEqual(ord(chr(i)), i)
def testMod(self):
self.assertEqual(12 % 5, 2)
def testPower(self):
self.assertEqual(3 ** 4, 81)
def testPowerfunc(self):
self.assertEqual(pow(10, 3), 1000)
self.assertEqual(pow(10, 3, 7), 6)
def testHex(self):
self.assertEqual(hex(23), '0x17')
try:
h = hex(23.2)
self.fail("No hex() argument error raised")
except TypeError, why:
self.assertEqual(why.args[0], "hex() argument can't be converted to hex")
def testOct(self):
self.assertEqual(oct(23), '027')
try:
o = oct(23.2)
self.fail("No oct() argument error raised")
except TypeError, why:
self.assertEqual(str(why), "oct() argument can't be converted to oct")
def testRound(self):
self.assertEqual(round(13.12345), 13.0)
self.assertEqual(round(13.12345, 3), 13.123)
self.assertEqual(round(-13.12345), -13.0)
self.assertEqual(round(-13.12345, 3), -13.123)
self.assertEqual(round(13.62345), 14.0)
self.assertEqual(round(13.62345, 3), 13.623)
self.assertEqual(round(-13.62345), -14.0)
self.assertEqual(round(-13.62345, 3), -13.623)
def testDivmod(self):
test_set = [(14, 3, 4, 2),
(14.1, 3, 4.0, 2.1),
(14.1, 3.1, 4.0, 1.7),
]
for x, y, p, q in test_set:
d = divmod(x,y)
self.assertEqual(d[0], p)
self.assertEqual(abs(d[1] - q) < 0.00001, True)
def testFloorDiv(self):
self.assertEqual(1, 4//3)
self.assertEqual(1, 5//3)
self.assertEqual(2, 6//3)
def testAll(self):
self.assertEqual(all([True, 1, 'a']), True)
self.assertEqual(all([True, 1, None, 'a']), False)
self.assertEqual(all([True, 1, '', 'a']), False)
self.assertEqual(all([True, 1, False, 'a']), False)
def testAny(self):
self.assertEqual(any([True, 1, 'a']), True)
self.assertEqual(any([True, 1, None, 'a']), True)
self.assertEqual(any([True, 1, '', 'a']), True)
self.assertEqual(any([True, 1, False, 'a']), True)
self.assertEqual(any([False, '', None]), False)
def testRepr(self):
l1 = [1,2,3]
l2 = ["a", "b", "c"]
t1 = (4,5,6,7)
t2 = ("aa", "bb")
d1 = {'a': 1, "b": "B"}
d2 = {1: l1, 2: l2, 3: t1, 4: t2, 5:d1}
self.assertEqual(repr(l1), '[1, 2, 3]')
self.assertEqual(repr(l2), "['a', 'b', 'c']")
self.assertEqual(repr(t1), '(4, 5, 6, 7)')
self.assertEqual(repr(t2), "('aa', 'bb')")
self.assertEqual(repr(d1), "{'a': 1, 'b': 'B'}")
self.assertEqual(repr(d2), "{1: [1, 2, 3], 2: ['a', 'b', 'c'], 3: (4, 5, 6, 7), 4: ('aa', 'bb'), 5: {'a': 1, 'b': 'B'}}")
def testIsInstance(self):
s = 'hello'
self.assertTrue(isinstance(s, str), "s is a string")
self.assertFalse(isinstance(s, int), "s is a string not an integer")
s = 1
self.assertFalse(isinstance(s, str), "s is an integer not a string")
self.assertTrue(isinstance(s, int), "s is an integer")
def testImport(self):
self.assertEqual(builtin_value, None, "The builtin is loaded before import!")
try:
self.assertEqual(builtin.value, builtin.get_value())
except:
self.fail("Import failed for builtin")
def testBitOperations(self):
self.assertEqual(1 << 2 - 1, 2, "shift error 1")
self.assertEqual((1 << 2) - 1, 3, "shift error 2")
self.assertEqual(1 & 3 + 1, 0, "and error 1")
self.assertEqual((1 & 3) + 1, 2, "and error 2")
| apache-2.0 | 842,479,220,760,072,800 | 33.1875 | 129 | 0.530977 | false |
levilucio/SyVOLT | ExFamToPerson/contracts/HPos_ChildSchool_ConnectedLHS.py | 1 | 11680 | from core.himesis import Himesis, HimesisPreConditionPatternLHS
import uuid
class HPos_ChildSchool_ConnectedLHS(HimesisPreConditionPatternLHS):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HPos_ChildSchool_ConnectedLHS.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HPos_ChildSchool_ConnectedLHS, self).__init__(name='HPos_ChildSchool_ConnectedLHS', num_nodes=0, edges=[])
# Set the graph attributes
self["mm__"] = ['MT_pre__FamiliesToPersonsMM', 'MoTifRule']
self["MT_constraint__"] = """#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True enables the rule to be applied,
# returning False forbids the rule from being applied.
#===============================================================================
return True
"""
self["name"] = """"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'Pos_ChildSchool')
# Set the node attributes
# match class Child() node
self.add_node()
self.vs[0]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[0]["MT_label__"] = """1"""
self.vs[0]["MT_dirty__"] = False
self.vs[0]["mm__"] = """MT_pre__Child"""
self.vs[0]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
# match class School() node
self.add_node()
self.vs[1]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[1]["MT_label__"] = """2"""
self.vs[1]["MT_dirty__"] = False
self.vs[1]["mm__"] = """MT_pre__School"""
self.vs[1]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
# match class Service() node
self.add_node()
self.vs[2]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[2]["MT_label__"] = """3"""
self.vs[2]["MT_dirty__"] = False
self.vs[2]["mm__"] = """MT_pre__Service"""
self.vs[2]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
# Nodes that represent the edges of the property.
# match association Child--goesTo-->School node
self.add_node()
self.vs[3]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "goesTo"
"""
self.vs[3]["MT_label__"] = """4"""
self.vs[3]["MT_subtypes__"] = []
self.vs[3]["MT_dirty__"] = False
self.vs[3]["mm__"] = """MT_pre__directLink_S"""
self.vs[3]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'assoc3')
# match association School--special-->Service node
self.add_node()
self.vs[4]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "special"
"""
self.vs[4]["MT_label__"] = """5"""
self.vs[4]["MT_subtypes__"] = []
self.vs[4]["MT_dirty__"] = False
self.vs[4]["mm__"] = """MT_pre__directLink_S"""
self.vs[4]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'assoc4')
# Add the edges
self.add_edges([
(0,3), # match_class Child() -> association goesTo
(3,1), # association goesTo -> match_class School()
(1,4), # match_class School() -> association special
(4,2) # association special -> match_class Service()
])
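        # Associations are themselves nodes (labels 4 and 5 above), so each
        # LHS link such as Child--goesTo-->School expands into two directed
        # edges: source class -> association node -> target class.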
# Add the attribute equations
self["equations"] = []
def eval_attr11(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_attr12(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_attr13(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_attr14(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "goesTo"
def eval_attr15(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "special"
def constraint(self, PreNode, graph):
"""
Executable constraint code.
@param PreNode: Function taking an integer as parameter
and returns the node corresponding to that label.
"""
#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True enables the rule to be applied,
# returning False forbids the rule from being applied.
#===============================================================================
return True
| mit | -8,973,953,865,711,570,000 | 50.681416 | 128 | 0.469863 | false |
leedsEM/movement | reorder4LMBFGS.py | 1 | 2635 | #!/usr/bin/python
import glob
import os
import sys
## find the labels and identify the right columns; put in labelsdic
#
vers = "0.3"
# vers 0.3 updated to output in fortran formatted numbers
print "**** reorder starfile for individual particle correction using LMBFGS v {0}".format(vers)
### ---- function: reorder the starfile -----------
def reorder_starfile(filename):
relionfile = open(filename, "r")
odata = relionfile.readlines()
data = []
for i in odata:
if len(i.split()) > 3:
data.append(i)
# get the column number
for i in odata:
if '_rlnImageName' in i:
            colnum = int(i.split('#')[-1]) - 1
labelsdic = {}
    for i in data:
        key = i.split()[colnum].split("/")[-1]
        if key in labelsdic:
            labelsdic[key].append(i.split())
        else:
            labelsdic[key] = [i.split()]
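    # Grouping by the stack file name (the part of rlnImageName after the
    # last '/') makes all frames belonging to one particle stack contiguous,
    # which is what per-particle LM-BFGS movement correction expects
    # (assumption based on the script's stated purpose).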
## write new header
output = open("{0}_LMBFGS.star".format(filename.split('.')[0]),"w")
for i in odata:
if len(i.split()) < 3:
output.write(i)
# write the particles
for key in sorted(labelsdic):
for line in labelsdic[key]:
output.write("\n")
for i in line:
if is_number(i):
count = len(i.split('.'))
if count > 1:
i = float(i)
if len(str(i).split('.')[0]) > 5:
output.write("{0:.6e} ".format(i))
else:
output.write("{0:12.6f} ".format(i))
else:
output.write("{0: 12d} ".format(int(i)))
else:
output.write("{0} ".format(i))
return("{0}_LMBFGS.star".format(filename.split('.')[0]))
#-----------------------------------------------------------------------
#------- function test if string is a number --------------------------#
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
#-----------------------------------------------------------------------
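# Hedged examples of the helper above (values follow directly from float()):
#     is_number("3.14") -> True
#     is_number("12")   -> True
#     is_number("abc")  -> False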
## get the relion file and read the data
files = glob.glob(raw_input("star files search string: ") or "*.star")
assert len(files) >= 1, "no files found"
for i in files:
print i
go = raw_input("Do it? (Y/N)")
if go in ("Y","y","yes","YES","Yes"):
for each in files:
newfile = reorder_starfile(each)
print each," --> ",newfile
| gpl-2.0 | 8,108,566,278,060,007,000 | 30 | 97 | 0.476281 | false |
chjost/analysis-code | analysis/fitresults.py | 1 | 12123 | # a class to contain the fit results
import numpy as np
from analyze_fcts import calc_error
from ensemble import LatticeEnsemble
from fit import genfit, genfit_comb, set_fit_interval
from fit import fit as fit1
from plot import genplot, genplot_comb
from input_output import write_fitresults, read_fitresults
from module_global import multiprocess
# this function circumvents the problem that only top-level functions
# of a module can be pickled, which is needed for the multiprocessing
# to work
def fitting_func(args, kwargs):
    return fit1(*args, **kwargs)
class FitResults(object):
"""class to hold fit results.
Nothing is immutable, so be careful!
"""
def _depth(self, var):
return isinstance(var, list) and max(map(self._depth, var)) + 1
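    # Hedged examples of the recursion above (scalars fall through to False,
    # which callers treat as depth 0):
    #     self._depth(5)           -> False
    #     self._depth([1, 2])      -> 1
    #     self._depth([[1], [2]])  -> 2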
def __init__(self, ensemble, label):
if not isinstance(ensemble, LatticeEnsemble):
raise TypeError("FitResults expected LatticeEnsemble, got %s" %
(type(ensemble)))
# always needed
self.ensemble = ensemble
self.name = ensemble.get_data("name")
self.label = label
self.depth = 0
self.fitranges = []
self.verbose = False
self.old_data = None
self.data = None
self.fitfunc = None
# needed for combined fit
self.combfit = False
self.par = None
self.par_index = None
# results
self.res = None
self.pvals = None
self.chi2 = None
@classmethod
def combined_fit(cls, prev_fr, label, fitrange, par_index=0):
"""Initialize a new combined fit using the results of
prev_fr.
"""
tmp = cls(prev_fr.ensemble, label)
tmp.prepare_combined_fit(fitrange, prev_fr.fitranges[0],
prev_fr.data, par_index)
return tmp
@classmethod
def from_file(cls, ensemble, label, filename):
"""Initialize a fit from a file."""
raise NotImplementedError()
tmp = cls(ensemble, label)
res = read_fitresults(filename)
if len(res) == 4:
cls.set_fit(res)
elif len(res) == 5:
cls.set_fit_comb(res)
else:
raise RuntimeError("Cannot make sense initializing fit from file")
return tmp
def toggle_verbose(self):
if self.verbose:
self.verbose = False
else:
self.verbose = True
def add_fitrange(self, fitrange):
"""Add a fitrange for fitting a single fit"""
if len(self.fitranges) == 0:
self.fitranges.append(np.asarray(fitrange))
else:
raise RuntimeError("%s already has a fitrange, cannot add another")
def add_fitranges(self, fitrange_data, fitrange_par):
"""Add two fitrange for fitting a combined fit.
Args:
fitint_data: List of intervals for the fit of the functions.
fitint_par: List of intervals for the varying parameter
"""
if len(self.fitranges) == 0:
self.fitranges.append(np.asarray(fitrange_data))
self.fitranges.append(np.asarray(fitrange_par))
self.combfit = True
else:
raise RuntimeError("%s already has a fitrange, cannot add another"%\
self.__repr__)
    def set_fitrange(self, _data, lo, up, skip=2):
"""Set fit interval"""
self.data = np.atleast_3d(_data)
self.add_fitrange(set_fit_interval(_data, lo, up, skip))
def add_par(self, par, par_index=0):
"""Add parameters for a combined fit and the index needed."""
self.par = par
self.par_index = par_index
self.combfit = True
def use_old_data(self, old_data):
"""Reuse the data located at 'old_data' if possible"""
self.old_data = old_data
def prepare_fit(self, fitrange, old_data=None):
"""Set everything needed for a fit."""
        self.combfit = False
self.add_fitrange(fitrange)
self.use_old_data(old_data)
def prepare_combined_fit(self, fitrange_data, fitrange_par, par,
par_index=0, old_data=None):
"""Set everything needed for a combined fit."""
        self.combfit = True
self.add_fitranges(fitrange_data, fitrange_par)
self.add_par(par, par_index)
self.use_old_data(old_data)
def do_fit(self, _data, fitfunc, start_params):
if self.data is not None:
if not (self.data==_data).all():
raise RuntimeError("Fitresult has already data which is" +
"compatible with new data")
else:
self.data = np.atleast_3d(_data)
# init variables
nboot = self.data.shape[0]
T2 = self.data.shape[1]
ncorr = self.data.shape[2]
npar = len(start_params)
ninter = [len(fitint) for fitint in self.fitranges[0]]
# set fit data
        tlist = np.linspace(0., float(T2), T2, endpoint=False)
# initialize empty arrays
self.res = []
self.chi2 = []
        self.pvals = []
func_args = []
func_kwargs = []
# initialize array for every principal correlator
for _l in range(ncorr):
self.res.append(np.zeros((nboot, npar, ninter[_l])))
self.chi2.append(np.zeros((nboot, ninter[_l])))
            self.pvals.append(np.zeros((nboot, ninter[_l])))
def ffunc(args, kwargs):
            # unpack the collected positional/keyword containers for the fit routine
            return fit1(fitfunc, *args, **kwargs)
for _l in range(ncorr):
# setup
#mdata, ddata = calc_error(data[:,:,_l])
for _i in range(ninter[_l]):
lo, up = self.fitranges[0][_l][_i]
if self.verbose:
print("Interval [%d, %d]" % (lo, up))
print("correlator %d" % _l)
# fit the energy and print information
if self.verbose:
print("fitting correlation function")
print(tlist[lo:up+1])
func_args.append((tlist[lo:up+1], self.data[:,lo:up+1,_l], start_params))
y=len(func_kwargs)
func_kwargs.append({"num":y, "verbose":False})
#res[_l][:,:,_i], chi2[_l][:,_i], pval[_l][:,_i] = fitting(fitfunc,
# tlist[lo:up+1], data[:,lo:up+1,_l], start_params, verbose=False)
#if verbose:
# print("p-value %.7lf\nChi^2/dof %.7lf\nresults:"
# % (pval[_l][ 0, _i], chi2[_l][0,_i]/( (up - lo + 1) -
# len(start_params))))
# for p in enumerate(res[_l][0,:,_i]):
# print("\tpar %d = %lf" % p)
# print(" ")
#for a, b in zip(func_args, func_kwargs):
# print(a, b)
#fit1(*(func_args[0]), **(func_kwargs[0]))
multiprocess(ffunc, func_args, func_kwargs)
return
def fit(self, _data, fitfunc, start_params):
"""Fit the data using the fitfunction.
Args:
_data: The correlation functions.
fitfunc: The function to fit to the data.
start_params: The starting parameters for the fit function.
"""
if self.verbose:
print("fitting %s '%s'"% (self.name, self.label))
self.fitfunc = fitfunc
self.data = _data
if self.combfit:
# sanity checks
if len(self.fitranges) != 2:
raise RuntimeError("%s needs 2 fitranges for combined fit" %\
self.__repr__)
if not self.par:
raise RuntimeError("%s needs parameter data for combined fit"%\
self.__repr__)
# fit
myargs = [_data, self.fitranges[0], self.fitranges[1], fitfunc,
                      start_params, self.par]
mykwargs = {"par_index": self.par_index, "olddata": self.old_data,
"verbose": self.verbose}
self.res, self.chi2, self.pvals = genfit_comb(*myargs, **mykwargs)
else:
myargs = [_data, self.fitranges[0], fitfunc, start_params]
mykwargs = {"olddata": self.old_data, "verbose": self.verbose}
self.res, self.chi2, self.pvals = genfit(*myargs, **mykwargs)
self.depth = self._depth(self.res)
def set_results(self, res):
"""Set results when reading from file."""
self.res, self.chi2, self.pvals = res[:3]
self.add_fitrange(res[3])
def set_results_comb(self, res):
"""Set results when reading from file."""
self.res, self.chi2, self.pvals = res[:3]
self.add_fitranges(res[3], res[4])
self.combfit = True
def get_results(self):
"""Returns the fit results, the $\chi^2$ values and the p-values."""
return self.res, self.chi2, self.pvals, self.fitranges[0]
def save(self, filename):
"""save data to disk."""
if self.verbose:
print("saving %s '%s'"% (self.name, self.label))
if self.combfit:
write_fitresults(filename, self.fitranges[0], self.res,
self.chi2, self.pvals, self.fitranges[1],
self.verbose)
else:
write_fitresults(filename, self.fitranges[0], self.res,
self.chi2, self.pvals, self.verbose)
def save2(self, filename):
"""Save class to disk."""
if self.combfit:
raise NotImplementedError()
            dic = {'fi0' : self.fitranges[0],
                   'fi1' : self.fitranges[1]}
dic.update({'pi%02d' % i: p for (i, p) in enumerate(self.res)})
dic.update({'ch%02d' % i: p for (i, p) in enumerate(self.chi2)})
dic.update({'pv%02d' % i: p for (i, p) in enumerate(self.pvals)})
dic.update({'data': self.data})
dic.update({'par': self.par})
np.savez(filename, **dic)
else:
dic = {'fi0' : self.fitranges[0]}
dic.update({'pi%02d' % i: p for (i, p) in enumerate(self.res)})
dic.update({'ch%02d' % i: p for (i, p) in enumerate(self.chi2)})
dic.update({'pv%02d' % i: p for (i, p) in enumerate(self.pvals)})
dic.update({'data': self.data})
np.savez(filename, **dic)
def plot(self, label, path="./plots/", plotlabel="corr"):
"""Plot data.
label: Labels for the title and the axis.
path: Path to the saving place of the plot.
plotlabel: Label for the plot file.
"""
if self.verbose:
print("plotting %s '%s'"% (self.name, self.label))
if self.combfit:
myargs = [self.data, self.pvals, self.fitranges[0],
self.fitranges[1], self.fitfunc, self.res, self.par,
self.ensemble.get_data("tmin"), self.name,
self.ensemble.get_data("d"), label]
mykwargs = {"path": path, "plotlabel": plotlabel,
"verbose":self.verbose, "par_par_index": self.par_index}
genplot_comb(*myargs, **mykwargs)
else:
myargs = [self.data, self.res, self.pvals, self.fitranges[0],
self.fitfunc, self.ensemble.get_data("tmin"), self.name,
self.ensemble.get_data("d"), label]
mykwargs = {"path": path, "plotlabel": plotlabel,
"verbose":self.verbose}
genplot(*myargs, **mykwargs)
def __str__(self):
restring = "FitResult %s '%s' with depth %d" % (self.name, self.label,
self.depth)
        if self.data is not None:
restring = "\n".join((restring,"Data:\n"))
for key in self.data:
restring = "".join((restring, "\t%s: " % (str(key)),
str(self.data[key]), "\n"))
else:
retstring = "".join((retstring, "\n"))
return restring
def __repr__(self):
return "[ FitResult %s '%s' with depth %d]" % (self.name, self.label,
self.depth)
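# Hedged usage sketch of the class above (the ensemble object, data shapes and
# starting values are assumptions, not part of this module):
#     fr = FitResults(ensemble, "pion")
#     fr.prepare_fit(fit_intervals)
#     fr.fit(corr_data, fitfunc, [1.0, 0.5])
#     res, chi2, pvals, fitint = fr.get_results()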
| gpl-3.0 | -143,162,281,004,933,150 | 38.23301 | 89 | 0.541203 | false |
guzmonne/meraki_api | meraki_api/networks.py | 1 | 4729 | """
Meraki Networks API Resource
"""
import urllib
from .meraki_api_resource import MerakiAPIResource
from .devices import Devices
from .ssids import SSIDs
from .site_to_site_vpn import SiteToSiteVPN
from .phone_contacts import PhoneContacts
from .sm import SM
from .static_routes import StaticRoutes
from .vlans import VLANs
from .utils import clean
from .clients import Clients
class Networks(MerakiAPIResource):
""" Meraki API Networks resource. """
resource = "networks"
parameters = ["name", "timeZone", "tags", "type"]
traffic_parameters = ["timespan", "deviceType"]
air_marshal_parameters = ["timespan"]
bind_parameters = ["configurationTemplateId", "autoBind"]
clients_parameters = ["id_or_mac_or_ip"]
events_parameters = ["productType", "includedEventTypes", "excludedEventTypes", "deviceMac", "deviceSerial", "deviceName", "clientIp", "clientMac", "clientName", "smDeviceMac", "smDeviceName", "perPage", "startingAfter", "endingBefore"]
def __init__(self, key, prefix=None, resource_id=None):
MerakiAPIResource.__init__(self, key, prefix, resource_id)
def check_timespan(self, query):
""" Checks if the query object has the timespan value configured. """
if query is None or query.get("timespan") is None:
raise ValueError("You must set the timespan query value.")
def static_routes(self, static_route_id=None):
""" Returns the Networks Static Routes API Resource. """
self.check_for_resource_id()
return StaticRoutes(self.key, self.endpoint(), static_route_id)
def devices(self, serial=None):
""" Returns the Networks Devices API Resource. """
self.check_for_resource_id()
return Devices(self.key, self.endpoint(), serial)
def ssids(self, ssid_id=None):
""" Returns the Network SSIDs API Resource."""
self.check_for_resource_id()
return SSIDs(self.key, self.endpoint(), ssid_id)
def site_to_site_vpn(self, site_to_site_vpn_id=None):
""" Returns site-to-site VPN settings API Resource. """
self.check_for_resource_id()
return SiteToSiteVPN(self.key, self.endpoint(), site_to_site_vpn_id)
def vlans(self, vlan_id=None):
""" Returns VLANs VPN settings API Resource. """
self.check_for_resource_id()
return VLANs(self.key, self.endpoint(), vlan_id)
def sm(self):
""" Returns Network SM API Resource. """
self.check_for_resource_id()
return SM(self.key, self.endpoint())
def traffic(self, query):
"""
The traffic analysis data for this network. Traffic Analysis with
Hostname Visibility must be enabled on the network.
"""
self.check_for_resource_id()
self.check_timespan(query)
query = clean(query, self.traffic_parameters)
return self.get("/traffic?" + urllib.parse.urlencode(query))
def bind(self, data):
""" Binds template to network. """
self.check_for_resource_id()
data = clean(data, self.bind_parameters)
return self.post("/bind", data)
def unbind(self):
""" Unbind template from network. """
self.check_for_resource_id()
return self.post("/unbind")
def access_policies(self):
""" List the access policies (MS). """
self.check_for_resource_id()
return self.get("/accessPolicies")
def air_marshal(self, query):
""" Air marshal scan results from a network. """
self.check_timespan(query)
self.check_for_resource_id()
query = clean(query, self.air_marshal_parameters)
return self.get("/airMarshal?" + urllib.parse.urlencode(query))
def phone_contacts(self, phone_contact_id=None):
""" List the phone contacts in a network. """
self.check_for_resource_id()
return PhoneContacts(self.key, self.endpoint(), phone_contact_id)
def phone_numbers(self):
""" List all the phone numbers in a network. """
self.check_for_resource_id()
return self.get("/phoneNumbers")
def available_phone_numbers(self):
""" List all the available phone numbers in a network. """
self.check_for_resource_id()
return self.get("/phoneNumbers/available")
def clients(self, clients_id=None):
""" Returns the Clients API Resource. """
self.check_for_resource_id()
return Clients(self.key, self.endpoint(), clients_id)
def events(self, query):
""" Returns the Clients API Resource. """
self.check_for_resource_id()
query = clean(query, self.events_parameters)
return self.get("/events?" + urllib.parse.urlencode(query))
| mit | -4,322,985,330,267,941,400 | 35.376923 | 244 | 0.642842 | false |
bjuvensjo/scripts | vang/misc/basic.py | 1 | 1626 | #!/usr/bin/env python3
from argparse import ArgumentParser
from base64 import encodebytes
from os import environ, name, system
from sys import argv
def get_basic_auth(username, password):
"""Returns basic authentication.
Args:
        username (str): the username
        password (str): the password
Return:
base 64 encoded Authorization header value
>>> get_basic_auth("foo", "bar")
'Basic Zm9vOmJhcg=='
"""
auth = f"{username}:{password}"
return f"Basic {encodebytes(auth.encode()).decode('UTF-8').strip()}"
def get_basic_auth_header(username, password):
return f"Authorization: {get_basic_auth(username, password)}"
def parse_args(args):
parser = ArgumentParser(
description='Prints and place in clipboard basic authentication header')
# parser.add_argument(
# '-u', '--username', help='Username', default=environ['U'])
# parser.add_argument(
# '-p', '--password', help='Password', default=environ['P'])
parser.add_argument(
'-u', '--username', help='Username')
parser.add_argument(
'-p', '--password', help='Password')
return parser.parse_args(args)
def main(username, password):
basic_auth_header = get_basic_auth_header(username, password)
if name == 'posix':
system(f"echo '{basic_auth_header}\c' | pbcopy")
print(f"'{basic_auth_header}' copied to clipboard")
else:
print(basic_auth_header)
if __name__ == '__main__': # pragma: no cover
main(**parse_args(argv[1:]).__dict__)
| apache-2.0 | 8,338,868,900,330,906,000 | 29.111111 | 80 | 0.641451 | false |
qisanstudio/qsapp-express | src/express/panel/account.py | 1 | 2438 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from flask import request, url_for, flash, redirect
from flask.ext.admin import expose
from flask.ext.admin.babel import gettext
from flask.ext.admin.actions import action
from studio.core.engines import db
from express.models.account import (RoleModel, PrivilegeModel,
AccountModel, EmailModel)
from express.panel.base import BaseView
class Role(BaseView):
perm = 'role'
column_list = ['id', 'title']
column_default_sort = ('id', True)
def __init__(self, **kwargs):
super(Role, self).__init__(RoleModel, db.session, **kwargs)
def create_form(self, obj=None):
form = super(Role, self).create_form(obj=obj)
delattr(form, 'accounts')
return form
def edit_form(self, obj=None):
form = super(Role, self).edit_form(obj=obj)
delattr(form, 'accounts')
return form
class Privilege(BaseView):
perm = 'role'
column_list = ['id', 'code', 'description', 'date_created']
column_default_sort = ('date_created', True)
def __init__(self, **kwargs):
super(Privilege, self).__init__(PrivilegeModel, db.session, **kwargs)
def create_form(self, obj=None):
form = super(Privilege, self).create_form(obj=obj)
delattr(form, 'date_created')
return form
def edit_form(self, obj=None):
form = super(Privilege, self).edit_form(obj=obj)
delattr(form, 'date_created')
return form
class Account(BaseView):
perm = 'account'
can_create = False
column_list = ['uid', 'nickname', 'date_created']
column_default_sort = ('date_created', True)
def __init__(self, **kwargs):
super(Account, self).__init__(AccountModel, db.session, **kwargs)
def edit_form(self, obj=None):
form = super(Account, self).edit_form(obj=obj)
delattr(form, 'addresses')
delattr(form, 'bills')
delattr(form, 'date_created')
return form
class Email(BaseView):
perm = 'account'
can_create = False
can_edit = False
column_list = ['uid', 'email', 'date_last_signed_in', 'date_created']
column_default_sort = ('date_last_signed_in', True)
def __init__(self, **kwargs):
super(Email, self).__init__(EmailModel, db.session, **kwargs)
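# Hedged registration sketch (the flask-admin instance below is an assumption;
# this module only defines the views):
#     admin.add_view(Role(name='Roles'))
#     admin.add_view(Account(name='Accounts'))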
| mit | -9,158,380,060,821,169,000 | 26.682353 | 77 | 0.597621 | false |
tcpcloud/openvstorage | webapps/api/backend/views/tasks.py | 1 | 2521 | # Copyright 2014 Open vStorage NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module for working with celery tasks
"""
from rest_framework import status, viewsets
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from rest_framework.decorators import link
from backend.decorators import required_roles, load, log
from celery.task.control import inspect
from ovs.celery_run import celery
class TaskViewSet(viewsets.ViewSet):
"""
Information about celery tasks
"""
permission_classes = (IsAuthenticated,)
prefix = r'tasks'
base_name = 'tasks'
@log()
@required_roles(['read'])
@load()
def list(self):
"""
Overview of active, scheduled, reserved and revoked tasks
"""
inspector = inspect()
data = {'active' : inspector.active(),
'scheduled': inspector.scheduled(),
'reserved' : inspector.reserved(),
'revoked' : inspector.revoked()}
return Response(data, status=status.HTTP_200_OK)
@log()
@required_roles(['read'])
@load()
def retrieve(self, pk):
"""
Load information about a given task
"""
result = celery.AsyncResult(pk)
if result.successful():
result_data = result.result
else:
result_data = str(result.result) if result.result is not None else None
data = {'id' : result.id,
'status' : result.status,
'successful': result.successful(),
'failed' : result.failed(),
'ready' : result.ready(),
'result' : result_data}
return Response(data, status=status.HTTP_200_OK)
@link()
@log()
@required_roles(['read'])
@load()
def get(self, pk):
"""
Gets a given task's result
"""
result = celery.AsyncResult(pk)
return Response(result.get(), status=status.HTTP_200_OK)
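# Hedged REST usage sketch derived from the routes above (paths are illustrative):
#     GET /tasks/           -> active/scheduled/reserved/revoked overview
#     GET /tasks/<id>/      -> status/result summary of one task
#     GET /tasks/<id>/get/  -> the raw task result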
| apache-2.0 | -2,150,323,591,975,315,000 | 30.911392 | 83 | 0.623165 | false |
slipcon/gitlint | gitlint/cli.py | 1 | 7127 | import os
import sys
import click
import gitlint
from gitlint.lint import GitLinter
from gitlint.config import LintConfig, LintConfigError, LintConfigGenerator
from gitlint.git import GitContext, GitContextError
from gitlint import hooks
DEFAULT_CONFIG_FILE = ".gitlint"
# Error codes
MAX_VIOLATION_ERROR_CODE = 252
USAGE_ERROR_CODE = 253
GIT_CONTEXT_ERROR_CODE = 254
CONFIG_ERROR_CODE = 255
# Since we use the return code to denote the amount of errors, we need to change the default click usage error code
click.UsageError.exit_code = USAGE_ERROR_CODE
def load_config_from_path(ctx, config_path=None):
""" Tries loading the config from the given path. If no path is specified, the default config path
is tried, and if no file exists at the that location, None is returned. """
config = None
try:
if config_path:
config = LintConfig.load_from_file(config_path)
elif os.path.exists(DEFAULT_CONFIG_FILE):
config = LintConfig.load_from_file(DEFAULT_CONFIG_FILE)
except LintConfigError as e:
click.echo("Error during config file parsing: {0}".format(str(e)))
ctx.exit(CONFIG_ERROR_CODE)
return config
def get_config(ctx, target, config_path, c, ignore, verbose, silent, debug):
""" Creates a LintConfig object based on a set of commandline parameters. """
try:
# Config precedence:
# First, load default config or config from configfile
lint_config = load_config_from_path(ctx, config_path)
# default to default configuration when no config file was loaded
if lint_config:
if debug:
click.echo("Using config from {0}".format(lint_config.config_path))
else:
lint_config = LintConfig()
# Then process any commandline configuration flags
lint_config.apply_config_options(c)
# Finally, overwrite with any convenience commandline flags
lint_config.apply_on_csv_string(ignore, lint_config.disable_rule)
if silent:
lint_config.verbosity = 0
elif verbose > 0:
lint_config.verbosity = verbose
if debug:
lint_config.debug = True
# Set target
lint_config.target = target
return lint_config
except LintConfigError as e:
click.echo("Config Error: {0}".format(str(e)))
ctx.exit(CONFIG_ERROR_CODE) # return CONFIG_ERROR_CODE on config error
@click.group(invoke_without_command=True, epilog="When no COMMAND is specified, gitlint defaults to 'gitlint lint'.")
@click.option('--target', type=click.Path(exists=True, resolve_path=True, file_okay=False, readable=True),
default=os.getcwd(), help="Path of the target git repository. [default: current working directory]")
@click.option('-C', '--config', type=click.Path(exists=True, dir_okay=False, readable=True, resolve_path=True),
help="Config file location [default: {0}]".format(DEFAULT_CONFIG_FILE))
@click.option('-c', multiple=True,
help="Config flags in format <rule>.<option>=<value> (e.g.: -c T1.line-length=80). " +
"Flag can be used multiple times to set multiple config values.") # pylint: disable=bad-continuation
@click.option('--ignore', default="", help="Ignore rules (comma-separated by id or name).")
@click.option('-v', '--verbose', count=True, default=0,
help="Verbosity, more v's for more verbose output (e.g.: -v, -vv, -vvv). [default: -vvv]", )
@click.option('-s', '--silent', help="Silent mode (no output). Takes precedence over -v, -vv, -vvv.", is_flag=True)
@click.option('-d', '--debug', help="Enable debugging output.", is_flag=True)
@click.version_option(version=gitlint.__version__)
@click.pass_context
def cli(ctx, target, config, c, ignore, verbose, silent, debug):
""" Git lint tool, checks your git commit messages for styling issues """
# Get the lint config from the commandline parameters and
# store it in the context (click allows storing an arbitrary object in ctx.obj).
lint_config = get_config(ctx, target, config, c, ignore, verbose, silent, debug)
ctx.obj = lint_config
# If no subcommand is specified, then just lint
if ctx.invoked_subcommand is None:
ctx.invoke(lint)
@cli.command("lint")
@click.pass_context
def lint(ctx):
""" Lints a git repository [default command] """
lint_config = ctx.obj
try:
if sys.stdin.isatty():
gitcontext = GitContext.from_local_repository(lint_config.target)
else:
gitcontext = GitContext.from_commit_msg(sys.stdin.read())
except GitContextError as e:
click.echo(str(e))
ctx.exit(GIT_CONTEXT_ERROR_CODE)
last_commit = gitcontext.commits[-1]
# Apply an additional config that is specified in the last commit message
lint_config.apply_config_from_commit(last_commit)
# Let's get linting!
linter = GitLinter(lint_config)
violations = linter.lint(last_commit, gitcontext)
linter.print_violations(violations)
exit_code = min(MAX_VIOLATION_ERROR_CODE, len(violations))
ctx.exit(exit_code)
@cli.command("install-hook")
@click.pass_context
def install_hook(ctx):
""" Install gitlint as a git commit-msg hook. """
try:
lint_config = ctx.obj
hooks.GitHookInstaller.install_commit_msg_hook(lint_config)
# declare victory :-)
hook_path = hooks.GitHookInstaller.commit_msg_hook_path(lint_config)
click.echo("Successfully installed gitlint commit-msg hook in {0}".format(hook_path))
ctx.exit(0)
except hooks.GitHookInstallerError as e:
click.echo(str(e), err=True)
ctx.exit(GIT_CONTEXT_ERROR_CODE)
@cli.command("uninstall-hook")
@click.pass_context
def uninstall_hook(ctx):
""" Uninstall gitlint commit-msg hook. """
try:
lint_config = ctx.obj
hooks.GitHookInstaller.uninstall_commit_msg_hook(lint_config)
# declare victory :-)
hook_path = hooks.GitHookInstaller.commit_msg_hook_path(lint_config)
click.echo("Successfully uninstalled gitlint commit-msg hook from {0}".format(hook_path))
ctx.exit(0)
except hooks.GitHookInstallerError as e:
click.echo(str(e), err=True)
ctx.exit(GIT_CONTEXT_ERROR_CODE)
@cli.command("generate-config")
@click.pass_context
def generate_config(ctx):
""" Generates a sample gitlint config file. """
path = click.prompt('Please specify a location for the sample gitlint config file', default=DEFAULT_CONFIG_FILE)
path = os.path.abspath(path)
dir_name = os.path.dirname(path)
if not os.path.exists(dir_name):
click.echo("Error: Directory '{0}' does not exist.".format(dir_name), err=True)
ctx.exit(USAGE_ERROR_CODE)
elif os.path.exists(path):
click.echo("Error: File \"{0}\" already exists.".format(path), err=True)
ctx.exit(USAGE_ERROR_CODE)
LintConfigGenerator.generate_config(path)
click.echo("Successfully generated {0}".format(path))
ctx.exit(0)
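# Hedged CLI usage sketch built from the options declared above (paths are
# illustrative):
#     gitlint --target /path/to/repo -c T1.line-length=80 lint
#     gitlint install-hook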
if __name__ == "__main__":
cli() # pylint: disable=no-value-for-parameter
| mit | -7,511,260,571,122,820,000 | 38.815642 | 120 | 0.672232 | false |
azunite/pdfium_ch | testing/tools/run_javascript_tests.py | 1 | 4010 | #!/usr/bin/env python
# Copyright 2015 The PDFium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import os
import re
import subprocess
import sys
# Nomenclature:
# x_root - "x"
# x_filename - "x.ext"
# x_path - "path/to/a/b/c/x.ext"
# c_dir - "path/to/a/b/c"
def generate_and_test(input_filename, source_dir, working_dir,
fixup_path, pdfium_test_path, text_diff_path):
input_root, _ = os.path.splitext(input_filename)
input_path = os.path.join(source_dir, input_root + '.in')
pdf_path = os.path.join(working_dir, input_root + '.pdf')
txt_path = os.path.join(working_dir, input_root + '.txt')
expected_path = os.path.join(source_dir, input_root + '_expected.txt')
try:
sys.stdout.flush()
subprocess.check_call(
[sys.executable, fixup_path, '--output-dir=' + working_dir, input_path])
with open(txt_path, 'w') as outfile:
subprocess.check_call([pdfium_test_path, pdf_path], stdout=outfile)
subprocess.check_call(
[sys.executable, text_diff_path, expected_path, txt_path])
except subprocess.CalledProcessError as e:
print "FAILURE: " + input_filename + "; " + str(e)
return False
return True
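# Hedged walk-through of generate_and_test for an input "basic.in" (the name is
# illustrative): fixup_pdf_template.py renders basic.pdf, pdfium_test dumps it
# to basic.txt, and text_diff.py compares that against basic_expected.txt.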
def main():
parser = optparse.OptionParser()
parser.add_option('--build-dir', default=os.path.join('out', 'Debug'),
help='relative path from the base source directory')
options, args = parser.parse_args()
# Expect |my_dir| to be .../pdfium/testing/tools.
my_dir = os.path.dirname(os.path.realpath(__file__))
testing_dir = os.path.dirname(my_dir)
pdfium_dir = os.path.dirname(testing_dir)
if (os.path.basename(my_dir) != 'tools' or
os.path.basename(testing_dir) != 'testing'):
print 'Confused, can not find pdfium root directory, aborting.'
return 1
# Other scripts are found in the same directory as this one.
fixup_path = os.path.join(my_dir, 'fixup_pdf_template.py')
text_diff_path = os.path.join(my_dir, 'text_diff.py')
# test files are in .../pdfium/testing/resources/javascript.
source_dir = os.path.join(testing_dir, 'resources', 'javascript')
# Find path to build directory. This depends on whether this is a
# standalone build vs. a build as part of a chromium checkout. For
# standalone, we expect a path like .../pdfium/out/Debug, but for
# chromium, we expect a path like .../src/out/Debug two levels
# higher (to skip over the third_party/pdfium path component under
# which chromium sticks pdfium).
base_dir = pdfium_dir
one_up_dir = os.path.dirname(base_dir)
two_up_dir = os.path.dirname(one_up_dir)
if (os.path.basename(two_up_dir) == 'src' and
os.path.basename(one_up_dir) == 'third_party'):
base_dir = two_up_dir
build_dir = os.path.join(base_dir, options.build_dir)
# Compiled binaries are found under the build path.
pdfium_test_path = os.path.join(build_dir, 'pdfium_test')
if sys.platform.startswith('win'):
pdfium_test_path = pdfium_test_path + '.exe'
# TODO(tsepez): Mac may require special handling here.
# Place generated files under the build directory, not source directory.
gen_dir = os.path.join(build_dir, 'gen', 'pdfium')
working_dir = os.path.join(gen_dir, 'testing', 'javascript')
if not os.path.exists(working_dir):
os.makedirs(working_dir)
failures = []
input_file_re = re.compile('^[a-zA-Z0-9_.]+[.]in$')
for input_filename in os.listdir(source_dir):
if input_file_re.match(input_filename):
input_path = os.path.join(source_dir, input_filename)
if os.path.isfile(input_path):
if not generate_and_test(input_filename, source_dir, working_dir,
fixup_path, pdfium_test_path, text_diff_path):
failures.append(input_path)
if failures:
print '\n\nSummary of Failures:'
for failure in failures:
print failure
return 1
return 0
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause | -8,676,647,283,513,023,000 | 37.190476 | 80 | 0.667332 | false |
gregunz/ada2017 | project/src/fetch_source_country.py | 1 | 11025 | # -*- coding: utf-8 -*-
import csv
import os
import re
import pandas as pd
import requests
from bs4 import BeautifulSoup
def get_all_newspapers_to_country_dict(v2=True):
"""Get the country associated to each newspapers url in a dict following the format: {'Clean URL' : 'Country name'}
This function is not sufficient on its own to get the country of every newspaper, please see get_countries_for_dataframe in data_cleaning.py
"""
def clean_url(url):
""" This function clean a url.
For example https://example.com will be returned as example.com
Keyword arguments:
url -- The url
"""
url_pair = re.findall(r'\b(?!www\.)([a-zA-Z0-9-]+(\.[a-z]+)+)', url.lower())
if (url_pair == []): # If it is not a url
return url
else:
return url_pair[0][0]
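    # Hedged examples of the regex above:
    #     clean_url("https://example.com/page") -> "example.com"
    #     clean_url("not a url")                -> "not a url"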
if v2:
columns = ['Country name', 'Newspaper Name', 'Newspaper Url']
df = pd.DataFrame(columns=columns)
if not os.path.isfile('../data/locations/clean_url_to_country_v2.csv'):
if not os.path.isfile('../data/locations/brute_newspapers_to_country_v2.csv'):
base_url = 'http://www.abyznewslinks.com/'
def get_newspapers(url, country_name):
df = pd.DataFrame(columns=columns)
r = requests.get(url)
# Find newspaper links
soup = BeautifulSoup(r.text, 'html.parser')
divs = soup.find_all('div')[3:]
for div in divs:
news_links = div.find_all('a')
for a_news in news_links:
a_news_link = a_news.get('href')
if a_news_link is not None:
# Check whether it links to a page of the website with more newspapers instead of a newspaper webstes
if a_news_link[-3:] == 'htm' and a_news_link[:3] != 'htt' and a_news_link[:3] != 'www':
df = df.append(get_newspapers(base_url + a_news.get('href'), country_name))
else:
newspaper_name = a_news.text
newspaper_url = a_news.get('href')
news_df = pd.DataFrame([[country_name, newspaper_name, newspaper_url]],
columns=columns)
df = df.append(news_df)
return df
r = requests.get(base_url + 'allco.htm')
soup = BeautifulSoup(r.text, 'html.parser')
countries = soup.find_all('table')[5].find_all('a')
for a in countries:
# Get specific page
country_name = a.text
df = df.append(get_newspapers(base_url + a.get('href'), country_name))
df.to_csv('../data/locations/brute_newspapers_to_country_v2.csv', index=False)
df = df.drop_duplicates(subset=['Newspaper Url'], keep='first')
df.to_csv('../data/locations/no_duplicate_brute_newspapers_to_country_v2.csv', index=False)
else:
            if not os.path.isfile('../data/locations/no_duplicate_brute_newspapers_to_country_v2.csv'):
                df = pd.read_csv('../data/locations/brute_newspapers_to_country_v2.csv')
                df = df.drop_duplicates(subset=['Newspaper Url'], keep='first')
                df.to_csv('../data/locations/no_duplicate_brute_newspapers_to_country_v2.csv', index=False)
            else:
                df = pd.read_csv('../data/locations/no_duplicate_brute_newspapers_to_country_v2.csv')
df['Clean URL'] = df['Newspaper Url'].apply(lambda x: clean_url(x))
df.to_csv('../data/locations/clean_url_to_country_v2.csv', index=False)
else:
df = pd.read_csv('../data/locations/clean_url_to_country_v2.csv')
return df[["Clean URL", "Country name"]].set_index("Clean URL").to_dict().get("Country name")
else:
# If the file does not exist, fetch everything (takes ~ 16 hours)
if not os.path.isfile('../data/locations/clean_url_to_country.csv'):
# First get newspaper per country as referenced in https://www.thepaperboy.com/newspapers-by-country.cfm
if not os.path.isfile('../data/locations/countries_news.csv'):
# Get country specific page and get all the newspapers
def get_country_newspaper(country_name, url):
df = pd.DataFrame(columns=columns)
r = requests.get(url)
soup = BeautifulSoup(r.text, 'html.parser')
newspapers = soup.find_all('td', align='left')[1:]
for n in newspapers:
newspaper_name = n.find('strong').text
# Get the newspaper specific page
r = requests.get(base_url + n.find('a').get('href'))
soup = BeautifulSoup(r.text, 'html.parser')
n_url = soup.find_all('h1')
newspaper_url = n_url[0].find('a').get('href') # Newspaper url
news_df = pd.DataFrame([[country_name, newspaper_name, newspaper_url]], columns=columns)
df = df.append(news_df)
return df
r = requests.get('https://www.thepaperboy.com/newspapers-by-country.cfm')
soup = BeautifulSoup(r.text, 'html.parser')
base_url = 'https://www.thepaperboy.com'
country_links = soup.find_all('a', class_='mediumlink')
columns = ['Country name', 'Newspaper Name', 'Newspaper Url']
df = pd.DataFrame(columns=columns)
for a in country_links:
country_name = a.text.split(sep='(')[0]
if country_name[0] == ' ':
country_name = country_name[1:]
if country_name[-1] == ' ':
country_name = country_name[:-1]
# United states has a different structure
if country_name != 'United States':
df = df.append(get_country_newspaper(country_name, base_url + a.get('href')))
else:
r = requests.get('https://www.thepaperboy.com/united-states/newspapers/country.cfm')
soup = BeautifulSoup(r.text, 'html.parser')
us_states = soup.find_all('a', class_='mediumlink')
for state in us_states:
df = df.append(get_country_newspaper(country_name, base_url + state.get('href')))
df.to_csv('../data/locations/countries_news.csv')
original_websites_df = df
else:
original_websites_df = pd.read_csv('../data/locations/countries_news.csv')
def get_new_url(base_url):
""" This function try to get a redirection url and return it otherwise return the base url
"""
try:
response = requests.get(base_url, timeout=5)
if response.history:
return response.url
else:
return base_url
except:
return base_url
# As the website is not up to date, we get the new address of the newspapers which changed their address over time (But keep both for older news!). Basically we follow the previous link and get if there is a redirection
if not os.path.isfile('../data/locations/revisited_website_url.csv'):
for _, row in original_websites_df.iterrows():
new_url = get_new_url(row['Newspaper Url'])
df = pd.DataFrame({'Country name': [row['Country name']], 'New URL': [new_url]})
with open('../data/locations/revisited_website_url.csv', 'a') as f:
df.to_csv(f, header=False)
redirected_websites_df = pd.read_csv('../data/locations/revisited_website_url.csv',
names=['Country name', 'New URL'])
original_websites_df['Clean URL'] = original_websites_df['Newspaper Url'].apply(lambda x: clean_url(x))
original_websites_df.to_csv('../data/locations/original_clean_url_to_country.csv', index=False)
redirected_websites_df['Clean URL'] = redirected_websites_df['New URL'].apply(lambda x: clean_url(x))
redirected_websites_df.to_csv('../data/locations/redirected_clean_url_to_country.csv', index=False)
websites_df = original_websites_df[["Clean URL", "Country name"]].append(
redirected_websites_df[["Clean URL", "Country name"]])
websites_df.to_csv('../data/locations/clean_url_to_country.csv', index=False)
return websites_df[["Clean URL", "Country name"]].set_index("Clean URL").to_dict().get("Country name")
else:
return pd.read_csv('../data/locations/clean_url_to_country.csv')[["Clean URL", "Country name"]].set_index(
"Clean URL").to_dict().get("Country name")
def get_tld_to_country_dict():
"""Get the country associated to a top level domain in the formt: {'TLD' : 'Country name'}
This function is not sufficient on its own to get the country of every newspaper, please see get_countries_for_dataframe in data_cleaning.py
"""
if not os.path.isfile('../data/locations/top_level_domain_to_country.csv'):
r = requests.get('https://raw.githubusercontent.com/mledoze/countries/master/countries.json')
# Get the mapping
countries = [(c['name']['common'], c['tld']) for c in r.json()]
tld_to_country = {}
for c in countries:
for domain in c[1]:
if not domain in tld_to_country:
tld_to_country[domain] = c[0]
tld_to_country['.us'] = 'United States' # hardcoded otherwise it's not the good value
with open('../data/locations/top_level_domain_to_country.csv', 'w', encoding='utf-8') as csv_file:
writer = csv.writer(csv_file)
writer.writerow(['tld', 'Country name'])
for key, value in tld_to_country.items():
writer.writerow([key, value])
return pd.read_csv('../data/locations/top_level_domain_to_country.csv').set_index('tld').to_dict().get(
'Country name')
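# Hedged usage sketch (most values depend on the fetched mapping; '.us' is
# hardcoded above):
#     tld_map = get_tld_to_country_dict()
#     tld_map.get('.us')  # -> 'United States'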
| mit | 7,856,140,663,845,053,000 | 49.27907 | 231 | 0.524444 | false |
Asana/boto | tests/unit/auth/test_sigv4.py | 4 | 23947 | # Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import copy
import pickle
import os
from tests.compat import unittest, mock
from tests.unit import MockServiceWithConfigTestCase
from boto.auth import HmacAuthV4Handler
from boto.auth import S3HmacAuthV4Handler
from boto.auth import detect_potential_s3sigv4
from boto.auth import detect_potential_sigv4
from boto.connection import HTTPRequest
from boto.provider import Provider
from boto.regioninfo import RegionInfo
class TestSigV4Handler(unittest.TestCase):
def setUp(self):
self.provider = mock.Mock()
self.provider.access_key = 'access_key'
self.provider.secret_key = 'secret_key'
self.request = HTTPRequest(
'POST', 'https', 'glacier.us-east-1.amazonaws.com', 443,
'/-/vaults/foo/archives', None, {},
{'x-amz-glacier-version': '2012-06-01'}, '')
def test_not_adding_empty_qs(self):
self.provider.security_token = None
auth = HmacAuthV4Handler('glacier.us-east-1.amazonaws.com', mock.Mock(), self.provider)
req = copy.copy(self.request)
auth.add_auth(req)
self.assertEqual(req.path, '/-/vaults/foo/archives')
def test_inner_whitespace_is_collapsed(self):
auth = HmacAuthV4Handler('glacier.us-east-1.amazonaws.com',
mock.Mock(), self.provider)
self.request.headers['x-amz-archive-description'] = 'two spaces'
self.request.headers['x-amz-quoted-string'] = ' "a b c" '
headers = auth.headers_to_sign(self.request)
self.assertEqual(headers, {'Host': 'glacier.us-east-1.amazonaws.com',
'x-amz-archive-description': 'two spaces',
'x-amz-glacier-version': '2012-06-01',
'x-amz-quoted-string': ' "a b c" '})
# Note the single space between the "two spaces".
self.assertEqual(auth.canonical_headers(headers),
'host:glacier.us-east-1.amazonaws.com\n'
'x-amz-archive-description:two spaces\n'
'x-amz-glacier-version:2012-06-01\n'
'x-amz-quoted-string:"a b c"')
def test_canonical_query_string(self):
auth = HmacAuthV4Handler('glacier.us-east-1.amazonaws.com',
mock.Mock(), self.provider)
request = HTTPRequest(
'GET', 'https', 'glacier.us-east-1.amazonaws.com', 443,
'/-/vaults/foo/archives', None, {},
{'x-amz-glacier-version': '2012-06-01'}, '')
request.params['Foo.1'] = 'aaa'
request.params['Foo.10'] = 'zzz'
query_string = auth.canonical_query_string(request)
self.assertEqual(query_string, 'Foo.1=aaa&Foo.10=zzz')
def test_query_string(self):
auth = HmacAuthV4Handler('sns.us-east-1.amazonaws.com',
mock.Mock(), self.provider)
params = {
'Message': u'We \u2665 utf-8'.encode('utf-8'),
}
request = HTTPRequest(
'POST', 'https', 'sns.us-east-1.amazonaws.com', 443,
'/', None, params, {}, '')
query_string = auth.query_string(request)
self.assertEqual(query_string, 'Message=We%20%E2%99%A5%20utf-8')
def test_canonical_uri(self):
auth = HmacAuthV4Handler('glacier.us-east-1.amazonaws.com',
mock.Mock(), self.provider)
request = HTTPRequest(
'GET', 'https', 'glacier.us-east-1.amazonaws.com', 443,
'x/./././x .html', None, {},
{'x-amz-glacier-version': '2012-06-01'}, '')
canonical_uri = auth.canonical_uri(request)
# This should be both normalized & urlencoded.
self.assertEqual(canonical_uri, 'x/x%20.html')
auth = HmacAuthV4Handler('glacier.us-east-1.amazonaws.com',
mock.Mock(), self.provider)
request = HTTPRequest(
'GET', 'https', 'glacier.us-east-1.amazonaws.com', 443,
'x/./././x/html/', None, {},
{'x-amz-glacier-version': '2012-06-01'}, '')
canonical_uri = auth.canonical_uri(request)
# Trailing slashes should be preserved.
self.assertEqual(canonical_uri, 'x/x/html/')
request = HTTPRequest(
'GET', 'https', 'glacier.us-east-1.amazonaws.com', 443,
'/', None, {},
{'x-amz-glacier-version': '2012-06-01'}, '')
canonical_uri = auth.canonical_uri(request)
# There should not be two-slashes.
self.assertEqual(canonical_uri, '/')
# Make sure Windows-style slashes are converted properly
request = HTTPRequest(
'GET', 'https', 'glacier.us-east-1.amazonaws.com', 443,
'\\x\\x.html', None, {},
{'x-amz-glacier-version': '2012-06-01'}, '')
canonical_uri = auth.canonical_uri(request)
self.assertEqual(canonical_uri, '/x/x.html')
def test_credential_scope(self):
# test the AWS standard regions IAM endpoint
auth = HmacAuthV4Handler('iam.amazonaws.com',
mock.Mock(), self.provider)
request = HTTPRequest(
'POST', 'https', 'iam.amazonaws.com', 443,
'/', '/',
{'Action': 'ListAccountAliases', 'Version': '2010-05-08'},
{
'Content-Length': '44',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'X-Amz-Date': '20130808T013210Z'
},
'Action=ListAccountAliases&Version=2010-05-08')
credential_scope = auth.credential_scope(request)
region_name = credential_scope.split('/')[1]
self.assertEqual(region_name, 'us-east-1')
# test the AWS GovCloud region IAM endpoint
auth = HmacAuthV4Handler('iam.us-gov.amazonaws.com',
mock.Mock(), self.provider)
request = HTTPRequest(
'POST', 'https', 'iam.us-gov.amazonaws.com', 443,
'/', '/',
{'Action': 'ListAccountAliases', 'Version': '2010-05-08'},
{
'Content-Length': '44',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'X-Amz-Date': '20130808T013210Z'
},
'Action=ListAccountAliases&Version=2010-05-08')
credential_scope = auth.credential_scope(request)
region_name = credential_scope.split('/')[1]
self.assertEqual(region_name, 'us-gov-west-1')
# iam.us-west-1.amazonaws.com does not exist however this
# covers the remaining region_name control structure for a
# different region name
auth = HmacAuthV4Handler('iam.us-west-1.amazonaws.com',
mock.Mock(), self.provider)
request = HTTPRequest(
'POST', 'https', 'iam.us-west-1.amazonaws.com', 443,
'/', '/',
{'Action': 'ListAccountAliases', 'Version': '2010-05-08'},
{
'Content-Length': '44',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'X-Amz-Date': '20130808T013210Z'
},
'Action=ListAccountAliases&Version=2010-05-08')
credential_scope = auth.credential_scope(request)
region_name = credential_scope.split('/')[1]
self.assertEqual(region_name, 'us-west-1')
# Test connections to custom locations, e.g. localhost:8080
auth = HmacAuthV4Handler('localhost', mock.Mock(), self.provider,
service_name='iam')
request = HTTPRequest(
'POST', 'http', 'localhost', 8080,
'/', '/',
{'Action': 'ListAccountAliases', 'Version': '2010-05-08'},
{
'Content-Length': '44',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'X-Amz-Date': '20130808T013210Z'
},
'Action=ListAccountAliases&Version=2010-05-08')
credential_scope = auth.credential_scope(request)
timestamp, region, service, v = credential_scope.split('/')
self.assertEqual(region, 'localhost')
self.assertEqual(service, 'iam')
def test_headers_to_sign(self):
auth = HmacAuthV4Handler('glacier.us-east-1.amazonaws.com',
mock.Mock(), self.provider)
request = HTTPRequest(
'GET', 'http', 'glacier.us-east-1.amazonaws.com', 80,
'x/./././x .html', None, {},
{'x-amz-glacier-version': '2012-06-01'}, '')
headers = auth.headers_to_sign(request)
# Port 80 & not secure excludes the port.
self.assertEqual(headers['Host'], 'glacier.us-east-1.amazonaws.com')
request = HTTPRequest(
'GET', 'https', 'glacier.us-east-1.amazonaws.com', 443,
'x/./././x .html', None, {},
{'x-amz-glacier-version': '2012-06-01'}, '')
headers = auth.headers_to_sign(request)
# SSL port excludes the port.
self.assertEqual(headers['Host'], 'glacier.us-east-1.amazonaws.com')
request = HTTPRequest(
'GET', 'https', 'glacier.us-east-1.amazonaws.com', 8080,
'x/./././x .html', None, {},
{'x-amz-glacier-version': '2012-06-01'}, '')
headers = auth.headers_to_sign(request)
# URL should include port.
self.assertEqual(headers['Host'], 'glacier.us-east-1.amazonaws.com:8080')
def test_region_and_service_can_be_overriden(self):
auth = HmacAuthV4Handler('queue.amazonaws.com',
mock.Mock(), self.provider)
self.request.headers['X-Amz-Date'] = '20121121000000'
auth.region_name = 'us-west-2'
auth.service_name = 'sqs'
scope = auth.credential_scope(self.request)
self.assertEqual(scope, '20121121/us-west-2/sqs/aws4_request')
def test_pickle_works(self):
provider = Provider('aws', access_key='access_key',
secret_key='secret_key')
auth = HmacAuthV4Handler('queue.amazonaws.com', None, provider)
# Pickle it!
pickled = pickle.dumps(auth)
# Now restore it
auth2 = pickle.loads(pickled)
self.assertEqual(auth.host, auth2.host)
def test_bytes_header(self):
auth = HmacAuthV4Handler('glacier.us-east-1.amazonaws.com',
mock.Mock(), self.provider)
request = HTTPRequest(
'GET', 'http', 'glacier.us-east-1.amazonaws.com', 80,
'x/./././x .html', None, {},
{'x-amz-glacier-version': '2012-06-01', 'x-amz-hash': b'f00'}, '')
canonical = auth.canonical_request(request)
self.assertIn('f00', canonical)
class TestS3HmacAuthV4Handler(unittest.TestCase):
def setUp(self):
self.provider = mock.Mock()
self.provider.access_key = 'access_key'
self.provider.secret_key = 'secret_key'
self.provider.security_token = 'sekret_tokens'
self.request = HTTPRequest(
'GET', 'https', 's3-us-west-2.amazonaws.com', 443,
'/awesome-bucket/?max-keys=0', None, {},
{}, ''
)
self.awesome_bucket_request = HTTPRequest(
method='GET',
protocol='https',
host='awesome-bucket.s3-us-west-2.amazonaws.com',
port=443,
path='/',
auth_path=None,
params={
'max-keys': 0,
},
headers={
'User-Agent': 'Boto',
'X-AMZ-Content-sha256': 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855',
'X-AMZ-Date': '20130605T193245Z',
},
body=''
)
self.auth = S3HmacAuthV4Handler(
host='awesome-bucket.s3-us-west-2.amazonaws.com',
config=mock.Mock(),
provider=self.provider,
region_name='s3-us-west-2'
)
def test_clean_region_name(self):
# Untouched.
cleaned = self.auth.clean_region_name('us-west-2')
self.assertEqual(cleaned, 'us-west-2')
# Stripped of the ``s3-`` prefix.
cleaned = self.auth.clean_region_name('s3-us-west-2')
self.assertEqual(cleaned, 'us-west-2')
# Untouched (classic).
cleaned = self.auth.clean_region_name('s3.amazonaws.com')
self.assertEqual(cleaned, 's3.amazonaws.com')
# Untouched.
cleaned = self.auth.clean_region_name('something-s3-us-west-2')
self.assertEqual(cleaned, 'something-s3-us-west-2')
def test_region_stripping(self):
auth = S3HmacAuthV4Handler(
host='s3-us-west-2.amazonaws.com',
config=mock.Mock(),
provider=self.provider
)
self.assertEqual(auth.region_name, None)
# What we wish we got.
auth = S3HmacAuthV4Handler(
host='s3-us-west-2.amazonaws.com',
config=mock.Mock(),
provider=self.provider,
region_name='us-west-2'
)
self.assertEqual(auth.region_name, 'us-west-2')
# What we actually get (i.e. ``s3-us-west-2``).
self.assertEqual(self.auth.region_name, 'us-west-2')
def test_determine_region_name(self):
name = self.auth.determine_region_name('s3-us-west-2.amazonaws.com')
self.assertEqual(name, 'us-west-2')
def test_canonical_uri(self):
request = HTTPRequest(
'GET', 'https', 's3-us-west-2.amazonaws.com', 443,
'x/./././x .html', None, {},
{}, ''
)
canonical_uri = self.auth.canonical_uri(request)
# S3 doesn't canonicalize the way other SigV4 services do.
# This just urlencoded, no normalization of the path.
self.assertEqual(canonical_uri, 'x/./././x%20.html')
def test_determine_service_name(self):
# What we wish we got.
name = self.auth.determine_service_name(
's3.us-west-2.amazonaws.com'
)
self.assertEqual(name, 's3')
# What we actually get.
name = self.auth.determine_service_name(
's3-us-west-2.amazonaws.com'
)
self.assertEqual(name, 's3')
# What we wish we got with virtual hosting.
name = self.auth.determine_service_name(
'bucket.s3.us-west-2.amazonaws.com'
)
self.assertEqual(name, 's3')
# What we actually get with virtual hosting.
name = self.auth.determine_service_name(
'bucket.s3-us-west-2.amazonaws.com'
)
self.assertEqual(name, 's3')
def test_add_auth(self):
# The side-effects sideshow.
self.assertFalse('x-amz-content-sha256' in self.request.headers)
self.auth.add_auth(self.request)
self.assertTrue('x-amz-content-sha256' in self.request.headers)
the_sha = self.request.headers['x-amz-content-sha256']
self.assertEqual(
the_sha,
'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
)
def test_host_header(self):
host = self.auth.host_header(
self.awesome_bucket_request.host,
self.awesome_bucket_request
)
self.assertEqual(host, 'awesome-bucket.s3-us-west-2.amazonaws.com')
def test_canonical_query_string(self):
qs = self.auth.canonical_query_string(self.awesome_bucket_request)
self.assertEqual(qs, 'max-keys=0')
def test_correct_handling_of_plus_sign(self):
request = HTTPRequest(
'GET', 'https', 's3-us-west-2.amazonaws.com', 443,
'hello+world.txt', None, {},
{}, ''
)
canonical_uri = self.auth.canonical_uri(request)
# Ensure that things are properly quoted.
self.assertEqual(canonical_uri, 'hello%2Bworld.txt')
request = HTTPRequest(
'GET', 'https', 's3-us-west-2.amazonaws.com', 443,
'hello%2Bworld.txt', None, {},
{}, ''
)
canonical_uri = self.auth.canonical_uri(request)
# Verify double escaping hasn't occurred.
self.assertEqual(canonical_uri, 'hello%2Bworld.txt')
def test_mangle_path_and_params(self):
request = HTTPRequest(
method='GET',
protocol='https',
host='awesome-bucket.s3-us-west-2.amazonaws.com',
port=443,
# LOOK AT THIS PATH. JUST LOOK AT IT.
path='/?delete&max-keys=0',
auth_path=None,
params={
'key': 'why hello there',
# This gets overwritten, to make sure back-compat is maintained.
'max-keys': 1,
},
headers={
'User-Agent': 'Boto',
'X-AMZ-Content-sha256': 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855',
'X-AMZ-Date': '20130605T193245Z',
},
body=''
)
mod_req = self.auth.mangle_path_and_params(request)
self.assertEqual(mod_req.path, '/?delete&max-keys=0')
self.assertEqual(mod_req.auth_path, '/')
self.assertEqual(mod_req.params, {
'max-keys': '0',
'key': 'why hello there',
'delete': ''
})
def test_canonical_request(self):
expected = """GET
/
max-keys=0
host:awesome-bucket.s3-us-west-2.amazonaws.com
user-agent:Boto
x-amz-content-sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
x-amz-date:20130605T193245Z

host;user-agent;x-amz-content-sha256;x-amz-date
e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"""
authed_req = self.auth.canonical_request(self.awesome_bucket_request)
self.assertEqual(authed_req, expected)
# Now the way ``boto.s3`` actually sends data.
request = copy.copy(self.awesome_bucket_request)
request.path = request.auth_path = '/?max-keys=0'
request.params = {}
expected = """GET
/
max-keys=0
host:awesome-bucket.s3-us-west-2.amazonaws.com
user-agent:Boto
x-amz-content-sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
x-amz-date:20130605T193245Z

host;user-agent;x-amz-content-sha256;x-amz-date
e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"""
# Pre-mangle it. In practice, this happens as part of ``add_auth``,
# but that's a side-effect that's hard to test.
request = self.auth.mangle_path_and_params(request)
authed_req = self.auth.canonical_request(request)
self.assertEqual(authed_req, expected)
def test_non_string_headers(self):
self.awesome_bucket_request.headers['Content-Length'] = 8
canonical_headers = self.auth.canonical_headers(
self.awesome_bucket_request.headers)
self.assertEqual(
canonical_headers,
'content-length:8\n'
'user-agent:Boto\n'
'x-amz-content-sha256:e3b0c44298fc1c149afbf4c8996fb92427ae'
'41e4649b934ca495991b7852b855\n'
'x-amz-date:20130605T193245Z'
)
class FakeS3Connection(object):
def __init__(self, *args, **kwargs):
self.host = kwargs.pop('host', None)
@detect_potential_s3sigv4
def _required_auth_capability(self):
return ['nope']
def _mexe(self, *args, **kwargs):
pass
class FakeEC2Connection(object):
def __init__(self, *args, **kwargs):
self.region = kwargs.pop('region', None)
@detect_potential_sigv4
def _required_auth_capability(self):
return ['nope']
def _mexe(self, *args, **kwargs):
pass
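# Hedged sketch of what the fakes above exercise (region objects come from the
# setUp methods below): a sigv4-only endpoint flips _required_auth_capability()
# from ['nope'] to ['hmac-v4'] / ['hmac-v4-s3'] via the detect_potential_*
# decorators.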
class TestS3SigV4OptIn(MockServiceWithConfigTestCase):
connection_class = FakeS3Connection
def test_sigv4_opt_out(self):
# Default is opt-out.
fake = FakeS3Connection(host='s3.amazonaws.com')
self.assertEqual(fake._required_auth_capability(), ['nope'])
def test_sigv4_non_optional(self):
region_groups = ['.cn-north', '.eu-central', '-eu-central']
specific_regions = ['.ap-northeast-2', '-ap-northeast-2']
# Create a connection for a sample region in each of these groups
# and ensure sigv4 is used.
for region in region_groups:
fake = FakeS3Connection(host='s3' + region + '-1.amazonaws.com')
self.assertEqual(
fake._required_auth_capability(), ['hmac-v4-s3'])
# Create a connection from the specific regions and make sure
# that these use sigv4.
for region in specific_regions:
fake = FakeS3Connection(host='s3' + region + '.amazonaws.com')
self.assertEqual(
fake._required_auth_capability(), ['hmac-v4-s3'])
def test_sigv4_opt_in_config(self):
# Opt-in via the config.
self.config = {
's3': {
'use-sigv4': True,
},
}
fake = FakeS3Connection()
self.assertEqual(fake._required_auth_capability(), ['hmac-v4-s3'])
def test_sigv4_opt_in_env(self):
# Opt-in via the ENV.
self.environ['S3_USE_SIGV4'] = True
fake = FakeS3Connection(host='s3.amazonaws.com')
self.assertEqual(fake._required_auth_capability(), ['hmac-v4-s3'])
class TestSigV4OptIn(MockServiceWithConfigTestCase):
connection_class = FakeEC2Connection
def setUp(self):
super(TestSigV4OptIn, self).setUp()
self.standard_region = RegionInfo(
name='us-west-2',
endpoint='ec2.us-west-2.amazonaws.com'
)
self.sigv4_region = RegionInfo(
name='cn-north-1',
endpoint='ec2.cn-north-1.amazonaws.com.cn'
)
def test_sigv4_opt_out(self):
# Default is opt-out.
fake = FakeEC2Connection(region=self.standard_region)
self.assertEqual(fake._required_auth_capability(), ['nope'])
def test_sigv4_non_optional(self):
# Requires SigV4.
fake = FakeEC2Connection(region=self.sigv4_region)
self.assertEqual(fake._required_auth_capability(), ['hmac-v4'])
def test_sigv4_opt_in_config(self):
# Opt-in via the config.
self.config = {
'ec2': {
'use-sigv4': True,
},
}
fake = FakeEC2Connection(region=self.standard_region)
self.assertEqual(fake._required_auth_capability(), ['hmac-v4'])
def test_sigv4_opt_in_env(self):
# Opt-in via the ENV.
self.environ['EC2_USE_SIGV4'] = True
fake = FakeEC2Connection(region=self.standard_region)
self.assertEqual(fake._required_auth_capability(), ['hmac-v4'])
| mit | -4,269,395,807,772,690,400 | 38.516502 | 107 | 0.585543 | false |
ksetyadi/Sahana-Eden | models/assess.py | 1 | 92989 | # -*- coding: utf-8 -*-
""" Assessment - Model
@author: Fran Boon
@author: Dominic König
@author: Michael Howden
This module currently contains 2 types of Assessments
* Flexible Impact Assessments
* Rapid Assessment Tool (from ECB: http://www.ecbproject.org/page/48)
@ToDo Validation similar to shn_sitrep_school_report_onvalidation()
http://bazaar.launchpad.net/~flavour/sahana-eden/trunk/annotate/head:/models/sitrep.py#L99
"""
module = "assess"
if deployment_settings.has_module(module):
# ---------------------------------------------------------------------
# Flexible Impact Assessments
# ---------------------------------------------------------------------
# Assessment
resourcename = "assess"
tablename = "%s_%s" % (module, resourcename)
table = db.define_table(tablename,
Field("datetime", "datetime"),
location_id(),
organisation_id(),
person_id("assessor_person_id"),
comments(),
ireport_id(), # Assessment can be linked to an Incident Report
migrate=migrate, *s3_meta_fields()
)
table.datetime.label = T("Date & Time")
table.datetime.default = request.utcnow
table.assessor_person_id.label = T("Assessor")
if auth.is_logged_in():
table.assessor_person_id.default = shn_get_db_field_value(db = db,
table = "pr_person",
field = "pe_id",
look_up = session.auth.user.person_uuid,
look_up_field = "uuid"
)
assess_id = S3ReusableField("assess_id", table,
requires = IS_NULL_OR(IS_ONE_OF(db, "assess_assess.id", "%(id)s")),
represent = lambda id: id,
label = T("Assessment"),
ondelete = "RESTRICT"
)
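    # Note: assess_id is a field factory, not a Field -- calling assess_id()
    # inside a define_table below stamps out a pre-configured foreign key
    # referencing assess_assess.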
# CRUD strings
ADD_ASSESSMENT = T("Add Assessment")
LIST_ASSESSMENTS = T("List Assessments")
s3.crud_strings[tablename] = Storage(
title_create = ADD_ASSESSMENT,
title_display = T("Assessment Details"),
title_list = LIST_ASSESSMENTS,
title_update = T("Edit Assessment"),
title_search = T("Search Assessments"),
subtitle_create = T("Add New Assessment"),
subtitle_list = T("Assessments"),
label_list_button = LIST_ASSESSMENTS,
label_create_button = ADD_ASSESSMENT,
label_delete_button = T("Delete Assessment"),
msg_record_created = T("Assessment added"),
msg_record_modified = T("Assessment updated"),
msg_record_deleted = T("Assessment deleted"),
msg_list_empty = T("No Assessments currently registered"))
# assess_assess as component of org_organisation
s3xrc.model.add_component(module, resourcename,
multiple=True,
joinby=dict(org_organisation="organisation_id")
)
#==============================================================================
# Baseline Type
resourcename = "baseline_type"
tablename = "%s_%s" % (module, resourcename)
table = db.define_table(tablename,
Field("name", length=128, notnull=True, unique=True),
migrate=migrate, *s3_meta_fields()
)
# CRUD strings
ADD_BASELINE_TYPE = T("Add Baseline Type")
LIST_BASELINE_TYPE = T("List Baseline Types")
s3.crud_strings[tablename] = Storage(
title_create = ADD_BASELINE_TYPE,
title_display = T("Baseline Type Details"),
title_list = LIST_BASELINE_TYPE,
title_update = T("Edit Baseline Type"),
title_search = T("Search Baseline Type"),
subtitle_create = T("Add New Baseline Type"),
subtitle_list = T("Baseline Types"),
label_list_button = LIST_BASELINE_TYPE,
label_create_button = ADD_BASELINE_TYPE,
label_delete_button = T("Delete Baseline Type"),
msg_record_created = T("Baseline Type added"),
msg_record_modified = T("Baseline Type updated"),
msg_record_deleted = T("Baseline Type deleted"),
msg_list_empty = T("No Baseline Types currently registered"))
def baseline_type_comment():
        if auth.has_membership(auth.id_group("Administrator")):
return DIV(A(ADD_BASELINE_TYPE,
_class="colorbox",
_href=URL(r=request, c="assess", f="baseline_type", args="create", vars=dict(format="popup")),
_target="top",
_title=ADD_BASELINE_TYPE
)
)
else:
return None
baseline_type_id = S3ReusableField("baseline_type_id", db.assess_baseline_type, sortby="name",
requires = IS_NULL_OR(IS_ONE_OF(db, "assess_baseline_type.id","%(name)s", sort=True)),
represent = lambda id: shn_get_db_field_value(db = db,
table = "assess_baseline_type",
field = "name",
look_up = id),
label = T("Baseline Type"),
comment = baseline_type_comment(),
ondelete = "RESTRICT"
)
#==============================================================================
# Baseline
resourcename = "baseline"
tablename = "%s_%s" % (module, resourcename)
table = db.define_table(tablename,
assess_id(),
baseline_type_id(),
Field("value", "double"),
comments(),
migrate=migrate, *s3_meta_fields()
)
# Hide FK fields in forms
table.assess_id.readable = table.assess_id.writable = False
# CRUD strings
ADD_BASELINE = T("Add Baseline")
LIST_BASELINE = T("List Baselines")
s3.crud_strings[tablename] = Storage(
title_create = ADD_BASELINE,
title_display = T("Impact Baselines"),
title_list = LIST_BASELINE,
title_update = T("Edit Baseline"),
title_search = T("Search Baselines"),
subtitle_create = T("Add New Baseline"),
subtitle_list = T("Baselines"),
label_list_button = LIST_BASELINE,
label_create_button = ADD_BASELINE,
label_delete_button = T("Delete Baseline"),
msg_record_created = T("Baseline added"),
msg_record_modified = T("Baseline updated"),
msg_record_deleted = T("Baseline deleted"),
msg_list_empty = T("No Baselines currently registered"))
# Baseline as component of assessments
s3xrc.model.add_component(module, resourcename,
multiple=True,
joinby=dict(assess_assess="assess_id"),
deletable=True,
editable=True)
#==============================================================================
# Summary
resourcename = "summary"
tablename = "%s_%s" % (module, resourcename)
table = db.define_table(tablename,
assess_id(),
cluster_id(),
#Field("value", "double"),
Field("value", "integer",
default = 0),
comments(),
migrate=migrate, *s3_meta_fields()
)
# Hide FK fields in forms
table.assess_id.readable = table.assess_id.writable = False
assess_severity_opts = {
0: T("Low"),
1: T("Medium"),
2: T("High"),
3: T("Very High"),
}
table.value.label = T("Severity")
table.value.requires = IS_EMPTY_OR(IS_IN_SET(assess_severity_opts))
table.value.widget = SQLFORM.widgets.radio.widget
assess_colour_opts = {
0:"green",
1:"yellow",
2:"orange",
3:"red"
}
def shn_assess_severity_represent(value):
        if value is not None:
return IMG( _src="/%s/static/img/%s_circle_16px.png" % (request.application, assess_colour_opts[value]),
_alt= value,
_align="middle"
)
else:
return NONE
table.value.represent = shn_assess_severity_represent
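    # e.g. value=3 ("Very High") renders the red circle icon, value=0 ("Low")
    # the green one; an empty value falls back to the NONE representation.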
# CRUD strings
ADD_ASSESS_SUMMARY = T("Add Assessment Summary")
LIST_ASSESS_SUMMARY = T("List Assessment Summaries")
s3.crud_strings[tablename] = Storage(
title_create = ADD_ASSESS_SUMMARY,
title_display = T("Impact Assessment Summaries"),
title_list = LIST_ASSESS_SUMMARY,
title_update = T("Edit Assessment Summary"),
title_search = T("Search Assessment Summaries"),
subtitle_create = T("Add New Assessment Summary"),
subtitle_list = T("Assessment Summaries"),
label_list_button = LIST_ASSESS_SUMMARY,
label_create_button = ADD_ASSESS_SUMMARY,
label_delete_button = T("Delete Assessment Summary"),
msg_record_created = T("Assessment Summary added"),
msg_record_modified = T("Assessment Summary updated"),
msg_record_deleted = T("Assessment Summary deleted"),
msg_list_empty = T("No Assessment Summaries currently registered"))
# Summary as component of assessments
s3xrc.model.add_component(module, resourcename,
multiple=True,
joinby=dict(assess_assess="assess_id"),
deletable=True,
editable=True)
#==============================================================================
# Rapid Assessment Tool
#==============================================================================
# Section CRUD strings
ADD_SECTION = T("Add Section")
LIST_SECTIONS = T("List Sections")
rat_section_crud_strings = Storage(
title_create = ADD_SECTION,
title_display = T("Section Details"),
title_list = LIST_SECTIONS,
title_update = "",
title_search = T("Search Sections"),
subtitle_create = "",
subtitle_list = T("Sections"),
label_list_button = LIST_SECTIONS,
label_create_button = ADD_SECTION,
label_delete_button = T("Delete Section"),
msg_record_created = T("Section updated"),
msg_record_modified = T("Section updated"),
msg_record_deleted = T("Section deleted"),
msg_list_empty = T("No Sections currently registered"))
# -------------------------------------------------------------------------
# Common options
rat_walking_time_opts = {
1: T("0-15 minutes"),
2: T("15-30 minutes"),
3: T("30-60 minutes"),
4: T("over one hour"),
999: NOT_APPLICABLE
}
# -------------------------------------------------------------------------
# Helper functions
def shn_rat_represent_multiple(set, opt):
""" Represent an IS_IN_SET with multiple=True as
comma-separated list of options
@param set: the options set as dict
@param opt: the selected option(s)
"""
if isinstance(opt, (list, tuple)):
opts = opt
vals = [str(set.get(o, o)) for o in opts]
#elif isinstance(opt, basestring):
# opts = opt.split("|")
# vals = [str(set.get(int(o), o)) for o in opts if o]
        elif isinstance(opt, int):
            opts = [opt]
            # keep vals a list so the single-option branch below can take
            # vals[0] (a bare string would yield only its first character)
            vals = [str(set.get(opt, opt))]
else:
return T("None")
if len(opts) > 1:
vals = ", ".join(vals)
else:
vals = len(vals) and vals[0] or ""
return vals
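    # Illustrative behaviour (using the interviewee options defined below):
    #   shn_rat_represent_multiple(rat_interviewee_opts, [1, 3]) -> "Male, Village Leader"
    #   shn_rat_represent_multiple(rat_interviewee_opts, 2)      -> "Female"
    #   shn_rat_represent_multiple(rat_interviewee_opts, None)   -> "None"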
def shn_rat_label_and_tooltip(field, label, tooltip, multiple=False):
""" Add label and tooltip to a field """
field.label = T(label)
if multiple:
field.comment = DIV("(%s)" % T("Select all that apply"),
DIV(_class="tooltip",
_title="%s|%s" % (T(label), T(tooltip))))
else:
field.comment = DIV(DIV(_class="tooltip",
_title="%s|%s" % (T(label), T(tooltip))))
rat_interview_location_opts = {
1:T("Village"),
2:T("Urban area"),
3:T("Collective center"),
4:T("Informal camp"),
5:T("Formal camp"),
6:T("School"),
7:T("Mosque"),
8:T("Church"),
99:T("Other")
}
rat_interviewee_opts = {
1:T("Male"),
2:T("Female"),
3:T("Village Leader"),
4:T("Informal Leader"),
5:T("Community Member"),
6:T("Religious Leader"),
7:T("Police"),
8:T("Healthcare Worker"),
9:T("School Teacher"),
10:T("Womens Focus Groups"),
11:T("Child (< 18 yrs)"),
99:T("Other")
}
rat_accessibility_opts = {
1:T("2x4 Car"),
2:T("4x4 Car"),
3:T("Truck"),
4:T("Motorcycle"),
5:T("Boat"),
6:T("Walking Only"),
7:T("No access at all"),
99:T("Other")
}
# Main Resource -----------------------------------------------------------
# contains Section 1: Identification Information
#
resourcename = "rat"
tablename = "%s_%s" % (module, resourcename)
table = db.define_table(tablename,
Field("date", "date"),
location_id(),
staff_id(),
staff_id("staff2_id", label = T("Staff 2")),
Field("interview_location", "list:integer"),
Field("interviewee", "list:integer"),
Field("accessibility", "integer"),
comments(),
document_id(),
shelter_id(),
migrate=migrate, *s3_meta_fields())
table.date.requires = [IS_DATE(), IS_NOT_EMPTY()]
table.date.default = datetime.datetime.today()
table.interview_location.label = T("Interview taking place at")
table.interview_location.requires = IS_NULL_OR(IS_IN_SET(rat_interview_location_opts, multiple=True, zero=None))
table.interview_location.represent = lambda opt, set=rat_interview_location_opts: \
shn_rat_represent_multiple(set, opt)
table.interview_location.comment = "(" + T("Select all that apply") + ")"
#table.interview_location.widget = SQLFORM.widgets.checkboxes.widget
table.interviewee.label = T("Person interviewed")
table.interviewee.requires = IS_NULL_OR(IS_IN_SET(rat_interviewee_opts, multiple=True, zero=None))
table.interviewee.represent = lambda opt, set=rat_interviewee_opts: \
shn_rat_represent_multiple(set, opt)
table.interviewee.comment = "(" + T("Select all that apply") + ")"
#table.interviewee.widget = SQLFORM.widgets.checkboxes.widget
table.accessibility.requires = IS_NULL_OR(IS_IN_SET(rat_accessibility_opts, zero=None))
table.accessibility.represent = lambda opt: rat_accessibility_opts.get(opt, opt)
table.accessibility.label = T("Accessibility of Affected Location")
# CRUD strings
ADD_ASSESSMENT = T("Add Assessment")
LIST_ASSESSMENTS = T("List Assessments")
s3.crud_strings[tablename] = Storage(
title_create = ADD_ASSESSMENT,
title_display = T("Assessment Details"),
title_list = LIST_ASSESSMENTS,
title_update = T("Edit Assessment"),
title_search = T("Search Assessments"),
subtitle_create = T("Add New Assessment"),
subtitle_list = T("Assessments"),
label_list_button = LIST_ASSESSMENTS,
label_create_button = ADD_ASSESSMENT,
label_delete_button = T("Delete Assessment"),
msg_record_created = T("Assessment added"),
msg_record_modified = T("Assessment updated"),
msg_record_deleted = T("Assessment deleted"),
msg_list_empty = T("No Assessments currently registered"))
# -------------------------------------------------------------------------
def rat_assessment_onaccept(form):
id = form.vars.get("id", None)
if id:
for x in xrange(2, 10):
section = "assess_section%s" % x
set = db(db[section].assessment_id == id)
record = set.select(db[section].id, limitby=(0, 1)).first()
if not record:
db[section].insert(assessment_id=id)
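    # The onaccept above guarantees that each rapid assessment owns exactly
    # one (initially empty) record in every section table assess_section2 ..
    # assess_section9, so all section tabs are editable right away.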
# -------------------------------------------------------------------------
def shn_rat_represent(id):
""" Represent assessment as string """
table = db.assess_rat
row = db(table.id == id).select(table.date,
table.staff_id,
table.staff2_id,
table.location_id,
limitby = (0, 1)).first()
if row:
date = row.date and str(row.date) or ""
location = row.location_id and shn_gis_location_represent(row.location_id) or ""
table = db.org_staff
org = ["", ""]
            # enumerate keeps org[0]/org[1] aligned with staff 1/staff 2
            for i, staff_id in enumerate([row.staff_id, row.staff2_id]):
                if staff_id:
                    staff = db(table.id == staff_id).select(table.organisation_id,
                                                            limitby=(0, 1)).first()
                    if staff:
                        org[i] = shn_organisation_represent(staff.organisation_id)
assessment_represent = XML("<div>%s %s, %s %s</div>" % (location, org[0], org[1], date))
else:
assessment_represent = "-"
return assessment_represent
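    # e.g. an assessment in "Semarang" by staff of "PMI" on 2010-05-01 would
    # render roughly as "<div>Semarang PMI,  2010-05-01</div>" (values
    # hypothetical).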
# -------------------------------------------------------------------------
# re-usable field
assessment_id = S3ReusableField("assessment_id", table,
requires = IS_NULL_OR(IS_ONE_OF(db, "assess_rat.id", shn_rat_represent, orderby="assess_rat.id")),
represent = lambda id: shn_rat_represent(id),
label = T("Rapid Assessment"),
                                    comment = A(ADD_ASSESSMENT,
                                                _class="colorbox",
                                                _href=URL(r=request, c="assess", f="rat",
                                                          args="create", vars=dict(format="popup")),
                                                _target="top",
                                                _title=ADD_ASSESSMENT),
ondelete = "RESTRICT")
# Assessment as component of doc_document and cr_shelter.
# RAT has components itself, so best not to constrain within the parent resource tabs
# - therefore disable the listadd & jump out of the tabs for Create/Update
s3xrc.model.add_component(module, resourcename,
multiple=True,
joinby=dict(cr_shelter="shelter_id", doc_document="document_id"))
s3xrc.model.configure(table,
listadd=False,
onaccept=lambda form: rat_assessment_onaccept(form))
# Section 2: Demographic --------------------------------------------------
resourcename = "section2"
tablename = "%s_%s" % (module, resourcename)
table = db.define_table(tablename,
assessment_id(),
Field("population_total", "integer"),
Field("households_total", "integer"),
Field("population_affected", "integer"),
Field("households_affected", "integer"),
Field("male_05", "double"),
Field("male_612", "double"),
Field("male_1317", "double"),
Field("male_1825", "double"),
Field("male_2660", "double"),
Field("male_61", "double"),
Field("female_05", "double"),
Field("female_612", "double"),
Field("female_1317", "double"),
Field("female_1825", "double"),
Field("female_2660", "double"),
Field("female_61", "double"),
Field("dead_women", "integer"),
Field("dead_men", "integer"),
Field("dead_girl", "integer"),
Field("dead_boy", "integer"),
Field("injured_women", "integer"),
Field("injured_men", "integer"),
Field("injured_girl", "integer"),
Field("injured_boy", "integer"),
Field("missing_women", "integer"),
Field("missing_men", "integer"),
Field("missing_girl", "integer"),
Field("missing_boy", "integer"),
Field("household_head_elderly", "integer"),
Field("household_head_female", "integer"),
Field("household_head_child", "integer"),
Field("disabled_physical", "integer"),
Field("disabled_mental", "integer"),
Field("pregnant", "integer"),
Field("lactating", "integer"),
Field("minorities", "integer"),
comments(),
migrate=migrate, *s3_meta_fields())
table.assessment_id.readable = table.assessment_id.writable = False
table.population_affected.label = T("Estimated # of people who are affected by the emergency")
table.population_affected.comment = T("people")
table.households_affected.label = T("Estimated # of households who are affected by the emergency")
table.households_affected.comment = T("households")
table.population_total.label = T("Total population of site visited")
table.population_total.comment = T("people")
table.households_total.label = T("Total # of households of site visited")
table.households_total.comment = T("households")
table.male_05.label = T("Number/Percentage of affected population that is Male & Aged 0-5")
table.male_612.label = T("Number/Percentage of affected population that is Male & Aged 6-12")
table.male_1317.label = T("Number/Percentage of affected population that is Male & Aged 13-17")
table.male_1825.label = T("Number/Percentage of affected population that is Male & Aged 18-25")
table.male_2660.label = T("Number/Percentage of affected population that is Male & Aged 26-60")
table.male_61.label = T("Number/Percentage of affected population that is Male & Aged 61+")
table.female_05.label = T("Number/Percentage of affected population that is Female & Aged 0-5")
table.female_612.label = T("Number/Percentage of affected population that is Female & Aged 6-12")
table.female_1317.label = T("Number/Percentage of affected population that is Female & Aged 13-17")
table.female_1825.label = T("Number/Percentage of affected population that is Female & Aged 18-25")
table.female_2660.label = T("Number/Percentage of affected population that is Female & Aged 26-60")
table.female_61.label = T("Number/Percentage of affected population that is Female & Aged 61+")
table.dead_women.label = T("How many Women (18 yrs+) are Dead due to the crisis")
table.dead_women.comment = T("people")
table.dead_men.label = T("How many Men (18 yrs+) are Dead due to the crisis")
table.dead_men.comment = T("people")
table.dead_girl.label = T("How many Girls (0-17 yrs) are Dead due to the crisis")
table.dead_girl.comment = T("people")
table.dead_boy.label = T("How many Boys (0-17 yrs) are Dead due to the crisis")
table.dead_boy.comment = T("people")
table.missing_women.label = T("How many Women (18 yrs+) are Missing due to the crisis")
table.missing_women.comment = T("people")
table.missing_men.label = T("How many Men (18 yrs+) are Missing due to the crisis")
table.missing_men.comment = T("people")
table.missing_girl.label = T("How many Girls (0-17 yrs) are Missing due to the crisis")
table.missing_girl.comment = T("people")
table.missing_boy.label = T("How many Boys (0-17 yrs) are Missing due to the crisis")
table.missing_boy.comment = T("people")
table.injured_women.label = T("How many Women (18 yrs+) are Injured due to the crisis")
table.injured_women.comment = T("people")
table.injured_men.label = T("How many Men (18 yrs+) are Injured due to the crisis")
table.injured_men.comment = T("people")
table.injured_girl.label = T("How many Girls (0-17 yrs) are Injured due to the crisis")
table.injured_girl.comment = T("people")
table.injured_boy.label = T("How many Boys (0-17 yrs) are Injured due to the crisis")
table.injured_boy.comment = T("people")
table.household_head_elderly.label = T("Elderly person headed households (>60 yrs)")
table.household_head_elderly.comment = T("households")
table.household_head_female.label = T("Female headed households")
table.household_head_female.comment = T("households")
table.household_head_child.label = T("Child headed households (<18 yrs)")
table.household_head_child.comment = T("households")
table.disabled_physical.label = T("Persons with disability (physical)")
table.disabled_physical.comment = T("people")
table.disabled_mental.label = T("Persons with disability (mental)")
table.disabled_mental.comment = T("people")
table.pregnant.label = T("Pregnant women")
table.pregnant.comment = T("people")
table.lactating.label = T("Lactating women")
table.lactating.comment = T("people")
table.minorities.label = T("Migrants or ethnic minorities")
table.minorities.comment = T("people")
# CRUD strings
s3.crud_strings[tablename] = rat_section_crud_strings
s3xrc.model.add_component(module, resourcename,
multiple = False,
joinby = dict(assess_rat="assessment_id"))
s3xrc.model.configure(table, deletable=False)
# Section 3: Shelter & Essential NFIs -------------------------------------
rat_houses_salvmat_types = {
1: T("Wooden plank"),
2: T("Zinc roof"),
3: T("Bricks"),
4: T("Wooden poles"),
5: T("Door frame"),
6: T("Window frame"),
7: T("Roof tile"),
999: NOT_APPLICABLE
}
rat_water_container_types = {
1: T("Jerry can"),
2: T("Bucket"),
3: T("Water gallon"),
99: T("Other (specify)")
}
resourcename = "section3"
tablename = "%s_%s" % (module, resourcename)
table = db.define_table(tablename,
assessment_id(),
Field("houses_total", "integer"),
Field("houses_destroyed", "integer"),
Field("houses_damaged", "integer"),
Field("houses_salvmat", "list:integer"),
Field("water_containers_available", "boolean"),
Field("water_containers_sufficient", "boolean"),
Field("water_containers_types", "list:integer"),
Field("water_containers_types_other"),
Field("cooking_equipment_available", "boolean"),
Field("sanitation_items_available", "boolean"),
Field("sanitation_items_available_women", "boolean"),
Field("bedding_materials_available", "boolean"),
Field("clothing_sets_available", "boolean"),
Field("nfi_assistance_available", "boolean"),
Field("kits_hygiene_received", "boolean"),
Field("kits_hygiene_source"),
Field("kits_household_received", "boolean"),
Field("kits_household_source"),
Field("kits_dwelling_received", "boolean"),
Field("kits_dwelling_source"),
comments(),
migrate=migrate, *s3_meta_fields())
table.assessment_id.readable = table.assessment_id.writable = False
table.houses_total.label = T("Total number of houses in the area")
table.houses_total.requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 99999999))
shn_rat_label_and_tooltip(table.houses_destroyed,
"Number of houses destroyed/uninhabitable",
"How many houses are uninhabitable (uninhabitable = foundation and structure destroyed)?")
table.houses_destroyed.requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 99999999))
shn_rat_label_and_tooltip(table.houses_damaged,
"Number of houses damaged, but usable",
"How many houses suffered damage but remain usable (usable = windows broken, cracks in walls, roof slightly damaged)?")
    table.houses_damaged.requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 99999999))
shn_rat_label_and_tooltip(table.houses_salvmat,
"Salvage material usable from destroyed houses",
"What type of salvage material can be used from destroyed houses?",
multiple=True)
table.houses_salvmat.requires = IS_NULL_OR(IS_IN_SET(rat_houses_salvmat_types, multiple=True, zero=None))
table.houses_salvmat.represent = lambda opt, set=rat_houses_salvmat_types: \
shn_rat_represent_multiple(set, opt)
shn_rat_label_and_tooltip(table.water_containers_available,
"Water storage containers available for HH",
"Do households have household water storage containers?")
shn_rat_label_and_tooltip(table.water_containers_sufficient,
"Water storage containers sufficient per HH",
"Do households each have at least 2 containers (10-20 litres each) to hold water?")
shn_rat_label_and_tooltip(table.water_containers_types,
"Types of water storage containers available",
"What types of household water storage containers are available?",
multiple=True)
table.water_containers_types.requires = IS_EMPTY_OR(IS_IN_SET(rat_water_container_types, zero=None, multiple=True))
    table.water_containers_types.represent = lambda opt, set=rat_water_container_types: \
                                             shn_rat_represent_multiple(set, opt)
table.water_containers_types_other.label = T("Other types of water storage containers")
shn_rat_label_and_tooltip(table.cooking_equipment_available,
"Appropriate cooking equipment/materials in HH",
"Do households have appropriate equipment and materials to cook their food (stove, pots, dished plates, and a mug/drinking vessel, etc)?")
shn_rat_label_and_tooltip(table.sanitation_items_available,
"Reliable access to sanitation/hygiene items",
"Do people have reliable access to sufficient sanitation/hygiene items (bathing soap, laundry soap, shampoo, toothpaste and toothbrush)?")
shn_rat_label_and_tooltip(table.sanitation_items_available_women,
"Easy access to sanitation items for women/girls",
"Do women and girls have easy access to sanitary materials?")
shn_rat_label_and_tooltip(table.bedding_materials_available,
"Bedding materials available",
"Do households have bedding materials available (tarps, plastic mats, blankets)?")
shn_rat_label_and_tooltip(table.clothing_sets_available,
"Appropriate clothing available",
"Do people have at least 2 full sets of clothing (shirts, pants/sarong, underwear)?")
shn_rat_label_and_tooltip(table.nfi_assistance_available,
"Shelter/NFI assistance received/expected",
"Have households received any shelter/NFI assistance or is assistance expected in the coming days?")
table.kits_hygiene_received.label = T("Hygiene kits received")
table.kits_hygiene_source.label = T("Hygiene kits, source")
table.kits_household_received.label = T("Household kits received")
table.kits_household_source.label = T("Household kits, source")
table.kits_dwelling_received.label = T("Family tarpaulins received")
table.kits_dwelling_source.label = T("Family tarpaulins, source")
# CRUD strings
s3.crud_strings[tablename] = rat_section_crud_strings
s3xrc.model.add_component(module, resourcename,
multiple = False,
joinby = dict(assess_rat="assessment_id"))
s3xrc.model.configure(table, deletable=False)
# Section 4 - Water and Sanitation ----------------------------------------
rat_water_source_types = {
1: T("PDAM"),
2: T("Dug Well"),
3: T("Spring"),
4: T("River"),
5: T("Other Faucet/Piped Water"),
99: T("Other (describe)"),
999: NOT_APPLICABLE
}
rat_water_coll_person_opts = {
1: T("Child"),
2: T("Adult male"),
3: T("Adult female"),
4: T("Older person (>60 yrs)"),
999: NOT_APPLICABLE
}
rat_defec_place_types = {
1: T("open defecation"),
2: T("pit"),
3: T("latrines"),
4: T("river"),
99: T("other")
}
rat_defec_place_animals_opts = {
1: T("enclosed area"),
2: T("within human habitat"),
999: NOT_APPLICABLE
}
rat_latrine_types = {
1: T("flush latrine with septic tank"),
2: T("pit latrine"),
999: NOT_APPLICABLE
}
resourcename = "section4"
tablename = "%s_%s" % (module, resourcename)
table = db.define_table(tablename,
assessment_id(),
Field("water_source_pre_disaster_type", "integer"),
Field("water_source_pre_disaster_description"),
Field("dwater_source_type", "integer"),
Field("dwater_source_description"),
Field("dwater_reserve"),
Field("swater_source_type", "integer"),
Field("swater_source_description"),
Field("swater_reserve"),
Field("water_coll_time", "integer"),
Field("water_coll_safe", "boolean"),
Field("water_coll_safety_problems"),
Field("water_coll_person", "integer"),
Field("defec_place_type"),
Field("defec_place_description"),
Field("defec_place_distance", "integer"),
Field("defec_place_animals", "integer"),
Field("close_industry", "boolean"),
Field("waste_disposal"),
Field("latrines_number", "integer"),
Field("latrines_type", "integer"),
Field("latrines_separation", "boolean"),
Field("latrines_distance", "integer"),
comments(),
migrate=migrate, *s3_meta_fields())
table.assessment_id.readable = table.assessment_id.writable = False
table.water_source_pre_disaster_type.label = T("Type of water source before the disaster")
table.water_source_pre_disaster_type.requires = IS_EMPTY_OR(IS_IN_SET(rat_water_source_types, zero=None))
table.water_source_pre_disaster_type.represent = lambda opt: rat_water_source_types.get(opt, UNKNOWN_OPT)
table.water_source_pre_disaster_description.label = T("Description of water source before the disaster")
shn_rat_label_and_tooltip(table.dwater_source_type,
"Current type of source for drinking water",
"What is your major source of drinking water?")
table.dwater_source_type.requires = IS_EMPTY_OR(IS_IN_SET(rat_water_source_types, zero=None))
table.dwater_source_type.represent = lambda opt: rat_water_source_types.get(opt, UNKNOWN_OPT)
table.dwater_source_description.label = T("Description of drinking water source")
shn_rat_label_and_tooltip(table.dwater_reserve,
"How long will this water resource last?",
"Specify the minimum sustainability in weeks or days.")
shn_rat_label_and_tooltip(table.swater_source_type,
"Current type of source for sanitary water",
"What is your major source of clean water for daily use (ex: washing, cooking, bathing)?")
table.swater_source_type.requires = IS_EMPTY_OR(IS_IN_SET(rat_water_source_types, zero=None))
table.swater_source_type.represent = lambda opt: rat_water_source_types.get(opt, UNKNOWN_OPT)
table.swater_source_description.label = T("Description of sanitary water source")
shn_rat_label_and_tooltip(table.swater_reserve,
"How long will this water resource last?",
"Specify the minimum sustainability in weeks or days.")
shn_rat_label_and_tooltip(table.water_coll_time,
"Time needed to collect water",
"How long does it take you to reach the available water resources? Specify the time required to go there and back, including queuing time, by foot.")
table.water_coll_time.requires = IS_EMPTY_OR(IS_IN_SET(rat_walking_time_opts, zero=None))
table.water_coll_time.represent = lambda opt: rat_walking_time_opts.get(opt, UNKNOWN_OPT)
table.water_coll_safe.label = T("Is it safe to collect water?")
table.water_coll_safe.default = True
table.water_coll_safety_problems.label = T("If no, specify why")
table.water_coll_person.label = T("Who usually collects water for the family?")
table.water_coll_person.requires = IS_EMPTY_OR(IS_IN_SET(rat_water_coll_person_opts, zero=None))
table.water_coll_person.represent = lambda opt: rat_water_coll_person_opts.get(opt, UNKNOWN_OPT)
shn_rat_label_and_tooltip(table.defec_place_type,
"Type of place for defecation",
"Where do the majority of people defecate?",
multiple=True)
    table.defec_place_type.requires = IS_EMPTY_OR(IS_IN_SET(rat_defec_place_types, zero=None, multiple=True))
    table.defec_place_type.represent = lambda opt, set=rat_defec_place_types: \
                                       shn_rat_represent_multiple(set, opt)
table.defec_place_description.label = T("Description of defecation area")
table.defec_place_distance.label = T("Distance between defecation area and water source")
table.defec_place_distance.comment = T("meters")
table.defec_place_animals.label = T("Defecation area for animals")
table.defec_place_animals.requires = IS_EMPTY_OR(IS_IN_SET(rat_defec_place_animals_opts, zero = None))
table.defec_place_animals.represent = lambda opt: rat_defec_place_animals_opts.get(opt, UNKNOWN_OPT)
shn_rat_label_and_tooltip(table.close_industry,
"Industry close to village/camp",
"Is there any industrial or agro-chemical production close to the affected area/village?")
shn_rat_label_and_tooltip(table.waste_disposal,
"Place for solid waste disposal",
"Where is solid waste disposed in the village/camp?")
shn_rat_label_and_tooltip(table.latrines_number,
"Number of latrines",
"How many latrines are available in the village/IDP centre/Camp?")
table.latrines_number.requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999))
shn_rat_label_and_tooltip(table.latrines_type,
"Type of latrines",
"What type of latrines are available in the village/IDP centre/Camp?")
table.latrines_type.requires = IS_EMPTY_OR(IS_IN_SET(rat_latrine_types, zero=None))
table.latrines_type.represent = lambda opt: rat_latrine_types.get(opt, UNKNOWN_OPT)
shn_rat_label_and_tooltip(table.latrines_separation,
"Separate latrines for women and men",
"Are there separate latrines for women and men available?")
shn_rat_label_and_tooltip(table.latrines_distance,
"Distance between shelter and latrines",
"Distance between latrines and temporary shelter in meters")
# CRUD strings
s3.crud_strings[tablename] = rat_section_crud_strings
s3xrc.model.add_component(module, resourcename,
multiple = False,
joinby = dict(assess_rat="assessment_id"))
s3xrc.model.configure(table, deletable=False)
# Section 5 - Health ------------------------------------------------------
rat_health_services_types = {
1: T("Community Health Center"),
2: T("Hospital")
}
rat_health_problems_opts = {
1: T("Respiratory Infections"),
2: T("Diarrhea"),
3: T("Dehydration"),
99: T("Other (specify)")
}
rat_infant_nutrition_alternative_opts = {
1: T("Porridge"),
2: T("Banana"),
3: T("Instant Porridge"),
4: T("Air tajin"),
99: T("Other (specify)")
}
resourcename = "section5"
tablename = "%s_%s" % (module, resourcename)
table = db.define_table(tablename,
assessment_id(),
Field("health_services_pre_disaster", "boolean"),
Field("medical_supplies_pre_disaster", "boolean"),
Field("health_services_post_disaster", "boolean"),
Field("medical_supplies_post_disaster", "boolean"),
Field("medical_supplies_reserve", "integer"),
Field("health_services_available_types", "list:integer"),
Field("staff_number_doctors", "integer"),
Field("staff_number_nurses", "integer"),
Field("staff_number_midwives", "integer"),
Field("health_service_walking_time", "integer"),
Field("health_problems_adults", "list:integer"),
Field("health_problems_adults_other"),
Field("health_problems_children", "list:integer"),
Field("health_problems_children_other"),
Field("chronical_illness_cases", "boolean"),
Field("chronical_illness_children", "boolean"),
Field("chronical_illness_elderly", "boolean"),
Field("chronical_care_sufficient", "boolean"),
Field("malnutrition_present_pre_disaster", "boolean"),
Field("mmd_present_pre_disaster", "boolean"),
Field("breast_milk_substitutes_pre_disaster", "boolean"),
Field("breast_milk_substitutes_post_disaster", "boolean"),
Field("infant_nutrition_alternative", "list:integer"),
Field("infant_nutrition_alternative_other"),
Field("u5_diarrhea", "boolean"),
Field("u5_diarrhea_rate_48h", "integer"),
comments(),
migrate=migrate, *s3_meta_fields())
table.assessment_id.readable = table.assessment_id.writable = False
shn_rat_label_and_tooltip(table.health_services_pre_disaster,
"Health services functioning prior to disaster",
"Were there health services functioning for the community prior to the disaster?")
shn_rat_label_and_tooltip(table.medical_supplies_pre_disaster,
"Basic medical supplies available prior to disaster",
"Were basic medical supplies available for health services prior to the disaster?")
shn_rat_label_and_tooltip(table.health_services_post_disaster,
"Health services functioning since disaster",
"Are there health services functioning for the community since the disaster?")
shn_rat_label_and_tooltip(table.medical_supplies_post_disaster,
"Basic medical supplies available since disaster",
"Are basic medical supplies available for health services since the disaster?")
table.medical_supplies_reserve.label = T("How many days will the supplies last?")
shn_rat_label_and_tooltip(table.health_services_available_types,
"Types of health services available",
"What types of health services are still functioning in the affected area?",
multiple=True)
table.health_services_available_types.requires = IS_EMPTY_OR(IS_IN_SET(rat_health_services_types,
zero=None, multiple=True))
    table.health_services_available_types.represent = lambda opt, set=rat_health_services_types: \
                                                      shn_rat_represent_multiple(set, opt)
shn_rat_label_and_tooltip(table.staff_number_doctors,
"Number of doctors actively working",
"How many doctors in the health centers are still actively working?")
table.staff_number_doctors.requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999))
shn_rat_label_and_tooltip(table.staff_number_nurses,
"Number of nurses actively working",
"How many nurses in the health centers are still actively working?")
table.staff_number_nurses.requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999))
shn_rat_label_and_tooltip(table.staff_number_midwives,
"Number of midwives actively working",
"How many midwives in the health centers are still actively working?")
table.staff_number_midwives.requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999))
shn_rat_label_and_tooltip(table.health_service_walking_time,
"Walking time to the health service",
"How long does it take you to walk to the health service?")
table.health_service_walking_time.requires = IS_EMPTY_OR(IS_IN_SET(rat_walking_time_opts, zero=None))
table.health_service_walking_time.represent = lambda opt: rat_walking_time_opts.get(opt, UNKNOWN_OPT)
shn_rat_label_and_tooltip(table.health_problems_adults,
"Current type of health problems, adults",
"What types of health problems do people currently have?",
multiple=True)
table.health_problems_adults.requires = IS_EMPTY_OR(IS_IN_SET(rat_health_problems_opts, zero=None, multiple=True))
table.health_problems_adults.represent = lambda opt, set=rat_health_problems_opts: \
shn_rat_represent_multiple(set, opt)
table.health_problems_adults_other.label = T("Other current health problems, adults")
shn_rat_label_and_tooltip(table.health_problems_children,
"Current type of health problems, children",
"What types of health problems do children currently have?",
multiple=True)
table.health_problems_children.requires = IS_EMPTY_OR(IS_IN_SET(rat_health_problems_opts, zero=None, multiple=True))
table.health_problems_children.represent = lambda opt, set=rat_health_problems_opts: \
shn_rat_represent_multiple(set, opt)
table.health_problems_children_other.label = T("Other current health problems, children")
shn_rat_label_and_tooltip(table.chronical_illness_cases,
"People with chronical illnesses",
"Are there people with chronical illnesses in your community?")
shn_rat_label_and_tooltip(table.chronical_illness_children,
"Children with chronical illnesses",
"Are there children with chronical illnesses in your community?")
shn_rat_label_and_tooltip(table.chronical_illness_elderly,
"Older people with chronical illnesses",
"Are there older people with chronical illnesses in your community?")
shn_rat_label_and_tooltip(table.chronical_care_sufficient,
"Sufficient care/assistance for chronically ill",
"Are the chronically ill receiving sufficient care and assistance?")
shn_rat_label_and_tooltip(table.malnutrition_present_pre_disaster,
"Malnutrition present prior to disaster",
"Were there cases of malnutrition in this area prior to the disaster?")
shn_rat_label_and_tooltip(table.mmd_present_pre_disaster,
"Micronutrient malnutrition prior to disaster",
"Were there reports or evidence of outbreaks of any micronutrient malnutrition disorders before the emergency?")
shn_rat_label_and_tooltip(table.breast_milk_substitutes_pre_disaster,
"Breast milk substitutes used prior to disaster",
"Were breast milk substitutes used prior to the disaster?")
shn_rat_label_and_tooltip(table.breast_milk_substitutes_post_disaster,
"Breast milk substitutes in use since disaster",
"Are breast milk substitutes being used here since the disaster?")
shn_rat_label_and_tooltip(table.infant_nutrition_alternative,
"Alternative infant nutrition in use",
"Babies who are not being breastfed, what are they being fed on?",
multiple=True)
table.infant_nutrition_alternative.requires = \
IS_EMPTY_OR(IS_IN_SET(rat_infant_nutrition_alternative_opts, zero=None, multiple=True))
table.infant_nutrition_alternative.represent = lambda opt, set=rat_infant_nutrition_alternative_opts: \
shn_rat_represent_multiple(set, opt)
table.infant_nutrition_alternative_other.label = T("Other alternative infant nutrition in use")
shn_rat_label_and_tooltip(table.u5_diarrhea,
"Diarrhea among children under 5",
"Are there cases of diarrhea among children under the age of 5?")
    shn_rat_label_and_tooltip(table.u5_diarrhea_rate_48h,
                              "Approx. number of cases/48h",
                              "Approximately how many children under 5 have had diarrhea in the past 48 hours?")
# CRUD strings
s3.crud_strings[tablename] = rat_section_crud_strings
s3xrc.model.add_component(module, resourcename,
multiple = False,
joinby = dict(assess_rat="assessment_id"))
s3xrc.model.configure(table, deletable=False)
# Section 6 - Nutrition/Food Security -------------------------------------
rat_main_dish_types = {
1: T("Rice"),
2: T("Noodles"),
3: T("Biscuits"),
4: T("Corn"),
5: T("Wheat"),
6: T("Cassava"),
7: T("Cooking Oil")
}
rat_side_dish_types = {
1: T("Salted Fish"),
2: T("Canned Fish"),
3: T("Chicken"),
4: T("Eggs"),
99: T("Other (specify)")
}
rat_food_stock_reserve_opts = {
1: T("1-3 days"),
2: T("4-7 days"),
3: T("8-14 days")
}
    rat_food_source_types = {
        1: T("Local market"),
        2: T("Field cultivation"),
        3: T("Food stall"),
        4: T("Animal husbandry"),
        5: T("Raising poultry"),
        99: T("Other (specify)")
    }
resourcename = "section6"
tablename = "%s_%s" % (module, resourcename)
table = db.define_table(tablename,
assessment_id(),
Field("food_stocks_main_dishes", "list:integer"),
Field("food_stocks_side_dishes", "list:integer"),
Field("food_stocks_other_side_dishes"),
Field("food_stocks_reserve", "integer"),
Field("food_sources", "list:integer"),
Field("food_sources_other"),
Field("food_sources_disruption", "boolean"),
Field("food_sources_disruption_details"),
Field("food_assistance_available", "boolean"),
Field("food_assistance_details", "text"),
comments(),
migrate=migrate, *s3_meta_fields())
table.assessment_id.readable = table.assessment_id.writable = False
shn_rat_label_and_tooltip(table.food_stocks_main_dishes,
"Existing food stocks, main dishes",
"What food stocks exist? (main dishes)",
multiple=True)
table.food_stocks_main_dishes.requires = IS_EMPTY_OR(IS_IN_SET(rat_main_dish_types, zero=None, multiple=True))
table.food_stocks_main_dishes.represent = lambda opt, set=rat_main_dish_types: \
shn_rat_represent_multiple(set, opt)
shn_rat_label_and_tooltip(table.food_stocks_side_dishes,
"Existing food stocks, side dishes",
"What food stocks exist? (side dishes)",
multiple=True)
table.food_stocks_side_dishes.requires = IS_EMPTY_OR(IS_IN_SET(rat_side_dish_types, zero=None, multiple=True))
table.food_stocks_side_dishes.represent = lambda opt, set=rat_side_dish_types: \
shn_rat_represent_multiple(set, opt)
table.food_stocks_other_side_dishes.label = T("Other side dishes in stock")
table.food_stocks_reserve.label = T("How long will the food last?")
table.food_stocks_reserve.requires = IS_EMPTY_OR(IS_IN_SET(rat_food_stock_reserve_opts, zero=None))
table.food_stocks_reserve.represent = lambda opt: rat_food_stock_reserve_opts.get(opt, UNKNOWN_OPT)
shn_rat_label_and_tooltip(table.food_sources,
"Usual food sources in the area",
"What are the people's normal ways to obtain food in this area?",
multiple=True)
table.food_sources.requires = IS_EMPTY_OR(IS_IN_SET(rat_food_source_types, zero=None, multiple=True))
table.food_sources.represent = lambda opt, set=rat_food_source_types: \
shn_rat_represent_multiple(set, opt)
table.food_sources_other.label = T("Other ways to obtain food")
shn_rat_label_and_tooltip(table.food_sources_disruption,
"Normal food sources disrupted",
"Have normal food sources been disrupted?")
table.food_sources_disruption_details.label = T("If yes, which and how")
shn_rat_label_and_tooltip(table.food_assistance_available,
"Food assistance available/expected",
"Have the people received or are you expecting any medical or food assistance in the coming days?")
table.food_assistance_details.label = T("If yes, specify what and by whom")
# CRUD strings
s3.crud_strings[tablename] = rat_section_crud_strings
s3xrc.model.add_component(module, resourcename,
multiple = False,
joinby = dict(assess_rat="assessment_id"))
s3xrc.model.configure(table, deletable=False)
# Section 7 - Livelihood --------------------------------------------------
rat_income_source_opts = {
1: T("Agriculture"),
2: T("Fishing"),
3: T("Poultry"),
4: T("Casual Labor"),
5: T("Small Trade"),
6: T("Other")
}
rat_expense_types = {
1: T("Education"),
2: T("Health"),
3: T("Food"),
4: T("Hygiene"),
5: T("Shelter"),
6: T("Clothing"),
7: T("Funeral"),
8: T("Alcohol"),
99: T("Other (specify)")
}
rat_cash_source_opts = {
1: T("Family/friends"),
2: T("Government"),
3: T("Bank/micro finance"),
4: T("Humanitarian NGO"),
99: T("Other (specify)")
}
rat_ranking_opts = xrange(1,7)
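    # IS_IN_SET accepts any iterable of options, so xrange(1, 7) offers the
    # ranks 1-6 used by the "Rank" dropdowns below.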
resourcename = "section7"
tablename = "%s_%s" % (module, resourcename)
table = db.define_table(tablename,
assessment_id(),
Field("income_sources_pre_disaster", "list:integer"),
Field("income_sources_post_disaster", "list:integer"),
Field("main_expenses", "list:integer"),
Field("main_expenses_other"),
Field("business_damaged", "boolean"),
Field("business_cash_available", "boolean"),
Field("business_cash_source", "list:integer"),
Field("rank_reconstruction_assistance", "integer"),
Field("rank_farmland_fishing_assistance", "integer"),
Field("rank_poultry_restocking", "integer"),
Field("rank_health_care_assistance", "integer"),
Field("rank_transportation_assistance", "integer"),
Field("other_assistance_needed"),
Field("rank_other_assistance", "integer"),
comments(),
migrate=migrate, *s3_meta_fields())
table.assessment_id.readable = table.assessment_id.writable = False
shn_rat_label_and_tooltip(table.income_sources_pre_disaster,
"Main income sources before disaster",
"What were your main sources of income before the disaster?",
multiple=True)
table.income_sources_pre_disaster.requires = IS_EMPTY_OR(IS_IN_SET(rat_income_source_opts, zero=None, multiple=True))
table.income_sources_pre_disaster.represent = lambda opt, set=rat_income_source_opts: \
shn_rat_represent_multiple(set, opt)
shn_rat_label_and_tooltip(table.income_sources_post_disaster,
"Current main income sources",
"What are your main sources of income now?",
multiple=True)
table.income_sources_post_disaster.requires = IS_EMPTY_OR(IS_IN_SET(rat_income_source_opts, zero=None, multiple=True))
table.income_sources_post_disaster.represent = lambda opt, set=rat_income_source_opts: \
shn_rat_represent_multiple(set, opt)
shn_rat_label_and_tooltip(table.main_expenses,
"Current major expenses",
"What do you spend most of your income on now?",
multiple=True)
table.main_expenses.requires = IS_EMPTY_OR(IS_IN_SET(rat_expense_types, zero=None, multiple=True))
table.main_expenses.represent = lambda opt, set=rat_expense_types: \
shn_rat_represent_multiple(set, opt)
table.main_expenses_other.label = T("Other major expenses")
shn_rat_label_and_tooltip(table.business_damaged,
"Business damaged",
"Has your business been damaged in the course of the disaster?")
shn_rat_label_and_tooltip(table.business_cash_available,
"Cash available to restart business",
"Do you have access to cash to restart your business?")
    shn_rat_label_and_tooltip(table.business_cash_source,
                              "Main cash source",
                              "What are your main sources of cash to restart your business?",
                              multiple=True)
table.business_cash_source.requires = IS_EMPTY_OR(IS_IN_SET(rat_cash_source_opts, zero=None, multiple=True))
table.business_cash_source.represent = lambda opt, set=rat_cash_source_opts: \
shn_rat_represent_multiple(set, opt)
shn_rat_label_and_tooltip(table.rank_reconstruction_assistance,
"Immediate reconstruction assistance, Rank",
"Assistance for immediate repair/reconstruction of houses")
table.rank_reconstruction_assistance.requires = IS_EMPTY_OR(IS_IN_SET(rat_ranking_opts, zero=None))
table.rank_farmland_fishing_assistance.label = T("Farmland/fishing material assistance, Rank")
table.rank_farmland_fishing_assistance.requires = IS_EMPTY_OR(IS_IN_SET(rat_ranking_opts, zero=None))
table.rank_poultry_restocking.label = T("Poultry restocking, Rank")
table.rank_poultry_restocking.requires = IS_EMPTY_OR(IS_IN_SET(rat_ranking_opts, zero=None))
table.rank_health_care_assistance.label = T("Health care assistance, Rank")
table.rank_health_care_assistance.requires = IS_EMPTY_OR(IS_IN_SET(rat_ranking_opts, zero=None))
table.rank_transportation_assistance.label = T("Transportation assistance, Rank")
table.rank_transportation_assistance.requires = IS_EMPTY_OR(IS_IN_SET(rat_ranking_opts, zero=None))
table.other_assistance_needed.label = T("Other assistance needed")
table.rank_other_assistance.label = T("Other assistance, Rank")
table.rank_other_assistance.requires = IS_EMPTY_OR(IS_IN_SET(rat_ranking_opts, zero=None))
# CRUD strings
s3.crud_strings[tablename] = rat_section_crud_strings
s3xrc.model.add_component(module, resourcename,
multiple = False,
joinby = dict(assess_rat="assessment_id"))
s3xrc.model.configure(table, deletable=False)
# Section 8 - Education ---------------------------------------------------
rat_schools_salvmat_types = {
1: T("Wooden plank"),
2: T("Zinc roof"),
3: T("Bricks"),
4: T("Wooden poles"),
5: T("Door frame"),
6: T("Window frame"),
7: T("Roof tile"),
999: NOT_APPLICABLE
}
rat_alternative_study_places = {
1: T("Community Centre"),
2: T("Church"),
3: T("Mosque"),
4: T("Open area"),
5: T("Government building"),
6: T("Other (specify)"),
999: NOT_APPLICABLE
}
rat_school_attendance_barriers_opts = {
1: T("School used for other purpose"),
2: T("School destroyed"),
3: T("Lack of school uniform"),
4: T("Lack of transport to school"),
5: T("Children not enrolled in new school"),
6: T("School heavily damaged"),
7: T("Desire to remain with family"),
8: T("Lack of supplies at school"),
9: T("Displaced"),
10: T("Other (specify)"),
999: NOT_APPLICABLE
}
resourcename = "section8"
tablename = "%s_%s" % (module, resourcename)
table = db.define_table(tablename,
assessment_id(),
Field("schools_total", "integer"),
Field("schools_public", "integer"),
Field("schools_private", "integer"),
Field("schools_religious", "integer"),
Field("schools_destroyed", "integer"),
Field("schools_damaged", "integer"),
Field("schools_salvmat", "list:integer"),
Field("alternative_study_places_available", "boolean"),
Field("alternative_study_places_number", "integer"),
Field("alternative_study_places", "list:integer"),
Field("alternative_study_places_other"),
Field("schools_open_pre_disaster", "integer"),
Field("schools_open_post_disaster", "integer"),
Field("teachers_active_pre_disaster", "integer"),
Field("teachers_affected_by_disaster", "integer"),
Field("children_0612_female", "integer"),
Field("children_0612_male", "integer"),
Field("children_0612_not_in_school_female", "integer"),
Field("children_0612_not_in_school_male", "integer"),
Field("children_1318_female", "integer"),
Field("children_1318_male", "integer"),
Field("children_1318_not_in_school_female", "integer"),
Field("children_1318_not_in_school_male", "integer"),
Field("school_attendance_barriers", "list:integer"),
Field("school_attendance_barriers_other"),
Field("school_assistance_available", "boolean"),
Field("school_assistance_tents_available", "boolean"),
Field("school_assistence_tents_source"),
Field("school_assistance_materials_available", "boolean"),
Field("school_assistance_materials_source"),
Field("school_assistance_other_available", "boolean"),
Field("school_assistance_other"),
Field("school_assistance_other_source"),
comments(),
migrate=migrate, *s3_meta_fields())
table.assessment_id.readable = table.assessment_id.writable = False
# @todo: onvalidation!
table.schools_total.label = T("Total number of schools in affected area")
table.schools_total.requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999))
table.schools_public.label = T("Number of public schools")
table.schools_public.requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999))
table.schools_private.label = T("Number of private schools")
table.schools_private.requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999))
table.schools_religious.label = T("Number of religious schools")
table.schools_religious.requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999))
shn_rat_label_and_tooltip(table.schools_destroyed,
"Number of schools destroyed/uninhabitable",
"uninhabitable = foundation and structure destroyed")
table.schools_destroyed.requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999))
shn_rat_label_and_tooltip(table.schools_damaged,
"Number of schools damaged but usable",
"windows broken, cracks in walls, roof slightly damaged")
table.schools_damaged.requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999))
shn_rat_label_and_tooltip(table.schools_salvmat,
"Salvage material usable from destroyed schools",
"What type of salvage material can be used from destroyed schools?",
multiple=True)
table.schools_salvmat.requires = IS_EMPTY_OR(IS_IN_SET(rat_schools_salvmat_types, zero=None, multiple=True))
table.schools_salvmat.represent = lambda opt, set=rat_schools_salvmat_types: \
shn_rat_represent_multiple(set, opt)
shn_rat_label_and_tooltip(table.alternative_study_places_available,
"Alternative places for studying available",
"Are there alternative places for studying?")
table.alternative_study_places_number.label = T("Number of alternative places for studying")
table.alternative_study_places_number.requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999))
shn_rat_label_and_tooltip(table.alternative_study_places,
"Alternative places for studying",
"Where are the alternative places for studying?",
multiple=True)
table.alternative_study_places.requires = IS_EMPTY_OR(IS_IN_SET(rat_alternative_study_places,
zero=None, multiple=True))
table.alternative_study_places.represent = lambda opt, set=rat_alternative_study_places: \
shn_rat_represent_multiple(set, opt)
table.alternative_study_places_other.label = T("Other alternative places for study")
shn_rat_label_and_tooltip(table.schools_open_pre_disaster,
"Number of schools open before disaster",
"How many primary/secondary schools were opening prior to the disaster?")
table.schools_open_pre_disaster.requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999))
shn_rat_label_and_tooltip(table.schools_open_post_disaster,
"Number of schools open now",
"How many of the primary/secondary schools are now open and running a regular schedule of class?")
table.schools_open_post_disaster.requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999))
shn_rat_label_and_tooltip(table.teachers_active_pre_disaster,
"Number of teachers before disaster",
"How many teachers worked in the schools prior to the disaster?")
table.teachers_active_pre_disaster.requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999))
shn_rat_label_and_tooltip(table.teachers_affected_by_disaster,
"Number of teachers affected by disaster",
"How many teachers have been affected by the disaster (affected = unable to work)?")
table.teachers_affected_by_disaster.requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999))
shn_rat_label_and_tooltip(table.children_0612_female,
"Girls 6-12 yrs in affected area",
"How many primary school age girls (6-12) are in the affected area?")
table.children_0612_female.requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999))
shn_rat_label_and_tooltip(table.children_0612_male,
"Boys 6-12 yrs in affected area",
"How many primary school age boys (6-12) are in the affected area?")
table.children_0612_male.requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999))
shn_rat_label_and_tooltip(table.children_0612_not_in_school_female,
"Girls 6-12 yrs not attending school",
"How many of the primary school age girls (6-12) in the area are not attending school?")
table.children_0612_not_in_school_female.requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999))
shn_rat_label_and_tooltip(table.children_0612_not_in_school_male,
"Boys 6-12 yrs not attending school",
"How many of the primary school age boys (6-12) in the area are not attending school?")
table.children_0612_not_in_school_male.requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999))
shn_rat_label_and_tooltip(table.children_1318_female,
"Girls 13-18 yrs in affected area",
"How many secondary school age girls (13-18) are in the affected area?")
table.children_1318_female.requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999))
shn_rat_label_and_tooltip(table.children_1318_male,
"Boys 13-18 yrs in affected area",
"How many secondary school age boys (13-18) are in the affected area?")
table.children_1318_male.requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999))
shn_rat_label_and_tooltip(table.children_1318_not_in_school_female,
"Girls 13-18 yrs not attending school",
"How many of the secondary school age girls (13-18) in the area are not attending school?")
table.children_1318_not_in_school_female.requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999))
shn_rat_label_and_tooltip(table.children_1318_not_in_school_male,
"Boys 13-18 yrs not attending school",
"How many of the secondary school age boys (13-18) in the area are not attending school?")
table.children_1318_not_in_school_male.requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999))
shn_rat_label_and_tooltip(table.school_attendance_barriers,
"Factors affecting school attendance",
"What are the factors affecting school attendance?",
multiple=True)
table.school_attendance_barriers.requires = IS_EMPTY_OR(IS_IN_SET(rat_school_attendance_barriers_opts,
zero=None, multiple=True))
table.school_attendance_barriers.represent = lambda opt, set=rat_school_attendance_barriers_opts: \
shn_rat_represent_multiple(set, opt)
table.school_attendance_barriers_other.label = T("Other factors affecting school attendance")
shn_rat_label_and_tooltip(table.school_assistance_available,
"School assistance received/expected",
"Have schools received or are expecting to receive any assistance?")
table.school_assistance_tents_available.label = T("School tents received")
table.school_assistence_tents_source.label = T("School tents, source")
table.school_assistance_materials_available.label = T("Education materials received")
table.school_assistance_materials_source.label = T("Education materials, source")
table.school_assistance_other_available.label = T("Other school assistance received")
table.school_assistance_other.label = T("Other school assistance, details")
table.school_assistance_other_source.label = T("Other school assistance, source")
# CRUD strings
s3.crud_strings[tablename] = rat_section_crud_strings
s3xrc.model.add_component(module, resourcename,
multiple = False,
joinby = dict(assess_rat="assessment_id"))
s3xrc.model.configure(table, deletable=False)
# Section 9 - Protection --------------------------------------------------
rat_fuzzy_quantity_opts = {
1: T("None"),
2: T("Few"),
3: T("Some"),
4: T("Many")
}
rat_quantity_opts = {
1: "1-10",
2: "11-50",
3: "51-100",
4: "100+"
}
rat_child_activity_opts = {
1: T("Playing"),
2: T("Domestic chores"),
3: T("School/studying"),
4: T("Doing nothing (no structured activity)"),
5: T("Working or other to provide money/food"),
99: T("Other (specify)")
}
rat_child_activity_post_disaster_opts = rat_child_activity_opts.copy()
rat_child_activity_post_disaster_opts.update({
6: T("Disaster clean-up/repairs")
})
resourcename = "section9"
tablename = "%s_%s" % (module, resourcename)
table = db.define_table(tablename,
assessment_id(),
Field("vulnerable_groups_safe_env", "boolean"),
Field("safety_children_women_affected", "boolean"),
Field("sec_incidents", "boolean"),
Field("sec_incidents_gbv", "boolean"),
Field("sec_current_needs"),
Field("children_separated", "integer"),
Field("children_separated_origin"),
Field("children_missing", "integer"),
Field("children_orphaned", "integer"),
Field("children_unattended", "integer"),
Field("children_disappeared", "integer"),
Field("children_evacuated", "integer"),
Field("children_evacuated_to"),
Field("children_with_older_caregivers", "integer"),
Field("children_in_disabled_homes", "boolean"),
Field("children_in_orphanages", "boolean"),
Field("children_in_boarding_schools", "boolean"),
Field("children_in_juvenile_detention", "boolean"),
Field("children_in_adult_prisons", "boolean"),
Field("people_in_adult_prisons", "boolean"),
Field("people_in_care_homes", "boolean"),
Field("people_in_institutions_est_total", "integer"),
Field("staff_in_institutions_present", "boolean"),
Field("adequate_food_water_in_institutions", "boolean"),
Field("child_activities_u12f_pre_disaster", "list:integer"),
Field("child_activities_u12f_pre_disaster_other"),
Field("child_activities_u12m_pre_disaster", "list:integer"),
Field("child_activities_u12m_pre_disaster_other"),
Field("child_activities_o12f_pre_disaster", "list:integer"),
Field("child_activities_o12f_pre_disaster_other"),
Field("child_activities_o12m_pre_disaster", "list:integer"),
Field("child_activities_o12m_pre_disaster_other"),
Field("child_activities_u12f_post_disaster", "list:integer"),
Field("child_activities_u12f_post_disaster_other"),
Field("child_activities_u12m_post_disaster", "list:integer"),
Field("child_activities_u12m_post_disaster_other"),
Field("child_activities_o12f_post_disaster", "list:integer"),
Field("child_activities_o12f_post_disaster_other"),
Field("child_activities_o12m_post_disaster", "list:integer"),
Field("child_activities_o12m_post_disaster_other"),
Field("coping_activities_elderly", "boolean"),
Field("coping_activities_women", "boolean"),
Field("coping_activities_disabled", "boolean"),
Field("coping_activities_minorities", "boolean"),
Field("coping_activities_adolescent", "boolean"),
Field("current_general_needs", "text"),
comments(),
migrate=migrate, *s3_meta_fields())
table.assessment_id.readable = table.assessment_id.writable = False
shn_rat_label_and_tooltip(table.vulnerable_groups_safe_env,
"Safe environment for vulnerable groups",
"Are the areas that children, older people, and people with disabilities live in, play in and walk through on a daily basis physically safe?")
shn_rat_label_and_tooltip(table.safety_children_women_affected,
"Safety of children and women affected by disaster",
"Has the safety and security of women and children in your community changed since the emergency?")
shn_rat_label_and_tooltip(table.sec_incidents,
"Known incidents of violence since disaster",
"Do you know of any incidents of violence?")
shn_rat_label_and_tooltip(table.sec_incidents_gbv,
"Known incidents of violence against women/girls",
"Without mentioning any names or indicating anyone, do you know of any incidents of violence against women or girls occuring since the disaster?")
shn_rat_label_and_tooltip(table.sec_current_needs,
"Needs to reduce vulnerability to violence",
"What should be done to reduce women and children's vulnerability to violence?")
shn_rat_label_and_tooltip(table.children_separated,
"Children separated from their parents/caregivers",
"Do you know of children separated from their parents or caregivers?")
table.children_separated.requires = IS_EMPTY_OR(IS_IN_SET(rat_fuzzy_quantity_opts, zero=None))
table.children_separated.represent = lambda opt: rat_fuzzy_quantity_opts.get(opt, UNKNOWN_OPT)
shn_rat_label_and_tooltip(table.children_separated_origin,
"Origin of the separated children",
"Where are the separated children originally from?")
shn_rat_label_and_tooltip(table.children_missing,
"Parents/Caregivers missing children",
"Do you know of parents/caregivers missing children?")
table.children_missing.requires = IS_EMPTY_OR(IS_IN_SET(rat_fuzzy_quantity_opts, zero=None))
table.children_missing.represent = lambda opt: rat_fuzzy_quantity_opts.get(opt, UNKNOWN_OPT)
shn_rat_label_and_tooltip(table.children_orphaned,
"Children orphaned by the disaster",
"Do you know of children that have been orphaned by the disaster?")
table.children_orphaned.requires = IS_EMPTY_OR(IS_IN_SET(rat_fuzzy_quantity_opts, zero=None))
table.children_orphaned.represent = lambda opt: rat_fuzzy_quantity_opts.get(opt, UNKNOWN_OPT)
shn_rat_label_and_tooltip(table.children_evacuated,
"Children that have been sent to safe places",
"Do you know of children that have been sent to safe places?")
table.children_evacuated.requires = IS_EMPTY_OR(IS_IN_SET(rat_fuzzy_quantity_opts, zero=None))
table.children_evacuated.represent = lambda opt: rat_fuzzy_quantity_opts.get(opt, UNKNOWN_OPT)
shn_rat_label_and_tooltip(table.children_evacuated_to,
"Places the children have been sent to",
"Where have the children been sent?")
shn_rat_label_and_tooltip(table.children_unattended,
"Children living on their own (without adults)",
"Do you know of children living on their own (without adults)?")
table.children_unattended.requires = IS_EMPTY_OR(IS_IN_SET(rat_fuzzy_quantity_opts, zero=None))
table.children_unattended.represent = lambda opt: rat_fuzzy_quantity_opts.get(opt, UNKNOWN_OPT)
shn_rat_label_and_tooltip(table.children_disappeared,
"Children who have disappeared since the disaster",
"Do you know of children that have disappeared without explanation in the period since the disaster?")
table.children_disappeared.requires = IS_EMPTY_OR(IS_IN_SET(rat_fuzzy_quantity_opts, zero=None))
table.children_disappeared.represent = lambda opt: rat_fuzzy_quantity_opts.get(opt, UNKNOWN_OPT)
shn_rat_label_and_tooltip(table.children_with_older_caregivers,
"Older people as primary caregivers of children",
"Do you know of older people who are primary caregivers of children?")
table.children_with_older_caregivers.requires = IS_EMPTY_OR(IS_IN_SET(rat_fuzzy_quantity_opts, zero=None))
table.children_with_older_caregivers.represent = lambda opt: rat_fuzzy_quantity_opts.get(opt, UNKNOWN_OPT)
shn_rat_label_and_tooltip(table.children_in_disabled_homes,
"Children in homes for disabled children",
"Are there children living in homes for disabled children in this area?")
shn_rat_label_and_tooltip(table.children_in_orphanages,
"Children in orphanages",
"Are there children living in orphanages in this area?")
shn_rat_label_and_tooltip(table.children_in_boarding_schools,
"Children in boarding schools",
"Are there children living in boarding schools in this area?")
shn_rat_label_and_tooltip(table.children_in_juvenile_detention,
"Children in juvenile detention",
"Are there children living in juvenile detention in this area?")
shn_rat_label_and_tooltip(table.children_in_adult_prisons,
"Children in adult prisons",
"Are there children living in adult prisons in this area?")
shn_rat_label_and_tooltip(table.people_in_adult_prisons,
"Adults in prisons",
"Are there adults living in prisons in this area?")
shn_rat_label_and_tooltip(table.people_in_care_homes,
"Older people in care homes",
"Are there older people living in care homes in this area?")
shn_rat_label_and_tooltip(table.people_in_institutions_est_total,
"Estimated total number of people in institutions",
"What is the estimated total number of people in all of these institutions?")
table.people_in_institutions_est_total.requires = IS_EMPTY_OR(IS_IN_SET(rat_quantity_opts, zero=None))
table.people_in_institutions_est_total.represent = lambda opt: rat_quantity_opts.get(opt, UNKNOWN_OPT)
shn_rat_label_and_tooltip(table.staff_in_institutions_present,
"Staff present and caring for residents",
"Are there staff present and caring for the residents in these institutions?")
shn_rat_label_and_tooltip(table.adequate_food_water_in_institutions,
"Adequate food and water available",
"Is adequate food and water available for these institutions?")
shn_rat_label_and_tooltip(table.child_activities_u12f_pre_disaster,
"Activities of girls <12yrs before disaster",
"How did girls <12yrs spend most of their time prior to the disaster?",
multiple=True)
table.child_activities_u12f_pre_disaster.requires = IS_EMPTY_OR(IS_IN_SET(rat_child_activity_opts,
zero=None, multiple=True))
table.child_activities_u12f_pre_disaster.represent = lambda opt, set=rat_child_activity_opts: \
shn_rat_represent_multiple(set, opt)
table.child_activities_u12f_pre_disaster_other.label = T("Other activities of girls<12yrs before disaster")
shn_rat_label_and_tooltip(table.child_activities_u12m_pre_disaster,
"Activities of boys <12yrs before disaster",
"How did boys <12yrs spend most of their time prior to the disaster?",
multiple=True)
table.child_activities_u12m_pre_disaster.requires = IS_EMPTY_OR(IS_IN_SET(rat_child_activity_opts,
zero=None, multiple=True))
table.child_activities_u12m_pre_disaster.represent = lambda opt, set=rat_child_activity_opts: \
shn_rat_represent_multiple(set, opt)
table.child_activities_u12m_pre_disaster_other.label = T("Other activities of boys <12yrs before disaster")
shn_rat_label_and_tooltip(table.child_activities_o12f_pre_disaster,
"Activities of girls 13-17yrs before disaster",
"How did boys girls 13-17yrs spend most of their time prior to the disaster?",
multiple=True)
table.child_activities_o12f_pre_disaster.requires = IS_EMPTY_OR(IS_IN_SET(rat_child_activity_opts,
zero=None, multiple=True))
table.child_activities_o12f_pre_disaster.represent = lambda opt, set=rat_child_activity_opts: \
shn_rat_represent_multiple(set, opt)
table.child_activities_o12f_pre_disaster_other.label = T("Other activities of girls 13-17yrs before disaster")
shn_rat_label_and_tooltip(table.child_activities_o12m_pre_disaster,
"Activities of boys 13-17yrs before disaster",
"How did boys 13-17yrs spend most of their time prior to the disaster?",
multiple=True)
table.child_activities_o12m_pre_disaster.requires = IS_EMPTY_OR(IS_IN_SET(rat_child_activity_opts,
zero=None, multiple=True))
table.child_activities_o12m_pre_disaster.represent = lambda opt, set=rat_child_activity_opts: \
shn_rat_represent_multiple(set, opt)
table.child_activities_o12m_pre_disaster_other.label = T("Other activities of boys 13-17yrs before disaster")
shn_rat_label_and_tooltip(table.child_activities_u12f_post_disaster,
"Activities of girls <12yrs now",
"How do girls <12yrs spend most of their time now?",
multiple=True)
table.child_activities_u12f_post_disaster.requires = IS_EMPTY_OR(IS_IN_SET(rat_child_activity_opts,
zero=None, multiple=True))
table.child_activities_u12f_post_disaster.represent = lambda opt, set=rat_child_activity_opts: \
shn_rat_represent_multiple(set, opt)
table.child_activities_u12f_post_disaster_other.label = T("Other activities of girls<12yrs")
shn_rat_label_and_tooltip(table.child_activities_u12m_post_disaster,
"Activities of boys <12yrs now",
"How do boys <12yrs spend most of their time now?",
multiple=True)
table.child_activities_u12m_post_disaster.requires = IS_EMPTY_OR(IS_IN_SET(rat_child_activity_opts,
zero=None, multiple=True))
table.child_activities_u12m_post_disaster.represent = lambda opt, set=rat_child_activity_opts: \
shn_rat_represent_multiple(set, opt)
table.child_activities_u12m_post_disaster_other.label = T("Other activities of boys <12yrs")
shn_rat_label_and_tooltip(table.child_activities_o12f_post_disaster,
"Activities of girls 13-17yrs now",
"How do girls 13-17yrs spend most of their time now?",
multiple=True)
table.child_activities_o12f_post_disaster.requires = IS_EMPTY_OR(IS_IN_SET(rat_child_activity_opts,
zero=None, multiple=True))
table.child_activities_o12f_post_disaster.represent = lambda opt, set=rat_child_activity_opts: \
shn_rat_represent_multiple(set, opt)
table.child_activities_o12f_post_disaster_other.label = T("Other activities of girls 13-17yrs")
shn_rat_label_and_tooltip(table.child_activities_o12m_post_disaster,
"Activities of boys 13-17yrs now",
"How do boys 13-17yrs spend most of their time now?",
multiple=True)
table.child_activities_o12m_post_disaster.requires = IS_EMPTY_OR(IS_IN_SET(rat_child_activity_opts,
zero=None, multiple=True))
table.child_activities_o12m_post_disaster.represent = lambda opt, set=rat_child_activity_opts: \
shn_rat_represent_multiple(set, opt)
table.child_activities_o12m_post_disaster_other.label = T("Other activities of boys 13-17yrs")
shn_rat_label_and_tooltip(table.coping_activities_elderly,
"Older people participating in coping activities",
"Do older people in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)")
shn_rat_label_and_tooltip(table.coping_activities_women,
"Women participating in coping activities",
"Do women in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)")
shn_rat_label_and_tooltip(table.coping_activities_disabled,
"Disabled participating in coping activities",
"Do people with disabilities in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)")
shn_rat_label_and_tooltip(table.coping_activities_minorities,
"Minorities participating in coping activities",
"Do minority members in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)")
shn_rat_label_and_tooltip(table.coping_activities_adolescent,
"Adolescent participating in coping activities",
"Do adolescent and youth in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)")
shn_rat_label_and_tooltip(table.current_general_needs,
"Current greatest needs of vulnerable groups",
"In general, what are the greatest needs of older people, people with disabilities, children, youth and women in your community?")
# CRUD strings
s3.crud_strings[tablename] = rat_section_crud_strings
s3xrc.model.add_component(module, resourcename,
multiple = False,
joinby = dict(assess_rat="assessment_id"))
s3xrc.model.configure(table, deletable=False)
# -----------------------------------------------------------------------------
def shn_rat_summary(r, **attr):
""" Aggregate reports """
if r.name == "rat":
if r.representation == "html":
return dict()
elif r.representation == "xls":
return None
else:
# Other formats?
raise HTTP(501, body=BADFORMAT)
else:
raise HTTP(501, body=BADMETHOD)
s3xrc.model.set_method(module, "rat",
method="summary",
action=shn_rat_summary)
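
# Illustrative note: s3xrc.model.set_method() registers shn_rat_summary as a
# custom REST method on the "rat" resource; assuming the standard web2py URL
# scheme it should be reachable as e.g. GET /assess/rat/summary.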
# -----------------------------------------------------------------------------
# END | mit | -4,170,479,478,176,753,000 | 49.84199 | 206 | 0.582585 | false |
AgalmicVentures/Environment | scripts/Parallelize.py | 1 | 3978 | #!/usr/bin/env python3
# Copyright (c) 2015-2021 Agalmic Ventures LLC (www.agalmicventures.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import argparse
import datetime
import multiprocessing
import os
import subprocess
import sys
import time
def main(argv=None):
"""
The main function of this script.
:param argv: List[str] Arguments to parse (default sys.argv)
:return: int
"""
parser = argparse.ArgumentParser(description='Parallel Runner')
parser.add_argument('run_script', help='The script to execute.')
parser.add_argument('run_id', help='A unique identifier for the run.')
parser.add_argument('configs', nargs='*', help='A set of configurations to run.')
parser.add_argument('-p', '--processes', type=int, default=multiprocessing.cpu_count(),
help='Number of processes to run.')
parser.add_argument('-m', '--max-load', type=float,
help='Maximum value of 1-minute load average; above this do not spawn additional processes.')
parser.add_argument('-s', '--sleep', type=float, default=5.0,
help='Time to sleep between checking process completions.')
if argv is None:
argv = sys.argv
arguments = parser.parse_args(argv[1:])
try:
getLoadAvg = os.getloadavg
except AttributeError:
getLoadAvg = lambda: (0, 0, 0)
if arguments.max_load is not None:
print('WARNING: Load averages not available on this platform; cannot limit.')
startTime = datetime.datetime.now()
print('[%s] Starting %d processes for %d configs' % (startTime, arguments.processes, len(arguments.configs)))
#Start processes
processes = {}
for i, config in enumerate(arguments.configs):
#Wait for a CPU to become available
while len(processes) >= arguments.processes or (
arguments.max_load is not None and getLoadAvg()[0] > arguments.max_load):
time.sleep(arguments.sleep)
#Check for finished processes
finishedConfigs = []
for runningConfig, process in processes.items():
exitCode = process.poll()
if exitCode is not None:
if exitCode != 0:
						sys.stdout.write('[%s] Config %s failed with exit code %d.\n' % (
							datetime.datetime.now(), runningConfig, exitCode))
sys.stdout.flush()
finishedConfigs.append(runningConfig)
for finishedConfig in finishedConfigs:
del processes[finishedConfig]
sys.stdout.write('[%s] Starting %.4d/%.4d (%5.1f%%).\n' % (
datetime.datetime.now(), i + 1, len(arguments.configs), 100.0 * (i + 1) / len(arguments.configs)))
sys.stdout.flush()
process = subprocess.Popen([arguments.run_script, arguments.run_id, config], shell=False)
processes[config] = process
#Throttle process creation if targeting a max load
if arguments.max_load is not None:
time.sleep(arguments.sleep)
#Wait for processes to finish
for process in processes.values():
process.wait()
endTime = datetime.datetime.now()
print('[%s] Finished in %.2f seconds.' % (endTime, (endTime - startTime).total_seconds()))
return 0
if __name__ == '__main__':
sys.exit(main())
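
#Example invocation (illustrative; the run script and config names are
#hypothetical, not part of this repository):
#  ./Parallelize.py ./run_one.sh run42 configA.json configB.json -p 4 -m 8.0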
| mit | -460,820,537,746,613,000 | 36.17757 | 110 | 0.726244 | false |
HybridF5/jacket | jacket/api/compute/openstack/compute/legacy_v2/contrib/certificates.py | 1 | 2777 | # Copyright (c) 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob.exc
from jacket.api.compute.openstack import extensions
import jacket.compute.cert.rpcapi
from jacket.compute import exception
from jacket.i18n import _
authorize = extensions.extension_authorizer('compute', 'certificates')
def _translate_certificate_view(certificate, private_key=None):
return {
'data': certificate,
'private_key': private_key,
}
class CertificatesController(object):
"""The x509 Certificates API controller for the OpenStack API."""
def __init__(self):
self.cert_rpcapi = jacket.compute.cert.rpcapi.CertAPI()
super(CertificatesController, self).__init__()
def show(self, req, id):
"""Return certificate information."""
context = req.environ['compute.context']
authorize(context)
if id != 'root':
msg = _("Only root certificate can be retrieved.")
raise webob.exc.HTTPNotImplemented(explanation=msg)
try:
cert = self.cert_rpcapi.fetch_ca(context,
project_id=context.project_id)
except exception.CryptoCAFileNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
return {'certificate': _translate_certificate_view(cert)}
def create(self, req, body=None):
"""Create a certificate."""
context = req.environ['compute.context']
authorize(context)
pk, cert = self.cert_rpcapi.generate_x509_cert(context,
user_id=context.user_id, project_id=context.project_id)
return {'certificate': _translate_certificate_view(cert, pk)}
class Certificates(extensions.ExtensionDescriptor):
"""Certificates support."""
name = "Certificates"
alias = "os-certificates"
namespace = ("http://docs.openstack.org/compute/ext/"
"certificates/api/v1.1")
updated = "2012-01-19T00:00:00Z"
def get_resources(self):
resources = []
res = extensions.ResourceExtension('os-certificates',
CertificatesController(),
member_actions={})
resources.append(res)
return resources
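
    # Illustrative note (assumed routing, not stated in this file): loaded as
    # a standard extension, this resource serves e.g.
    #   GET  /v2/{tenant_id}/os-certificates/root  -> CertificatesController.show
    #   POST /v2/{tenant_id}/os-certificates       -> CertificatesController.create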
| apache-2.0 | 5,370,336,433,982,505,000 | 34.151899 | 78 | 0.658264 | false |
LaserTron/web-crs | control.py | 1 | 8891 | import web
import gradebook
import hashlib
import time
#
#Requires a SQLite DB 'control.db' with tables 'students', 'instructors',
#'sections' and 'sessions' (see the queries below)
def sha1digest(s):
salt = "boy this is salty frdew34567uhygfrer6uhgfrtyuhijhbgftrdfg"
ho = hashlib.sha1(s+salt)
return ho.hexdigest()
ctrl = web.database(dbn="sqlite",db="control.db")
ctrl.ctx.db.text_factory = str  # have sqlite3 return TEXT values as str (bytestrings) rather than unicode
def isInTable(table,col,entry):
wherestring = '{0}=\"{1}\"'.format(col,entry)
res = ctrl.select(table,where=wherestring)
return bool(res)
def getEntry(table,col,key,ID):
"""
returns value of column from corresponding key/ID. Returns
only one entry.
"""
wherestring = "{0}=\"{1}\"".format(key,ID)
bob = ctrl.select(table,where=wherestring,what=col)
if bool(bob): #Calling bool(bob) depletes the iterator
bob = ctrl.select(table,where=wherestring,what=col)
return bob[0][col]
else:
return None
def isStudent(user):
return isInTable("students","username",user)
def isInstructor(user):
return isInTable("instructors","username",user)
def getPassHash(user):
"""
    Returns the hash of the user's password, None if the user has no
    password set, or False if the user doesn't exist.
"""
emp = lambda x: x==None or x=="" or x.isspace()
if isStudent(user):
paswd = getEntry("students","password","username",user)
if emp(paswd):
return None
else:
return paswd
elif isInstructor(user):
paswd = getEntry("instructors","password","username",user)
if emp(paswd):
return None
else:
return paswd
else:
return False
def setPassword(user,paswd):
"""
Stores a hash of the user's password. Returns false if
the user is not found.
"""
passhash = sha1digest(paswd)
sqldic={}
sqldic['where']="username = \"{0}\"".format(user)
sqldic['password']=passhash
if isStudent(user):
ctrl.update("students",**sqldic)
elif isInstructor(user):
ctrl.update("instructors",**sqldic)
else:
return False
def clearPassword(user):
"""
Deletes the password
"""
sqldic={}
sqldic['where']="username = \"{0}\"".format(user)
sqldic['password']=None
if isStudent(user):
ctrl.update("students",**sqldic)
elif isInstructor(user):
ctrl.update("instructors",**sqldic)
else:
return False
def validatePassword(user, passhash):
    return passhash == getPassHash(user)
def addInstructor(user):
user=user.strip()
res = isInTable('instructors','username',user)
if not res:
ctrl.insert('instructors', username=user)
# def getUserSection(user):
# wherestring="username=\"{0}\"".format(user)
# bob=ctrl.select('Users', where=wherestring, what='section')
# return bob[0]['section']
def delUser(user):#UPDATE
wherestring="username=\"{0}\"".format(user)
ctrl.delete('Users',where=wherestring)
def assignInstructor(instr,section):
wherestring = 'name=\'{0}\''.format(section)
ctrl.update("sections", where=wherestring, instructor=instr)
def assignSession(qu,section):
wherestring = 'name=\'{0}\''.format(section)
ctrl.update("sections", where=wherestring, session=qu)
def getSections():
"""
Returns list of section names
"""
bob= ctrl.select('sections',what='name')
output = []
for i in bob:
output.append(i['name'])
return output
def getAssignedQuiz(sec):
"""
Returns the quiz currently assigned to a section
"""
return getEntry('sections','session','name',sec)
def getInstrSections(instr):
"""
Returns list of sections assigned to an instructor
"""
wherestring = 'instructor = \"{0}\"'.format(instr)
bob= ctrl.select('sections',what='name',where=wherestring)
output = []
for i in bob:
output.append(i['name'])
return output
def addSection(nam):
nam=nam.strip()
res = isInTable('sections','name', nam)
if not res:#no user there
ctrl.insert('sections', name = nam)
def addStudent(user,sec):#adds a student
user=user.strip()
res = isInTable('students','username', user)
if not res:#no user there
ctrl.insert('students',username = user, section = sec)
sec = sec.strip()#ensures section is added as well, if necessary
res = isInTable('sections','name',sec)
if not res:
addSection(sec)
def populateSections():
"""
This method populates the section list from the student roster.
"""
#ISSUE this is a hack
stus = ctrl.select("students")
for i in stus:
addStudent(i["username"],i["section"])
def getStudentsBySec(section):
"""
Returns the list of student usernames in a given section
"""
wherestring = "section = \"{0}\"".format(section)
students = ctrl.select("students",where=wherestring)
output = []
for i in students:
output.append(i['username'])
return output
def sessionAdd(sesname):
"""
Adds an entry to the sessions table with initialized states
"""
if isInTable("sessions","name",sesname):
return None
ctrl.insert("sessions",name = sesname, page=0, state="init")
def getInstrSession(instr):
return getEntry("instructors","session","username",instr)
def setInstrSession(instr,session):
wherestring = "username = \"{0}\"".format(instr)
sqldic={'where':wherestring,'session':session}
ctrl.update("instructors",**sqldic)
def getSessionSection(instr):
sess = getInstrSession(instr)
return getEntry("sections","name","session",sess)
def getSessionStudents(instr):
sec = getSessionSection(instr)
return getStudentsBySec(sec)
def getSessionPage(session):
return getEntry("sessions","page","name",session)
def getSessionState(session):
return getEntry("sessions","state","name",session)
def setSessionState(session,state):
sqldic={
"where" : "name = \"{0}\"".format(session),
"state": state
}
ctrl.update("sessions",**sqldic)
def getStudentSession(user):
sec = getEntry("students","section","username",user)
return getEntry("sections","session","name",sec)
def getStudentState(student):
sess = getStudentSession(student)
return getEntry("sessions","state","name",sess)
def getStudentPage(user):
return getSessionPage(getStudentSession(user))
def getUserSession(user):
if isStudent(user):
return getStudentSession(user)
if isInstructor(user):
return getInstrSession(user)
def getUserPage(user):
sess = getUserSession(user)
return getSessionPage(sess)
def getUserState(user):
sess = getUserSession(user)
return getSessionState(sess)
def updateEntry(table,col,key,ID,newvalue):
"""
Enters newvalue in the column corresponding to the given key/ID pair
"""
wherestring = "{0} = \"{1}\"".format(key,ID)
sqldict={"where":wherestring, col:newvalue}
ctrl.update(table,**sqldict) #**converts dict to keywords
def getQuizLength(session):
"""
Returns the number of questions in the quiz assigned to
session
"""
quizstr = gradebook.getSessionQuestions(session)
quizli = quizstr.split(',')
return len(quizli)
def advanceSession(session):
"""
Increments the question number. Sets the session to finished if
finished.
"""
length = getQuizLength(session)
curpage = getSessionPage(session)
if curpage >= length-1:
wherestring = "name = \"{0}\"".format(session)
sqldict={"where":wherestring,"state":"finished","page":curpage+1}
ctrl.update("sessions",**sqldict)
return False
else:
sqldict={
"where" : "name = \"{0}\"".format(session),
"page" : curpage+1,
"state" : "init"
}
ctrl.update("sessions",**sqldict)
return True
def setUltimatum(instr,duration):
"""
    Starts a countdown for the instructor's current session: stores the
    unix timestamp at which the time runs out and sets the session state
    to "ultimatum".
"""
sess = getInstrSession(instr)
now = time.time()
then = now+duration+1
sqldic={
"where":"name = \"{0}\"".format(sess),
"ultimatum":then,
"state":"ultimatum",
}
ctrl.update("sessions",**sqldic)
def giveTimeLeft(user):
"""
Computes the time left in the ultimatum. If negative,
sets session to closed. Otherwise returns the string representation of
the number of seconds remaining.
"""
if isInstructor(user):
sess = getInstrSession(user)
else:
sess = getStudentSession(user)
timeup = getEntry("sessions","ultimatum","name",sess)
now = time.time()
left = int(timeup-now)
if left < -1:
setSessionState(sess,"closed")
return "closed"
return str(left)
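
# Minimal usage sketch (illustrative only; the user and section names are
# hypothetical):
#
#   addStudent("alice", "sec1")        # enrolls alice, creating 'sec1' if new
#   addInstructor("bob")
#   assignInstructor("bob", "sec1")
#   setPassword("alice", "s3cret")
#   validatePassword("alice", sha1digest("s3cret"))  # -> True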
| lgpl-3.0 | 3,575,754,073,379,911,000 | 27.225397 | 79 | 0.637611 | false |
kmunve/processgpr | core/picks.py | 1 | 1331 | '''
The I{PICKS} class handles picked data.
Created on 20.10.2010
@author: Karsten Mueller
'''
class PICKS():
def __init__(self):
self.trace = []
self.twt = []
self.x = []
self.y = []
self.elev = []
self.tell = 0 # counts the picks
self.pltid = 0 # controls if picks are plotted
def append(self, trace, twt, x, y, elev):
self.trace.append(trace)
self.twt.append(twt)
self.x.append(x)
self.y.append(y)
self.elev.append(elev)
self.tell += 1
def save(self, file):
fid = open(file, 'w')
fid.write("#trace, twt, x, y, elev\n")
for n in range(self.tell):
fid.write('%i, %f, %f, %f, %f\n' % (self.trace[n], self.twt[n],
self.x[n], self.y[n],
self.elev[n]))
fid.close()
def load(self, file):
        from numpy import loadtxt, dtype  # use builtin int/float: numpy.int/.float are deprecated aliases
        dt = dtype([('trace', int), ('twt', float),
                    ('x', float), ('y', float), ('elev', float)])
data = loadtxt(file, dtype=dt, delimiter=',')
self.trace = data['trace']
self.twt = data['twt']
self.x = data['x']
self.y = data['y']
self.elev = data['elev'] | gpl-2.0 | -3,223,778,338,680,800,000 | 27.340426 | 75 | 0.464313 | false |
ReCodEx/monitor | monitor/zeromq_connection.py | 1 | 3194 | #!/usr/bin/env python3
"""
Handle zeromq socket.
"""
import zmq
import json
class ServerConnection:
"""
Class responsible for creating zeromq socket (server) and receiving
    messages from connected clients. Each message arrives as a multipart
    frame [channel_id, command, task_id, task_state]; the payload is
    re-encoded as JSON and forwarded to websocket clients subscribed to
    channel <channel_id>.
"""
def __init__(self, address, port, logger):
"""
Initialize new instance with given address and port.
:param address: String representation of IP address
to listen to or a hostname.
:param port: String port where to listen.
:param logger: System logger
"""
self._logger = logger
context = zmq.Context()
self._receiver = context.socket(zmq.ROUTER)
self._receiver.setsockopt(zmq.IDENTITY, b"recodex-monitor")
address = "tcp://{}:{}".format(address, port)
self._receiver.bind(address)
self._logger.info("zeromq server initialized at {}".format(address))
def start(self, message_callback):
"""
Start receiving messages from underlying zeromq socket.
:param message_callback: Function to be called when new messages arrived.
This function should not block for long. Required are two parameters, first
is id of stream and second is text of the message. Both are strings.
:return: True if exited normally (by "exit" message with ID 0), False if
socket error occurred.
"""
while True:
# try to receive a message
try:
message = self._receiver.recv_multipart()
self._logger.debug("zeromq server: got message '{}'".format(message))
except Exception as e:
self._logger.error("zeromq server: socket error: {}".format(e))
return False
# split given message
try:
"""
decode the message with following parts:
0 - zeromq identity of sender
1 - byte array with channel id
2 - byte array with message command
3 - byte array with message task_id - only for TASK command
4 - byte array with message task_state - only for TASK command
"""
decoded_message = [item.decode() for item in message[1:]]
client_id = decoded_message[0]
keys = ["command", "task_id", "task_state"]
data = json.dumps(dict(zip(keys, decoded_message[1:])), sort_keys=True)
except ValueError:
continue
if client_id == "0" and data == '{"command": "exit"}':
self._logger.info("zeromq server: got shutdown command")
break
# call registered callback with given data
message_callback(client_id, data)
# after last message (command FINISHED) send also poison pill
# to close listening sockets
if decoded_message[1] == "FINISHED":
message_callback(client_id, None)
return True
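
# Example client (illustrative sketch, not part of the original module): a
# DEALER socket paired with the ROUTER above sends multipart frames
# [channel_id, command, task_id, task_state]; the address/port are hypothetical.
#
#   import zmq
#   sock = zmq.Context().socket(zmq.DEALER)
#   sock.connect("tcp://127.0.0.1:5001")
#   sock.send_multipart([b"42", b"TASK", b"task-1", b"COMPLETED"])
#   sock.send_multipart([b"42", b"FINISHED"])   # also emits the poison pill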
| mit | 1,098,159,602,580,343,600 | 39.948718 | 87 | 0.576706 | false |
nuagenetworks/tempest | tempest/tests/lib/common/utils/test_data_utils.py | 1 | 6367 | # Copyright 2014 NEC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from tempest.lib.common.utils import data_utils
from tempest.tests.lib import base
class TestDataUtils(base.TestCase):
def test_rand_uuid(self):
actual = data_utils.rand_uuid()
self.assertIsInstance(actual, str)
self.assertRegexpMatches(actual, "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]"
"{4}-[0-9a-f]{4}-[0-9a-f]{12}$")
actual2 = data_utils.rand_uuid()
self.assertNotEqual(actual, actual2)
def test_rand_uuid_hex(self):
actual = data_utils.rand_uuid_hex()
self.assertIsInstance(actual, str)
self.assertRegexpMatches(actual, "^[0-9a-f]{32}$")
actual2 = data_utils.rand_uuid_hex()
self.assertNotEqual(actual, actual2)
def test_rand_name(self):
actual = data_utils.rand_name()
self.assertIsInstance(actual, str)
actual2 = data_utils.rand_name()
self.assertNotEqual(actual, actual2)
actual = data_utils.rand_name('foo')
self.assertTrue(actual.startswith('foo'))
actual2 = data_utils.rand_name('foo')
        self.assertTrue(actual2.startswith('foo'))
self.assertNotEqual(actual, actual2)
def test_rand_name_with_prefix(self):
actual = data_utils.rand_name(prefix='prefix-str')
self.assertIsInstance(actual, str)
self.assertRegexpMatches(actual, "^prefix-str-")
actual2 = data_utils.rand_name(prefix='prefix-str')
self.assertNotEqual(actual, actual2)
def test_rand_password(self):
actual = data_utils.rand_password()
self.assertIsInstance(actual, str)
self.assertRegexpMatches(actual, "[A-Za-z0-9~!@#$%^&*_=+]{15,}")
actual2 = data_utils.rand_password()
self.assertNotEqual(actual, actual2)
def test_rand_password_with_len(self):
actual = data_utils.rand_password(8)
self.assertIsInstance(actual, str)
self.assertEqual(len(actual), 8)
self.assertRegexpMatches(actual, "[A-Za-z0-9~!@#$%^&*_=+]{8}")
actual2 = data_utils.rand_password(8)
self.assertNotEqual(actual, actual2)
def test_rand_password_with_len_2(self):
actual = data_utils.rand_password(2)
self.assertIsInstance(actual, str)
self.assertEqual(len(actual), 3)
self.assertRegexpMatches(actual, "[A-Za-z0-9~!@#$%^&*_=+]{3}")
actual2 = data_utils.rand_password(2)
self.assertNotEqual(actual, actual2)
def test_rand_url(self):
actual = data_utils.rand_url()
self.assertIsInstance(actual, str)
self.assertRegexpMatches(actual, "^https://url-[0-9]*\.com$")
actual2 = data_utils.rand_url()
self.assertNotEqual(actual, actual2)
def test_rand_int(self):
actual = data_utils.rand_int_id()
self.assertIsInstance(actual, int)
actual2 = data_utils.rand_int_id()
self.assertNotEqual(actual, actual2)
def test_rand_mac_address(self):
actual = data_utils.rand_mac_address()
self.assertIsInstance(actual, str)
self.assertRegexpMatches(actual, "^([0-9a-f][0-9a-f]:){5}"
"[0-9a-f][0-9a-f]$")
actual2 = data_utils.rand_mac_address()
self.assertNotEqual(actual, actual2)
def test_parse_image_id(self):
actual = data_utils.parse_image_id("/foo/bar/deadbeaf")
self.assertEqual("deadbeaf", actual)
def test_arbitrary_string(self):
actual = data_utils.arbitrary_string()
self.assertEqual(actual, "test")
actual = data_utils.arbitrary_string(size=30, base_text="abc")
self.assertEqual(actual, "abc" * int(30 / len("abc")))
actual = data_utils.arbitrary_string(size=5, base_text="deadbeaf")
self.assertEqual(actual, "deadb")
def test_random_bytes(self):
actual = data_utils.random_bytes() # default size=1024
self.assertIsInstance(actual, str)
self.assertRegexpMatches(actual, "^[\x00-\xFF]{1024}")
actual2 = data_utils.random_bytes()
self.assertNotEqual(actual, actual2)
actual = data_utils.random_bytes(size=2048)
self.assertRegexpMatches(actual, "^[\x00-\xFF]{2048}")
def test_get_ipv6_addr_by_EUI64(self):
actual = data_utils.get_ipv6_addr_by_EUI64('2001:db8::',
'00:16:3e:33:44:55')
self.assertIsInstance(actual, netaddr.IPAddress)
self.assertEqual(actual,
netaddr.IPAddress('2001:db8::216:3eff:fe33:4455'))
def test_get_ipv6_addr_by_EUI64_with_IPv4_prefix(self):
ipv4_prefix = '10.0.8'
mac = '00:16:3e:33:44:55'
self.assertRaises(TypeError, data_utils.get_ipv6_addr_by_EUI64,
ipv4_prefix, mac)
def test_get_ipv6_addr_by_EUI64_bad_cidr_type(self):
bad_cidr = 123
mac = '00:16:3e:33:44:55'
self.assertRaises(TypeError, data_utils.get_ipv6_addr_by_EUI64,
bad_cidr, mac)
def test_get_ipv6_addr_by_EUI64_bad_cidr_value(self):
bad_cidr = 'bb'
mac = '00:16:3e:33:44:55'
self.assertRaises(TypeError, data_utils.get_ipv6_addr_by_EUI64,
bad_cidr, mac)
def test_get_ipv6_addr_by_EUI64_bad_mac_value(self):
cidr = '2001:db8::'
bad_mac = '00:16:3e:33:44:5Z'
self.assertRaises(TypeError, data_utils.get_ipv6_addr_by_EUI64,
cidr, bad_mac)
def test_get_ipv6_addr_by_EUI64_bad_mac_type(self):
cidr = '2001:db8::'
bad_mac = 99999999999999999999
self.assertRaises(TypeError, data_utils.get_ipv6_addr_by_EUI64,
cidr, bad_mac)
| apache-2.0 | -8,855,882,504,040,678,000 | 38.302469 | 78 | 0.614732 | false |
any1m1c/ipc20161/lista4/ipc_lista4.19.py | 1 | 1551 | # TEAM 2
#
# Ana Beatriz Frota - 1615310027
#
#
#
#Kylciane Cristiny Lopes Freitas - 1615310052
#Question 19
votos = [0, 0, 0, 0, 0, 0]
sistemas = ["1- Windows Server", "2- Unix", "3- Linux", "4- Netware", "5- Mac OS", "6- Other", "0- Leave the poll"]
continua = True
total = 0
porc_votos = []
while continua:
    print("Which is the best operating system for use on servers?")
    for i in range(len(sistemas)):  # also display the "0- Leave the poll" option
        print(sistemas[i])
    escolha = int(raw_input())
    if 1 <= escolha <= 6:
        votos[escolha - 1] += 1
    elif escolha == 0:
        continua = False
    else:
        print("please enter a valid value!")
for i in range(len(votos)):
    total += votos[i]
for i in range(len(votos)):
    if total > 0:
        x = votos[i] * 100.0 / total
    else:
        x = 0  # nobody voted; avoid division by zero
    porc_votos.append(x)
print("Operating System\t Votes\t %")
print("----------------\t -----\t -\n")
mensagem = ''
espaco1 = 19
espaco2 = 5
espaco = ' '
for i in range(len(votos)):
    campo1 = espaco * (espaco1 - len(sistemas[i]))
    campo2 = espaco * (espaco2 - len(str(votos[i])))
    mensagem = sistemas[i] + campo1 + "\t" + str(votos[i]) + campo2 + "\t" + str(porc_votos[i])
    print(mensagem)
print("\n----------------\t -----\t -")
print("Total:" + "\t" + str(total))
| apache-2.0 | -2,458,113,463,606,470,700 | 21.794118 | 111 | 0.53871 | false |
openstack/manila | manila/tests/api/middleware/test_faults.py | 1 | 7228 | # Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
import webob
import webob.dec
import webob.exc
from manila.api.middleware import fault
from manila.api.openstack import wsgi
from manila import exception
from manila import test
class TestFaults(test.TestCase):
"""Tests covering `manila.api.openstack.faults:Fault` class."""
def _prepare_xml(self, xml_string):
"""Remove characters from string which hinder XML equality testing."""
xml_string = xml_string.replace(" ", "")
xml_string = xml_string.replace("\n", "")
xml_string = xml_string.replace("\t", "")
return xml_string
def test_400_fault_json(self):
"""Test fault serialized to JSON via file-extension and/or header."""
requests = [
webob.Request.blank('/.json'),
webob.Request.blank('/', headers={"Accept": "application/json"}),
]
for request in requests:
fault = wsgi.Fault(webob.exc.HTTPBadRequest(explanation='scram'))
response = request.get_response(fault)
expected = {
"badRequest": {
"message": "scram",
"code": 400,
},
}
actual = jsonutils.loads(response.body)
self.assertEqual("application/json", response.content_type)
self.assertEqual(expected, actual)
def test_413_fault_json(self):
"""Test fault serialized to JSON via file-extension and/or header."""
requests = [
webob.Request.blank('/.json'),
webob.Request.blank('/', headers={"Accept": "application/json"}),
]
for request in requests:
exc = webob.exc.HTTPRequestEntityTooLarge
fault = wsgi.Fault(exc(explanation='sorry',
headers={'Retry-After': 4}))
response = request.get_response(fault)
expected = {
"overLimit": {
"message": "sorry",
"code": 413,
"retryAfter": '4',
},
}
actual = jsonutils.loads(response.body)
self.assertEqual("application/json", response.content_type)
self.assertEqual(expected, actual)
def test_raise(self):
"""Ensure the ability to raise :class:`Fault` in WSGI-ified methods."""
@webob.dec.wsgify
def raiser(req):
raise wsgi.Fault(webob.exc.HTTPNotFound(explanation='whut?'))
req = webob.Request.blank('/.json')
resp = req.get_response(raiser)
self.assertEqual("application/json", resp.content_type)
self.assertEqual(404, resp.status_int)
self.assertIn('whut?'.encode("utf-8"), resp.body)
def test_raise_403(self):
"""Ensure the ability to raise :class:`Fault` in WSGI-ified methods."""
@webob.dec.wsgify
def raiser(req):
raise wsgi.Fault(webob.exc.HTTPForbidden(explanation='whut?'))
req = webob.Request.blank('/.json')
resp = req.get_response(raiser)
self.assertEqual("application/json", resp.content_type)
self.assertEqual(403, resp.status_int)
self.assertNotIn('resizeNotAllowed'.encode("utf-8"), resp.body)
self.assertIn('forbidden'.encode("utf-8"), resp.body)
def test_fault_has_status_int(self):
"""Ensure the status_int is set correctly on faults."""
fault = wsgi.Fault(webob.exc.HTTPBadRequest(explanation='what?'))
self.assertEqual(400, fault.status_int)
class ExceptionTest(test.TestCase):
def _wsgi_app(self, inner_app):
return fault.FaultWrapper(inner_app)
def _do_test_exception_safety_reflected_in_faults(self, expose):
class ExceptionWithSafety(exception.ManilaException):
safe = expose
@webob.dec.wsgify
def fail(req):
raise ExceptionWithSafety('some explanation')
api = self._wsgi_app(fail)
resp = webob.Request.blank('/').get_response(api)
self.assertIn('{"computeFault', str(resp.body), resp.body)
expected = ('ExceptionWithSafety: some explanation' if expose else
'The server has either erred or is incapable '
'of performing the requested operation.')
self.assertIn(expected, str(resp.body), resp.body)
self.assertEqual(500, resp.status_int, resp.body)
def test_safe_exceptions_are_described_in_faults(self):
self._do_test_exception_safety_reflected_in_faults(True)
def test_unsafe_exceptions_are_not_described_in_faults(self):
self._do_test_exception_safety_reflected_in_faults(False)
def _do_test_exception_mapping(self, exception_type, msg):
@webob.dec.wsgify
def fail(req):
raise exception_type(msg)
api = self._wsgi_app(fail)
resp = webob.Request.blank('/').get_response(api)
self.assertIn(msg, str(resp.body), resp.body)
self.assertEqual(exception_type.code, resp.status_int, resp.body)
if hasattr(exception_type, 'headers'):
for (key, value) in exception_type.headers.items():
self.assertIn(key, resp.headers)
self.assertEqual(value, resp.headers[key])
def test_quota_error_mapping(self):
self._do_test_exception_mapping(exception.QuotaError, 'too many used')
def test_non_manila_notfound_exception_mapping(self):
class ExceptionWithCode(Exception):
code = 404
self._do_test_exception_mapping(ExceptionWithCode,
'NotFound')
def test_non_manila_exception_mapping(self):
class ExceptionWithCode(Exception):
code = 417
self._do_test_exception_mapping(ExceptionWithCode,
'Expectation failed')
def test_exception_with_none_code_throws_500(self):
class ExceptionWithNoneCode(Exception):
code = None
@webob.dec.wsgify
def fail(req):
raise ExceptionWithNoneCode()
api = self._wsgi_app(fail)
resp = webob.Request.blank('/').get_response(api)
self.assertEqual(500, resp.status_int)
def test_validate_request_unicode_decode_fault(self):
@webob.dec.wsgify
def unicode_error(req):
raise UnicodeDecodeError("ascii", "test".encode(), 0, 1, "bad")
api = self._wsgi_app(unicode_error)
resp = webob.Request.blank('/test?foo=%88').get_response(api)
self.assertEqual(400, resp.status_int)
| apache-2.0 | -5,745,190,838,005,838,000 | 36.257732 | 79 | 0.612756 | false |
davidwilson-85/easymap | graphic_output/Pillow-4.2.1/Tests/test_file_palm.py | 1 | 1353 | from helper import unittest, PillowTestCase, hopper, imagemagick_available
import os.path
class TestFilePalm(PillowTestCase):
_roundtrip = imagemagick_available()
def helper_save_as_palm(self, mode):
# Arrange
im = hopper(mode)
outfile = self.tempfile("temp_" + mode + ".palm")
# Act
im.save(outfile)
# Assert
self.assertTrue(os.path.isfile(outfile))
self.assertGreater(os.path.getsize(outfile), 0)
def roundtrip(self, mode):
if not self._roundtrip:
return
im = hopper(mode)
outfile = self.tempfile("temp.palm")
im.save(outfile)
converted = self.open_withImagemagick(outfile)
self.assert_image_equal(converted, im)
def test_monochrome(self):
# Arrange
mode = "1"
# Act / Assert
self.helper_save_as_palm(mode)
self.roundtrip(mode)
def test_p_mode(self):
# Arrange
mode = "P"
# Act / Assert
self.helper_save_as_palm(mode)
self.skipKnownBadTest("Palm P image is wrong")
self.roundtrip(mode)
def test_rgb_ioerror(self):
# Arrange
mode = "RGB"
# Act / Assert
self.assertRaises(IOError, lambda: self.helper_save_as_palm(mode))
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 5,463,711,442,434,968,000 | 22.327586 | 74 | 0.583149 | false |
TristanCacqueray/tbac-reg | main/tools/src/scripts/tbac_get_attr.py | 1 | 1404 | #!/usr/bin/python -OO
# -*- coding: utf8 -*-
############################################################################
# (c) 2005-2010 freenode#rsbac
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
############################################################################
""" Filename: main/tools/src/scripts/tbac_get_attr.py
Project: tbac-reg
Last update: 2009/06/11
Purpose: Get attr command line tools
"""
import tbac
def main():
import sys
if len(sys.argv) < 3:
print "usage: %s flags|range file/dirname(s)"
return -1
for path in sys.argv[2:]:
if sys.argv[1].lower() == "flags":
val = tbac.get_flags(path)
else:
val = tbac.get_range(path)
print "%s: returned '%s'" % (path, val)
if __name__ == "__main__":
main()
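
# Example invocation (illustrative; the paths are hypothetical):
#   ./tbac_get_attr.py flags /etc/passwd
#   ./tbac_get_attr.py range /home /tmp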
| gpl-2.0 | 8,475,136,004,425,136,000 | 32.428571 | 76 | 0.626781 | false |
Katello/katello-cli | test/katello/tests/core/filters/filter_add_remove_product_test.py | 1 | 3943 | import unittest
from mock import Mock
import os
from katello.tests.core.action_test_utils import CLIOptionTestCase,\
CLIActionTestCase
from katello.tests.core.content_view_definition import content_view_definition_data
from katello.tests.core.organization import organization_data
from katello.tests.core.repo import repo_data
from katello.tests.core.product import product_data
import katello.client.core.filter
from katello.client.api.content_view_definition import ContentViewDefinitionAPI
from katello.client.core.filter import AddRemoveProduct
from katello.client.api.utils import ApiDataError
class RequiredCLIOptionsTest(object):
disallowed_options = [
('--org=ACME', '--name=def1', "--definition=foo"),
('--org=ACME', '--name=def1', '--product=photoshop'),
        ('--name=def1', '--product=photoshop', "--definition=foo")
]
allowed_options = [
('--org=ACME', '--name=def1', '--product=photoshop', "--definition=foo")
]
class AddRequiredCLIOptionsTest(RequiredCLIOptionsTest, CLIOptionTestCase):
action = AddRemoveProduct(True)
class RemoveRequiredCLIOptionsTest(RequiredCLIOptionsTest, CLIOptionTestCase):
action = AddRemoveProduct(False)
class FilterAddRemoveProductTest(object):
ORG = organization_data.ORGS[0]
PRODUCT = product_data.PRODUCTS[0]
PRODUCTS = product_data.PRODUCTS
DEFINITION = content_view_definition_data.DEFS[0]
FILTER = content_view_definition_data.FILTERS[0]
OPTIONS = {
'org': ORG['name'],
'label': DEFINITION['label'],
'product': PRODUCT['label'],
'definition': DEFINITION["name"],
}
addition = True
def setUp(self):
self.set_action(AddRemoveProduct(self.addition))
self.set_module(katello.client.core.filter)
self.mock_printer()
self.mock_options(self.OPTIONS)
self.mock(self.module, 'get_cv_definition', self.DEFINITION)
self.mock(self.module, 'get_filter', self.FILTER)
self.mock(ContentViewDefinitionAPI, 'all_products', self.PRODUCTS)
self.mock(self.action.api, 'products', self.PRODUCTS)
self.mock(self.action.api, 'update_products')
def test_it_returns_with_error_if_no_def_was_found(self):
self.mock(self.module, 'get_cv_definition').side_effect = ApiDataError()
self.run_action(os.EX_DATAERR)
def test_it_returns_with_error_if_no_filter_was_found(self):
self.mock(self.module, 'get_filter').side_effect = ApiDataError()
self.run_action(os.EX_DATAERR)
def test_it_returns_with_error_if_product_was_not_found(self):
self.mock(self.action, 'identify_product').side_effect = ApiDataError()
self.run_action(os.EX_DATAERR)
def test_it_retrieves_all_definition_products(self):
self.mock(self.action, 'identify_product', return_value = self.PRODUCT)
self.run_action()
self.action.api.products.assert_called_once_with(self.FILTER['id'],
self.DEFINITION['id'], self.ORG['name'])
self.action.identify_product.assert_called_once_with(self.DEFINITION,
self.PRODUCT['name'], None, None)
class FilterAddProductTest(FilterAddRemoveProductTest, CLIActionTestCase):
addition = True
def test_it_calls_update_api(self):
repos = [r['id'] for r in self.PRODUCTS + [self.PRODUCT]]
self.run_action()
self.action.api.update_products.assert_called_once_with(self.FILTER['id'],
self.DEFINITION['id'], self.ORG["name"], repos)
class FilterRemoveProductTest(FilterAddRemoveProductTest, CLIActionTestCase):
addition = False
def test_it_calls_update_api(self):
repos = [r['id'] for r in self.PRODUCTS if r['name'] != self.PRODUCT['name']]
self.run_action()
self.action.api.update_products.assert_called_once_with(self.FILTER['id'],
self.DEFINITION['id'], self.ORG['name'], repos)
| gpl-2.0 | 6,877,199,297,364,418,000 | 39.234694 | 85 | 0.684758 | false |
auready/django | django/contrib/gis/feeds.py | 1 | 5765 | from django.contrib.syndication.views import Feed as BaseFeed
from django.utils.feedgenerator import Atom1Feed, Rss201rev2Feed
class GeoFeedMixin:
"""
This mixin provides the necessary routines for SyndicationFeed subclasses
to produce simple GeoRSS or W3C Geo elements.
"""
def georss_coords(self, coords):
"""
In GeoRSS coordinate pairs are ordered by lat/lon and separated by
a single white space. Given a tuple of coordinates, return a string
GeoRSS representation.
"""
return ' '.join('%f %f' % (coord[1], coord[0]) for coord in coords)
def add_georss_point(self, handler, coords, w3c_geo=False):
"""
Adds a GeoRSS point with the given coords using the given handler.
Handles the differences between simple GeoRSS and the more popular
W3C Geo specification.
"""
if w3c_geo:
lon, lat = coords[:2]
handler.addQuickElement('geo:lat', '%f' % lat)
handler.addQuickElement('geo:lon', '%f' % lon)
else:
handler.addQuickElement('georss:point', self.georss_coords((coords,)))
def add_georss_element(self, handler, item, w3c_geo=False):
"""
This routine adds a GeoRSS XML element using the given item and handler.
"""
# Getting the Geometry object.
geom = item.get('geometry')
if geom is not None:
if isinstance(geom, (list, tuple)):
# Special case if a tuple/list was passed in. The tuple may be
# a point or a box
box_coords = None
if isinstance(geom[0], (list, tuple)):
# Box: ( (X0, Y0), (X1, Y1) )
if len(geom) == 2:
box_coords = geom
else:
raise ValueError('Only should be two sets of coordinates.')
else:
if len(geom) == 2:
# Point: (X, Y)
self.add_georss_point(handler, geom, w3c_geo=w3c_geo)
elif len(geom) == 4:
# Box: (X0, Y0, X1, Y1)
box_coords = (geom[:2], geom[2:])
else:
                        raise ValueError('There should be only 2 or 4 numeric elements.')
# If a GeoRSS box was given via tuple.
if box_coords is not None:
if w3c_geo:
raise ValueError('Cannot use simple GeoRSS box in W3C Geo feeds.')
handler.addQuickElement('georss:box', self.georss_coords(box_coords))
else:
# Getting the lower-case geometry type.
gtype = str(geom.geom_type).lower()
if gtype == 'point':
self.add_georss_point(handler, geom.coords, w3c_geo=w3c_geo)
else:
if w3c_geo:
raise ValueError('W3C Geo only supports Point geometries.')
# For formatting consistent w/the GeoRSS simple standard:
# http://georss.org/1.0#simple
if gtype in ('linestring', 'linearring'):
handler.addQuickElement('georss:line', self.georss_coords(geom.coords))
elif gtype in ('polygon',):
# Only support the exterior ring.
handler.addQuickElement('georss:polygon', self.georss_coords(geom[0].coords))
else:
raise ValueError('Geometry type "%s" not supported.' % geom.geom_type)
# ### SyndicationFeed subclasses ###
class GeoRSSFeed(Rss201rev2Feed, GeoFeedMixin):
def rss_attributes(self):
attrs = super().rss_attributes()
attrs['xmlns:georss'] = 'http://www.georss.org/georss'
return attrs
def add_item_elements(self, handler, item):
super().add_item_elements(handler, item)
self.add_georss_element(handler, item)
def add_root_elements(self, handler):
super().add_root_elements(handler)
self.add_georss_element(handler, self.feed)
class GeoAtom1Feed(Atom1Feed, GeoFeedMixin):
def root_attributes(self):
attrs = super().root_attributes()
attrs['xmlns:georss'] = 'http://www.georss.org/georss'
return attrs
def add_item_elements(self, handler, item):
super().add_item_elements(handler, item)
self.add_georss_element(handler, item)
def add_root_elements(self, handler):
super().add_root_elements(handler)
self.add_georss_element(handler, self.feed)
class W3CGeoFeed(Rss201rev2Feed, GeoFeedMixin):
def rss_attributes(self):
attrs = super().rss_attributes()
attrs['xmlns:geo'] = 'http://www.w3.org/2003/01/geo/wgs84_pos#'
return attrs
def add_item_elements(self, handler, item):
super().add_item_elements(handler, item)
self.add_georss_element(handler, item, w3c_geo=True)
def add_root_elements(self, handler):
super().add_root_elements(handler)
self.add_georss_element(handler, self.feed, w3c_geo=True)
# ### Feed subclass ###
class Feed(BaseFeed):
"""
This is a subclass of the `Feed` from `django.contrib.syndication`.
This allows users to define a `geometry(obj)` and/or `item_geometry(item)`
methods on their own subclasses so that geo-referenced information may
placed in the feed.
"""
feed_type = GeoRSSFeed
def feed_extra_kwargs(self, obj):
return {'geometry': self._get_dynamic_attr('geometry', obj)}
def item_extra_kwargs(self, item):
return {'geometry': self._get_dynamic_attr('item_geometry', item)}
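# Usage sketch (not part of Django): a feed with per-item geometry. The
# ``myapp.models.Place`` model, its ``location`` point field and its ``added``
# timestamp are hypothetical names used for illustration only.
class ExamplePlacesFeed(Feed):
    title = "Recently added places"
    link = "/places/"
    description = "Places added over the last week."
    def items(self):
        from myapp.models import Place  # hypothetical app/model
        return Place.objects.order_by('-added')[:10]
    def item_geometry(self, item):
        # Emitted as a georss:point element by the default GeoRSSFeed;
        # W3CGeoFeed would also work here since this is a Point geometry.
        return item.location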
| bsd-3-clause | -6,600,715,152,181,236,000 | 39.598592 | 101 | 0.57294 | false |
AutorestCI/azure-sdk-for-python | azure-mgmt-datafactory/azure/mgmt/datafactory/models/amazon_redshift_linked_service.py | 1 | 3598 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .linked_service import LinkedService
class AmazonRedshiftLinkedService(LinkedService):
"""Linked service for Amazon Redshift.
:param additional_properties: Unmatched properties from the message are
     deserialized to this collection
:type additional_properties: dict[str, object]
:param connect_via: The integration runtime reference.
:type connect_via:
~azure.mgmt.datafactory.models.IntegrationRuntimeReference
:param description: Linked service description.
:type description: str
:param type: Constant filled by server.
:type type: str
:param server: The name of the Amazon Redshift server. Type: string (or
Expression with resultType string).
:type server: object
:param username: The username of the Amazon Redshift source. Type: string
(or Expression with resultType string).
:type username: object
:param password: The password of the Amazon Redshift source.
:type password: ~azure.mgmt.datafactory.models.SecureString
:param database: The database name of the Amazon Redshift source. Type:
string (or Expression with resultType string).
:type database: object
:param port: The TCP port number that the Amazon Redshift server uses to
listen for client connections. The default value is 5439. Type: integer
(or Expression with resultType integer).
:type port: object
:param encrypted_credential: The encrypted credential used for
authentication. Credentials are encrypted using the integration runtime
credential manager. Type: string (or Expression with resultType string).
:type encrypted_credential: object
"""
_validation = {
'type': {'required': True},
'server': {'required': True},
'database': {'required': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'connect_via': {'key': 'connectVia', 'type': 'IntegrationRuntimeReference'},
'description': {'key': 'description', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'server': {'key': 'typeProperties.server', 'type': 'object'},
'username': {'key': 'typeProperties.username', 'type': 'object'},
'password': {'key': 'typeProperties.password', 'type': 'SecureString'},
'database': {'key': 'typeProperties.database', 'type': 'object'},
'port': {'key': 'typeProperties.port', 'type': 'object'},
'encrypted_credential': {'key': 'typeProperties.encryptedCredential', 'type': 'object'},
}
def __init__(self, server, database, additional_properties=None, connect_via=None, description=None, username=None, password=None, port=None, encrypted_credential=None):
super(AmazonRedshiftLinkedService, self).__init__(additional_properties=additional_properties, connect_via=connect_via, description=description)
self.server = server
self.username = username
self.password = password
self.database = database
self.port = port
self.encrypted_credential = encrypted_credential
self.type = 'AmazonRedshift'
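# Usage sketch (not part of the generated SDK): constructing the linked
# service. The endpoint, database and credential values below are placeholder
# assumptions, not real resources.
def _example_amazon_redshift_linked_service():
    from azure.mgmt.datafactory.models import SecureString
    return AmazonRedshiftLinkedService(
        server='example.redshift.amazonaws.com',  # hypothetical endpoint
        database='analytics',                     # hypothetical database
        username='loader',
        password=SecureString(value='<secret>'),
        port=5439)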
| mit | -5,348,006,601,177,244,000 | 46.342105 | 173 | 0.660367 | false |
boshmaf/sypy | sypy/results.py | 1 | 2512 | # SyPy: A Python framework for evaluating graph-based Sybil detection
# algorithms in social and information networks.
#
# Copyright (C) 2013 Yazan Boshmaf
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
class Results:
def __init__(self, detector):
self.nodes = detector.network.graph.nodes()
self.honests_predicted = detector.honests_predicted
self.honests_truth = detector.honests_truth
self.confusion_matrix = self.__compute_confusion_matrix()
    def __compute_confusion_matrix(self):
        # Sybil nodes are the positive class: N/P are the honest/sybil
        # population sizes, and TN/FN/TP/FP compare the predicted honest
        # set against the ground-truth honest set.
        N = len(self.honests_truth)
        P = len(self.nodes) - N
TN = len(
set.intersection(
set(self.honests_truth),
set(self.honests_predicted)
)
)
FN = len(
set.intersection(
set(self.honests_predicted),
(set(self.nodes) - set(self.honests_truth))
)
)
TP = len(
set.intersection(
(set(self.nodes) - set(self.honests_truth)),
(set(self.nodes) - set(self.honests_predicted))
)
)
FP = len(
set.intersection(
set(self.honests_truth),
(set(self.nodes) - set(self.honests_predicted))
)
)
confusion_matrix = {
"N": N,
"P": P,
"TN": TN,
"FN": FN,
"TP": TP,
"FP": FP
}
return confusion_matrix
def accuracy(self):
cm = self.confusion_matrix
return (cm["TP"] + cm["TN"])/(float)(cm["P"] + cm["N"])
def sensitivity(self):
cm = self.confusion_matrix
return cm["TP"]/(float)(cm["TP"]+cm["FN"])
def specificity(self):
cm = self.confusion_matrix
return cm["TN"]/(float)(cm["FP"]+cm["TN"])
| gpl-3.0 | -5,480,218,016,483,117,000 | 30.4 | 74 | 0.556927 | false |
FedoraScientific/salome-geom | doc/salome/examples/transformation_operations_ex07.py | 1 | 1374 | # Projection
import salome
salome.salome_init()
import GEOM
from salome.geom import geomBuilder
geompy = geomBuilder.New(salome.myStudy)
# create a cylindric face and a curve(edge)
cylinder = geompy.MakeCylinderRH(100, 300)
[face_cyl] = geompy.SubShapes(cylinder, [3])
p1 = geompy.MakeVertex(200, 0, 100)
p2 = geompy.MakeVertex(200, 80, 100)
p3 = geompy.MakeVertex(200, 80, 180)
p4 = geompy.MakeVertex(130, 80, 180)
p5 = geompy.MakeVertex(90, 80, 240)
curve = geompy.MakeInterpol([p1, p2, p3, p4, p5], False, False)
# create a new object as projection of the
# given curve on the given cylindric face
projection = geompy.MakeProjection(curve, face_cyl)
# add objects in the study
geompy.addToStudy(cylinder, "cylinder")
geompy.addToStudyInFather(cylinder, face_cyl, "face_cyl")
geompy.addToStudy(p1, "p1")
geompy.addToStudy(p2, "p2")
geompy.addToStudy(p3, "p3")
geompy.addToStudy(p4, "p4")
geompy.addToStudy(p5, "p5")
geompy.addToStudy(curve, "curve")
geompy.addToStudy(projection, "projection")
# projection of a point on a wire
e1 = geompy.MakeLineTwoPnt(p1, p2)
e2 = geompy.MakeLineTwoPnt(p2, p3)
w1 = geompy.MakeWire([e1, e2], 1.e-7)
v1 = geompy.MakeVertex(300, 40, 100)
prj = geompy.MakeProjectionOnWire(v1, w1)
geompy.addToStudy(e1, "e1")
geompy.addToStudy(e2, "e2")
geompy.addToStudy(w1, "w1")
geompy.addToStudy(v1, "v1")
geompy.addToStudy(prj[1], "projOnWire")
| lgpl-2.1 | -6,514,882,863,137,631,000 | 27.625 | 63 | 0.739447 | false |
bmaggard/luigi | luigi/contrib/bigquery.py | 1 | 18778 | # -*- coding: utf-8 -*-
#
# Copyright 2015 Twitter Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import collections
import logging
import luigi.target
import time
logger = logging.getLogger('luigi-interface')
try:
import httplib2
import oauth2client
from googleapiclient import discovery
from googleapiclient import http
except ImportError:
logger.warning('Bigquery module imported, but google-api-python-client is '
'not installed. Any bigquery task will fail')
class CreateDisposition(object):
CREATE_IF_NEEDED = 'CREATE_IF_NEEDED'
CREATE_NEVER = 'CREATE_NEVER'
class WriteDisposition(object):
WRITE_TRUNCATE = 'WRITE_TRUNCATE'
WRITE_APPEND = 'WRITE_APPEND'
WRITE_EMPTY = 'WRITE_EMPTY'
class QueryMode(object):
INTERACTIVE = 'INTERACTIVE'
BATCH = 'BATCH'
class SourceFormat(object):
CSV = 'CSV'
DATASTORE_BACKUP = 'DATASTORE_BACKUP'
NEWLINE_DELIMITED_JSON = 'NEWLINE_DELIMITED_JSON'
BQDataset = collections.namedtuple('BQDataset', 'project_id dataset_id')
class BQTable(collections.namedtuple('BQTable', 'project_id dataset_id table_id')):
@property
def dataset(self):
return BQDataset(project_id=self.project_id, dataset_id=self.dataset_id)
@property
def uri(self):
return "bq://" + self.project_id + "/" + \
self.dataset.dataset_id + "/" + self.table_id
class BigqueryClient(object):
"""A client for Google BigQuery.
For details of how authentication and the descriptor work, see the
documentation for the GCS client. The descriptor URL for BigQuery is
https://www.googleapis.com/discovery/v1/apis/bigquery/v2/rest
"""
def __init__(self, oauth_credentials=None, descriptor='', http_=None):
http_ = http_ or httplib2.Http()
if not oauth_credentials:
oauth_credentials = oauth2client.client.GoogleCredentials.get_application_default()
if descriptor:
self.client = discovery.build_from_document(descriptor, credentials=oauth_credentials, http=http_)
else:
self.client = discovery.build('bigquery', 'v2', credentials=oauth_credentials, http=http_)
def dataset_exists(self, dataset):
"""Returns whether the given dataset exists.
:param dataset:
:type dataset: BQDataset
"""
try:
self.client.datasets().get(projectId=dataset.project_id,
datasetId=dataset.dataset_id).execute()
except http.HttpError as ex:
if ex.resp.status == 404:
return False
raise
return True
def table_exists(self, table):
"""Returns whether the given table exists.
:param table:
:type table: BQTable
"""
if not self.dataset_exists(table.dataset):
return False
try:
self.client.tables().get(projectId=table.project_id,
datasetId=table.dataset_id,
tableId=table.table_id).execute()
except http.HttpError as ex:
if ex.resp.status == 404:
return False
raise
return True
def make_dataset(self, dataset, raise_if_exists=False, body={}):
"""Creates a new dataset with the default permissions.
:param dataset:
:type dataset: BQDataset
:param raise_if_exists: whether to raise an exception if the dataset already exists.
:raises luigi.target.FileAlreadyExists: if raise_if_exists=True and the dataset exists
"""
try:
self.client.datasets().insert(projectId=dataset.project_id, body=dict(
{'id': '{}:{}'.format(dataset.project_id, dataset.dataset_id)}, **body)).execute()
except http.HttpError as ex:
if ex.resp.status == 409:
if raise_if_exists:
raise luigi.target.FileAlreadyExists()
else:
raise
def delete_dataset(self, dataset, delete_nonempty=True):
"""Deletes a dataset (and optionally any tables in it), if it exists.
:param dataset:
:type dataset: BQDataset
:param delete_nonempty: if true, will delete any tables before deleting the dataset
"""
if not self.dataset_exists(dataset):
return
self.client.datasets().delete(projectId=dataset.project_id,
datasetId=dataset.dataset_id,
deleteContents=delete_nonempty).execute()
def delete_table(self, table):
"""Deletes a table, if it exists.
:param table:
:type table: BQTable
"""
if not self.table_exists(table):
return
self.client.tables().delete(projectId=table.project_id,
datasetId=table.dataset_id,
tableId=table.table_id).execute()
def list_datasets(self, project_id):
"""Returns the list of datasets in a given project.
:param project_id:
:type project_id: str
"""
request = self.client.datasets().list(projectId=project_id,
maxResults=1000)
response = request.execute()
while response is not None:
for ds in response.get('datasets', []):
yield ds['datasetReference']['datasetId']
request = self.client.datasets().list_next(request, response)
if request is None:
break
response = request.execute()
def list_tables(self, dataset):
"""Returns the list of tables in a given dataset.
:param dataset:
:type dataset: BQDataset
"""
request = self.client.tables().list(projectId=dataset.project_id,
datasetId=dataset.dataset_id,
maxResults=1000)
response = request.execute()
while response is not None:
for t in response.get('tables', []):
yield t['tableReference']['tableId']
request = self.client.tables().list_next(request, response)
if request is None:
break
response = request.execute()
def get_view(self, table):
"""Returns the SQL query for a view, or None if it doesn't exist or is not a view.
:param table: The table containing the view.
:type table: BQTable
"""
request = self.client.tables().get(projectId=table.project_id,
datasetId=table.dataset_id,
tableId=table.table_id)
try:
response = request.execute()
except http.HttpError as ex:
if ex.resp.status == 404:
return None
raise
return response['view']['query'] if 'view' in response else None
def update_view(self, table, view):
"""Updates the SQL query for a view.
If the output table exists, it is replaced with the supplied view query. Otherwise a new
table is created with this view.
:param table: The table to contain the view.
:type table: BQTable
:param view: The SQL query for the view.
:type view: str
"""
body = {
'tableReference': {
'projectId': table.project_id,
'datasetId': table.dataset_id,
'tableId': table.table_id
},
'view': {
'query': view
}
}
if self.table_exists(table):
self.client.tables().update(projectId=table.project_id,
datasetId=table.dataset_id,
tableId=table.table_id,
body=body).execute()
else:
self.client.tables().insert(projectId=table.project_id,
datasetId=table.dataset_id,
body=body).execute()
def run_job(self, project_id, body, dataset=None):
"""Runs a bigquery "job". See the documentation for the format of body.
.. note::
You probably don't need to use this directly. Use the tasks defined below.
:param dataset:
:type dataset: BQDataset
"""
if dataset and not self.dataset_exists(dataset):
self.make_dataset(dataset)
new_job = self.client.jobs().insert(projectId=project_id, body=body).execute()
job_id = new_job['jobReference']['jobId']
logger.info('Started import job %s:%s', project_id, job_id)
while True:
status = self.client.jobs().get(projectId=project_id, jobId=job_id).execute()
if status['status']['state'] == 'DONE':
if status['status'].get('errors'):
raise Exception('Bigquery job failed: {}'.format(status['status']['errors']))
return
logger.info('Waiting for job %s:%s to complete...', project_id, job_id)
time.sleep(5.0)
def copy(self,
source_table,
dest_table,
create_disposition=CreateDisposition.CREATE_IF_NEEDED,
write_disposition=WriteDisposition.WRITE_TRUNCATE):
"""Copies (or appends) a table to another table.
:param source_table:
:type source_table: BQTable
:param dest_table:
:type dest_table: BQTable
:param create_disposition: whether to create the table if needed
:type create_disposition: CreateDisposition
:param write_disposition: whether to append/truncate/fail if the table exists
:type write_disposition: WriteDisposition
"""
job = {
"projectId": dest_table.project_id,
"configuration": {
"copy": {
"sourceTable": {
"projectId": source_table.project_id,
"datasetId": source_table.dataset_id,
"tableId": source_table.table_id,
},
"destinationTable": {
"projectId": dest_table.project_id,
"datasetId": dest_table.dataset_id,
"tableId": dest_table.table_id,
},
"createDisposition": create_disposition,
"writeDisposition": write_disposition,
}
}
}
self.run_job(dest_table.project_id, job, dataset=dest_table.dataset)
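# Usage sketch (not part of luigi): enumerating datasets and tables with the
# client. Assumes Google application default credentials are configured and
# that 'my-project' is a placeholder project id.
def _example_list_tables(project_id='my-project'):
    client = BigqueryClient()
    for dataset_id in client.list_datasets(project_id):
        dataset = BQDataset(project_id=project_id, dataset_id=dataset_id)
        for table_id in client.list_tables(dataset):
            print('%s.%s' % (dataset_id, table_id))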
class BigqueryTarget(luigi.target.Target):
def __init__(self, project_id, dataset_id, table_id, client=None):
self.table = BQTable(project_id=project_id, dataset_id=dataset_id, table_id=table_id)
self.client = client or BigqueryClient()
@classmethod
def from_bqtable(cls, table, client=None):
"""A constructor that takes a :py:class:`BQTable`.
:param table:
:type table: BQTable
"""
return cls(table.project_id, table.dataset_id, table.table_id, client=client)
def exists(self):
return self.client.table_exists(self.table)
def __str__(self):
return str(self.table)
class MixinBigqueryBulkComplete(object):
"""
Allows to efficiently check if a range of BigqueryTargets are complete.
This enables scheduling tasks with luigi range tools.
If you implement a custom Luigi task with a BigqueryTarget output, make sure to also inherit
from this mixin to enable range support.
"""
@classmethod
def bulk_complete(cls, parameter_tuples):
if len(parameter_tuples) < 1:
return
# Instantiate the tasks to inspect them
tasks_with_params = [(cls(p), p) for p in parameter_tuples]
# Grab the set of BigQuery datasets we are interested in
datasets = set([t.output().table.dataset for t, p in tasks_with_params])
logger.info('Checking datasets %s for available tables', datasets)
# Query the available tables for all datasets
client = tasks_with_params[0][0].output().client
available_datasets = filter(client.dataset_exists, datasets)
available_tables = {d: set(client.list_tables(d)) for d in available_datasets}
# Return parameter_tuples belonging to available tables
for t, p in tasks_with_params:
table = t.output().table
if table.table_id in available_tables.get(table.dataset, []):
yield p
class BigqueryLoadTask(MixinBigqueryBulkComplete, luigi.Task):
"""Load data into bigquery from GCS."""
@property
def source_format(self):
"""The source format to use (see :py:class:`SourceFormat`)."""
return SourceFormat.NEWLINE_DELIMITED_JSON
@property
def write_disposition(self):
"""What to do if the table already exists. By default this will fail the job.
See :py:class:`WriteDisposition`"""
return WriteDisposition.WRITE_EMPTY
@property
def schema(self):
"""Schema in the format defined at https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.schema.
If the value is falsy, it is omitted and inferred by bigquery, which only works for CSV inputs."""
return []
@property
def max_bad_records(self):
return 0
    def source_uris(self):
"""Source data which should be in GCS."""
return [x.path for x in luigi.task.flatten(self.input())]
def run(self):
output = self.output()
assert isinstance(output, BigqueryTarget), 'Output should be a bigquery target, not %s' % (output)
bq_client = output.client
source_uris = self.source_uris()
assert all(x.startswith('gs://') for x in source_uris)
job = {
'projectId': output.table.project_id,
'configuration': {
'load': {
'destinationTable': {
'projectId': output.table.project_id,
'datasetId': output.table.dataset_id,
'tableId': output.table.table_id,
},
'sourceFormat': self.source_format,
'writeDisposition': self.write_disposition,
'sourceUris': source_uris,
'maxBadRecords': self.max_bad_records,
}
}
}
if self.schema:
job['configuration']['load']['schema'] = {'fields': self.schema}
bq_client.run_job(output.table.project_id, job, dataset=output.table.dataset)
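# Example sketch (not part of luigi): a minimal load task. The upstream GCS
# task and the project/dataset/table names are placeholder assumptions; the
# base class derives source_uris from the paths of self.input().
class ExampleEventsLoad(BigqueryLoadTask):
    date = luigi.DateParameter()
    @property
    def schema(self):
        return [{'name': 'user_id', 'type': 'STRING'},
                {'name': 'ts', 'type': 'TIMESTAMP'}]
    def requires(self):
        # hypothetical task that writes newline-delimited JSON to gs://
        return ExampleEventsOnGCS(self.date)
    def output(self):
        return BigqueryTarget('my-project', 'events',
                              'events_%s' % self.date.strftime('%Y%m%d'))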
class BigqueryRunQueryTask(MixinBigqueryBulkComplete, luigi.Task):
@property
def write_disposition(self):
"""What to do if the table already exists. By default this will fail the job.
See :py:class:`WriteDisposition`"""
return WriteDisposition.WRITE_TRUNCATE
@property
def create_disposition(self):
"""Whether to create the table or not. See :py:class:`CreateDisposition`"""
return CreateDisposition.CREATE_IF_NEEDED
@property
def query(self):
"""The query, in text form."""
raise NotImplementedError()
@property
def query_mode(self):
"""The query mode. See :py:class:`QueryMode`."""
return QueryMode.INTERACTIVE
def run(self):
output = self.output()
assert isinstance(output, BigqueryTarget), 'Output should be a bigquery target, not %s' % (output)
query = self.query
assert query, 'No query was provided'
bq_client = output.client
logger.info('Launching Query')
logger.info('Query destination: %s (%s)', output, self.write_disposition)
logger.info('Query SQL: %s', query)
job = {
'projectId': output.table.project_id,
'configuration': {
'query': {
'query': query,
'priority': self.query_mode,
'destinationTable': {
'projectId': output.table.project_id,
'datasetId': output.table.dataset_id,
'tableId': output.table.table_id,
},
'allowLargeResults': True,
'createDisposition': self.create_disposition,
'writeDisposition': self.write_disposition,
}
}
}
bq_client.run_job(output.table.project_id, job, dataset=output.table.dataset)
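# Example sketch (not part of luigi): a query task aggregating the table
# loaded above; all identifiers remain placeholders.
class ExampleDailyCounts(BigqueryRunQueryTask):
    date = luigi.DateParameter()
    @property
    def query(self):
        return ("SELECT user_id, COUNT(*) AS n "
                "FROM [my-project:events.events_%s] "
                "GROUP BY user_id" % self.date.strftime('%Y%m%d'))
    def requires(self):
        return ExampleEventsLoad(self.date)
    def output(self):
        return BigqueryTarget('my-project', 'aggregates',
                              'daily_counts_%s' % self.date.strftime('%Y%m%d'))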
class BigqueryCreateViewTask(luigi.Task):
"""
Creates (or updates) a view in BigQuery.
The output of this task needs to be a BigQueryTarget.
Instances of this class should specify the view SQL in the view property.
If a view already exist in BigQuery at output(), it will be updated.
"""
@property
def view(self):
"""The SQL query for the view, in text form."""
raise NotImplementedError()
def complete(self):
output = self.output()
assert isinstance(output, BigqueryTarget), 'Output must be a bigquery target, not %s' % (output)
if not output.exists():
return False
existing_view = output.client.get_view(output.table)
return existing_view == self.view
def run(self):
output = self.output()
assert isinstance(output, BigqueryTarget), 'Output must be a bigquery target, not %s' % (output)
view = self.view
assert view, 'No view was provided'
logger.info('Create view')
logger.info('Destination: %s', output)
logger.info('View SQL: %s', view)
output.client.update_view(output.table, view)
class ExternalBigqueryTask(MixinBigqueryBulkComplete, luigi.ExternalTask):
"""
An external task for a BigQuery target.
"""
pass
| apache-2.0 | 4,507,529,935,092,633,600 | 32.834234 | 126 | 0.578017 | false |
gjhiggins/elixir | elixir/collection.py | 1 | 4508 | '''
Default entity collection implementation
'''
import sys
import re
class BaseCollection(list):
def __init__(self, entities=None):
list.__init__(self)
if entities is not None:
self.extend(entities)
def extend(self, entities):
for e in entities:
self.append(e)
def clear(self):
del self[:]
def resolve_absolute(self, key, full_path, entity=None, root=None):
if root is None:
root = entity._descriptor.resolve_root
if root:
full_path = '%s.%s' % (root, full_path)
module_path, classname = full_path.rsplit('.', 1)
module = sys.modules[module_path]
res = getattr(module, classname, None)
if res is None:
if entity is not None:
raise Exception("Couldn't resolve target '%s' <%s> in '%s'!"
% (key, full_path, entity.__name__))
else:
raise Exception("Couldn't resolve target '%s' <%s>!"
% (key, full_path))
return res
def __getattr__(self, key):
return self.resolve(key)
# default entity collection
class GlobalEntityCollection(BaseCollection):
def __init__(self, entities=None):
# _entities is a dict of entities keyed on their name.
self._entities = {}
super(GlobalEntityCollection, self).__init__(entities)
def append(self, entity):
'''
Add an entity to the collection.
'''
        super(GlobalEntityCollection, self).append(entity)
existing_entities = self._entities.setdefault(entity.__name__, [])
existing_entities.append(entity)
def resolve(self, key, entity=None):
'''
Resolve a key to an Entity. The optional `entity` argument is the
"source" entity when resolving relationship targets.
'''
# Do we have a fully qualified entity name?
if '.' in key:
return self.resolve_absolute(key, key, entity)
else:
# Otherwise we look in the entities of this collection
res = self._entities.get(key, None)
if res is None:
if entity:
raise Exception("Couldn't resolve target '%s' in '%s'"
% (key, entity.__name__))
else:
raise Exception("This collection does not contain any "
"entity corresponding to the key '%s'!"
% key)
elif len(res) > 1:
raise Exception("'%s' resolves to several entities, you should"
" use the full path (including the full module"
" name) to that entity." % key)
else:
return res[0]
def clear(self):
self._entities = {}
super(GlobalEntityCollection, self).clear()
# backward compatible name
EntityCollection = GlobalEntityCollection
_leading_dots = re.compile('^([.]*).*$')
class RelativeEntityCollection(BaseCollection):
# the entity=None does not make any sense with a relative entity collection
def resolve(self, key, entity):
'''
        Resolve a key to an Entity. The `entity` argument (required here) is
        the "source" entity when resolving relationship targets.
'''
full_path = key
if '.' not in key or key.startswith('.'):
# relative target
# any leading dot is stripped and with each dot removed,
# the entity_module is stripped of one more chunk (starting with
# the last one).
num_dots = _leading_dots.match(full_path).end(1)
full_path = full_path[num_dots:]
chunks = entity.__module__.split('.')
chunkstokeep = len(chunks) - num_dots
if chunkstokeep < 0:
raise Exception("Couldn't resolve relative target "
"'%s' relative to '%s'" % (
key, entity.__module__))
entity_module = '.'.join(chunks[:chunkstokeep])
            if entity_module and entity_module != '__main__':
full_path = '%s.%s' % (entity_module, full_path)
root = ''
else:
root = None
return self.resolve_absolute(key, full_path, entity, root=root)
def __getattr__(self, key):
raise NotImplementedError
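# Illustration (not part of Elixir): a standalone re-implementation of the
# leading-dot arithmetic used by RelativeEntityCollection.resolve, assuming
# the source entity lives in the hypothetical module 'app.models.accounts'.
def _relative_target_demo(key, entity_module):
    num_dots = _leading_dots.match(key).end(1)
    path = key[num_dots:]
    chunks = entity_module.split('.')
    base = '.'.join(chunks[:len(chunks) - num_dots])
    return '%s.%s' % (base, path) if base else path
assert _relative_target_demo('User', 'app.models.accounts') == \
    'app.models.accounts.User'
assert _relative_target_demo('.billing.Invoice', 'app.models.accounts') == \
    'app.models.billing.Invoice'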
| mit | -6,948,553,760,769,409,000 | 34.21875 | 79 | 0.537045 | false |
a1ezzz/wasp-general | wasp_general/os/linux/lvm.py | 1 | 15583 | # -*- coding: utf-8 -*-
# wasp_general/os/linux/lvm.py
#
# Copyright (C) 2017 the wasp-general authors and contributors
# <see AUTHORS file>
#
# This file is part of wasp-general.
#
# Wasp-general is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Wasp-general is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with wasp-general. If not, see <http://www.gnu.org/licenses/>.
# TODO: document the code
# TODO: write tests for the code
# noinspection PyUnresolvedReferences
from wasp_general.version import __author__, __version__, __credits__, __license__, __copyright__, __email__
# noinspection PyUnresolvedReferences
from wasp_general.version import __status__
import subprocess
import os
import math
from wasp_general.verify import verify_type, verify_value
from wasp_general.os.linux.mounts import WMountPoint
class WLVMInfoCommand:
""" This is a helper, with which it is easier to call for pvdisplay, vgdisplay or lvdisplay program.
This class uses subprocess.check_output method for a program calling. And when non-zero code is returned by
the program, an subprocess.CalledProcessError exception is raised. There is a timeout for a program to be
complete. If a program wasn't completed for that period of time, subprocess.TimeoutExpired exception is
raised
"""
__lvm_cmd_default_timeout__ = 3
""" Default timeout for command to process
"""
@verify_type(command=str, fields_count=int, cmd_timeout=(int, float, None), sudo=bool)
@verify_value(cmd_timeout=lambda x: x is None or x > 0)
def __init__(self, command, fields_count, cmd_timeout=None, sudo=False):
""" Create new command
:param command: program to execute
:param fields_count: fields in a program output
:param cmd_timeout: timeout for a program (if it is None - then default value is used)
:param sudo: flag - whether to run this program with sudo or not
"""
self.__command = command
self.__fields_count = fields_count
self.__cmd_timeout = cmd_timeout if cmd_timeout is not None else self.__lvm_cmd_default_timeout__
self.__sudo = sudo
def command(self):
""" Return target program
:return: str
"""
return self.__command
def fields_count(self):
""" Return number of fields in a program output
:return: int
"""
return self.__fields_count
def cmd_timeout(self):
""" Timeout for a program to complete
:return: int, float
"""
return self.__cmd_timeout
def sudo(self):
""" Return 'sudo' flag (whether to run this program with sudo or not)
:return: bool
"""
return self.__sudo
@verify_type(name=(str, None))
def lvm_info(self, name=None):
""" Call a program
		:param name: if specified, the program will return information for that lvm-entity only. Otherwise,
		all available entries are returned
:return: tuple of str (fields)
"""
cmd = [] if self.sudo() is False else ['sudo']
cmd.extend([self.command(), '-c'])
if name is not None:
cmd.append(name)
output = subprocess.check_output(cmd, timeout=self.cmd_timeout())
output = output.decode()
result = []
fields_count = self.fields_count()
for line in output.split('\n'):
line = line.strip()
fields = line.split(':')
if len(fields) == fields_count:
result.append(fields)
if name is not None and len(result) != 1:
raise RuntimeError('Unable to parse command result')
return tuple(result)
class WLVMInfo:
""" Basic class for actual LVM information. This class creates :class:`.WLVMInfoCommand` object
which may be called on an object creation (it depends on constructor parameters)
"""
__lvm_info_cmd_timeout__ = 3
""" Timeout for a program to complete
"""
@verify_type('paranoid', command=str, fields_count=int, sudo=bool)
@verify_type(lvm_entity=(str, tuple, list, set))
@verify_value(lvm_entity=lambda x: len(x) > 0 if isinstance(x, str) else True)
def __init__(self, command, fields_count, lvm_entity, sudo=False):
""" Create new info-object
:param command: same as command in :meth:`.WLVMInfoCommand.__init__`
:param fields_count: same as fields_count in :meth:`.WLVMInfoCommand.__init__`
:param lvm_entity: if this is a list/tuple/set - then it is a collection of fields (collection length \
must be the same as 'fields_count'). If it is a string, then command is executed to get corresponding \
fields
:param sudo: same as sudo in :meth:`.WLVMInfoCommand.__init__`
"""
self.__lvm_command = WLVMInfoCommand(
command, fields_count, cmd_timeout=self.__class__.__lvm_info_cmd_timeout__, sudo=sudo
)
if isinstance(lvm_entity, (tuple, list, set)) is True:
if len(lvm_entity) != fields_count:
raise ValueError(
'Invalid lvm entity fields count: %i (expected: %i)' %
(len(lvm_entity), fields_count)
)
self.__lvm_entity = tuple(lvm_entity)
else:
			self.__lvm_entity = tuple(self.lvm_command().lvm_info(lvm_entity)[0])
def lvm_command(self):
""" Return LVM-command object
:return: WLVMInfoCommand
"""
return self.__lvm_command
def lvm_entity(self):
""" Return object fields
:return: tuple of str (fields)
"""
return self.__lvm_entity
class WPhysicalVolume(WLVMInfo):
""" Class represent a physical volume
"""
@verify_type('paranoid', physical_volume=(str, tuple, list, set), sudo=bool)
@verify_value('paranoid', physical_volume=lambda x: len(x) > 0 if isinstance(x, str) else True)
def __init__(self, physical_volume, sudo=False):
""" Create new physical volume descriptor
:param physical_volume: same as 'lvm_entity' in :meth:`.WLVMInfo.__init__`
:param sudo: same as 'sudo' in :meth:`.WLVMInfo.__init__`
"""
WLVMInfo.__init__(self, 'pvdisplay', 12, physical_volume, sudo=sudo)
def all(self):
""" Return every physical volume in the system
:return: tuple of WPhysicalVolume
"""
return tuple([WPhysicalVolume(x) for x in self.lvm_command().lvm_info()])
def device_name(self):
""" Return physical volume device name
:return: str
"""
return self.lvm_entity()[0]
def volume_group(self):
""" Return related volume group name (may be empty string if this volume is not allocated to any)
:return: str
"""
return self.lvm_entity()[1]
def sectors_count(self):
""" Return physical volume size in sectors
:return: int
"""
return int(self.lvm_entity()[2])
def extent_size(self):
""" Return physical extent size in kilobytes (may have 0 value if this volume is not allocated to any)
:return: int
"""
return int(self.lvm_entity()[7])
def total_extents(self):
""" Return total number of physical extents (may have 0 value if this volume is not allocated to any)
:return: int
"""
return int(self.lvm_entity()[8])
def free_extents(self):
""" Return free number of physical extents (may have 0 value if this volume is not allocated to any)
:return: int
"""
return int(self.lvm_entity()[9])
def allocated_extents(self):
""" Return allocated number of physical extents (may have 0 value if this volume is not allocated to \
any)
:return: int
"""
return int(self.lvm_entity()[10])
def uuid(self):
""" Return physical volume UUID
:return: str
"""
return self.lvm_entity()[11]
class WVolumeGroup(WLVMInfo):
""" Class represent a volume group
"""
@verify_type('paranoid', volume_group=(str, tuple, list, set), sudo=bool)
@verify_value('paranoid', volume_group=lambda x: len(x) > 0 if isinstance(x, str) else True)
def __init__(self, volume_group, sudo=False):
""" Create new volume group descriptor
:param volume_group: same as 'lvm_entity' in :meth:`.WLVMInfo.__init__`
:param sudo: same as 'sudo' in :meth:`.WLVMInfo.__init__`
"""
WLVMInfo.__init__(self, 'vgdisplay', 17, volume_group, sudo=sudo)
def all(self):
""" Return every volume group in the system
:return: tuple of WVolumeGroup
"""
return tuple([WVolumeGroup(x) for x in self.lvm_command().lvm_info()])
def group_name(self):
""" Return volume group name
:return: str
"""
return self.lvm_entity()[0]
def group_access(self):
""" Return volume group access
:return: str
"""
return self.lvm_entity()[1]
def maximum_logical_volumes(self):
""" Return maximum number of logical volumes (0 - for unlimited)
:return: int
"""
return int(self.lvm_entity()[4])
def logical_volumes(self):
""" Return current number of logical volumes
:return: int
"""
return int(self.lvm_entity()[5])
def opened_logical_volumes(self):
""" Return open count of all logical volumes in this volume group
:return: int
"""
return int(self.lvm_entity()[6])
def maximum_physical_volumes(self):
""" Return maximum number of physical volumes (0 - for unlimited)
:return: int
"""
return int(self.lvm_entity()[8])
def physical_volumes(self):
""" Return current number of physical volumes
:return: int
"""
return int(self.lvm_entity()[9])
def actual_volumes(self):
""" Return actual number of physical volumes
:return: int
"""
return int(self.lvm_entity()[10])
def size(self):
""" Return size of volume group in kilobytes
:return: int
"""
return int(self.lvm_entity()[11])
def extent_size(self):
""" Return physical extent size in kilobytes
:return: int
"""
return int(self.lvm_entity()[12])
def total_extents(self):
""" Return total number of physical extents for this volume group
:return: int
"""
return int(self.lvm_entity()[13])
def allocated_extents(self):
""" Return allocated number of physical extents for this volume group
:return: int
"""
return int(self.lvm_entity()[14])
def free_extents(self):
""" Return free number of physical extents for this volume group
:return: int
"""
return int(self.lvm_entity()[15])
def uuid(self):
""" Return UUID of volume group
:return: str
"""
return self.lvm_entity()[16]
class WLogicalVolume(WLVMInfo):
""" Class represent a logical volume
"""
__lvm_snapshot_create_cmd_timeout__ = 3
""" Timeout for snapshot creation command to complete
"""
__lvm_snapshot_remove_cmd_timeout__ = 3
""" Timeout for snapshot removing command to complete
"""
__lvm_snapshot_check_cmd_timeout__ = 3
""" Timeout for snapshot checking (getting parameters) command to complete
"""
__snapshot_maximum_allocation__ = 99.9
""" Maximum space usage for snapshot, till that value snapshot is treated as valid
"""
@verify_type('paranoid', logical_volume=(str, tuple, list, set), sudo=bool)
@verify_value('paranoid', logical_volume=lambda x: len(x) > 0 if isinstance(x, str) else True)
def __init__(self, logical_volume, sudo=False):
""" Create new logical volume descriptor
:param logical_volume: same as 'lvm_entity' in :meth:`.WLVMInfo.__init__`
:param sudo: same as 'sudo' in :meth:`.WLVMInfo.__init__`
"""
WLVMInfo.__init__(self, 'lvdisplay', 13, logical_volume, sudo=sudo)
def all(self):
""" Return every logical volume in the system
:return: tuple of WLogicalVolume
"""
return tuple([WLogicalVolume(x) for x in self.lvm_command().lvm_info()])
def volume_path(self):
""" Return logical volume path
:return: str
"""
return self.lvm_entity()[0]
def volume_name(self):
""" Return logical volume name
:return: str
"""
return os.path.basename(self.volume_path())
def volume_group_name(self):
""" Return volume group name
:return: str
"""
return self.lvm_entity()[1]
def volume_group(self):
""" Return volume group
:return: WVolumeGroup
"""
return WVolumeGroup(self.volume_group_name(), sudo=self.lvm_command().sudo())
def sectors_count(self):
""" Return logical volume size in sectors
:return: int
"""
return int(self.lvm_entity()[6])
def extents_count(self):
""" Return current logical extents associated to logical volume
:return: int
"""
return int(self.lvm_entity()[7])
def device_number(self):
""" Return tuple of major and minor device number of logical volume
:return: tuple of int
"""
return int(self.lvm_entity()[11]), int(self.lvm_entity()[12])
def uuid(self):
""" Return UUID of logical volume
:return: str
"""
uuid_file = '/sys/block/%s/dm/uuid' % os.path.basename(os.path.realpath(self.volume_path()))
lv_uuid = open(uuid_file).read().strip()
if lv_uuid.startswith('LVM-') is True:
return lv_uuid[4:]
return lv_uuid
@verify_type(snapshot_size=(int, float), snapshot_suffix=str)
@verify_value(snapshot_size=lambda x: x > 0, snapshot_suffix=lambda x: len(x) > 0)
def create_snapshot(self, snapshot_size, snapshot_suffix):
""" Create snapshot for this logical volume.
:param snapshot_size: size of newly created snapshot volume. This size is a fraction of the source \
logical volume space (of this logical volume)
:param snapshot_suffix: suffix for logical volume name (base part is the same as the original volume \
name)
:return: WLogicalVolume
"""
size_extent = math.ceil(self.extents_count() * snapshot_size)
size_kb = self.volume_group().extent_size() * size_extent
snapshot_name = self.volume_name() + snapshot_suffix
lvcreate_cmd = ['sudo'] if self.lvm_command().sudo() is True else []
lvcreate_cmd.extend([
'lvcreate', '-L', '%iK' % size_kb, '-s', '-n', snapshot_name, '-p', 'r', self.volume_path()
])
subprocess.check_output(lvcreate_cmd, timeout=self.__class__.__lvm_snapshot_create_cmd_timeout__)
return WLogicalVolume(self.volume_path() + snapshot_suffix, sudo=self.lvm_command().sudo())
def remove_volume(self):
""" Remove this volume
:return: None
"""
lvremove_cmd = ['sudo'] if self.lvm_command().sudo() is True else []
lvremove_cmd.extend(['lvremove', '-f', self.volume_path()])
subprocess.check_output(lvremove_cmd, timeout=self.__class__.__lvm_snapshot_remove_cmd_timeout__)
def snapshot_allocation(self):
""" Return allocated size (fraction of total snapshot volume space). If this is not a snapshot volume,
		then a RuntimeError exception is raised.
:return: float
"""
		check_cmd = ['sudo'] if self.lvm_command().sudo() is True else []
		check_cmd.extend(['lvs', self.volume_path(), '-o', 'snap_percent', '--noheadings'])
output = subprocess.check_output(check_cmd, timeout=self.__class__.__lvm_snapshot_check_cmd_timeout__)
output = output.decode().strip()
if len(output) == 0:
raise RuntimeError('Unable to check general logical volume')
return float(output.replace(',', '.', 1))
def snapshot_corrupted(self):
""" Check if this snapshot volume is corrupted or not
:return: bool (True if corrupted, False - otherwise)
"""
return self.snapshot_allocation() > self.__class__.__snapshot_maximum_allocation__
@classmethod
@verify_type('paranoid', file_path=str, sudo=bool)
@verify_value('paranoid', file_path=lambda x: len(x) > 0)
def logical_volume(cls, file_path, sudo=False):
""" Return logical volume that stores the given path
:param file_path: target path to search
:param sudo: same as 'sudo' in :meth:`.WLogicalVolume.__init__`
:return: WLogicalVolume or None (if file path is outside current mount points)
"""
mp = WMountPoint.mount_point(file_path)
if mp is not None:
name_file = '/sys/block/%s/dm/name' % mp.device_name()
if os.path.exists(name_file):
lv_path = '/dev/mapper/%s' % open(name_file).read().strip()
return WLogicalVolume(lv_path, sudo=sudo)
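# Usage sketch (not part of wasp-general): how the classes above combine to
# make a consistent point-in-time copy. The path and the snapshot suffix are
# placeholder assumptions and the underlying LVM commands require privileges.
def _example_snapshot_backup(path='/var/lib/data'):
	""" Usage sketch: snapshot the logical volume backing the given path
	:param path: placeholder path to back up
	:return: None
	"""
	lv = WLogicalVolume.logical_volume(path, sudo=True)
	if lv is None:
		return  # the path is not stored on a LVM logical volume
	snapshot = lv.create_snapshot(0.1, '-backup')  # 10% of the source extents
	try:
		pass  # read the backup data from the read-only snapshot here
	finally:
		snapshot.remove_volume()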
| lgpl-3.0 | -1,874,695,065,266,353,400 | 28.072761 | 108 | 0.687416 | false |
mountainpenguin/BySH | server/lib/tornado/web.py | 1 | 85300 | #!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""``tornado.web`` provides a simple web framework with asynchronous
features that allow it to scale to large numbers of open connections,
making it ideal for `long polling
<http://en.wikipedia.org/wiki/Push_technology#Long_polling>`_.
Here is a simple "Hello, world" example app::
import tornado.ioloop
import tornado.web
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.write("Hello, world")
if __name__ == "__main__":
application = tornado.web.Application([
(r"/", MainHandler),
])
application.listen(8888)
tornado.ioloop.IOLoop.instance().start()
See the :doc:`Tornado overview <overview>` for more details and a good getting
started guide.
Thread-safety notes
-------------------
In general, methods on `RequestHandler` and elsewhere in Tornado are
not thread-safe. In particular, methods such as
`~RequestHandler.write()`, `~RequestHandler.finish()`, and
`~RequestHandler.flush()` must only be called from the main thread. If
you use multiple threads it is important to use `.IOLoop.add_callback`
to transfer control back to the main thread before finishing the
request.
"""
from __future__ import absolute_import, division, print_function, with_statement
import base64
import binascii
import datetime
import email.utils
import functools
import gzip
import hashlib
import hmac
import mimetypes
import numbers
import os.path
import re
import stat
import sys
import threading
import time
import tornado
import traceback
import types
import uuid
from lib.tornado import escape
from lib.tornado import httputil
from lib.tornado import locale
from lib.tornado.log import access_log, app_log, gen_log
from lib.tornado import stack_context
from lib.tornado import template
from lib.tornado.escape import utf8, _unicode
from lib.tornado.util import bytes_type, import_object, ObjectDict, raise_exc_info, unicode_type
try:
from io import BytesIO # python 3
except ImportError:
from cStringIO import StringIO as BytesIO # python 2
try:
import Cookie # py2
except ImportError:
import http.cookies as Cookie # py3
try:
import urlparse # py2
except ImportError:
import urllib.parse as urlparse # py3
try:
from urllib import urlencode # py2
except ImportError:
from urllib.parse import urlencode # py3
class RequestHandler(object):
"""Subclass this class and define `get()` or `post()` to make a handler.
If you want to support more methods than the standard GET/HEAD/POST, you
should override the class variable ``SUPPORTED_METHODS`` in your
`RequestHandler` subclass.
"""
SUPPORTED_METHODS = ("GET", "HEAD", "POST", "DELETE", "PATCH", "PUT",
"OPTIONS")
_template_loaders = {} # {path: template.BaseLoader}
_template_loader_lock = threading.Lock()
_remove_control_chars_regex = re.compile(r"[\x00-\x08\x0e-\x1f]")
def __init__(self, application, request, **kwargs):
super(RequestHandler, self).__init__()
self.application = application
self.request = request
self._headers_written = False
self._finished = False
self._auto_finish = True
self._transforms = None # will be set in _execute
self.path_args = None
self.path_kwargs = None
self.ui = ObjectDict((n, self._ui_method(m)) for n, m in
application.ui_methods.items())
# UIModules are available as both `modules` and `_modules` in the
# template namespace. Historically only `modules` was available
# but could be clobbered by user additions to the namespace.
# The template {% module %} directive looks in `_modules` to avoid
# possible conflicts.
self.ui["_modules"] = ObjectDict((n, self._ui_module(n, m)) for n, m in
application.ui_modules.items())
self.ui["modules"] = self.ui["_modules"]
self.clear()
# Check since connection is not available in WSGI
if getattr(self.request, "connection", None):
self.request.connection.set_close_callback(
self.on_connection_close)
self.initialize(**kwargs)
def initialize(self):
"""Hook for subclass initialization.
A dictionary passed as the third argument of a url spec will be
supplied as keyword arguments to initialize().
Example::
class ProfileHandler(RequestHandler):
def initialize(self, database):
self.database = database
def get(self, username):
...
app = Application([
(r'/user/(.*)', ProfileHandler, dict(database=database)),
])
"""
pass
@property
def settings(self):
"""An alias for `self.application.settings <Application.settings>`."""
return self.application.settings
def head(self, *args, **kwargs):
raise HTTPError(405)
def get(self, *args, **kwargs):
raise HTTPError(405)
def post(self, *args, **kwargs):
raise HTTPError(405)
def delete(self, *args, **kwargs):
raise HTTPError(405)
def patch(self, *args, **kwargs):
raise HTTPError(405)
def put(self, *args, **kwargs):
raise HTTPError(405)
def options(self, *args, **kwargs):
raise HTTPError(405)
def prepare(self):
"""Called at the beginning of a request before `get`/`post`/etc.
Override this method to perform common initialization regardless
of the request method.
"""
pass
def on_finish(self):
"""Called after the end of a request.
Override this method to perform cleanup, logging, etc.
This method is a counterpart to `prepare`. ``on_finish`` may
not produce any output, as it is called after the response
has been sent to the client.
"""
pass
def on_connection_close(self):
"""Called in async handlers if the client closed the connection.
Override this to clean up resources associated with
long-lived connections. Note that this method is called only if
the connection was closed during asynchronous processing; if you
need to do cleanup after every request override `on_finish`
instead.
Proxies may keep a connection open for a time (perhaps
indefinitely) after the client has gone away, so this method
may not be called promptly after the end user closes their
connection.
"""
pass
def clear(self):
"""Resets all headers and content for this response."""
self._headers = httputil.HTTPHeaders({
"Server": "TornadoServer/%s" % tornado.version,
"Content-Type": "text/html; charset=UTF-8",
"Date": httputil.format_timestamp(time.gmtime()),
})
self.set_default_headers()
if not self.request.supports_http_1_1():
if self.request.headers.get("Connection") == "Keep-Alive":
self.set_header("Connection", "Keep-Alive")
self._write_buffer = []
self._status_code = 200
self._reason = httputil.responses[200]
def set_default_headers(self):
"""Override this to set HTTP headers at the beginning of the request.
For example, this is the place to set a custom ``Server`` header.
Note that setting such headers in the normal flow of request
processing may not do what you want, since headers may be reset
during error handling.
"""
pass
def set_status(self, status_code, reason=None):
"""Sets the status code for our response.
:arg int status_code: Response status code. If ``reason`` is ``None``,
it must be present in `httplib.responses <http.client.responses>`.
:arg string reason: Human-readable reason phrase describing the status
code. If ``None``, it will be filled in from
`httplib.responses <http.client.responses>`.
"""
self._status_code = status_code
if reason is not None:
self._reason = escape.native_str(reason)
else:
try:
self._reason = httputil.responses[status_code]
except KeyError:
raise ValueError("unknown status code %d", status_code)
def get_status(self):
"""Returns the status code for our response."""
return self._status_code
def set_header(self, name, value):
"""Sets the given response header name and value.
If a datetime is given, we automatically format it according to the
HTTP specification. If the value is not a string, we convert it to
a string. All header values are then encoded as UTF-8.
"""
self._headers[name] = self._convert_header_value(value)
def add_header(self, name, value):
"""Adds the given response header and value.
Unlike `set_header`, `add_header` may be called multiple times
to return multiple values for the same header.
"""
self._headers.add(name, self._convert_header_value(value))
def clear_header(self, name):
"""Clears an outgoing header, undoing a previous `set_header` call.
Note that this method does not apply to multi-valued headers
set by `add_header`.
"""
if name in self._headers:
del self._headers[name]
def _convert_header_value(self, value):
if isinstance(value, bytes_type):
pass
elif isinstance(value, unicode_type):
value = value.encode('utf-8')
elif isinstance(value, numbers.Integral):
# return immediately since we know the converted value will be safe
return str(value)
elif isinstance(value, datetime.datetime):
return httputil.format_timestamp(value)
else:
raise TypeError("Unsupported header value %r" % value)
# If \n is allowed into the header, it is possible to inject
# additional headers or split the request. Also cap length to
# prevent obviously erroneous values.
if len(value) > 4000 or re.search(br"[\x00-\x1f]", value):
raise ValueError("Unsafe header value %r", value)
return value
_ARG_DEFAULT = []
def get_argument(self, name, default=_ARG_DEFAULT, strip=True):
"""Returns the value of the argument with the given name.
If default is not provided, the argument is considered to be
required, and we throw an HTTP 400 exception if it is missing.
If the argument appears in the url more than once, we return the
last value.
The returned value is always unicode.
"""
args = self.get_arguments(name, strip=strip)
if not args:
if default is self._ARG_DEFAULT:
raise HTTPError(400, "Missing argument %s" % name)
return default
return args[-1]
def get_arguments(self, name, strip=True):
"""Returns a list of the arguments with the given name.
If the argument is not present, returns an empty list.
The returned values are always unicode.
"""
values = []
for v in self.request.arguments.get(name, []):
v = self.decode_argument(v, name=name)
if isinstance(v, unicode_type):
# Get rid of any weird control chars (unless decoding gave
# us bytes, in which case leave it alone)
v = RequestHandler._remove_control_chars_regex.sub(" ", v)
if strip:
v = v.strip()
values.append(v)
return values
def decode_argument(self, value, name=None):
"""Decodes an argument from the request.
The argument has been percent-decoded and is now a byte string.
By default, this method decodes the argument as utf-8 and returns
a unicode string, but this may be overridden in subclasses.
This method is used as a filter for both `get_argument()` and for
values extracted from the url and passed to `get()`/`post()`/etc.
The name of the argument is provided if known, but may be None
(e.g. for unnamed groups in the url regex).
"""
return _unicode(value)
@property
def cookies(self):
"""An alias for `self.request.cookies <.httpserver.HTTPRequest.cookies>`."""
return self.request.cookies
def get_cookie(self, name, default=None):
"""Gets the value of the cookie with the given name, else default."""
if self.request.cookies is not None and name in self.request.cookies:
return self.request.cookies[name].value
return default
def set_cookie(self, name, value, domain=None, expires=None, path="/",
expires_days=None, **kwargs):
"""Sets the given cookie name/value with the given options.
Additional keyword arguments are set on the Cookie.Morsel
directly.
See http://docs.python.org/library/cookie.html#morsel-objects
for available attributes.
"""
# The cookie library only accepts type str, in both python 2 and 3
name = escape.native_str(name)
value = escape.native_str(value)
if re.search(r"[\x00-\x20]", name + value):
# Don't let us accidentally inject bad stuff
raise ValueError("Invalid cookie %r: %r" % (name, value))
if not hasattr(self, "_new_cookie"):
self._new_cookie = Cookie.SimpleCookie()
if name in self._new_cookie:
del self._new_cookie[name]
self._new_cookie[name] = value
morsel = self._new_cookie[name]
if domain:
morsel["domain"] = domain
if expires_days is not None and not expires:
expires = datetime.datetime.utcnow() + datetime.timedelta(
days=expires_days)
if expires:
morsel["expires"] = httputil.format_timestamp(expires)
if path:
morsel["path"] = path
for k, v in kwargs.items():
if k == 'max_age':
k = 'max-age'
morsel[k] = v
def clear_cookie(self, name, path="/", domain=None):
"""Deletes the cookie with the given name."""
expires = datetime.datetime.utcnow() - datetime.timedelta(days=365)
self.set_cookie(name, value="", path=path, expires=expires,
domain=domain)
def clear_all_cookies(self):
"""Deletes all the cookies the user sent with this request."""
for name in self.request.cookies:
self.clear_cookie(name)
def set_secure_cookie(self, name, value, expires_days=30, **kwargs):
"""Signs and timestamps a cookie so it cannot be forged.
You must specify the ``cookie_secret`` setting in your Application
to use this method. It should be a long, random sequence of bytes
to be used as the HMAC secret for the signature.
To read a cookie set with this method, use `get_secure_cookie()`.
Note that the ``expires_days`` parameter sets the lifetime of the
cookie in the browser, but is independent of the ``max_age_days``
parameter to `get_secure_cookie`.
Secure cookies may contain arbitrary byte values, not just unicode
strings (unlike regular cookies)
"""
self.set_cookie(name, self.create_signed_value(name, value),
expires_days=expires_days, **kwargs)
def create_signed_value(self, name, value):
"""Signs and timestamps a string so it cannot be forged.
Normally used via set_secure_cookie, but provided as a separate
method for non-cookie uses. To decode a value not stored
as a cookie use the optional value argument to get_secure_cookie.
"""
self.require_setting("cookie_secret", "secure cookies")
return create_signed_value(self.application.settings["cookie_secret"],
name, value)
def get_secure_cookie(self, name, value=None, max_age_days=31):
"""Returns the given signed cookie if it validates, or None.
The decoded cookie value is returned as a byte string (unlike
`get_cookie`).
"""
self.require_setting("cookie_secret", "secure cookies")
if value is None:
value = self.get_cookie(name)
return decode_signed_value(self.application.settings["cookie_secret"],
name, value, max_age_days=max_age_days)
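    # Round-trip sketch (hypothetical cookie name): get_secure_cookie returns
    # the original value as a byte string, or None if validation fails.
    #
    #     self.set_secure_cookie("user_id", "42")   # at login
    #     uid = self.get_secure_cookie("user_id")   # on a later request
    #     if uid is not None:
    #         user_id = int(uid)                    # b"42" -> 42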
def redirect(self, url, permanent=False, status=None):
"""Sends a redirect to the given (optionally relative) URL.
If the ``status`` argument is specified, that value is used as the
HTTP status code; otherwise either 301 (permanent) or 302
(temporary) is chosen based on the ``permanent`` argument.
The default is 302 (temporary).
"""
if self._headers_written:
raise Exception("Cannot redirect after headers have been written")
if status is None:
status = 301 if permanent else 302
else:
assert isinstance(status, int) and 300 <= status <= 399
self.set_status(status)
        # Remove whitespace; the replacement must be a byte string to match
        # the bytes pattern on Python 3
        url = re.sub(br"[\x00-\x20]+", b"", utf8(url))
self.set_header("Location", urlparse.urljoin(utf8(self.request.uri),
url))
self.finish()
def write(self, chunk):
"""Writes the given chunk to the output buffer.
To write the output to the network, use the flush() method below.
If the given chunk is a dictionary, we write it as JSON and set
the Content-Type of the response to be ``application/json``.
(if you want to send JSON as a different ``Content-Type``, call
set_header *after* calling write()).
Note that lists are not converted to JSON because of a potential
cross-site security vulnerability. All JSON output should be
wrapped in a dictionary. More details at
http://haacked.com/archive/2008/11/20/anatomy-of-a-subtle-json-vulnerability.aspx
"""
if self._finished:
raise RuntimeError("Cannot write() after finish(). May be caused "
"by using async operations without the "
"@asynchronous decorator.")
if isinstance(chunk, dict):
chunk = escape.json_encode(chunk)
self.set_header("Content-Type", "application/json; charset=UTF-8")
chunk = utf8(chunk)
self._write_buffer.append(chunk)
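    # Example (illustrative): writing a dict serializes it as JSON and sets
    # the Content-Type automatically, as described in the docstring above.
    #
    #     self.write({"status": "ok", "count": 3})
    #     # body: {"status": "ok", "count": 3}
    #     # header: Content-Type: application/json; charset=UTF-8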
def render(self, template_name, **kwargs):
"""Renders the template with the given arguments as the response."""
html = self.render_string(template_name, **kwargs)
# Insert the additional JS and CSS added by the modules on the page
js_embed = []
js_files = []
css_embed = []
css_files = []
html_heads = []
html_bodies = []
for module in getattr(self, "_active_modules", {}).values():
embed_part = module.embedded_javascript()
if embed_part:
js_embed.append(utf8(embed_part))
file_part = module.javascript_files()
if file_part:
if isinstance(file_part, (unicode_type, bytes_type)):
js_files.append(file_part)
else:
js_files.extend(file_part)
embed_part = module.embedded_css()
if embed_part:
css_embed.append(utf8(embed_part))
file_part = module.css_files()
if file_part:
if isinstance(file_part, (unicode_type, bytes_type)):
css_files.append(file_part)
else:
css_files.extend(file_part)
head_part = module.html_head()
if head_part:
html_heads.append(utf8(head_part))
body_part = module.html_body()
if body_part:
html_bodies.append(utf8(body_part))
def is_absolute(path):
return any(path.startswith(x) for x in ["/", "http:", "https:"])
if js_files:
# Maintain order of JavaScript files given by modules
paths = []
unique_paths = set()
for path in js_files:
if not is_absolute(path):
path = self.static_url(path)
if path not in unique_paths:
paths.append(path)
unique_paths.add(path)
js = ''.join('<script src="' + escape.xhtml_escape(p) +
'" type="text/javascript"></script>'
for p in paths)
sloc = html.rindex(b'</body>')
html = html[:sloc] + utf8(js) + b'\n' + html[sloc:]
if js_embed:
js = b'<script type="text/javascript">\n//<![CDATA[\n' + \
b'\n'.join(js_embed) + b'\n//]]>\n</script>'
sloc = html.rindex(b'</body>')
html = html[:sloc] + js + b'\n' + html[sloc:]
if css_files:
paths = []
unique_paths = set()
for path in css_files:
if not is_absolute(path):
path = self.static_url(path)
if path not in unique_paths:
paths.append(path)
unique_paths.add(path)
css = ''.join('<link href="' + escape.xhtml_escape(p) + '" '
'type="text/css" rel="stylesheet"/>'
for p in paths)
hloc = html.index(b'</head>')
html = html[:hloc] + utf8(css) + b'\n' + html[hloc:]
if css_embed:
css = b'<style type="text/css">\n' + b'\n'.join(css_embed) + \
b'\n</style>'
hloc = html.index(b'</head>')
html = html[:hloc] + css + b'\n' + html[hloc:]
if html_heads:
hloc = html.index(b'</head>')
html = html[:hloc] + b''.join(html_heads) + b'\n' + html[hloc:]
if html_bodies:
hloc = html.index(b'</body>')
html = html[:hloc] + b''.join(html_bodies) + b'\n' + html[hloc:]
self.finish(html)
def render_string(self, template_name, **kwargs):
"""Generate the given template with the given arguments.
We return the generated byte string (in utf8). To generate and
write a template as a response, use render() above.
"""
# If no template_path is specified, use the path of the calling file
template_path = self.get_template_path()
if not template_path:
frame = sys._getframe(0)
web_file = frame.f_code.co_filename
while frame.f_code.co_filename == web_file:
frame = frame.f_back
template_path = os.path.dirname(frame.f_code.co_filename)
with RequestHandler._template_loader_lock:
if template_path not in RequestHandler._template_loaders:
loader = self.create_template_loader(template_path)
RequestHandler._template_loaders[template_path] = loader
else:
loader = RequestHandler._template_loaders[template_path]
t = loader.load(template_name)
namespace = self.get_template_namespace()
namespace.update(kwargs)
return t.generate(**namespace)
def get_template_namespace(self):
"""Returns a dictionary to be used as the default template namespace.
May be overridden by subclasses to add or modify values.
The results of this method will be combined with additional
defaults in the `tornado.template` module and keyword arguments
to `render` or `render_string`.
"""
namespace = dict(
handler=self,
request=self.request,
current_user=self.current_user,
locale=self.locale,
_=self.locale.translate,
static_url=self.static_url,
xsrf_form_html=self.xsrf_form_html,
reverse_url=self.reverse_url
)
namespace.update(self.ui)
return namespace
def create_template_loader(self, template_path):
"""Returns a new template loader for the given path.
May be overridden by subclasses. By default returns a
directory-based loader on the given path, using the
``autoescape`` application setting. If a ``template_loader``
application setting is supplied, uses that instead.
"""
settings = self.application.settings
if "template_loader" in settings:
return settings["template_loader"]
kwargs = {}
if "autoescape" in settings:
# autoescape=None means "no escaping", so we have to be sure
# to only pass this kwarg if the user asked for it.
kwargs["autoescape"] = settings["autoescape"]
return template.Loader(template_path, **kwargs)
def flush(self, include_footers=False, callback=None):
"""Flushes the current output buffer to the network.
The ``callback`` argument, if given, can be used for flow control:
it will be run when all flushed data has been written to the socket.
Note that only one flush callback can be outstanding at a time;
if another flush occurs before the previous flush's callback
has been run, the previous callback will be discarded.
"""
if self.application._wsgi:
raise Exception("WSGI applications do not support flush()")
chunk = b"".join(self._write_buffer)
self._write_buffer = []
if not self._headers_written:
self._headers_written = True
for transform in self._transforms:
self._status_code, self._headers, chunk = \
transform.transform_first_chunk(
self._status_code, self._headers, chunk, include_footers)
headers = self._generate_headers()
else:
for transform in self._transforms:
chunk = transform.transform_chunk(chunk, include_footers)
headers = b""
# Ignore the chunk and only write the headers for HEAD requests
if self.request.method == "HEAD":
if headers:
self.request.write(headers, callback=callback)
return
self.request.write(headers + chunk, callback=callback)
def finish(self, chunk=None):
"""Finishes this response, ending the HTTP request."""
if self._finished:
raise RuntimeError("finish() called twice. May be caused "
"by using async operations without the "
"@asynchronous decorator.")
if chunk is not None:
self.write(chunk)
# Automatically support ETags and add the Content-Length header if
# we have not flushed any content yet.
if not self._headers_written:
if (self._status_code == 200 and
self.request.method in ("GET", "HEAD") and
"Etag" not in self._headers):
etag = self.compute_etag()
if etag is not None:
self.set_header("Etag", etag)
inm = self.request.headers.get("If-None-Match")
if inm and inm.find(etag) != -1:
self._write_buffer = []
self.set_status(304)
if self._status_code == 304:
assert not self._write_buffer, "Cannot send body with 304"
self._clear_headers_for_304()
elif "Content-Length" not in self._headers:
content_length = sum(len(part) for part in self._write_buffer)
self.set_header("Content-Length", content_length)
if hasattr(self.request, "connection"):
# Now that the request is finished, clear the callback we
# set on the IOStream (which would otherwise prevent the
# garbage collection of the RequestHandler when there
# are keepalive connections)
self.request.connection.stream.set_close_callback(None)
if not self.application._wsgi:
self.flush(include_footers=True)
self.request.finish()
self._log()
self._finished = True
self.on_finish()
def send_error(self, status_code=500, **kwargs):
"""Sends the given HTTP error code to the browser.
If `flush()` has already been called, it is not possible to send
an error, so this method will simply terminate the response.
If output has been written but not yet flushed, it will be discarded
and replaced with the error page.
Override `write_error()` to customize the error page that is returned.
Additional keyword arguments are passed through to `write_error`.
"""
if self._headers_written:
gen_log.error("Cannot send error response after headers written")
if not self._finished:
self.finish()
return
self.clear()
reason = None
if 'exc_info' in kwargs:
exception = kwargs['exc_info'][1]
if isinstance(exception, HTTPError) and exception.reason:
reason = exception.reason
self.set_status(status_code, reason=reason)
try:
self.write_error(status_code, **kwargs)
except Exception:
app_log.error("Uncaught exception in write_error", exc_info=True)
if not self._finished:
self.finish()
def write_error(self, status_code, **kwargs):
"""Override to implement custom error pages.
``write_error`` may call `write`, `render`, `set_header`, etc
to produce output as usual.
If this error was caused by an uncaught exception (including
HTTPError), an ``exc_info`` triple will be available as
``kwargs["exc_info"]``. Note that this exception may not be
the "current" exception for purposes of methods like
``sys.exc_info()`` or ``traceback.format_exc``.
For historical reasons, if a method ``get_error_html`` exists,
it will be used instead of the default ``write_error`` implementation.
``get_error_html`` returned a string instead of producing output
normally, and had different semantics for exception handling.
Users of ``get_error_html`` are encouraged to convert their code
to override ``write_error`` instead.
"""
if hasattr(self, 'get_error_html'):
if 'exc_info' in kwargs:
exc_info = kwargs.pop('exc_info')
kwargs['exception'] = exc_info[1]
try:
# Put the traceback into sys.exc_info()
raise_exc_info(exc_info)
except Exception:
self.finish(self.get_error_html(status_code, **kwargs))
else:
self.finish(self.get_error_html(status_code, **kwargs))
return
if self.settings.get("debug") and "exc_info" in kwargs:
# in debug mode, try to send a traceback
self.set_header('Content-Type', 'text/plain')
for line in traceback.format_exception(*kwargs["exc_info"]):
self.write(line)
self.finish()
else:
self.finish("<html><title>%(code)d: %(message)s</title>"
"<body>%(code)d: %(message)s</body></html>" % {
"code": status_code,
"message": self._reason,
})
@property
def locale(self):
"""The local for the current session.
Determined by either `get_user_locale`, which you can override to
set the locale based on, e.g., a user preference stored in a
database, or `get_browser_locale`, which uses the ``Accept-Language``
header.
"""
if not hasattr(self, "_locale"):
self._locale = self.get_user_locale()
if not self._locale:
self._locale = self.get_browser_locale()
assert self._locale
return self._locale
def get_user_locale(self):
"""Override to determine the locale from the authenticated user.
If None is returned, we fall back to `get_browser_locale()`.
This method should return a `tornado.locale.Locale` object,
most likely obtained via a call like ``tornado.locale.get("en")``
"""
return None
def get_browser_locale(self, default="en_US"):
"""Determines the user's locale from ``Accept-Language`` header.
See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.4
"""
if "Accept-Language" in self.request.headers:
languages = self.request.headers["Accept-Language"].split(",")
locales = []
for language in languages:
parts = language.strip().split(";")
if len(parts) > 1 and parts[1].startswith("q="):
try:
score = float(parts[1][2:])
except (ValueError, TypeError):
score = 0.0
else:
score = 1.0
locales.append((parts[0], score))
if locales:
locales.sort(key=lambda pair: pair[1], reverse=True)
codes = [l[0] for l in locales]
return locale.get(*codes)
return locale.get(default)
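    # Worked example (hypothetical header): given
    #     Accept-Language: da, en-gb;q=0.8, en;q=0.7
    # the parsed (code, score) pairs are [("da", 1.0), ("en-gb", 0.8),
    # ("en", 0.7)], so this method effectively calls
    # locale.get("da", "en-gb", "en") and returns the best loaded match.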
@property
def current_user(self):
"""The authenticated user for this request.
This is a cached version of `get_current_user`, which you can
override to set the user based on, e.g., a cookie. If that
method is not overridden, this method always returns None.
We lazy-load the current user the first time this method is called
and cache the result after that.
"""
if not hasattr(self, "_current_user"):
self._current_user = self.get_current_user()
return self._current_user
def get_current_user(self):
"""Override to determine the current user from, e.g., a cookie."""
return None
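    # Override sketch (illustrative): the canonical pattern is cookie-based
    # authentication, assuming a "user" secure cookie was set at login.
    #
    #     def get_current_user(self):
    #         return self.get_secure_cookie("user")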
def get_login_url(self):
"""Override to customize the login URL based on the request.
By default, we use the ``login_url`` application setting.
"""
self.require_setting("login_url", "@tornado.web.authenticated")
return self.application.settings["login_url"]
def get_template_path(self):
"""Override to customize template path for each handler.
By default, we use the ``template_path`` application setting.
Return None to load templates relative to the calling file.
"""
return self.application.settings.get("template_path")
@property
def xsrf_token(self):
"""The XSRF-prevention token for the current user/session.
To prevent cross-site request forgery, we set an '_xsrf' cookie
and include the same '_xsrf' value as an argument with all POST
requests. If the two do not match, we reject the form submission
as a potential forgery.
See http://en.wikipedia.org/wiki/Cross-site_request_forgery
"""
if not hasattr(self, "_xsrf_token"):
token = self.get_cookie("_xsrf")
if not token:
token = binascii.b2a_hex(uuid.uuid4().bytes)
expires_days = 30 if self.current_user else None
self.set_cookie("_xsrf", token, expires_days=expires_days)
self._xsrf_token = token
return self._xsrf_token
def check_xsrf_cookie(self):
"""Verifies that the ``_xsrf`` cookie matches the ``_xsrf`` argument.
To prevent cross-site request forgery, we set an ``_xsrf``
cookie and include the same value as a non-cookie
field with all ``POST`` requests. If the two do not match, we
reject the form submission as a potential forgery.
The ``_xsrf`` value may be set as either a form field named ``_xsrf``
or in a custom HTTP header named ``X-XSRFToken`` or ``X-CSRFToken``
(the latter is accepted for compatibility with Django).
See http://en.wikipedia.org/wiki/Cross-site_request_forgery
Prior to release 1.1.1, this check was ignored if the HTTP header
``X-Requested-With: XMLHTTPRequest`` was present. This exception
has been shown to be insecure and has been removed. For more
information please see
http://www.djangoproject.com/weblog/2011/feb/08/security/
http://weblog.rubyonrails.org/2011/2/8/csrf-protection-bypass-in-ruby-on-rails
"""
token = (self.get_argument("_xsrf", None) or
self.request.headers.get("X-Xsrftoken") or
self.request.headers.get("X-Csrftoken"))
if not token:
raise HTTPError(403, "'_xsrf' argument missing from POST")
if self.xsrf_token != token:
raise HTTPError(403, "XSRF cookie does not match POST argument")
def xsrf_form_html(self):
"""An HTML ``<input/>`` element to be included with all POST forms.
It defines the ``_xsrf`` input value, which we check on all POST
requests to prevent cross-site request forgery. If you have set
the ``xsrf_cookies`` application setting, you must include this
HTML within all of your HTML forms.
In a template, this method should be called with ``{% module
xsrf_form_html() %}``
See `check_xsrf_cookie()` above for more information.
"""
return '<input type="hidden" name="_xsrf" value="' + \
escape.xhtml_escape(self.xsrf_token) + '"/>'
def static_url(self, path, include_host=None):
"""Returns a static URL for the given relative static file path.
This method requires you set the ``static_path`` setting in your
application (which specifies the root directory of your static
files).
We append ``?v=<signature>`` to the returned URL, which makes our
static file handler set an infinite expiration header on the
returned content. The signature is based on the content of the
file.
By default this method returns URLs relative to the current
host, but if ``include_host`` is true the URL returned will be
absolute. If this handler has an ``include_host`` attribute,
that value will be used as the default for all `static_url`
calls that do not pass ``include_host`` as a keyword argument.
"""
self.require_setting("static_path", "static_url")
static_handler_class = self.settings.get(
"static_handler_class", StaticFileHandler)
if include_host is None:
include_host = getattr(self, "include_host", False)
if include_host:
base = self.request.protocol + "://" + self.request.host
else:
base = ""
return base + static_handler_class.make_static_url(self.settings, path)
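    # Usage sketch: in a template, {{ static_url("css/site.css") }} expands
    # to something like /static/css/site.css?v=ab12f (hash illustrative),
    # letting StaticFileHandler serve it with far-future cache headers.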
def async_callback(self, callback, *args, **kwargs):
"""Obsolete - catches exceptions from the wrapped function.
This function is unnecessary since Tornado 1.1.
"""
if callback is None:
return None
if args or kwargs:
callback = functools.partial(callback, *args, **kwargs)
def wrapper(*args, **kwargs):
try:
return callback(*args, **kwargs)
except Exception as e:
if self._headers_written:
app_log.error("Exception after headers written",
exc_info=True)
else:
self._handle_request_exception(e)
return wrapper
def require_setting(self, name, feature="this feature"):
"""Raises an exception if the given app setting is not defined."""
if not self.application.settings.get(name):
raise Exception("You must define the '%s' setting in your "
"application to use %s" % (name, feature))
def reverse_url(self, name, *args):
"""Alias for `Application.reverse_url`."""
return self.application.reverse_url(name, *args)
def compute_etag(self):
"""Computes the etag header to be used for this request.
May be overridden to provide custom etag implementations,
or may return None to disable tornado's default etag support.
"""
hasher = hashlib.sha1()
for part in self._write_buffer:
hasher.update(part)
return '"%s"' % hasher.hexdigest()
def _stack_context_handle_exception(self, type, value, traceback):
try:
# For historical reasons _handle_request_exception only takes
# the exception value instead of the full triple,
# so re-raise the exception to ensure that it's in
# sys.exc_info()
raise_exc_info((type, value, traceback))
except Exception:
self._handle_request_exception(value)
return True
def _execute(self, transforms, *args, **kwargs):
"""Executes this request with the given output transforms."""
self._transforms = transforms
try:
if self.request.method not in self.SUPPORTED_METHODS:
raise HTTPError(405)
self.path_args = [self.decode_argument(arg) for arg in args]
self.path_kwargs = dict((k, self.decode_argument(v, name=k))
for (k, v) in kwargs.items())
# If XSRF cookies are turned on, reject form submissions without
# the proper cookie
if self.request.method not in ("GET", "HEAD", "OPTIONS") and \
self.application.settings.get("xsrf_cookies"):
self.check_xsrf_cookie()
self.prepare()
if not self._finished:
getattr(self, self.request.method.lower())(
*self.path_args, **self.path_kwargs)
if self._auto_finish and not self._finished:
self.finish()
except Exception as e:
self._handle_request_exception(e)
def _generate_headers(self):
reason = self._reason
lines = [utf8(self.request.version + " " +
str(self._status_code) +
" " + reason)]
lines.extend([utf8(n) + b": " + utf8(v) for n, v in self._headers.get_all()])
if hasattr(self, "_new_cookie"):
for cookie in self._new_cookie.values():
lines.append(utf8("Set-Cookie: " + cookie.OutputString(None)))
return b"\r\n".join(lines) + b"\r\n\r\n"
def _log(self):
"""Logs the current request.
Sort of deprecated since this functionality was moved to the
Application, but left in place for the benefit of existing apps
that have overridden this method.
"""
self.application.log_request(self)
def _request_summary(self):
return self.request.method + " " + self.request.uri + \
" (" + self.request.remote_ip + ")"
def _handle_request_exception(self, e):
if isinstance(e, HTTPError):
if e.log_message:
format = "%d %s: " + e.log_message
args = [e.status_code, self._request_summary()] + list(e.args)
gen_log.warning(format, *args)
if e.status_code not in httputil.responses and not e.reason:
gen_log.error("Bad HTTP status code: %d", e.status_code)
self.send_error(500, exc_info=sys.exc_info())
else:
self.send_error(e.status_code, exc_info=sys.exc_info())
else:
app_log.error("Uncaught exception %s\n%r", self._request_summary(),
self.request, exc_info=True)
self.send_error(500, exc_info=sys.exc_info())
def _ui_module(self, name, module):
def render(*args, **kwargs):
if not hasattr(self, "_active_modules"):
self._active_modules = {}
if name not in self._active_modules:
self._active_modules[name] = module(self)
rendered = self._active_modules[name].render(*args, **kwargs)
return rendered
return render
def _ui_method(self, method):
return lambda *args, **kwargs: method(self, *args, **kwargs)
def _clear_headers_for_304(self):
# 304 responses should not contain entity headers (defined in
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec7.html#sec7.1)
# not explicitly allowed by
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.5
headers = ["Allow", "Content-Encoding", "Content-Language",
"Content-Length", "Content-MD5", "Content-Range",
"Content-Type", "Last-Modified"]
for h in headers:
self.clear_header(h)
def asynchronous(method):
"""Wrap request handler methods with this if they are asynchronous.
If this decorator is given, the response is not finished when the
method returns. It is up to the request handler to call
`self.finish() <RequestHandler.finish>` to finish the HTTP
request. Without this decorator, the request is automatically
finished when the ``get()`` or ``post()`` method returns. Example::
class MyRequestHandler(web.RequestHandler):
@web.asynchronous
def get(self):
http = httpclient.AsyncHTTPClient()
http.fetch("http://friendfeed.com/", self._on_download)
def _on_download(self, response):
self.write("Downloaded!")
self.finish()
"""
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
if self.application._wsgi:
raise Exception("@asynchronous is not supported for WSGI apps")
self._auto_finish = False
with stack_context.ExceptionStackContext(
self._stack_context_handle_exception):
return method(self, *args, **kwargs)
return wrapper
def removeslash(method):
"""Use this decorator to remove trailing slashes from the request path.
For example, a request to ``/foo/`` would redirect to ``/foo`` with this
decorator. Your request handler mapping should use a regular expression
like ``r'/foo/*'`` in conjunction with using the decorator.
"""
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
if self.request.path.endswith("/"):
if self.request.method in ("GET", "HEAD"):
uri = self.request.path.rstrip("/")
if uri: # don't try to redirect '/' to ''
if self.request.query:
uri += "?" + self.request.query
self.redirect(uri, permanent=True)
return
else:
raise HTTPError(404)
return method(self, *args, **kwargs)
return wrapper
def addslash(method):
"""Use this decorator to add a missing trailing slash to the request path.
For example, a request to ``/foo`` would redirect to ``/foo/`` with this
decorator. Your request handler mapping should use a regular expression
like ``r'/foo/?'`` in conjunction with using the decorator.
"""
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
if not self.request.path.endswith("/"):
if self.request.method in ("GET", "HEAD"):
uri = self.request.path + "/"
if self.request.query:
uri += "?" + self.request.query
self.redirect(uri, permanent=True)
return
raise HTTPError(404)
return method(self, *args, **kwargs)
return wrapper
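# Usage sketch (hypothetical handlers): pair each decorator with a URL regex
# that tolerates the slash variant being redirected away.
#
#     class ArticleHandler(RequestHandler):      # mapped with r"/article/*"
#         @removeslash
#         def get(self):
#             ...
#
#     class SectionHandler(RequestHandler):      # mapped with r"/section/?"
#         @addslash
#         def get(self):
#             ...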
class Application(object):
"""A collection of request handlers that make up a web application.
Instances of this class are callable and can be passed directly to
HTTPServer to serve the application::
application = web.Application([
(r"/", MainPageHandler),
])
http_server = httpserver.HTTPServer(application)
http_server.listen(8080)
ioloop.IOLoop.instance().start()
The constructor for this class takes in a list of `URLSpec` objects
or (regexp, request_class) tuples. When we receive requests, we
iterate over the list in order and instantiate an instance of the
first request class whose regexp matches the request path.
Each tuple can contain an optional third element, which should be
a dictionary if it is present. That dictionary is passed as
    keyword arguments to the constructor of the handler. This pattern
is used for the `StaticFileHandler` in this example (note that a
`StaticFileHandler` can be installed automatically with the
static_path setting described below)::
application = web.Application([
(r"/static/(.*)", web.StaticFileHandler, {"path": "/var/www"}),
])
We support virtual hosts with the `add_handlers` method, which takes in
a host regular expression as the first argument::
application.add_handlers(r"www\.myhost\.com", [
(r"/article/([0-9]+)", ArticleHandler),
])
You can serve static files by sending the ``static_path`` setting
as a keyword argument. We will serve those files from the
``/static/`` URI (this is configurable with the
``static_url_prefix`` setting), and we will serve ``/favicon.ico``
and ``/robots.txt`` from the same directory. A custom subclass of
`StaticFileHandler` can be specified with the
``static_handler_class`` setting.
"""
def __init__(self, handlers=None, default_host="", transforms=None,
wsgi=False, **settings):
if transforms is None:
self.transforms = []
if settings.get("gzip"):
self.transforms.append(GZipContentEncoding)
self.transforms.append(ChunkedTransferEncoding)
else:
self.transforms = transforms
self.handlers = []
self.named_handlers = {}
self.default_host = default_host
self.settings = settings
self.ui_modules = {'linkify': _linkify,
'xsrf_form_html': _xsrf_form_html,
'Template': TemplateModule,
}
self.ui_methods = {}
self._wsgi = wsgi
self._load_ui_modules(settings.get("ui_modules", {}))
self._load_ui_methods(settings.get("ui_methods", {}))
if self.settings.get("static_path"):
path = self.settings["static_path"]
handlers = list(handlers or [])
static_url_prefix = settings.get("static_url_prefix",
"/static/")
static_handler_class = settings.get("static_handler_class",
StaticFileHandler)
static_handler_args = settings.get("static_handler_args", {})
static_handler_args['path'] = path
for pattern in [re.escape(static_url_prefix) + r"(.*)",
r"/(favicon\.ico)", r"/(robots\.txt)"]:
handlers.insert(0, (pattern, static_handler_class,
static_handler_args))
if handlers:
self.add_handlers(".*$", handlers)
# Automatically reload modified modules
if self.settings.get("debug") and not wsgi:
from lib.tornado import autoreload
autoreload.start()
def listen(self, port, address="", **kwargs):
"""Starts an HTTP server for this application on the given port.
This is a convenience alias for creating an `.HTTPServer`
object and calling its listen method. Keyword arguments not
supported by `HTTPServer.listen <.TCPServer.listen>` are passed to the
`.HTTPServer` constructor. For advanced uses
(e.g. multi-process mode), do not use this method; create an
`.HTTPServer` and call its
`.TCPServer.bind`/`.TCPServer.start` methods directly.
Note that after calling this method you still need to call
``IOLoop.instance().start()`` to start the server.
"""
# import is here rather than top level because HTTPServer
# is not importable on appengine
from lib.tornado.httpserver import HTTPServer
server = HTTPServer(self, **kwargs)
server.listen(port, address)
def add_handlers(self, host_pattern, host_handlers):
"""Appends the given handlers to our handler list.
Host patterns are processed sequentially in the order they were
added. All matching patterns will be considered.
"""
if not host_pattern.endswith("$"):
host_pattern += "$"
handlers = []
# The handlers with the wildcard host_pattern are a special
# case - they're added in the constructor but should have lower
# precedence than the more-precise handlers added later.
# If a wildcard handler group exists, it should always be last
# in the list, so insert new groups just before it.
if self.handlers and self.handlers[-1][0].pattern == '.*$':
self.handlers.insert(-1, (re.compile(host_pattern), handlers))
else:
self.handlers.append((re.compile(host_pattern), handlers))
for spec in host_handlers:
if isinstance(spec, type(())):
assert len(spec) in (2, 3)
pattern = spec[0]
handler = spec[1]
if isinstance(handler, str):
# import the Module and instantiate the class
# Must be a fully qualified name (module.ClassName)
handler = import_object(handler)
if len(spec) == 3:
kwargs = spec[2]
else:
kwargs = {}
spec = URLSpec(pattern, handler, kwargs)
handlers.append(spec)
if spec.name:
if spec.name in self.named_handlers:
app_log.warning(
"Multiple handlers named %s; replacing previous value",
spec.name)
self.named_handlers[spec.name] = spec
def add_transform(self, transform_class):
self.transforms.append(transform_class)
def _get_host_handlers(self, request):
host = request.host.lower().split(':')[0]
matches = []
for pattern, handlers in self.handlers:
if pattern.match(host):
matches.extend(handlers)
# Look for default host if not behind load balancer (for debugging)
if not matches and "X-Real-Ip" not in request.headers:
for pattern, handlers in self.handlers:
if pattern.match(self.default_host):
matches.extend(handlers)
return matches or None
def _load_ui_methods(self, methods):
if isinstance(methods, types.ModuleType):
self._load_ui_methods(dict((n, getattr(methods, n))
for n in dir(methods)))
elif isinstance(methods, list):
for m in methods:
self._load_ui_methods(m)
else:
for name, fn in methods.items():
if not name.startswith("_") and hasattr(fn, "__call__") \
and name[0].lower() == name[0]:
self.ui_methods[name] = fn
def _load_ui_modules(self, modules):
if isinstance(modules, types.ModuleType):
self._load_ui_modules(dict((n, getattr(modules, n))
for n in dir(modules)))
elif isinstance(modules, list):
for m in modules:
self._load_ui_modules(m)
else:
assert isinstance(modules, dict)
for name, cls in modules.items():
try:
if issubclass(cls, UIModule):
self.ui_modules[name] = cls
except TypeError:
pass
def __call__(self, request):
"""Called by HTTPServer to execute the request."""
transforms = [t(request) for t in self.transforms]
handler = None
args = []
kwargs = {}
handlers = self._get_host_handlers(request)
if not handlers:
handler = RedirectHandler(
self, request, url="http://" + self.default_host + "/")
else:
for spec in handlers:
match = spec.regex.match(request.path)
if match:
handler = spec.handler_class(self, request, **spec.kwargs)
if spec.regex.groups:
# None-safe wrapper around url_unescape to handle
# unmatched optional groups correctly
def unquote(s):
if s is None:
return s
return escape.url_unescape(s, encoding=None)
# Pass matched groups to the handler. Since
# match.groups() includes both named and unnamed groups,
# we want to use either groups or groupdict but not both.
# Note that args are passed as bytes so the handler can
# decide what encoding to use.
if spec.regex.groupindex:
kwargs = dict(
(str(k), unquote(v))
for (k, v) in match.groupdict().items())
else:
args = [unquote(s) for s in match.groups()]
break
if not handler:
handler = ErrorHandler(self, request, status_code=404)
# In debug mode, re-compile templates and reload static files on every
# request so you don't need to restart to see changes
if self.settings.get("debug"):
with RequestHandler._template_loader_lock:
for loader in RequestHandler._template_loaders.values():
loader.reset()
StaticFileHandler.reset()
handler._execute(transforms, *args, **kwargs)
return handler
def reverse_url(self, name, *args):
"""Returns a URL path for handler named ``name``
The handler must be added to the application as a named `URLSpec`.
Args will be substituted for capturing groups in the `URLSpec` regex.
They will be converted to strings if necessary, encoded as utf8,
and url-escaped.
"""
if name in self.named_handlers:
return self.named_handlers[name].reverse(*args)
raise KeyError("%s not found in named urls" % name)
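    # Example (illustrative): name a route via URLSpec (or its `url` alias,
    # defined at the bottom of this module), then reverse it.
    #
    #     app = Application([url(r"/user/([0-9]+)", UserHandler, name="user")])
    #     app.reverse_url("user", 42)   # -> "/user/42"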
def log_request(self, handler):
"""Writes a completed HTTP request to the logs.
By default writes to the python root logger. To change
this behavior either subclass Application and override this method,
or pass a function in the application settings dictionary as
``log_function``.
"""
if "log_function" in self.settings:
self.settings["log_function"](handler)
return
if handler.get_status() < 400:
log_method = access_log.info
elif handler.get_status() < 500:
log_method = access_log.warning
else:
log_method = access_log.error
request_time = 1000.0 * handler.request.request_time()
log_method("%d %s %.2fms", handler.get_status(),
handler._request_summary(), request_time)
class HTTPError(Exception):
"""An exception that will turn into an HTTP error response.
Raising an `HTTPError` is a convenient alternative to calling
`RequestHandler.send_error` since it automatically ends the
current function.
:arg int status_code: HTTP status code. Must be listed in
`httplib.responses <http.client.responses>` unless the ``reason``
keyword argument is given.
:arg string log_message: Message to be written to the log for this error
(will not be shown to the user unless the `Application` is in debug
mode). May contain ``%s``-style placeholders, which will be filled
in with remaining positional parameters.
:arg string reason: Keyword-only argument. The HTTP "reason" phrase
to pass in the status line along with ``status_code``. Normally
        determined automatically from ``status_code``, but can be set
to use a non-standard numeric code.
"""
def __init__(self, status_code, log_message=None, *args, **kwargs):
self.status_code = status_code
self.log_message = log_message
self.args = args
self.reason = kwargs.get('reason', None)
def __str__(self):
message = "HTTP %d: %s" % (
self.status_code,
self.reason or httputil.responses.get(self.status_code, 'Unknown'))
if self.log_message:
return message + " (" + (self.log_message % self.args) + ")"
else:
return message
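# Example (illustrative): raising HTTPError inside a handler method ends the
# request with that status; log_message is %s-formatted with the extra args
# and is only shown to clients when the application runs in debug mode.
#
#     raise HTTPError(404)
#     raise HTTPError(400, "missing argument %s", "user_id")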
class ErrorHandler(RequestHandler):
"""Generates an error response with ``status_code`` for all requests."""
def initialize(self, status_code):
self.set_status(status_code)
def prepare(self):
raise HTTPError(self._status_code)
def check_xsrf_cookie(self):
# POSTs to an ErrorHandler don't actually have side effects,
# so we don't need to check the xsrf token. This allows POSTs
# to the wrong url to return a 404 instead of 403.
pass
class RedirectHandler(RequestHandler):
"""Redirects the client to the given URL for all GET requests.
You should provide the keyword argument ``url`` to the handler, e.g.::
application = web.Application([
(r"/oldpath", web.RedirectHandler, {"url": "/newpath"}),
])
"""
def initialize(self, url, permanent=True):
self._url = url
self._permanent = permanent
def get(self):
self.redirect(self._url, permanent=self._permanent)
class StaticFileHandler(RequestHandler):
"""A simple handler that can serve static content from a directory.
To map a path to this handler for a static data directory ``/var/www``,
you would add a line to your application like::
application = web.Application([
(r"/static/(.*)", web.StaticFileHandler, {"path": "/var/www"}),
])
The local root directory of the content should be passed as the ``path``
argument to the handler.
To support aggressive browser caching, if the argument ``v`` is given
with the path, we set an infinite HTTP expiration header. So, if you
want browsers to cache a file indefinitely, send them to, e.g.,
``/static/images/myimage.png?v=xxx``. Override `get_cache_time` method for
more fine-grained cache control.
"""
CACHE_MAX_AGE = 86400 * 365 * 10 # 10 years
_static_hashes = {}
_lock = threading.Lock() # protects _static_hashes
def initialize(self, path, default_filename=None):
self.root = os.path.abspath(path) + os.path.sep
self.default_filename = default_filename
@classmethod
def reset(cls):
with cls._lock:
cls._static_hashes = {}
def head(self, path):
self.get(path, include_body=False)
def get(self, path, include_body=True):
path = self.parse_url_path(path)
abspath = os.path.abspath(os.path.join(self.root, path))
# os.path.abspath strips a trailing /
# it needs to be temporarily added back for requests to root/
if not (abspath + os.path.sep).startswith(self.root):
raise HTTPError(403, "%s is not in root static directory", path)
if os.path.isdir(abspath) and self.default_filename is not None:
# need to look at the request.path here for when path is empty
# but there is some prefix to the path that was already
# trimmed by the routing
if not self.request.path.endswith("/"):
self.redirect(self.request.path + "/")
return
abspath = os.path.join(abspath, self.default_filename)
if not os.path.exists(abspath):
raise HTTPError(404)
if not os.path.isfile(abspath):
raise HTTPError(403, "%s is not a file", path)
stat_result = os.stat(abspath)
modified = datetime.datetime.fromtimestamp(stat_result[stat.ST_MTIME])
self.set_header("Last-Modified", modified)
mime_type, encoding = mimetypes.guess_type(abspath)
if mime_type:
self.set_header("Content-Type", mime_type)
cache_time = self.get_cache_time(path, modified, mime_type)
if cache_time > 0:
self.set_header("Expires", datetime.datetime.utcnow() +
datetime.timedelta(seconds=cache_time))
self.set_header("Cache-Control", "max-age=" + str(cache_time))
self.set_extra_headers(path)
# Check the If-Modified-Since, and don't send the result if the
# content has not been modified
ims_value = self.request.headers.get("If-Modified-Since")
if ims_value is not None:
date_tuple = email.utils.parsedate(ims_value)
if_since = datetime.datetime.fromtimestamp(time.mktime(date_tuple))
if if_since >= modified:
self.set_status(304)
return
with open(abspath, "rb") as file:
data = file.read()
if include_body:
self.write(data)
else:
assert self.request.method == "HEAD"
self.set_header("Content-Length", len(data))
def set_extra_headers(self, path):
"""For subclass to add extra headers to the response"""
pass
def get_cache_time(self, path, modified, mime_type):
"""Override to customize cache control behavior.
Return a positive number of seconds to make the result
cacheable for that amount of time or 0 to mark resource as
cacheable for an unspecified amount of time (subject to
browser heuristics).
By default returns cache expiry of 10 years for resources requested
with ``v`` argument.
"""
return self.CACHE_MAX_AGE if "v" in self.request.arguments else 0
@classmethod
def make_static_url(cls, settings, path):
"""Constructs a versioned url for the given path.
This method may be overridden in subclasses (but note that it is
a class method rather than an instance method).
``settings`` is the `Application.settings` dictionary. ``path``
is the static path being requested. The url returned should be
relative to the current host.
"""
static_url_prefix = settings.get('static_url_prefix', '/static/')
version_hash = cls.get_version(settings, path)
if version_hash:
return static_url_prefix + path + "?v=" + version_hash
return static_url_prefix + path
@classmethod
def get_version(cls, settings, path):
"""Generate the version string to be used in static URLs.
This method may be overridden in subclasses (but note that it
is a class method rather than a static method). The default
implementation uses a hash of the file's contents.
``settings`` is the `Application.settings` dictionary and ``path``
is the relative location of the requested asset on the filesystem.
The returned value should be a string, or ``None`` if no version
could be determined.
"""
abs_path = os.path.join(settings["static_path"], path)
with cls._lock:
hashes = cls._static_hashes
if abs_path not in hashes:
try:
f = open(abs_path, "rb")
hashes[abs_path] = hashlib.md5(f.read()).hexdigest()
f.close()
except Exception:
gen_log.error("Could not open static file %r", path)
hashes[abs_path] = None
hsh = hashes.get(abs_path)
if hsh:
return hsh[:5]
return None
def parse_url_path(self, url_path):
"""Converts a static URL path into a filesystem path.
``url_path`` is the path component of the URL with
``static_url_prefix`` removed. The return value should be
filesystem path relative to ``static_path``.
"""
if os.path.sep != "/":
url_path = url_path.replace("/", os.path.sep)
return url_path
class FallbackHandler(RequestHandler):
"""A `RequestHandler` that wraps another HTTP server callback.
The fallback is a callable object that accepts an
`~.httpserver.HTTPRequest`, such as an `Application` or
`tornado.wsgi.WSGIContainer`. This is most useful to use both
Tornado ``RequestHandlers`` and WSGI in the same server. Typical
usage::
wsgi_app = tornado.wsgi.WSGIContainer(
django.core.handlers.wsgi.WSGIHandler())
application = tornado.web.Application([
(r"/foo", FooHandler),
(r".*", FallbackHandler, dict(fallback=wsgi_app),
])
"""
def initialize(self, fallback):
self.fallback = fallback
def prepare(self):
self.fallback(self.request)
self._finished = True
class OutputTransform(object):
"""A transform modifies the result of an HTTP request (e.g., GZip encoding)
A new transform instance is created for every request. See the
ChunkedTransferEncoding example below if you want to implement a
new Transform.
"""
def __init__(self, request):
pass
def transform_first_chunk(self, status_code, headers, chunk, finishing):
return status_code, headers, chunk
def transform_chunk(self, chunk, finishing):
return chunk
class GZipContentEncoding(OutputTransform):
"""Applies the gzip content encoding to the response.
See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11
"""
CONTENT_TYPES = set([
"text/plain", "text/html", "text/css", "text/xml", "application/javascript",
"application/x-javascript", "application/xml", "application/atom+xml",
"text/javascript", "application/json", "application/xhtml+xml"])
MIN_LENGTH = 5
def __init__(self, request):
self._gzipping = request.supports_http_1_1() and \
"gzip" in request.headers.get("Accept-Encoding", "")
def transform_first_chunk(self, status_code, headers, chunk, finishing):
if 'Vary' in headers:
headers['Vary'] += b', Accept-Encoding'
else:
headers['Vary'] = b'Accept-Encoding'
if self._gzipping:
ctype = _unicode(headers.get("Content-Type", "")).split(";")[0]
self._gzipping = (ctype in self.CONTENT_TYPES) and \
(not finishing or len(chunk) >= self.MIN_LENGTH) and \
(finishing or "Content-Length" not in headers) and \
("Content-Encoding" not in headers)
if self._gzipping:
headers["Content-Encoding"] = "gzip"
self._gzip_value = BytesIO()
self._gzip_file = gzip.GzipFile(mode="w", fileobj=self._gzip_value)
chunk = self.transform_chunk(chunk, finishing)
if "Content-Length" in headers:
headers["Content-Length"] = str(len(chunk))
return status_code, headers, chunk
def transform_chunk(self, chunk, finishing):
if self._gzipping:
self._gzip_file.write(chunk)
if finishing:
self._gzip_file.close()
else:
self._gzip_file.flush()
chunk = self._gzip_value.getvalue()
self._gzip_value.truncate(0)
self._gzip_value.seek(0)
return chunk
class ChunkedTransferEncoding(OutputTransform):
"""Applies the chunked transfer encoding to the response.
See http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.6.1
"""
def __init__(self, request):
self._chunking = request.supports_http_1_1()
def transform_first_chunk(self, status_code, headers, chunk, finishing):
# 304 responses have no body (not even a zero-length body), and so
# should not have either Content-Length or Transfer-Encoding headers.
if self._chunking and status_code != 304:
# No need to chunk the output if a Content-Length is specified
if "Content-Length" in headers or "Transfer-Encoding" in headers:
self._chunking = False
else:
headers["Transfer-Encoding"] = "chunked"
chunk = self.transform_chunk(chunk, finishing)
return status_code, headers, chunk
def transform_chunk(self, block, finishing):
if self._chunking:
# Don't write out empty chunks because that means END-OF-STREAM
# with chunked encoding
if block:
block = utf8("%x" % len(block)) + b"\r\n" + block + b"\r\n"
if finishing:
block += b"0\r\n\r\n"
return block
def authenticated(method):
"""Decorate methods with this to require that the user be logged in.
If the user is not logged in, they will be redirected to the configured
`login url <RequestHandler.get_login_url>`.
"""
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
if not self.current_user:
if self.request.method in ("GET", "HEAD"):
url = self.get_login_url()
if "?" not in url:
if urlparse.urlsplit(url).scheme:
# if login url is absolute, make next absolute too
next_url = self.request.full_url()
else:
next_url = self.request.uri
url += "?" + urlencode(dict(next=next_url))
self.redirect(url)
return
raise HTTPError(403)
return method(self, *args, **kwargs)
return wrapper
class UIModule(object):
"""A re-usable, modular UI unit on a page.
UI modules often execute additional queries, and they can include
additional CSS and JavaScript that will be included in the output
page, which is automatically inserted on page render.
"""
def __init__(self, handler):
self.handler = handler
self.request = handler.request
self.ui = handler.ui
self.current_user = handler.current_user
self.locale = handler.locale
def render(self, *args, **kwargs):
"""Overridden in subclasses to return this module's output."""
raise NotImplementedError()
def embedded_javascript(self):
"""Returns a JavaScript string that will be embedded in the page."""
return None
def javascript_files(self):
"""Returns a list of JavaScript files required by this module."""
return None
def embedded_css(self):
"""Returns a CSS string that will be embedded in the page."""
return None
def css_files(self):
"""Returns a list of CSS files required by this module."""
return None
def html_head(self):
"""Returns a CSS string that will be put in the <head/> element"""
return None
def html_body(self):
"""Returns an HTML string that will be put in the <body/> element"""
return None
def render_string(self, path, **kwargs):
"""Renders a template and returns it as a string."""
return self.handler.render_string(path, **kwargs)
class _linkify(UIModule):
def render(self, text, **kwargs):
return escape.linkify(text, **kwargs)
class _xsrf_form_html(UIModule):
def render(self):
return self.handler.xsrf_form_html()
class TemplateModule(UIModule):
"""UIModule that simply renders the given template.
{% module Template("foo.html") %} is similar to {% include "foo.html" %},
but the module version gets its own namespace (with kwargs passed to
Template()) instead of inheriting the outer template's namespace.
Templates rendered through this module also get access to UIModule's
automatic javascript/css features. Simply call set_resources
inside the template and give it keyword arguments corresponding to
the methods on UIModule: {{ set_resources(js_files=static_url("my.js")) }}
Note that these resources are output once per template file, not once
per instantiation of the template, so they must not depend on
any arguments to the template.
"""
def __init__(self, handler):
super(TemplateModule, self).__init__(handler)
# keep resources in both a list and a dict to preserve order
self._resource_list = []
self._resource_dict = {}
def render(self, path, **kwargs):
def set_resources(**kwargs):
if path not in self._resource_dict:
self._resource_list.append(kwargs)
self._resource_dict[path] = kwargs
else:
if self._resource_dict[path] != kwargs:
raise ValueError("set_resources called with different "
"resources for the same template")
return ""
return self.render_string(path, set_resources=set_resources,
**kwargs)
def _get_resources(self, key):
return (r[key] for r in self._resource_list if key in r)
def embedded_javascript(self):
return "\n".join(self._get_resources("embedded_javascript"))
def javascript_files(self):
result = []
for f in self._get_resources("javascript_files"):
if isinstance(f, (unicode_type, bytes_type)):
result.append(f)
else:
result.extend(f)
return result
def embedded_css(self):
return "\n".join(self._get_resources("embedded_css"))
def css_files(self):
result = []
for f in self._get_resources("css_files"):
if isinstance(f, (unicode_type, bytes_type)):
result.append(f)
else:
result.extend(f)
return result
def html_head(self):
return "".join(self._get_resources("html_head"))
def html_body(self):
return "".join(self._get_resources("html_body"))
class URLSpec(object):
"""Specifies mappings between URLs and handlers."""
def __init__(self, pattern, handler_class, kwargs=None, name=None):
"""Parameters:
* ``pattern``: Regular expression to be matched. Any groups
in the regex will be passed in to the handler's get/post/etc
methods as arguments.
* ``handler_class``: `RequestHandler` subclass to be invoked.
* ``kwargs`` (optional): A dictionary of additional arguments
to be passed to the handler's constructor.
* ``name`` (optional): A name for this handler. Used by
`Application.reverse_url`.
"""
if not pattern.endswith('$'):
pattern += '$'
self.regex = re.compile(pattern)
assert len(self.regex.groupindex) in (0, self.regex.groups), \
("groups in url regexes must either be all named or all "
"positional: %r" % self.regex.pattern)
self.handler_class = handler_class
self.kwargs = kwargs or {}
self.name = name
self._path, self._group_count = self._find_groups()
def __repr__(self):
return '%s(%r, %s, kwargs=%r, name=%r)' % \
(self.__class__.__name__, self.regex.pattern,
self.handler_class, self.kwargs, self.name)
def _find_groups(self):
"""Returns a tuple (reverse string, group count) for a url.
For example: Given the url pattern /([0-9]{4})/([a-z-]+)/, this method
would return ('/%s/%s/', 2).
"""
pattern = self.regex.pattern
if pattern.startswith('^'):
pattern = pattern[1:]
if pattern.endswith('$'):
pattern = pattern[:-1]
if self.regex.groups != pattern.count('('):
# The pattern is too complicated for our simplistic matching,
# so we can't support reversing it.
return (None, None)
pieces = []
for fragment in pattern.split('('):
if ')' in fragment:
paren_loc = fragment.index(')')
if paren_loc >= 0:
pieces.append('%s' + fragment[paren_loc + 1:])
else:
pieces.append(fragment)
return (''.join(pieces), self.regex.groups)
def reverse(self, *args):
assert self._path is not None, \
"Cannot reverse url regex " + self.regex.pattern
assert len(args) == self._group_count, "required number of arguments "\
"not found"
if not len(args):
return self._path
converted_args = []
for a in args:
if not isinstance(a, (unicode_type, bytes_type)):
a = str(a)
converted_args.append(escape.url_escape(utf8(a)))
return self._path % tuple(converted_args)
url = URLSpec
if hasattr(hmac, 'compare_digest'): # python 3.3
_time_independent_equals = hmac.compare_digest
else:
def _time_independent_equals(a, b):
if len(a) != len(b):
return False
result = 0
if isinstance(a[0], int): # python3 byte strings
for x, y in zip(a, b):
result |= x ^ y
else: # python2
for x, y in zip(a, b):
result |= ord(x) ^ ord(y)
return result == 0
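# Why constant time matters (sketch): a naive `a == b` can return as soon as
# the first byte differs, leaking how much of a signature an attacker has
# guessed. OR-ing the XOR of every byte pair keeps the runtime independent
# of where the mismatch occurs:
#
#     _time_independent_equals(b"abcd", b"abcd")  # True
#     _time_independent_equals(b"abcd", b"abce")  # False, same duration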
def create_signed_value(secret, name, value):
timestamp = utf8(str(int(time.time())))
value = base64.b64encode(utf8(value))
signature = _create_signature(secret, name, value, timestamp)
value = b"|".join([value, timestamp, signature])
return value
def decode_signed_value(secret, name, value, max_age_days=31):
if not value:
return None
parts = utf8(value).split(b"|")
if len(parts) != 3:
return None
signature = _create_signature(secret, name, parts[0], parts[1])
if not _time_independent_equals(parts[2], signature):
gen_log.warning("Invalid cookie signature %r", value)
return None
timestamp = int(parts[1])
if timestamp < time.time() - max_age_days * 86400:
gen_log.warning("Expired cookie %r", value)
return None
if timestamp > time.time() + 31 * 86400:
# _cookie_signature does not hash a delimiter between the
# parts of the cookie, so an attacker could transfer trailing
# digits from the payload to the timestamp without altering the
# signature. For backwards compatibility, sanity-check timestamp
# here instead of modifying _cookie_signature.
gen_log.warning("Cookie timestamp in future; possible tampering %r", value)
return None
if parts[1].startswith(b"0"):
gen_log.warning("Tampered cookie %r", value)
return None
try:
return base64.b64decode(parts[0])
except Exception:
return None
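# Round-trip sketch (hypothetical secret): the signed value has the layout
# base64(value) + b"|" + timestamp + b"|" + HMAC-SHA1 signature.
#
#     signed = create_signed_value("my-secret", "user", "42")
#     decode_signed_value("my-secret", "user", signed)     # -> b"42"
#     decode_signed_value("wrong-secret", "user", signed)  # -> None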
def _create_signature(secret, *parts):
hash = hmac.new(utf8(secret), digestmod=hashlib.sha1)
for part in parts:
hash.update(utf8(part))
return utf8(hash.hexdigest())
| gpl-3.0 | 7,279,264,925,478,021,000 | 38.822596 | 96 | 0.590973 | false |
black-silence/PlanetNomadsSavegameEditor | PlanetNomads/Savegame.py | 1 | 30585 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sqlite3
import xml.etree.ElementTree as ETree
import re
import random
import zipfile
import os
import atexit
from math import sqrt
from collections import OrderedDict
class Savegame:
def __init__(self):
self.filename = ""
self.temp_extracted_file = ""
self.loaded = False
self.dbconnector = None
self.db = None
self.__machines = []
self.settings = None
atexit.register(self.cleanup)
def __del__(self):
self.cleanup()
def cleanup(self):
if self.db:
self.db.close()
self.db = None
os.remove(self.temp_extracted_file)
def load(self, filename):
self.filename = filename
with zipfile.ZipFile(filename, "r") as myzip:
self.temp_extracted_file = myzip.extract("_working.db", "PNSE_extract")
self.dbconnector = sqlite3.connect(self.temp_extracted_file)
self.db = self.dbconnector.cursor()
self.db.row_factory = sqlite3.Row
self.loaded = True
self.reset()
def reset(self):
self.__machines = []
def get_name(self):
if not self.loaded:
raise ValueError("No file loaded")
self.db.execute("select value from simple_storage where key = 'game_name'")
return self.db.fetchone()["value"]
def teleport_player(self, x, y, z):
self.db.execute("select value from simple_storage where key = 'playerData'")
player_data = self.db.fetchone()["value"]
lines = player_data.split("\n")
for key, line in enumerate(lines):
if line.startswith("PL"):
continue
current_position = line.split(" ")
current_position[0] = "{:0.3f}".format(x)
current_position[1] = "{:0.3f}".format(y)
current_position[2] = "{:0.3f}".format(z)
lines[key] = " ".join(current_position)
player_data = "\n".join(lines)
self.db.execute("update simple_storage set value = ? where key = 'playerData'", (player_data,))
self.on_save()
return True
def get_player_position(self):
self.db.execute("select value from simple_storage where key = 'playerData'")
player_data = self.db.fetchone()["value"]
lines = player_data.split("\n")
for key, line in enumerate(lines):
if line.startswith("PL"):
continue
return [float(x) for x in line.split(" ")[:3]]
raise IOError("Player data not found in simple_storage")
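    # Usage sketch (hypothetical save file): a Planet Nomads save is a zip
    # archive containing _working.db, so load() takes the archive path.
    #
    #     game = Savegame()
    #     game.load("Save_01.db")
    #     print(game.get_player_position())
    #     game.teleport_player(0, game.get_planet_size(), 0)  # north pole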
def get_setting(self, name):
if not self.settings:
self.db.execute("select value from simple_storage where key='advanced_settings'")
try:
self.settings = ETree.fromstring(self.db.fetchone()["value"])
except TypeError:
# Old games don't have advanced settings in simple storage
return None
for tag in self.settings:
if tag.tag == name:
return tag.text
return None
@property
def machines(self):
if not self.__machines:
self.__load_machines()
return self.__machines
def __load_machines(self):
self.db.execute("select * from machine")
for row in self.db.fetchall():
self.__machines.append(Machine(row, self.db))
self.db.execute("select * from active_blocks")
active_block_data = self.db.fetchall()
for m in self.__machines:
m.set_active_blocks(active_block_data)
def on_save(self):
self.dbconnector.commit()
self.write_zip()
def save(self):
for m in self.__machines:
if not m.is_changed():
continue
data = '<?xml version="1.0" encoding="utf-8"?>' + m.get_xml_string()
update = (data, m.transform, m.identifier)
self.db.execute("update machine set data = ?, transform = ? where id = ?", update)
# write changed active blocks too, required for pushing stuff around
active_blocks = m.get_changed_active_blocks()
for b in active_blocks:
update = (active_blocks[b].get_xml_string(), b)
self.db.execute("update active_blocks set data = ? where id = ?", update)
self.on_save()
def write_zip(self):
# PN uses deflate so to be safe this is the mode we want to use
with zipfile.ZipFile(self.filename, "w", zipfile.ZIP_DEFLATED) as myzip:
myzip.write(os.path.join("PNSE_extract", "_working.db"), "_working.db")
def unlock_recipes(self):
unlock_string = "PL1\n" + "_".join([str(i) for i in range(1, 100)])
self.db.execute("update simple_storage set value = ? where key = 'playerTechnology'", (unlock_string,))
affected = self.db.rowcount
self.on_save()
return affected > 0
def debug(self):
print("Debug info")
print('Name: {}'.format(self.get_name()))
print("Number of machines: {}".format(len(self.machines)))
def get_planet_size(self):
radius = self.get_setting("PlanetRadius")
if radius:
return int(radius)
# Old games had 10k, even older games may have 16k. Not important enough to calculate it.
return 10000
def get_player_inventory(self):
inventory = Container(self.db, self.on_save)
if not inventory.load(0):
return
return inventory
def create_north_pole_beacon(self):
"""Create a solar beacon with navigation C on at the north pole."""
self.create_beacon(0, self.get_planet_size(), 0)
def create_south_pole_beacon(self):
"""Create a solar beacon with navigation C on at the south pole."""
self.create_beacon(0, -1 * self.get_planet_size(), 0, rot_z=-180)
def create_gps_beacons(self):
self.create_beacon(0, self.get_planet_size(), 0) # North pole
self.create_beacon(self.get_planet_size(), 0, 0, rot_z=90)
self.create_beacon(0, 0, self.get_planet_size(), rot_z=90)
def create_beacon(self, x, y, z, rot_x=0, rot_y=0, rot_z=0):
self.db.execute("select max(id) as mx from active_blocks")
next_active_id = int(self.db.fetchone()["mx"]) + 1
xml = '<ActiveBlock xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" ' \
'xmlns:xsd="http://www.w3.org/2001/XMLSchema" ID="{}" Type_ID="56" Container_ID="-1" Name="">' \
'<Module ID="0" Type="SwitchModule"><Prop key="TurnState"><value xsi:type="xsd:int">1</value></Prop></Module>' \
'<Module ID="1" Type="PowerIn" />' \
'<Module ID="2" Type="PositionModule"><Prop key="BasePosition"><value xsi:type="xsd:string">{:0.0f};{:0.0f};{:0.0f}</value></Prop></Module>' \
'<Module ID="3" Type="PowerOut"><Prop key="PowerState"><value xsi:type="xsd:int">0</value></Prop></Module>' \
'<Module ID="4" Type="SwitchModule"><Prop key="TurnState"><value xsi:type="xsd:int">0</value></Prop></Module>' \
'<Module ID="5" Type="SensorModule" />' \
'<Module ID="6" Type="RenameModule" />' \
'<Module ID="7" Type="ConnectPowerInOutModule" />' \
'<Module ID="8" Type="NavigationModule"><Prop key="Icon"><value xsi:type="xsd:int">2</value></Prop><Prop key="TurnState"><value xsi:type="xsd:int">1</value></Prop></Module>' \
'</ActiveBlock>'.format(next_active_id, x, y, z)
sql = "INSERT INTO active_blocks (id, type_id, data, container_id) VALUES (?, 56, ?, -1)"
self.db.execute(sql, (next_active_id, xml))
sql = 'INSERT INTO machine (id, data, transform) VALUES (?, ?, ' \
'"{:0.0f} {:0.0f} {:0.0f} {:0.0f} {:0.0f} {:0.0f}")'.format(x, y, z, rot_x, rot_y, rot_z)
machine_id = random.Random().randint(1000000, 10000000) # Is there a system behind the ID?
xml = '<?xml version="1.0" encoding="utf-8"?>\n' \
'<MachineSaveData xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema">\n' \
'<Grid ID="{}">\n' \
'<BasePosition X="{:0.0f}" Y="{:0.0f}" Z="{:0.0f}" />' \
'<BaseRotation X="{:0.0f}" Y="{:0.0f}" Z="{:0.0f}" />' \
'<Blocks>\n' \
'<Block ID="56" Health="80" Weld="80" Ground="true" ActiveID="{}">' \
'<Pos x="0" y="0" z="0" /><Rot v="0" /><Col r="0" g="0" b="0" />' \
'</Block>\n' \
'</Blocks>\n</Grid>\n</MachineSaveData>\n'.format(machine_id, x, y, z, rot_x, rot_y, rot_z, next_active_id)
self.db.execute(sql, (machine_id, xml))
# Solar beacon is self powered
sql = 'INSERT INTO activeblocks_connector_power (block_id_1, module_id_1, block_id_2, module_id_2, power) ' \
'VALUES (?, 3, ?, 1, 20)'
self.db.execute(sql, (next_active_id, next_active_id))
# No idea what this does
sql = 'INSERT INTO machine_rtree_rowid (rowid, nodeno) VALUES (?, 1)'
self.db.execute(sql, (machine_id,))
# Insert into machine_rtree seems unhealthy
self.on_save()
class Container:
"""0-based, player inventory = index 0
contents is 0-based, serialized json-like
first item is probably a version
v:1,0:{package:com.planetnomads, id:59, count:1, props:},1:{...},
"""
stacks = {}
size = 0
db_key = None
def __init__(self, db, save_callback):
self.db = db
self.save_callback = save_callback
def load(self, key):
"""Load container from db
:return bool
"""
sql = "select * from containers where id = ?"
self.db.execute(sql, (key,))
row = self.db.fetchone()
if not row:
return False
self.size = row["size"]
self.stacks = ContentParser.parse_item_stack(row["content"])
self.db_key = key
return True
def save(self):
sorted_keys = sorted(self.stacks)
s = []
for key in sorted_keys:
s.append("{}:{}".format(key, self.stacks[key].get_db_string()))
sql = "update containers set content = ? where id = ?"
self.db.execute(sql, ("v:1," + ",".join(s) + ",", self.db_key))
self.save_callback()
return True
def get_stacks(self):
return self.stacks
def add_stack(self, item, count):
if len(self.stacks) >= self.size:
return False
for i in range(self.size):
stack = self.stacks.get(i, None)
if stack:
continue # skip all stacks that are occupied
self.stacks[i] = Stack(item, count=count)
return True
def __str__(self):
return "Container with {} slots, {} slots used".format(self.size, len(self.stacks))
class ContentParser:
"""
Content is 0-based, serialized json-like. The number shows the slot in the container, empty slots are skipped.
~0.6.8 added a version number as first item
Example: v:1,0:{package:com.planetnomads, id:59, count:1, props:},10:{...},
"""
@staticmethod
def parse_item_stack(content):
# TODO check version number
start = content.find(",")
content = content[start + 1:] # Remove version number because it breaks my nice regexes
regex_val = re.compile(r"[, {](\w+):([^,}]*)[,}]")
regex_slot = re.compile(r"^(\d+):{")
parts = re.split(r"(?<=}),(?=\d+:{|$)", content)
result = {}
for part in parts:
if part == "":
continue
m = regex_slot.match(part)
if m:
key = int(m.group(1))
else:
continue
vars = {}
m = regex_val.findall(part)
if m:
for k, v in m:
if k == "id":
item_id = int(v)
elif k == "count":
vars[k] = int(v)
else:
vars[k] = v
item = Item(item_id)
stack = Stack(item, **vars)
result[key] = stack
return result
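    # Illustrative round trip (hypothetical slot string, not from a real save):
    #   parse_item_stack("v:1,0:{package:com.planetnomads, id:49, count:5, props:},")
    # returns {0: <Stack of 5 Carbon>} -- the slot index maps to a Stack object.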
class Stack:
def __init__(self, item, count=1, package="com.planetnomads", props="False", infinityCount="False"):
self.item = item
self.count = count
self.package = package
self.props = props
self.infinity_count = infinityCount
def get_item_name(self):
return self.item.get_name()
def get_count(self):
return self.count
def get_db_string(self):
start = "{"
end = "}"
data = "package:{}, id:{}, count:{}, infinityCount:{}, props:{}".format(self.package, self.item.item_type,
self.count, self.infinity_count,
self.props)
return start + data + end
def __str__(self):
return "Stack of {} {}".format(self.get_count(), self.item.get_name())
class Item:
names = {
33: "Battery",
49: "Carbon",
51: "Aluminium",
52: "Silicium",
56: "Iron",
57: "Titanium",
58: "Gold",
59: "Silver",
60: "Cobalt",
61: "Uranium",
62: "Xaenite",
63: "Enriched Uranium",
64: "Deuterium",
65: "Xaenite Rod",
67: "Plating",
68: "Composite Plating",
69: "Basic Frame",
70: "Reinforced Frame",
72: "Glass Components",
73: "Standard Electronics",
74: "SuperConductive Electronics",
75: "Quantum Electronics",
76: "Standard Mechanical Components",
77: "SuperAlloy Mechanical",
78: "Composite Parts",
79: "Advanced Composite Parts",
80: "Fabric Mk1",
81: "Fabric Mk2",
82: "ALM",
83: "Advanced ALM",
84: "Super ALM",
86: "Fruitage",
87: "Dirty Water",
88: "Herbs",
89: "Raw Meat",
90: "Purified Water",
91: "Electrolytes Water",
92: "Nutrition Capsules",
93: "Super Food",
95: "Bandages",
96: "Stimulation Injection",
108: "Exploration Suit Mk2",
109: "Exploration Suit Mk3",
110: "Exploration Suit Mk4",
112: "Jetpack Mk2",
113: "Jetpack Mk3",
114: "Jetpack Mk4",
116: "MultiTool Mk2",
117: "MultiTool Mk3",
118: "MultiTool Mk4",
392745: "Biomass Container",
9550358: "Seeds",
11691828: "Sleeping Bag",
}
def __init__(self, item_type: int):
self.item_type = item_type
def get_name(self):
if self.item_type in self.names:
return self.names[self.item_type]
return "unknown item type {}".format(self.item_type)
class Machine:
"""
0 16000 0 0 0 0 = north pole at sea level
0 -16000 0 0 0 180 = south pole at sea level, "upside down"
planet diameter is 32km
"""
def __init__(self, db_data, db):
self.identifier = db_data['id']
self.xml = db_data['data']
self.transform = db_data['transform']
self.loaded = False
self.grid = [] # Only one grid per machine
self.changed = False
self.active_block_ids = []
self.db = db
self.name = None
self.type = None
root = ETree.fromstring(self.xml)
for node in root:
if node.tag == "Grid":
self.grid.append(Grid(node))
else:
raise IOError("Unexpected element %s in machine" % node.tag)
self.active_block_ids = self.grid[0].get_active_block_ids()
self.active_block_data = {}
@property
def grids(self):
return self.grid
def set_active_blocks(self, data):
for row in data:
if row["id"] not in self.active_block_ids:
continue
self.active_block_data[row["id"]] = ActiveBlock(row["data"])
def randomize_color(self):
for g in self.grids:
g.randomize_color()
self.changed = True
def set_color(self, color, replace=None):
for g in self.grids:
g.set_color(color, replace)
self.changed = True
def get_xml_string(self):
"""Save the current machine, replaces original xml"""
xml = ETree.Element("MachineSaveData")
for g in self.grid:
g.build_xml(xml)
return ETree.tostring(xml, "unicode")
def is_changed(self):
return self.changed
def get_changed_active_blocks(self):
result = {}
for aid in self.active_block_data:
active_block = self.active_block_data[aid]
if active_block.changed:
result[aid] = active_block
return result
def __str__(self):
return "Machine {} ({})".format(
self.get_name_or_id(),
self.get_type()
)
def is_grounded(self):
for g in self.grids: # TODO only 1 grid per machine now
if g.is_grounded():
return True
return False
def teleport(self, distance: int, target):
"""Teleport machine over/under the target."""
rot_x, rot_y, rot_z = self.get_rotation()
(x, y, z) = self.get_coordinates()
(target_x, target_y, target_z) = target.get_coordinates()
distance_to_planet_center = sqrt(target_x ** 2 + target_y ** 2 + target_z ** 2)
factor = 1 + distance / distance_to_planet_center
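        # Scaling the target vector by this factor shifts the point `distance`
        # metres outward along the radial line from the planet centre.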
target_x2 = target_x * factor # TODO use np
target_y2 = target_y * factor
target_z2 = target_z * factor
self.transform = "{:0.3f} {:0.3f} {:0.3f} {} {} {}".format(target_x2, target_y2, target_z2, rot_x, rot_y, rot_z)
# Use the exact difference to move subgrids, this is important or the object will disappear
difference = (target_x2 - x, target_y2 - y, target_z2 - z)
for g in self.grid:
g.move_by(difference, self.active_block_data)
self.changed = True
    def get_rotation(self):
        """Get the rotation as a tuple of strings (rotX, rotY, rotZ)."""
        _x, _y, _z, rot_x, rot_y, rot_z = self.transform.split(" ")
        return rot_x, rot_y, rot_z
    def get_coordinates(self):
        """Get the coordinates as a list of floats [x, y, z]."""
        x, y, z = self.transform.split(" ")[:3]
        return [float(i) for i in (x, y, z)]
def get_name_or_id(self):
n = self.get_name()
if n:
return n
return self.identifier
def get_type(self):
if self.type:
return self.type
self.type = "Construct"
if not self.is_grounded():
if self.has_cockpit():
self.type = "Vehicle"
return "Vehicle"
# If it has no cockpit it's random scattered blocks
return "Construct"
if self.has_generator():
self.type = "Base"
return "Base"
return "Construct"
def get_name(self):
if self.name is not None:
return self.name
for g in self.grids:
name = g.get_name(self.active_block_data)
if name:
self.name = name
return name
self.name = ""
return ""
def has_cockpit(self):
return self.grids[0].has_cockpit()
def has_generator(self):
return self.grids[0].has_generator()
class XmlNode:
"""Basic XML node"""
def __init__(self, node):
self.type = node.tag
self._attribs = OrderedDict()
self._children = []
for a in node.attrib:
self._attribs[a] = node.attrib[a]
expected_children = self.get_expected_children_types()
for item in node:
if item.tag in expected_children:
self._children.append(globals()[item.tag](item)) # Create object from class name
else:
print("Unexpected children type %s" % item.tag)
def get_attribs(self):
"""Get attributes in the original order, much easier to diff xml this way"""
return self._attribs
def get_children(self):
return self._children
def build_xml(self, xml):
sub = ETree.SubElement(xml, self.type, self.get_attribs())
for c in self._children:
c.build_xml(sub)
def get_expected_children_types(self):
return []
class MachineNode(XmlNode):
def get_active_block_ids(self):
res = []
for c in self._children:
try:
res.extend(c.get_active_block_ids())
except AttributeError:
pass # Class doesn't have active blocks
return res
def is_grounded(self):
for c in self.get_children():
try:
if c.is_grounded():
return True
except AttributeError:
pass
return False
def has_cockpit(self):
for x in self.get_children():
try:
if x.has_cockpit():
return True
except AttributeError:
pass
return False
def has_generator(self):
for c in self.get_children():
try:
if c.has_generator():
return True
except AttributeError:
pass
return False
def has_hoverjack(self):
for c in self.get_children():
try:
if c.has_hoverjack():
return True
except AttributeError:
pass
return False
def get_name(self, active_block_data):
for c in self.get_children():
try:
name = c.get_name(active_block_data)
if name != "":
return name
except AttributeError:
pass
return ""
def get_expected_children_types(self):
return ['Grid']
def move_by(self, vector, active_block_data):
for c in self._children:
try:
c.move_by(vector, active_block_data)
except AttributeError:
pass
def randomize_color(self):
for c in self._children:
try:
c.randomize_color()
except AttributeError:
pass
def set_color(self, color, replace):
for c in self._children:
try:
c.set_color(color, replace)
except AttributeError:
pass
class Blocks(MachineNode):
def get_expected_children_types(self):
return ['Block']
class BasePosition(XmlNode):
def move_by(self, vector, active_blocks):
x = float(self._attribs["X"])
y = float(self._attribs["Y"])
z = float(self._attribs["Z"])
self._attribs["X"] = "{:0.5f}".format(x + vector[0])
self._attribs["Y"] = "{:0.5f}".format(y + vector[1])
self._attribs["Z"] = "{:0.5f}".format(z + vector[2])
class BaseRotation(XmlNode):
pass
class BaseBounds(XmlNode):
def move_by(self, vector, active_blocks):
x = float(self._attribs["MinX"])
y = float(self._attribs["MinY"])
z = float(self._attribs["MinZ"])
self._attribs["MinX"] = "{:0.5f}".format(x + vector[0])
self._attribs["MinY"] = "{:0.5f}".format(y + vector[1])
self._attribs["MinZ"] = "{:0.5f}".format(z + vector[2])
x = float(self._attribs["MaxX"])
y = float(self._attribs["MaxY"])
z = float(self._attribs["MaxZ"])
self._attribs["MaxX"] = "{:0.5f}".format(x + vector[0])
self._attribs["MaxY"] = "{:0.5f}".format(y + vector[1])
self._attribs["MaxZ"] = "{:0.5f}".format(z + vector[2])
class DistancePhysicsFreezeData(XmlNode):
pass
class Pos(XmlNode):
pass
class Rot(XmlNode):
pass
class Col(XmlNode):
def randomize_color(self):
self._attribs["r"] = str(random.randrange(0, 255))
self._attribs["g"] = str(random.randrange(0, 255))
self._attribs["b"] = str(random.randrange(0, 255))
def set_color(self, color, replace):
if replace:
if int(self._attribs["r"]) != replace[0]:
return
if int(self._attribs["g"]) != replace[1]:
return
if int(self._attribs["b"]) != replace[2]:
return
self._attribs["r"] = str(int(color[0]))
self._attribs["g"] = str(int(color[1]))
self._attribs["b"] = str(int(color[2]))
class Grid(MachineNode):
"""Every machine has 1 Grid which contains 1 Blocks"""
def get_expected_children_types(self):
return ['Blocks', 'BasePosition', 'BaseRotation', 'BaseBounds', 'DistancePhysicsFreezeData']
class SubGrid(Grid):
pass
class ActiveBlock:
def __init__(self, xml):
self.root = ETree.fromstring(xml)
self.name = self.root.attrib.get("Name", "")
self.changed = False
def get_xml_string(self):
return ETree.tostring(self.root, "unicode")
def get_name(self):
return self.name
def move_by(self, vector):
for node in self.root:
if node.tag != "Module":
continue
if node.attrib["Type"] != "PositionModule":
continue
position = node[0][0].text
x, y, z = [float(i) for i in position.split(";")]
node[0][0].text = "{:0.3f};{:0.3f};{:0.3f}".format(x + vector[0], y + vector[1], z + vector[2])
self.changed = True
class Block(MachineNode):
types = {
1: "Full Armor Block",
2: "Corner Armor Block",
3: "Compact Battery Rack",
4: "Cockpit 2x3",
5: "Reinforced Wall",
6: "Armor Corner Slope - Inverted",
7: "Armor Corner Slope - Long Inverted",
8: "Armor Corner Slope",
9: "Armor Slope Long",
10: "Armor Slope Corner (Long)",
11: "Armor Slope",
#12 active
13: "Conveyor L-Section",
14: "Conveyor",
15: "Conveyor T-Section",
16: "Conveyor X-Section",
#17 probly active block
18: "Wheel",
19: "Compact Container",
20: "Bio Generator",
21: "Reinforced Wall with Light",
22: "Reinforced Wall - Short",
23: "Reinforced Wall Corner",
24: "Reinforced Wall Outer Corner",
25: "Base Foundation (double height)",
26: "Raised Floor",
#27
28: "Compact Medbay",
29: "Medium Refinery",
#30
#31
32: "Reinforced Wall with Door",
33: "Ceiling Panel",
34: "Suspension",
#35 probly active
36: "Jack tool",
37: "Hover Jack",
38: "Railing",
39: "Short Railing",
40: "Stairs",
41: "Beacon",
42: "Uranium Generator",
43: "Ceiling Light",
44: "Indoor Light",
45: "Search Light - Front Mount",
46: "Search Light - Top Mount",
47: "Large Container",
48: "Fence",
49: "Fence Corner",
50: "Ramp",
51: "Inner Wall with Doors",
52: "Reinforced Wall Exterior/Interior Joint",
53: "Short inner wall",
54: "Inner Wall",
55: "Windowed Outer Wall",
56: "Solar Beacon",
57: "Escape pod",
61: "Base Foundation",
64: "Emergency 3D printer",
66: "Hinge",
68: "Rotating Plate",
71: "Item Dispenser",
73: "Mining Machine",
76: "Medium Armory",
78: "Medium Medbay",
79: "Escape Pod (broken)", # 3k health
80: "Radar", # 300 health
81: "Winch",
82: "Winch Shackle",
83: "Thruster", # 300 health
84: "Tank", # 250 health
85: "Big Tank", # 750 health
86: "Sloped Arc Corner",
87: "Corner Arc",
88: "Arc Block",
#89
90: "Wreck Container",
91: "Wreck Beacon",
92: "Cockpit 3x3",
93: "Rounded Cockpit 2x3",
94: "Buggy Wheel",
95: "Mobile Base Wheel",
96: "Large Suspension",
97: "Rounded Cockpit 3x3",
98: "Switchboard",
100: "Hover Pad",
101: "Floating Foundation",
114: "Air Blade",
126: "Glassed Cockpit 3x3",
}
def is_grounded(self):
return "Ground" in self._attribs and self._attribs["Ground"] == "true"
def get_active_block_id(self):
if "ActiveID" in self._attribs:
return int(self._attribs["ActiveID"])
return None
def get_active_block_ids(self):
result = []
active_id = self.get_active_block_id()
if active_id:
result.append(active_id)
result.extend(super(Block, self).get_active_block_ids())
return result
def get_active_block(self, active_blocks):
aid = self.get_active_block_id()
if aid:
if aid == 0:
pass
elif aid in active_blocks:
return active_blocks[aid]
else:
# Avoid crash if active block did not load. Why is it missing though?
print("Active block %i not found" % aid)
return None
def get_name(self, active_blocks):
active_block = self.get_active_block(active_blocks) # type: ActiveBlock
if active_block:
name = active_block.get_name()
if name:
return name
return super().get_name(active_blocks)
def has_cockpit(self):
if self._attribs["ID"] in ("4", "92", "93", "97", "126"):
return True
return super().has_cockpit()
def has_generator(self):
if self._attribs["ID"] in ("20", "42"):
return True
return super().has_generator()
def has_hoverjack(self):
if self._attribs["ID"] == "37":
return True
return super().has_hoverjack()
def get_expected_children_types(self):
return ['Pos', 'Col', 'Rot', 'SubGrid']
def move_by(self, vector, active_blocks):
super().move_by(vector, active_blocks)
active_block = self.get_active_block(active_blocks)
if not active_block:
return
active_block.move_by(vector)
| mit | -1,921,983,318,273,434,600 | 32.064865 | 189 | 0.534935 | false |
sidorov-si/TADStates | calc_enr.py | 1 | 6053 | #!/usr/bin/env python
"""
Calculate enrichment of regions with states using ChromHMM.
Usage:
calc_enr.py (-r <regions_file> | -R <directory_with_regions_files>) (-s <segmentation_directory> | -S <directory_with_segmentation_directories>) -c <ChromHMM_directory> -o <output_directory>
Options:
-h --help Show this screen.
--version Show version.
-r <regions_file> BED file with regions to calc enrichments for.
-R <directory_with_regions_files> Directory with BED files containing regions to calc enrichments for.
-s <segmentation_directory> Directory with segmentation produced by ChromHMM.
-S <directory_with_segmentation_directories> Directory with directories containing segmentations.
-c <ChromHMM_directory> ChromHMM directory.
-o <output_directory> Output directory name.
"""
import sys
print
modules = ["docopt", "os", "subprocess"]
exit_flag = False
for module in modules:
try:
__import__(module)
except ImportError:
exit_flag = True
sys.stderr.write("Error: Python module " + module + " is not installed.\n")
if exit_flag:
sys.stderr.write("You can install these modules with a command: pip install <module>\n")
sys.stderr.write("(Administrator privileges may be required.)\n")
sys.exit(1)
from docopt import docopt
from os.path import basename
from os.path import splitext
from os.path import join
from os.path import exists
from os.path import isdir
from os.path import isfile
from os import makedirs
from os import listdir
from subprocess import call
from sys import stdout
def calc_enr(regions, segm_files, png_directory, svg_directory, txt_directory, chromhmm_directory):
regions_part = splitext(basename(regions))[0]
for segm_file in segm_files:
print
print 'Calc enrichment for', basename(regions), 'and', basename(segm_file), '...'
stdout.flush()
segm_part = splitext(basename(segm_file))[0]
prefix = regions_part + '_' + segm_part
command_line_list = ['java', '-mx1600M', '-jar', join(chromhmm_directory, 'ChromHMM.jar'), \
'OverlapEnrichment', segm_file, regions, prefix]
code = call(command_line_list)
if code != 0:
print 'Something went wrong!'
else:
print 'Done.'
call(['mv', prefix + '.png', png_directory])
call(['mv', prefix + '.svg', svg_directory])
call(['mv', prefix + '.txt', txt_directory])
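# The assembled command mirrors ChromHMM's OverlapEnrichment CLI, e.g. with
# illustrative paths:
#   java -mx1600M -jar ChromHMM/ChromHMM.jar OverlapEnrichment \
#       sample_10_segments.bed regions.bed regions_sample_10_segments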
if __name__ == '__main__':
arguments = docopt(__doc__, version='calc_enr 0.2')
if arguments["-r"] != None:
regions = arguments["-r"]
if not exists(regions):
print "Error: Can't find BED file with regions: no such file '" + \
regions + "'. Exit.\n"
sys.exit(1)
if not isfile(regions):
print "Error: BED file with regions must be a regular file. " + \
"Something else given. Exit.\n"
sys.exit(1)
else:
regions = arguments["-R"].rstrip('/')
if not exists(regions):
print "Error: Can't find directory with region BED files: no such directory '" + \
regions + "'. Exit.\n"
sys.exit(1)
if not isdir(regions):
print "Error: Directory with region BED files must be a directory:). " + \
"Something else given. Exit.\n"
sys.exit(1)
if arguments["-s"] != None:
segm_dir = arguments["-s"].rstrip('/')
if not exists(segm_dir):
print "Error: Can't find directory with segmentation: no such directory '" + \
segm_dir + "'. Exit.\n"
sys.exit(1)
if not isdir(segm_dir):
print "Error: Directory with segmentation must be a directory:). " + \
"Something else given. Exit.\n"
sys.exit(1)
segm_directory = None
else:
segm_dir = None
segm_directory = arguments["-S"].rstrip('/')
if not exists(segm_directory):
print "Error: Can't find directory with directories containing segmentations: " + \
"no such directory '" + segm_directory + "'. Exit.\n"
sys.exit(1)
if not isdir(segm_directory):
print "Error: Directory with directories containing segmentations must " + \
"be a directory:). Something else given. Exit.\n"
sys.exit(1)
chromhmm_directory = arguments["-c"].rstrip('/')
if not exists(chromhmm_directory):
print "Error: Can't find ChromHMM directory: no such directory '" + \
chromhmm_directory + "'. Exit.\n"
sys.exit(1)
if not isdir(chromhmm_directory):
print "Error: ChromHMM directory must be a directory:). Something else given. Exit.\n"
sys.exit(1)
output_directory = arguments["-o"].rstrip('/')
if not exists(output_directory):
makedirs(output_directory)
png_directory = join(output_directory, 'PNG')
svg_directory = join(output_directory, 'SVG')
txt_directory = join(output_directory, 'TXT')
if not exists(png_directory):
makedirs(png_directory)
if not exists(svg_directory):
makedirs(svg_directory)
if not exists(txt_directory):
makedirs(txt_directory)
if segm_dir != None: # there is only one segmentation
segm_dirs = [segm_dir]
else:
segm_dirnames = sorted(listdir(segm_directory))
segm_dirs = [join(segm_directory, d) for d in segm_dirnames]
segm_files = []
for dir in segm_dirs:
filenames_list = listdir(dir)
segm_filenames = [f for f in filenames_list if 'segments' in f]
segm_files.extend([join(dir, f) for f in segm_filenames])
calc_enr(regions, segm_files, png_directory, svg_directory, txt_directory, chromhmm_directory)
| gpl-2.0 | -5,044,784,646,270,650,000 | 38.822368 | 192 | 0.594581 | false |
SopaXorzTaker/pypowder | thepowdertoy/thepowdertoy.py | 1 | 18583 | import hashlib
import requests
# Servers
DEFAULT_SERVER = "powdertoy.co.uk"
DEFAULT_STATIC_SERVER = "static.powdertoy.co.uk"
# Check for version updates
UPDATE_VERSION_STABLE = "Stable"
UPDATE_VERSION_BETA = "Beta"
UPDATE_VERSION_SNAPSHOT = "Snapshot"
# User elevations
USER_ELEVATION_NONE = ""
USER_ELEVATION_MOD = "Mod"
USER_ELEVATION_ADMIN = "Admin"
class LoginError(Exception):
pass
class ServerError(Exception):
pass
class Notification(object):
def __init__(self, link, text):
"""
Creates a Notification object.
:param link: the link of the notification
:param text: the text of the notification
"""
self.link, self.text = link, text
class Comment(object):
def __init__(self, username, user_id, text, timestamp, gravatar=None):
"""
Creates a new Comment object.
:param username: the username
:param user_id: the ID of the user
:param text: the text
:param timestamp: the timestamp of the comment
:param gravatar: the Gravatar ID of the user
"""
self.username, self.user_id, self.text, self.timestamp, self.gravatar = \
username, user_id, text, timestamp, gravatar
class User(object):
def __init__(self, username, user_id, avatar, age, location, biography, website, register_time, saves, forum):
self.username, self.user_id, self.avatar, self.age, self.location, self.biography, \
self.website, self.register_time, self.saves, self.forum = \
username, user_id, avatar, age, location, biography, website, register_time, saves, forum
class Save(object):
def __init__(self, parent, user, save_id, score, my_score, name,
description, date_created, date, published, favorite, views, version, tags):
"""
Creates a new Save object.
:param parent: ThePowderToy class that created the object
:param user: The User object
:param save_id: the ID of the save
:param score: the score of the save
:param my_score: my score of the save
:param name: the name of the save
:param description: the description of the save
:param date_created: the date of creation of the save
:param date: the last update of the save
:param published: whether the save is published
:param favorite: whether the save is in favorite
:param views: the count of views
:param version: the version which created the save
:param tags: the tags
"""
self._parent, self.user, self.save_id, self.score, self.my_score, self.name, self.description, \
self.date_created, self.date, self.published, self.favorite, self.views, self.version, \
self.tags = parent, user, save_id, score, my_score, name, description, date_created, date, \
published, favorite, views, version, tags
self._comments = None
self._save_data = None
def tag(self, tag, add=True):
"""
Tag a save.
:param tag: the tag to be added
:param add: if True (the default), the tag is to be added, else removed.
"""
self._parent.tag(self.save_id, tag, add)
if add:
self.tags.append(tag)
else:
self.tags.remove(tag)
def comment(self, comment):
"""
Add a comment to a save.
:param comment: the text of the comment
"""
self._parent.add_comment(self.save_id, comment)
self._comments = None
def vote(self, up=True):
"""
Vote for a save.
:param up: if True (the default), the save is voted up, otherwise down.
"""
self._parent.vote(self.save_id, up)
upvotes, downvotes = self.score
if up:
upvotes += 1
else:
downvotes += 1
self.score = (upvotes, downvotes)
def remove(self):
"""
Remove a save.
"""
self._parent.remove_save(self.save_id)
def publish(self, publish=True):
"""
Publishes or unpublishes a save
:param publish: whether to publish the save
"""
self._parent.publish_save(self.save_id, publish)
self.published = publish
@property
def comments(self):
if not self._comments:
self._comments = self._parent.get_comments(self.save_id)
return self._comments
@property
def save_data(self):
if not self._save_data:
self._save_data = self._parent.get_save_data(self.save_id)
return self._save_data
class ThePowderToy(object):
def vote(self, save_id, up=True):
"""
Vote for a save.
:param save_id: the ID of the save
:param up: if True (the default), the save is voted up, otherwise down.
"""
if not self._user_id:
raise ServerError("Not authorized")
vote_direction = "Up" if up else "Down"
req = requests.post("http://" + self._server + "/Vote.api", data={
"ID": str(save_id),
"Action": vote_direction
}, headers={
"X-Auth-User-Id": self._user_id,
"X-Auth-Session-Key": self._session_id
})
req.raise_for_status()
if not req.text == "OK":
raise ServerError(req.text)
if "Error" in req.text:
raise ServerError(req.text)
def tag(self, save_id, tag, add=True):
"""
Tag a save.
:param save_id: the ID of the save
:param tag: the tag to be added
:param add: if True (the default), the tag is to be added, else removed.
"""
if not self._user_id:
raise ServerError("Not authorized")
operation = "add" if add else "delete"
req = requests.get("http://" + self._server + "/Browse/EditTag.json", params={
"Op": operation,
"ID": str(save_id),
"Tag": tag,
"Key": self._session_key
}, headers={
"X-Auth-User-Id": self._user_id,
"X-Auth-Session-Key": self._session_id
})
req.raise_for_status()
if "Error" in req.text:
raise ServerError(req.text)
def get_comments(self, save_id, comment_start=0, comment_count=-1):
"""
Get a save's comments
:param save_id: The ID of the save
:param comment_start: The starting comment
:param comment_count: The final comment
:return: an array of Comment
"""
comment_req = requests.get("http://" + self._server + "/Browse/Comments.json", params={
"ID": str(save_id),
"Start": comment_start,
"Count": comment_count
}, headers={
"X-Auth-User-Id": self._user_id,
"X-Auth-Session-Key": self._session_id
} if self._user_id else None)
comment_req.raise_for_status()
comments = []
comment_json = comment_req.json()
for comment in comment_json:
comments.append(Comment(comment["Username"], comment["UserID"], comment["Text"], comment["Timestamp"],
comment["Gravatar"] if "Gravatar" in comment else None))
return comments
def search_saves(self, search_query="", start=0, count=-1, sort="", category=""):
"""
Search for saves.
:param search_query: the search query
:param start: the starting result
:param count: count of results
:param sort: sorting of results
:param category: the category name
:return: list of Save objects
"""
if sort == "date":
search_query += " sort:%s" % sort
req = requests.get("http://" + self._server + "/Browse.json", params={
"Search_Query": search_query,
"Category": category,
"Start": start,
"Count": count
}, headers={
"X-Auth-User-Id": self._user_id,
"X-Auth-Session-Key": self._session_id
} if self._user_id else None)
req.raise_for_status()
json = req.json()
if "Status" in json and json["Status"] == 1:
raise ServerError(json["Error"])
output = []
saves = json["Saves"]
for save in saves:
output.append(self.get_save(save["ID"]))
return output
def get_save(self, save_id, date=None):
"""
Get the save.
:param save_id: the ID of the save
:param date: the date of interest
:return: the save
"""
req = requests.get("http://" + self._server + "/Browse/View.json", params={
"ID": str(save_id),
"Date": str(date) if date else None
}, headers={
"X-Auth-User-Id": self._user_id,
"X-Auth-Session-Key": self._session_id
} if self._user_id else None)
req.raise_for_status()
if "Error" in req.text:
raise ServerError(req.text)
json = req.json()
return Save(self, self.get_user_by_name(json["Username"]), save_id, (json["ScoreUp"], json["ScoreDown"]),
json["ScoreMine"] if "ScoreMine" in json else 0, json["Name"], json["Description"],
json["DateCreated"], json["Date"], json["Published"], json["Favourite"], json["Views"],
json["Version"], json["Tags"] if "Tags" in json else [])
def get_user_by_id(self, user_id):
"""
Get a user's profile by ID.
:param user_id: the ID of the user
:return: the User object
"""
req = requests.get("http://" + self._server + "/User.json", params={
"ID": user_id
}, headers={
"X-Auth-User-Id": self._user_id,
"X-Auth-Session-Key": self._session_id
} if self._user_id else None)
req.raise_for_status()
if "Error" in req.text:
raise ServerError(req.text)
json = req.json()
user = json["User"]
saves = json["Saves"] if "Saves" in json else None
forum = json["Forum"] if "Forum" in json else None
return User(user["Username"], user["ID"], user["Avatar"], user["Age"], user["Location"], user["Biography"],
user["Website"], user["RegisterTime"] if "RegisterTime" in user else 0, saves, forum)
def get_user_by_name(self, username):
"""
Get a user's profile by username.
:param username: the name of the user
:return: the User object
"""
req = requests.get("http://" + self._server + "/User.json", params={
"Name": username
}, headers={
"X-Auth-User-Id": self._user_id,
"X-Auth-Session-Key": self._session_id
} if self._user_id else None)
req.raise_for_status()
if "Error" in req.text:
raise ServerError(req.text)
json = req.json()
user = json["User"]
saves = json["Saves"] if "Saves" in json else None
forum = json["Forum"] if "Forum" in json else None
return User(user["Username"], user["ID"], user["Avatar"], user["Age"], user["Location"], user["Biography"],
user["Website"], user["RegisterTime"] if "RegisterTime" in user else 0, saves, forum)
def upload_save(self, name, description, data, publish=True):
"""
Upload a save
:param name: the name of the save
:param description: the description of the save
:param data: the raw data of the save
:param publish: whether to publish the save or not
:return: the uploaded save, downloaded from the server as a Save object
"""
if not self._user_id:
raise ServerError("Not authorized")
if not data:
raise ValueError("Empty save")
req = requests.post("http://" + self._server + "/Save.api", data={
"Name": name,
"Description": description,
"Publish": "Public" if publish else "Private",
}, files=[
("Data", ("save.bin", data))
], headers={
"X-Auth-User-Id": self._user_id,
"X-Auth-Session-Key": self._session_id
})
req.raise_for_status()
return self.get_save(int(req.text[3:].strip()))
def remove_save(self, save_id):
"""
Remove a save.
:param save_id: the ID of the save to be deleted
"""
if not self._user_id:
raise ServerError("Not authorized")
req = requests.get("http://" + self._server + "/Browse/Delete.json", params={
"ID": save_id,
"Mode": "Delete",
"Key": self._session_key
}, headers={
"X-Auth-User-Id": self._user_id,
"X-Auth-Session-Key": self._session_id
})
req.raise_for_status()
status = req.json()["Status"]
if not status:
raise ServerError(req.json()["Error"])
def publish_save(self, save_id, publish=True):
"""
Publishes or unpublishes a save
:param save_id: the ID of the save
:param publish: whether to publish the save
"""
if not self._user_id:
raise ServerError("Not authorized")
if publish:
# We have to do that twice to recheck the status.
req = None
for i in xrange(2):
req = requests.post("http://" + self._server + "/Browse/View.json", params={
"ID": save_id,
"Key": self._session_key
}, data={
"ActionPublish": " "
}, headers={
"X-Auth-User-Id": self._user_id,
"X-Auth-Session-Key": self._session_id
})
req.raise_for_status()
if not req.json()["Published"]:
raise ServerError("Can't publish the save")
else:
req = requests.get("http://" + self._server + "/Browse/Delete.json", params={
"ID": save_id,
"Mode": "Unpublish",
"Key": self._session_key
}, headers={
"X-Auth-User-Id": self._user_id,
"X-Auth-Session-Key": self._session_id
})
req.raise_for_status()
status = req.json()["Status"]
if not status:
raise ServerError(req.json()["Error"])
def add_comment(self, save_id, comment):
"""
Add a comment to a save.
:param save_id: the ID of the save
:param comment: the text of the comment
"""
if not self._user_id:
raise ServerError("Not authorized")
req = requests.post("http://" + self._server + "/Browse/Comments.json", params={
"ID": save_id,
}, data={
"Comment": comment
}, headers={
"X-Auth-User-Id": self._user_id,
"X-Auth-Session-Key": self._session_id
})
if "Error" in req.text:
return ServerError(req.text)
req.raise_for_status()
def get_save_data(self, save_id, timestamp=None):
"""
Get raw save data.
:param save_id: the ID of the save
:param timestamp: the timestamp
:return: raw save data
"""
if timestamp:
url = "http://" + self._static_server + "/" + str(save_id) + "_" + str(timestamp) + ".cps"
else:
url = "http://" + self._static_server + "/" + str(save_id) + ".cps"
req = requests.get(url, headers={
"X-Auth-User-Id": self._user_id,
"X-Auth-Session-Key": self._session_id
} if self._user_id else None, stream=True)
req.raise_for_status()
return req.raw.data
def _authenticate(self):
user_hash = hashlib.md5(self._username + "-" + hashlib.md5(self._password).hexdigest()).hexdigest()
req = requests.post("http://" + self._server + "/Login.json", data={
"Username": self._username,
"Hash": user_hash
})
req.raise_for_status()
json = req.json()
if json["Status"] == 0:
if "Error" in json:
raise LoginError(json["Error"])
else:
raise LoginError
user_elevation = json["Elevation"]
if user_elevation == "Admin":
self._user_elevation = USER_ELEVATION_ADMIN
elif user_elevation == "Mod":
self._user_elevation = USER_ELEVATION_MOD
else:
self._user_elevation = USER_ELEVATION_NONE
self._user_id = json["UserID"]
self._notifications = [Notification(notification["Link"], notification["Text"])
for notification in json["Notifications"]]
self._session_id = json["SessionID"]
self._session_key = json["SessionKey"]
def _check_updates(self):
req = requests.get("http://" + self._server + "/Startup.json", auth=(self._user_id, self._session_id))
req.raise_for_status()
json = req.json()
self._latest_version = json["Updates"][self._update_version]
self._message_of_the_day = json["MessageOfTheDay"]
def get_latest_version(self):
"""
Get the latest version as a dictionary
:return: the latest version with description
"""
return self._latest_version
def get_message_of_the_day(self):
"""
Get the message of the day
:return: the message of the day
"""
return self._message_of_the_day
def __init__(self, credentials=(None, None), server=DEFAULT_SERVER, static_server=DEFAULT_STATIC_SERVER,
update_version=UPDATE_VERSION_STABLE):
"""
Creates a ThePowderToy object.
:param credentials: a tuple of username and password
:param server: the URL of the main server
:param static_server: the URL of the static server
"""
self._user_id = None
self._session_id = None
self._session_key = None
self._username = None
self._user_elevation = None
self._server, self._static_server, self._update_version = server, static_server, update_version
self._username, self._password = credentials
if self._username:
self._authenticate()
self._check_updates()
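# Minimal usage sketch (illustrative; needs network access, and credentials for
# voting, uploading or commenting):
#   tpt = ThePowderToy(credentials=("username", "password"))
#   for save in tpt.search_saves("destruction", count=3):
#       print(save.name, save.score)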
| gpl-3.0 | 3,383,186,490,061,603,000 | 30.49661 | 115 | 0.544745 | false |
pescobar/easybuild-easyblocks | easybuild/easyblocks/l/libqglviewer.py | 1 | 2297 | ##
# Copyright 2009-2019 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing libQGLViewer, implemented as an easyblock
@author: Javier Antonio Ruiz Bosch (Central University "Marta Abreu" of Las Villas, Cuba)
"""
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.tools.run import run_cmd
from easybuild.tools.systemtools import get_shared_lib_ext
class EB_libQGLViewer(ConfigureMake):
"""Support for building/installing libQGLViewer."""
def configure_step(self):
"""Custom configuration procedure for libQGLViewer: qmake PREFIX=/install/path ..."""
cmd = "%(preconfigopts)s qmake PREFIX=%(installdir)s %(configopts)s" % {
'preconfigopts': self.cfg['preconfigopts'],
'installdir': self.installdir,
'configopts': self.cfg['configopts'],
}
run_cmd(cmd, log_all=True, simple=True)
def sanity_check_step(self):
"""Custom sanity check for libQGLViewer."""
shlib_ext = get_shared_lib_ext()
        custom_paths = {
            'files': [('lib/libQGLViewer.prl', 'lib64/libQGLViewer.prl'),
                      ('lib/libQGLViewer.%s' % shlib_ext, 'lib64/libQGLViewer.%s' % shlib_ext)],
            'dirs': ['include/QGLViewer'],
        }
        super(EB_libQGLViewer, self).sanity_check_step(custom_paths=custom_paths)
| gpl-2.0 | -8,200,181,824,849,117,000 | 40.017857 | 96 | 0.696561 | false |
jhallock7/SparseBayes-Python | SB2_ParameterSettings.py | 1 | 3373 |
# The following is a Python translation of a MATLAB file originally written principally by Mike Tipping
# as part of his SparseBayes software library. Initially published on GitHub on July 21st, 2015.
# SB2_PARAMETERSETTINGS User parameter initialisation for SPARSEBAYES
#
# SETTINGS = SB2_PARAMETERSETTINGS(parameter1, value1, parameter2, value2,...)
#
# OUTPUT ARGUMENTS:
#
# SETTINGS An initialisation structure to pass to SPARSEBAYES
#
# INPUT ARGUMENTS:
#
# Optional number of parameter-value pairs to specify some, all, or
# none of the following:
#
# BETA (Gaussian) noise precision (inverse variance)
# NOISESTD (Gaussian) noise standard deviation
# RELEVANT Indices of columns of basis matrix to use at start-up
# MU (WEIGHTS) Corresponding vector of weights to RELEVANT
# ALPHA Corresponding vector of hyperparameter values to RELEVANT
#
# EXAMPLE:
#
#    SETTINGS = SB2_ParameterSettings('NOISESTD', 0.1)
#
# NOTES:
#
# 1. If no input arguments are supplied, defaults (effectively an
# empty structure) will be returned.
#
# 2. If both BETA and NOISESTD are specified, BETA will take
# precedence.
#
# 3. RELEVANT may be specified without WEIGHTS or ALPHA (these will be
# sensibly initialised later).
#
# 4. If RELEVANT is specified, WEIGHTS may be specified also without ALPHA.
#
#
# Copyright 2009, Vector Anomaly Ltd
#
# This file is part of the SPARSEBAYES library for Matlab (V2.0).
#
# SPARSEBAYES is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# SPARSEBAYES is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along
# with SPARSEBAYES in the accompanying file "licence.txt"; if not, write to
# the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
# MA 02110-1301 USA
#
# Contact the author: m a i l [at] m i k e t i p p i n g . c o m
#
def SB2_ParameterSettings(*args):
# Ensure arguments are supplied in pairs
if len(args) % 2 != 0:
raise Exception('Arguments to SB2_ParameterSettings should be (property, value) pairs')
# Any settings specified?
    numSettings = len(args) // 2  # integer division; arguments come in pairs
## Defaults - over-ridden later if requested
# Two options for setting noise level (purely for convenience)
# - if 'beta' set, 'noiseStdDev' will be over-ridden
SETTINGS = {
'BETA' : [],
'NOISESTD' : [],
'RELEVANT' : [],
'MU' : [],
'ALPHA' : []
}
## Requested overrides
# Parse string/variable pairs
for n in range(numSettings):
property_ = args[n*2]
value = args[n*2 + 1]
if property_ not in SETTINGS:
raise Exception('Unrecognised initialisation property: {0}'.format(property_))
else:
SETTINGS[property_] = value
return SETTINGS
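# Illustrative call (keys must match the upper-case names in SETTINGS above):
#   settings = SB2_ParameterSettings('NOISESTD', 0.1)
#   settings['NOISESTD']  # -> 0.1; the other keys keep their empty defaults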
| gpl-2.0 | -5,058,282,655,556,856,000 | 31.432692 | 103 | 0.660243 | false |
nickwolensky/mayaunittest | testcode_inspect_ui.py | 1 | 4789 | """User interface to help test maya tool code. Will work from within Maya UI as
well as standalone and can be run from an external interpreter such as mayapy.
Example::
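
    # Illustrative only -- assumes nw_tools and the Qt bindings are importable.
    import testcode_inspect_ui
    ui = testcode_inspect_ui.TestCodeUI()
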
Todo:
"""
import os
import sys
from nw_tools.Qt import QtWidgets, QtGui, QtCore
from nw_tools.ui.tools import get_maya_window, SuperWindow
import runmayatests
class Tree(QtWidgets.QTreeView):
def __init__(self, parent):
super(Tree, self).__init__(parent)
self.parent = parent
self.create_actions()
def create_actions(self):
self.walk_up_action = QtWidgets.QAction('Walk up directory', self)
self.walk_up_action.triggered.connect(self.parent.walk_up)
def mouseDoubleClickEvent(self, event):
self.parent.walk_down()
def contextMenuEvent(self, event):
menu = QtWidgets.QMenu(self)
menu.addAction(self.walk_up_action)
menu.exec_(event.globalPos())
class TestCodeUI(SuperWindow):
# Class constants
TITLE = 'Maya Unittest Pro - Nick Wolensky, 2017'
WIDTH = 800
HEIGHT = 400
# Should only be applicable if application is run from within Maya
DOCKABLE = True
def __init__(self, parent=None):
self.current_dir = QtCore.QDir.rootPath()
super(TestCodeUI, self).__init__(parent)
def _init_ui(self):
SuperWindow._init_ui(self)
self._add_widgets()
self._add_signals()
def add_toolbar_items(self):
SuperWindow.add_toolbar_items(self)
self.open_dir_action = QtWidgets.QAction('Open...', self)
self.run_action = QtWidgets.QAction('Run...', self)
self.stop_action = QtWidgets.QAction('Stop', self)
for action in [self.open_dir_action,
self.run_action,
self.stop_action]:
self.toolbar.addAction(action)
def _add_widgets(self):
split = QtWidgets.QSplitter(self.centralWidget())
split.resize(self.WIDTH, self.HEIGHT)
split.setHandleWidth(3)
split.setContentsMargins(2, 2, 2, 2)
btn_widg = QtWidgets.QWidget()
vbox = QtWidgets.QVBoxLayout()
btn_row = QtWidgets.QHBoxLayout()
btn_row.setContentsMargins(0, 0, 0, 0)
btn_widg.setLayout(vbox)
vbox.setContentsMargins(0, 0, 2, 0)
# Create a tree outliner for package that I want to recursively go
# through and test | Left Panel
self.model = QtWidgets.QFileSystemModel()
self.model.setRootPath(self.current_dir)
self.tree = Tree(self)
self.tree.setModel(self.model)
self.tree.hideColumn(1)
self.tree.hideColumn(2)
self.tree.hideColumn(3)
self._update_system_tree(self.current_dir)
# Create output section that tests get printed out to | Right Panel
text_display = QtWidgets.QLineEdit('This is going to be the place '
'where I display output from the '
'unittests')
text_display.setReadOnly(True)
text_display.setAlignment(QtCore.Qt.AlignTop)
self.btn = QtWidgets.QPushButton('^')
self.btn.setFixedWidth(25)
btn_row.addWidget(self.btn)
btn_row.addStretch(0)
vbox.addLayout(btn_row)
vbox.addWidget(self.tree)
split.addWidget(btn_widg)
split.addWidget(text_display)
split.setStretchFactor(0, 0)
split.setStretchFactor(1, 2)
def _add_signals(self):
# Open command
# self.open_dir_action.triggered.connect(self.update_system_tree)
# Run command
def run_tests():
runmayatests.main(test_dir=[self.get_selected_dir()])
self.run_action.triggered.connect(run_tests)
# Stop command
# self.stop_action.triggered.connect(self.dir_up)
# Move up button
self.btn.clicked.connect(self.walk_up)
def get_selected_dir(self):
index = self.tree.currentIndex()
path = self.model.filePath(index)
print path
return path
def _update_system_tree(self, directory):
self.tree.setRootIndex(self.model.index(directory))
self.current_dir = directory
def walk_up(self):
par_dir = os.path.abspath(os.path.join(self.current_dir, os.pardir))
self._update_system_tree(par_dir)
def walk_down(self):
self._update_system_tree(self.get_selected_dir())
def open_file_dialog(self):
pass
def dragEnterEvent(self, event):
pass
if __name__ == '__main__':
# Create the Qt Application
app = QtWidgets.QApplication(sys.argv)
# Build the UI window. Must keep a reference to the window class or else it
# goes out of scope
ui = TestCodeUI()
sys.exit(app.exec_())
| mit | 5,874,005,164,232,687,000 | 29.698718 | 79 | 0.622886 | false |
valohai/minique | minique/encoding.py | 1 | 1624 | import json
from typing import Union, Any
registry = {}
default_encoding_name = None
def register_encoding(name, *, default=False):
def decorator(cls):
global default_encoding_name
registry[name] = cls
if default:
default_encoding_name = name
return cls
return decorator
class BaseEncoding:
def encode(self, value: Any, failsafe: bool = False) -> Union[str, bytes]:
"""
Encode a value to a string or bytes.
:param failsafe: When set, hint that the encoder should try hard not to fail,
even if it requires loss of fidelity.
"""
raise NotImplementedError("Encoding not implemented")
def decode(self, value: Union[str, bytes]) -> Any:
raise NotImplementedError("Decoding not implemented")
@register_encoding("json", default=True)
class JSONEncoding(BaseEncoding):
"""
Default (JSON) encoding for kwargs and results.
"""
# These can be effortlessly overridden in subclasses
dump_kwargs = {
"ensure_ascii": False,
"separators": (",", ":"),
}
load_kwargs = {}
failsafe_default = str
def encode(self, value: Any, failsafe: bool = False) -> Union[str, bytes]:
kwargs = self.dump_kwargs.copy()
if failsafe:
kwargs["default"] = self.failsafe_default
return json.dumps(
value,
**kwargs,
)
def decode(self, value: Union[str, bytes]) -> Any:
if isinstance(value, bytes):
value = value.decode()
return json.loads(value, **self.load_kwargs)
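# Sketch of registering an alternative codec (hypothetical, not shipped with
# this module): subclass BaseEncoding and register it under a new name.
#
#     import pickle
#
#     @register_encoding("pickle")
#     class PickleEncoding(BaseEncoding):
#         def encode(self, value, failsafe=False):
#             return pickle.dumps(value)
#
#         def decode(self, value):
#             return pickle.loads(value)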
| mit | 2,236,752,375,533,370,600 | 26.525424 | 85 | 0.602217 | false |
edx/i18n-tools | i18n/main.py | 1 | 1437 | #!/usr/bin/env python
"""
Main function for internationalization tools.
"""
import importlib
import sys
from path import Path
def get_valid_commands():
"""
Returns valid commands.
Returns:
commands (list): List of valid commands
"""
modules = [m.basename().split('.')[0] for m in Path(__file__).dirname().files('*.py')]
commands = []
for modname in modules:
if modname == 'main':
continue
mod = importlib.import_module('i18n.%s' % modname)
if hasattr(mod, 'main'):
commands.append(modname)
return commands
def error_message():
"""
Writes out error message specifying the valid commands.
Returns:
Failure code for system exit
"""
sys.stderr.write('valid commands:\n')
for cmd in get_valid_commands():
sys.stderr.write('\t%s\n' % cmd)
return -1
def main():
"""
Executes the given command. Returns error_message if command is not valid.
Returns:
Output of the given command or error message if command is not valid.
"""
try:
command = sys.argv[1]
except IndexError:
return error_message()
try:
module = importlib.import_module('i18n.%s' % command)
module.main.args = sys.argv[2:]
except (ImportError, AttributeError):
return error_message()
return module.main()
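# Illustrative invocation (any sibling module in the i18n package that defines
# a ``main`` is a valid command):
#   $ python main.py <command> [args...]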
if __name__ == '__main__':
sys.exit(main())
| apache-2.0 | 5,347,723,365,741,315,000 | 21.453125 | 90 | 0.601253 | false |
OpenEdition/bilbo | src/bilbo/reference/Word.py | 1 | 4264 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
"""
Created on April 18, 2012
@author: Young-Min Kim, Jade Tavernier
"""
from bilbo.reference.Balise import Balise
from bilbo.reference.Feature import Feature
class Word(object):
"""
A class corresponding to a word in a reference. It contains word name, features, tags, etc.
Word object is first created in CleanCorpus1 and CleanCorpus2.
"""
def __init__(self, mot, tags=[], features=[]):
"""
nom : word name
tag : list of Balise objects
feature : list of Feature objects
item : indicator of sub-reference (0 : no, 1 : yes)
"""
'Generate Tag objects'
self.nom = mot
self.core = mot
self.tag = []
self.feature = []
self.ignoreWord = 0
'item is an indicator showing if the word is in a sub reference'
self.item = 0
if type(mot) is str:
print mot
for tag in tags:
			'Eliminate the spaces at the beginning and end'
			tag = tag.strip()
if tag != "" and self.getTag(tag) == -1:
self.tag.append(Balise(tag))
'Generate Feature objects'
for feature in features:
			'Eliminate the spaces at the beginning and end'
			feature = feature.strip()
if feature != "" and self.getFeature(feature) == -1:
self.feature.append(Feature(feature))
def affiche(self):
print "\nWord : ",self.nom.encode('utf8'), self.core.encode('utf8')
if len(self.tag) >= 1:
print "\tTAG :"
for key in self.tag:
key.affiche()
if len(self.feature) >= 1:
print "\tFEATURE :"
for key in self.feature:
key.affiche()
def addFeature(self,feature):
if isinstance(feature, list):
for carac in feature:
				'Eliminate the spaces at the beginning and end'
				carac = carac.strip()
if carac != "" and self.getFeature(carac) == -1:
self.feature.append(Feature(carac))
else:
			'Eliminate the spaces at the beginning and end'
			feature = feature.strip()
if self.getFeature(feature) == -1:
self.feature.append(Feature(feature))
def addTag(self,tag):
if isinstance(tag, list):
for bal in tag:
				'Eliminate the spaces at the beginning and end'
				bal = bal.strip()
if bal != "" and self.getTag(bal) == -1:
self.tag.append(Balise(bal))
else:
			'Eliminate the spaces at the beginning and end'
			tag = tag.strip()
if self.getTag(tag) == -1:
self.tag.append(Balise(tag))
def delFeature(self,feature):
ref = self.getFeature(feature)
if ref != -1:
self.feature.remove(ref)
return -1
def delTag(self,tag):
ref = self.getTag(tag)
if ref != -1:
self.tag.remove(ref)
return -1
def delAllFeature(self):
del(self.feature[:])
def delAllTag(self):
del(self.tag[:])
def getFeature(self,feature):
for carac in self.feature:
if carac.nameIs(feature) == 1:
return carac
return -1
def getTag(self,tag):
for bal in self.tag:
if bal.nameIs(tag) == 1:
return bal
return -1
def listNomFeature(self):
carac = []
for key in self.feature:
carac.append(key.nom)
return carac
def listNomTag(self):
bal = []
for key in self.tag:
bal.append(key.nom)
return bal
def getLastFeature(self):
"""
Return the last feature
"""
if len(self.feature) == 0:
return -1
return self.feature[len(self.feature)-1]
def getLastTag(self):
"""
Return the last tag
"""
if len(self.tag) == 0:
return -1
if self.tag[len(self.tag)-1].nom == 'hi' and len(self.tag) > 1:
return self.tag[len(self.tag)-2]
return self.tag[len(self.tag)-1]
def getFeatureIndice(self, index):
"""
Return the feature at the index
"""
if index < 0: return -1
return self.feature[index]
def getTagIndice(self, index):
"""
Return the tag at the index
"""
if index < 0: return -1
return self.tag[index]
def getAllFeature(self):
"""
Return all the features
"""
return self.feature
def getAllTag(self):
"""
Return all the tags
"""
return self.tag
def nbTag(self):
"""
Return the number of tags
"""
return len(self.tag)
def nbFeatures(self):
"""
Return the number of features
"""
return len(self.feature)
def __getattr__(self, nom):
print("Alert ! There is no attribute {0} here !".format(nom))
| gpl-2.0 | 7,075,776,964,256,177,000 | 19.113208 | 92 | 0.645403 | false |
fluxer/spm | nuitka/nuitka/containers/odict.py | 1 | 6241 | # :copyright: (c) 2008 by Armin Ronacher and PEP 273 authors.
# :license: modified BSD license.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Kay Hayen did some changes for Nuitka, and put everything he added under the same
# modified BSD license.
""" This module is only an abstraction of OrderedDict as present in 2.7 and 3.x.
It is not in 2.6, for this version we are using the odict.py as mentioned in the
PEP-0372.
This can be removed safely after the transition, note that the documentation was
removed, as it's not interesting really, being redundant to the Python 2.7
documentation. """
# pylint: disable=E0611,W0141
try:
from collections import OrderedDict
except ImportError:
from itertools import izip, imap
from copy import deepcopy
missing = object()
class OrderedDict(dict):
def __init__(self, *args, **kwargs):
dict.__init__(self)
self._keys = []
self.update(*args, **kwargs)
def __delitem__(self, key):
dict.__delitem__(self, key)
self._keys.remove(key)
def __setitem__(self, key, item):
if key not in self:
self._keys.append(key)
dict.__setitem__(self, key, item)
def __deepcopy__(self, memo = None):
if memo is None:
memo = {}
d = memo.get(id(self), missing)
if d is not missing:
return d
memo[id(self)] = d = self.__class__()
dict.__init__(d, deepcopy(self.items(), memo))
d._keys = self._keys[:]
return d
def __getstate__(self):
return {"items": dict(self), "keys": self._keys}
def __setstate__(self, d):
self._keys = d["keys"]
dict.update(d["items"])
def __reversed__(self):
return reversed(self._keys)
def __eq__(self, other):
if isinstance(other, OrderedDict):
if not dict.__eq__(self, other):
return False
return self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self.__eq__(other)
def __cmp__(self, other):
if isinstance(other, OrderedDict):
return cmp(self.items(), other.items())
elif isinstance(other, dict):
return dict.__cmp__(self, other)
return NotImplemented
@classmethod
def fromkeys(cls, iterable, default = None):
return cls((key, default) for key in iterable)
def clear(self):
del self._keys[:]
dict.clear(self)
def copy(self):
return self.__class__(self)
def items(self):
return zip(self._keys, self.values())
def iteritems(self):
return izip(self._keys, self.itervalues())
def keys(self):
return self._keys[:]
def iterkeys(self):
return iter(self._keys)
        def pop(self, key, default = missing):
            if default is missing:
                value = dict.pop(self, key)
                self._keys.remove(key)
                return value
            elif key not in self:
                return default
            self._keys.remove(key)
            return dict.pop(self, key, default)
def popitem(self, key):
self._keys.remove(key)
            return key, dict.pop(self, key)
def setdefault(self, key, default = None):
if key not in self:
self._keys.append(key)
            return dict.setdefault(self, key, default)
def update(self, *args, **kwargs):
sources = []
if len(args) == 1:
if hasattr(args[0], "iteritems"):
sources.append(args[0].iteritems())
else:
sources.append(iter(args[0]))
elif args:
raise TypeError("expected at most one positional argument")
if kwargs:
sources.append(kwargs.iteritems())
for iterable in sources:
for key, val in iterable:
self[key] = val
def values(self):
return map(self.get, self._keys)
def itervalues(self):
return imap(self.get, self._keys)
def index(self, item):
return self._keys.index(item)
def byindex(self, item):
key = self._keys[item]
return (key, dict.__getitem__(self, key))
def reverse(self):
self._keys.reverse()
def sort(self, *args, **kwargs):
self._keys.sort(*args, **kwargs)
def __repr__(self):
return "OrderedDict(%r)" % self.items()
__copy__ = copy
__iter__ = iterkeys
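# Minimal usage sketch (illustrative addition, not part of the upstream
# module): insertion order is preserved by both the stdlib class and the
# fallback implementation above.
if __name__ == "__main__":
    d = OrderedDict([("b", 1), ("a", 2)])
    d["c"] = 3
    assert list(d.keys()) == ["b", "a", "c"]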
| gpl-2.0 | -8,327,234,843,170,627,000 | 33.480663 | 83 | 0.579875 | false |
Tiimber/terminal-notification | growl_notifier.py | 1 | 1544 | try:
import gntp.notifier
except ImportError:
pass
class GrowlNotifier():
growl = None
@staticmethod
def register():
if GrowlNotifier.growl is None:
GrowlNotifier.growl = gntp.notifier.GrowlNotifier(
applicationName='Terminal Notification',
notifications=['Message'],
defaultNotifications=['Message'],
)
growl_register = GrowlNotifier.growl.register()
if not growl_register:
GrowlNotifier.growl = None
return GrowlNotifier.growl is not None
@staticmethod
def notify_obj(notify_object):
if GrowlNotifier.growl is not None:
title = str(notify_object['title']) if 'title' in notify_object else None
subtitle = str(notify_object['subtitle']) if 'subtitle' in notify_object else None
if title is not None and subtitle is not None:
title = title + ' / ' + subtitle
elif subtitle is not None:
title = subtitle
message = str(notify_object['message']) if 'message' in notify_object else None
return GrowlNotifier.notify(title=title, message=message)
else:
return False
@staticmethod
def notify(title=None, message=None):
notify_success = GrowlNotifier.growl.notify(
noteType='Message',
title=title,
description=message,
sticky=False,
priority=1
)
return notify_success | gpl-2.0 | 2,967,684,299,168,456,000 | 32.586957 | 94 | 0.588731 | false |
lyubent/CassTor | cassandra/pylib/cqlshlib/formatting.py | 1 | 8617 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import time
from collections import defaultdict
from . import wcwidth
from .displaying import colorme, FormattedValue, DEFAULT_VALUE_COLORS
from cql import cqltypes
unicode_controlchars_re = re.compile(r'[\x00-\x31\x7f-\xa0]')
controlchars_re = re.compile(r'[\x00-\x31\x7f-\xff]')
def _show_control_chars(match):
txt = repr(match.group(0))
if txt.startswith('u'):
txt = txt[2:-1]
else:
txt = txt[1:-1]
return txt
bits_to_turn_red_re = re.compile(r'\\([^uUx]|u[0-9a-fA-F]{4}|x[0-9a-fA-F]{2}|U[0-9a-fA-F]{8})')
def _make_turn_bits_red_f(color1, color2):
def _turn_bits_red(match):
txt = match.group(0)
if txt == '\\\\':
return '\\'
return color1 + txt + color2
return _turn_bits_red
default_null_placeholder = 'null'
default_time_format = ''
default_float_precision = 3
default_colormap = DEFAULT_VALUE_COLORS
empty_colormap = defaultdict(lambda: '')
def format_by_type(cqltype, val, encoding, colormap=None, addcolor=False,
nullval=None, time_format=None, float_precision=None):
if nullval is None:
nullval = default_null_placeholder
if val is None:
return colorme(nullval, colormap, 'error')
if addcolor is False:
colormap = empty_colormap
elif colormap is None:
colormap = default_colormap
if time_format is None:
time_format = default_time_format
if float_precision is None:
float_precision = default_float_precision
return format_value(cqltype, val, encoding=encoding, colormap=colormap,
time_format=time_format, float_precision=float_precision,
nullval=nullval)
def format_value_default(val, colormap, **_):
val = str(val)
escapedval = val.replace('\\', '\\\\')
bval = controlchars_re.sub(_show_control_chars, escapedval)
tbr = _make_turn_bits_red_f(colormap['hex'], colormap['text'])
coloredval = colormap['text'] + bits_to_turn_red_re.sub(tbr, bval) + colormap['reset']
return FormattedValue(bval, coloredval)
# Mapping cql type base names ("int", "map", etc) to formatter functions,
# making format_value a generic function
_formatters = {}
def format_value(cqltype, val, **kwargs):
formatter = _formatters.get(cqltype.typename, format_value_default)
return formatter(val, subtypes=cqltype.subtypes, **kwargs)
def formatter_for(typname):
def registrator(f):
_formatters[typname] = f
return f
return registrator
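# Registration sketch (illustrative, not in the original): a formatter for a
# hypothetical type name 'mytype' would be added as
#
#   @formatter_for('mytype')
#   def format_value_mytype(val, colormap, **_):
#       return colorme(str(val), colormap, 'text')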
@formatter_for('blob')
def format_value_blob(val, colormap, **_):
bval = ''.join('%02x' % ord(c) for c in val)
return colorme(bval, colormap, 'hex')
def format_python_formatted_type(val, colormap, color):
bval = str(val)
return colorme(bval, colormap, color)
@formatter_for('decimal')
def format_value_decimal(val, colormap, **_):
return format_python_formatted_type(val, colormap, 'decimal')
@formatter_for('uuid')
def format_value_uuid(val, colormap, **_):
return format_python_formatted_type(val, colormap, 'uuid')
@formatter_for('inet')
def formatter_value_inet(val, colormap, **_):
return format_python_formatted_type(val, colormap, 'inet')
@formatter_for('boolean')
def format_value_boolean(val, colormap, **_):
return format_python_formatted_type(val, colormap, 'boolean')
def format_floating_point_type(val, colormap, float_precision, **_):
bval = '%.*g' % (float_precision, val)
return colorme(bval, colormap, 'float')
formatter_for('float')(format_floating_point_type)
formatter_for('double')(format_floating_point_type)
def format_integer_type(val, colormap, **_):
# base-10 only for now; support others?
bval = str(val)
return colorme(bval, colormap, 'int')
formatter_for('bigint')(format_integer_type)
formatter_for('int')(format_integer_type)
formatter_for('varint')(format_integer_type)
formatter_for('counter')(format_integer_type)
@formatter_for('timestamp')
def format_value_timestamp(val, colormap, time_format, **_):
bval = strftime(time_format, val)
return colorme(bval, colormap, 'timestamp')
@formatter_for('timeuuid')
def format_value_timeuuid(val, colormap, time_format, **_):
utime = cqltypes.unix_time_from_uuid1(val)
bval = strftime(time_format, utime)
return colorme(bval, colormap, 'timestamp')
def strftime(time_format, seconds):
local = time.localtime(seconds)
formatted = time.strftime(time_format, local)
if local.tm_isdst != 0:
offset = -time.altzone
else:
offset = -time.timezone
    if formatted[-4:] != '0000' or time_format[-2:] != '%z' or offset == 0:
return formatted
# deal with %z on platforms where it isn't supported. see CASSANDRA-4746.
if offset < 0:
sign = '-'
else:
sign = '+'
hours, minutes = divmod(abs(offset) / 60, 60)
return formatted[:-5] + sign + '{0:0=2}{1:0=2}'.format(hours, minutes)
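# Worked example (illustrative): with offset = -4500 seconds, sign is '-' and
# divmod(4500 / 60, 60) gives (1, 15), so the fallback appends '-0115' in
# place of the unsupported %z directive.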
@formatter_for('text')
def format_value_text(val, encoding, colormap, **_):
escapedval = val.replace(u'\\', u'\\\\')
escapedval = unicode_controlchars_re.sub(_show_control_chars, escapedval)
bval = escapedval.encode(encoding, 'backslashreplace')
displaywidth = wcwidth.wcswidth(bval.decode(encoding))
tbr = _make_turn_bits_red_f(colormap['hex'], colormap['text'])
coloredval = colormap['text'] + bits_to_turn_red_re.sub(tbr, bval) + colormap['reset']
    return FormattedValue(bval, coloredval, displaywidth)
# name alias
formatter_for('varchar')(format_value_text)
def format_simple_collection(subtype, val, lbracket, rbracket, encoding,
colormap, time_format, float_precision, nullval):
subs = [format_value(subtype, sval, encoding=encoding, colormap=colormap,
time_format=time_format, float_precision=float_precision,
nullval=nullval)
for sval in val]
bval = lbracket + ', '.join(sval.strval for sval in subs) + rbracket
lb, sep, rb = [colormap['collection'] + s + colormap['reset']
for s in (lbracket, ', ', rbracket)]
coloredval = lb + sep.join(sval.coloredval for sval in subs) + rb
displaywidth = 2 * len(subs) + sum(sval.displaywidth for sval in subs)
return FormattedValue(bval, coloredval, displaywidth)
@formatter_for('list')
def format_value_list(val, encoding, colormap, time_format, float_precision, subtypes, nullval, **_):
return format_simple_collection(subtypes[0], val, '[', ']', encoding, colormap,
time_format, float_precision, nullval)
@formatter_for('set')
def format_value_set(val, encoding, colormap, time_format, float_precision, subtypes, nullval, **_):
return format_simple_collection(subtypes[0], val, '{', '}', encoding, colormap,
time_format, float_precision, nullval)
@formatter_for('map')
def format_value_map(val, encoding, colormap, time_format, float_precision, subtypes, nullval, **_):
def subformat(v, subtype):
return format_value(subtype, v, encoding=encoding, colormap=colormap,
time_format=time_format, float_precision=float_precision,
nullval=nullval)
subkeytype, subvaltype = subtypes
subs = [(subformat(k, subkeytype), subformat(v, subvaltype)) for (k, v) in val.items()]
bval = '{' + ', '.join(k.strval + ': ' + v.strval for (k, v) in subs) + '}'
lb, comma, colon, rb = [colormap['collection'] + s + colormap['reset']
for s in ('{', ', ', ': ', '}')]
coloredval = lb \
+ comma.join(k.coloredval + colon + v.coloredval for (k, v) in subs) \
+ rb
displaywidth = 4 * len(subs) + sum(k.displaywidth + v.displaywidth for (k, v) in subs)
return FormattedValue(bval, coloredval, displaywidth)
| mit | -2,934,228,930,725,762,600 | 39.455399 | 101 | 0.659278 | false |
genialis/resolwe-bio | resolwe_bio/tools/demultiplex.py | 1 | 11588 | #!/usr/bin/env python3
# XXX: Refactor to a comand line tool and remove pylint disable
"""NGS reads demultiplexer."""
import argparse
import gzip
import json
import os
import subprocess
import sys
from resolwe_runtime_utils import error, export_file, progress, run, save, send_message
from six import iteritems
parser = argparse.ArgumentParser(description="NGS reads demultiplexer.")
parser.add_argument("barcodes", help="barcodes file")
parser.add_argument(
"s", metavar="READS", nargs="?", help="file containing unpaired reads"
)
parser.add_argument("-1", metavar="READS-1", help="file containing upstream mates")
parser.add_argument("-2", metavar="READS-2", help="file containing downstream mates")
parser.add_argument("-m", "--mapping", help="barcode mapping file")
parser.add_argument(
"--progress-start", type=float, default=0.0, help="initial progress"
)
args = parser.parse_args()
if not (args.s or (args.__dict__["1"] and args.__dict__["2"])) or (
args.s and args.__dict__["1"] and args.__dict__["2"]
):
sys.stderr.write("Give either unpaired reads or both paired read mates.")
print()
exit(1)
if args.s:
reads1 = args.s
reads2 = ""
else:
reads1 = args.__dict__["1"]
reads2 = args.__dict__["2"]
if not os.path.isfile(reads2):
sys.stderr.write("Reads file {} not found.".format(reads2))
print()
exit(1)
if not os.path.isfile(reads1):
sys.stderr.write("Reads file {} not found.".format(reads1))
print()
exit(1)
if not os.path.isfile(args.barcodes):
sys.stderr.write("Barcodes file {} not found.".format(args.barcodes))
print()
exit(1)
if args.mapping and not os.path.isfile(args.mapping):
sys.stderr.write("Barcode mapping file {} not found.".format(args.mapping))
print()
exit(1)
pool_maps = {}
def isnum(number):
"""Check if number."""
try:
int(number)
return True
except ValueError:
return False
barcode_length = 0
if args.mapping:
with open(args.mapping, encoding="utf-8") as fd:
for line in fd:
line = line.rstrip()
if not line:
continue
t = line.split("\t")
barcode, filename = "", ""
if len(t) == 2:
barcode, filename = t[0:2]
if len(t) > 2 and isnum(t[0]):
barcode, filename = t[1:3]
barcode, filename = barcode.strip(), filename.strip()
if barcode and filename:
pool_maps[barcode] = filename
if barcode_length > 0 and barcode_length != len(barcode):
send_message(error("Barcodes should be of the same length."))
exit(1)
else:
barcode_length = len(barcode)
for bar, _map in iteritems(pool_maps):
print("{}: {}".format(bar, _map))
def read_multiplexed(
reads1_file, reads2_file, barcodes_file, pool_maps, progress_start
):
"""Parse multiplexed file."""
pool_name = reads1_file.split(".")[0]
def nicename(a):
return a.replace("#", "").replace(" ", " ").replace("/", " ").replace(" ", "_")
files, f1, f2, fbar = {}, None, None, None
try:
barcodes = set(pool_maps.keys())
print("BARCODES: {}".format(barcodes))
for barcode in barcodes:
name = nicename(pool_maps[barcode])
if reads2_file:
filename = "{}_{}_{}_mate1.fq.gz".format(pool_name, name, barcode)
files[barcode] = gzip.open(filename, "wb")
filename = "{}_{}_{}_mate2.fq.gz".format(pool_name, name, barcode)
files[barcode + "2"] = gzip.open(filename, "wb")
else:
filename = "{}_{}_{}.fq.gz".format(pool_name, name, barcode)
files[barcode] = gzip.open(filename, "wb")
if reads2_file:
files["notmatched"] = gzip.open(
"Not_Matched_{}_mate1.fq.gz".format(pool_name), "wb"
)
files["badquality"] = gzip.open(
"Bad_Quality_{}_mate1.fq.gz".format(pool_name), "wb"
)
files["notmatched2"] = gzip.open(
"Not_Matched_{}_mate2.fq.gz".format(pool_name), "wb"
)
files["badquality2"] = gzip.open(
"Bad_Quality_{}_mate2.fq.gz".format(pool_name), "wb"
)
else:
files["notmatched"] = gzip.open(
"Not_Matched_{}.fq.gz".format(pool_name), "wb"
)
files["badquality"] = gzip.open(
"Bad_Quality_{}.fq.gz".format(pool_name), "wb"
)
filenames = list(sorted(set(f.name for f in files.values())))
p = subprocess.Popen(
"gzip -dc {} | wc -l".format(barcodes_file),
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
numlines, err = p.communicate()
if err:
raise Exception(err)
numlines = int(numlines)
readid, matched, notmatched, badquality, skipped = 0, 0, 0, 0, 0
send_message(progress(progress_start))
_progress = progress_start
progress_step = (0.9 - _progress) / 20.0
progress_span = numlines / 20
def save_results(matched, notmatched, badquality, skipped, total, _progress):
total = float(total)
send_message(
save(
"matched",
"{:,} reads ({:.2f} %)".format(matched, 100 * matched / total),
)
)
send_message(
save(
"notmatched",
"{:,} reads ({:.2f} %)".format(
notmatched, 100 * notmatched / total
),
)
)
send_message(
save(
"badquality",
"{:,} reads ({:.2f} %)".format(
badquality, 100 * badquality / total
),
)
)
send_message(
save(
"skipped",
"{:,} reads ({:.2f} %)".format(skipped, 100 * skipped / total),
)
)
send_message(progress(_progress))
f1 = gzip.GzipFile(reads1_file, "r")
fbar = gzip.GzipFile(barcodes_file, "r")
if reads2_file:
f2 = gzip.GzipFile(reads2_file, "r")
while True:
readid += 1
r1 = f1.readline()
if not r1:
break
r1 = r1.decode("utf-8").rstrip("\r").rstrip("\n").split("\t")
if len(r1) != 11:
print("SKIPPED: error in {} line in r1".format(readid))
continue
s1 = r1[-3].replace(".", "N")
p1 = r1[-1]
rbar = fbar.readline()
if not rbar:
break
rbar = rbar.decode("utf-8").rstrip("\r").rstrip("\n").split("\t")
if len(rbar) != 11:
print("SKIPPED: error in {} line in rbar".format(readid))
continue
sbar = rbar[-3].replace(".", "N")[:barcode_length]
pbar = rbar[-1]
if reads2_file:
r2 = f2.readline()
if not r2:
break
r2 = r2.decode("utf-8").rstrip("\r").rstrip("\n").split("\t")
if len(r2) != 11:
print("SKIPPED: error in {} line in r2".format(readid))
continue
s2 = r2[-3].replace(".", "N")
p2 = r2[-1]
else:
r2 = r1
p2 = p1
if r1[:7] == r2[:7] == rbar[:7] and p1 == p2 == pbar:
idline = "@" + ":".join(r1[:7]) + " " + sbar
if p1 == "1" and p2 == "1":
if sbar in barcodes:
files[sbar].write(
(
idline + "\n" + s1 + "\n" + "+" + "\n" + r1[-2] + "\n"
).encode("utf-8")
)
if reads2_file:
files[sbar + "2"].write(
(
idline
+ "\n"
+ s2
+ "\n"
+ "+"
+ "\n"
+ r2[-2]
+ "\n"
).encode("utf-8")
)
matched += 1
else:
files["notmatched"].write(
(
idline + "\n" + s1 + "\n" + "+" + "\n" + r1[-2] + "\n"
).encode("utf-8")
)
if reads2_file:
files["notmatched2"].write(
(
idline
+ "\n"
+ s2
+ "\n"
+ "+"
+ "\n"
+ r2[-2]
+ "\n"
).encode("utf-8")
)
notmatched += 1
else:
files["badquality"].write(
(idline + "\n" + s1 + "\n" + "+" + "\n" + r1[-2] + "\n").encode(
"utf-8"
)
)
if reads2_file:
files["badquality2"].write(
(
idline + "\n" + s2 + "\n" + "+" + "\n" + r2[-2] + "\n"
).encode("utf-8")
)
badquality += 1
else:
print(
"SKIPPED: {}, p1: {}, p2: {}, pbar: {}".format(readid, p1, p2, pbar)
)
print("{} ? {} ? {}".format(r1[:7], r2[:7], rbar[:7]))
skipped += 1
if readid % progress_span == 0:
_progress += progress_step
save_results(
matched, notmatched, badquality, skipped, readid, _progress
)
save_results(matched, notmatched, badquality, skipped, readid, 0.9)
finally:
if f1:
f1.close()
if f2:
f2.close()
if fbar:
fbar.close()
for f in files:
files[f].close()
return filenames
filenames = read_multiplexed(
reads1, reads2, args.barcodes, pool_maps, args.progress_start
)
for name in filenames:
if reads2:
if name.endswith("_mate2.fq.gz"):
continue
name2 = name.replace("_mate1", "_mate2")
send_message(export_file(name))
send_message(export_file(name2))
process = "upload-fastq-paired"
proc_input = {"src1": [name], "src2": [name2]}
else:
send_message(export_file(name))
process = "upload-fastq-single"
proc_input = {"src": [name]}
send_message(run(process, json.dumps(proc_input, separators=(",", ":"))))
| apache-2.0 | -835,447,415,483,483,300 | 31.827195 | 88 | 0.427166 | false |
aimas/TuniErp-8.0 | addons/pad_project/__openerp__.py | 1 | 1503 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Pad on tasks',
'version': '1.0',
'category': 'Project Management',
'description': """
This module adds a PAD in all project kanban views.
===================================================
""",
'author': 'OpenERP SA',
'website': 'https://www.tunierp.com/page/project-management',
'depends': ['project', 'pad'],
'data': ['project_task.xml'],
'demo': [],
'installable': True,
'auto_install': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -4,788,185,978,103,792,000 | 38.552632 | 78 | 0.578177 | false |
lmazuel/azure-sdk-for-python | azure-mgmt-sql/azure/mgmt/sql/models/elastic_pool_operation.py | 1 | 4797 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .proxy_resource import ProxyResource
class ElasticPoolOperation(ProxyResource):
"""A elastic pool operation.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource ID.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:ivar elastic_pool_name: The name of the elastic pool the operation is
being performed on.
:vartype elastic_pool_name: str
:ivar operation: The name of operation.
:vartype operation: str
:ivar operation_friendly_name: The friendly name of operation.
:vartype operation_friendly_name: str
:ivar percent_complete: The percentage of the operation completed.
:vartype percent_complete: int
:ivar server_name: The name of the server.
:vartype server_name: str
:ivar start_time: The operation start time.
:vartype start_time: datetime
:ivar state: The operation state.
:vartype state: str
:ivar error_code: The operation error code.
:vartype error_code: int
:ivar error_description: The operation error description.
:vartype error_description: str
:ivar error_severity: The operation error severity.
:vartype error_severity: int
:ivar is_user_error: Whether or not the error is a user error.
:vartype is_user_error: bool
:ivar estimated_completion_time: The estimated completion time of the
operation.
:vartype estimated_completion_time: datetime
:ivar description: The operation description.
:vartype description: str
:ivar is_cancellable: Whether the operation can be cancelled.
:vartype is_cancellable: bool
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'elastic_pool_name': {'readonly': True},
'operation': {'readonly': True},
'operation_friendly_name': {'readonly': True},
'percent_complete': {'readonly': True},
'server_name': {'readonly': True},
'start_time': {'readonly': True},
'state': {'readonly': True},
'error_code': {'readonly': True},
'error_description': {'readonly': True},
'error_severity': {'readonly': True},
'is_user_error': {'readonly': True},
'estimated_completion_time': {'readonly': True},
'description': {'readonly': True},
'is_cancellable': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'elastic_pool_name': {'key': 'properties.elasticPoolName', 'type': 'str'},
'operation': {'key': 'properties.operation', 'type': 'str'},
'operation_friendly_name': {'key': 'properties.operationFriendlyName', 'type': 'str'},
'percent_complete': {'key': 'properties.percentComplete', 'type': 'int'},
'server_name': {'key': 'properties.serverName', 'type': 'str'},
'start_time': {'key': 'properties.startTime', 'type': 'iso-8601'},
'state': {'key': 'properties.state', 'type': 'str'},
'error_code': {'key': 'properties.errorCode', 'type': 'int'},
'error_description': {'key': 'properties.errorDescription', 'type': 'str'},
'error_severity': {'key': 'properties.errorSeverity', 'type': 'int'},
'is_user_error': {'key': 'properties.isUserError', 'type': 'bool'},
'estimated_completion_time': {'key': 'properties.estimatedCompletionTime', 'type': 'iso-8601'},
'description': {'key': 'properties.description', 'type': 'str'},
'is_cancellable': {'key': 'properties.isCancellable', 'type': 'bool'},
}
def __init__(self):
super(ElasticPoolOperation, self).__init__()
self.elastic_pool_name = None
self.operation = None
self.operation_friendly_name = None
self.percent_complete = None
self.server_name = None
self.start_time = None
self.state = None
self.error_code = None
self.error_description = None
self.error_severity = None
self.is_user_error = None
self.estimated_completion_time = None
self.description = None
self.is_cancellable = None
| mit | -3,615,658,341,865,835,000 | 41.078947 | 103 | 0.608505 | false |
yesudeep/cmc | app/models.py | 1 | 7739 | #!/usr/bin/env python
# -*- coding: utf-8; mode: python; tab-width: 4; indent-tabs-mode: nil; -*-
# Models for the datastore.
# Copyright (c) 2009 happychickoo.
#
# The MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import configuration
from google.appengine.ext import db
from google.appengine.api import memcache
from aetycoon import TransformProperty
from django.template.defaultfilters import slugify
from caching_counter import CachingCounter
from dbhelper import SerializableModel, serialize_entities, deserialize_entities
import appengine_admin
class OpenIDUser(SerializableModel):
nickname = db.StringProperty()
identifier = db.StringProperty(required=True)
email = db.EmailProperty()
class SuggestedTitle(SerializableModel):
title = db.StringProperty(required=True)
slug = TransformProperty(title, slugify)
def increment_vote_count(self, delta=1):
CachingCounter('SuggestedTitle(%s).vote_count.key=%s' % (self.slug, str(self.key()))).incr(delta=delta)
@property
def vote_count(self):
return CachingCounter('SuggestedTitle(%s).vote_count.key=%s' % (self.slug, str(self.key()))).count
@classmethod
def up_vote_or_insert(cls, title):
t = SuggestedTitle.all().filter('slug = ', slugify(title)).get()
if not t:
t = SuggestedTitle(title=title)
t.put()
t.increment_vote_count()
return t
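    # Illustrative usage (hypothetical caller, not in the original):
    #   title = SuggestedTitle.up_vote_or_insert("My Movie Title")
    #   votes = title.vote_count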
def __unicode__(self):
return self.title
def __str__(self):
return self.__unicode__()
class Celebrity(SerializableModel):
name = db.StringProperty(required=True)
slug = TransformProperty(name, slugify)
def increment_vote_count(self, delta=1):
CachingCounter('Celebrity(%s).vote_count.key=%s' % (self.slug, str(self.key()))).incr(delta=delta)
@property
def vote_count(self):
return CachingCounter('Celebrity(%s).vote_count.key=%s' % (self.slug, str(self.key()))).count
@classmethod
def up_vote_or_insert(cls, name):
t = Celebrity.all().filter('slug = ', slugify(name)).get()
if not t:
t = Celebrity(name=name)
t.put()
t.increment_vote_count()
return t
@classmethod
def get_latest(cls, count=100):
cache_key = 'Celebrity.get_latest(count=%d)' % count
celebrities = deserialize_entities(memcache.get(cache_key))
if not celebrities:
celebrities = Celebrity.all().order('-when_modified').fetch(count)
memcache.set(cache_key, serialize_entities(celebrities), 10)
return celebrities
def __unicode__(self):
return self.name
def __str__(self):
return self.name
class Person(SerializableModel):
full_name = db.StringProperty(required=True)
email = db.EmailProperty(required=True)
mobile_number = db.StringProperty()
def __unicode__(self):
return self.full_name
def __str__(self):
return self.full_name
class SuggestedTitlePerson(Person):
suggested_title = db.ReferenceProperty(SuggestedTitle, collection_name='people')
def __unicode__(self):
return self.full_name
def __str__(self):
return self.full_name
class StoryAuthor(Person):
def __unicode__(self):
return self.full_name
def __str__(self):
return self.full_name
class NotifyReleasePerson(Person):
def __unicode__(self):
return self.full_name
def __str__(self):
return self.full_name
class Story(SerializableModel):
title = db.StringProperty(required=True)
content = db.TextProperty(default=db.Blob(""))
author = db.ReferenceProperty(StoryAuthor, collection_name="stories")
def __unicode__(self):
return self.title
def __str__(self):
return self.title
def get_latest_document(self):
"""Returns the latest document submitted."""
pass
class StoryDocument(SerializableModel):
story = db.ReferenceProperty(Story, collection_name="documents")
path = db.StringProperty()
name = db.StringProperty()
def __unicode__(self):
return self.name
def __str__(self):
return self.name
@property
def document(self):
import static
return static.get(self.path)
class AdminCelebrity(appengine_admin.ModelAdmin):
model = Celebrity
listFields = ("name", "slug", "vote_count")
editFields = ("name",)
readonlyFields = ("slug", "when_created", 'vote_count', "when_modified")
listGql = "order by name asc"
class AdminStoryAuthor(appengine_admin.ModelAdmin):
model = StoryAuthor
listFields = ("full_name", 'email', 'mobile_number',)
editFields = ("full_name", "email", 'mobile_number',)
listGql = 'order by full_name asc'
class AdminNotifyReleasePerson(appengine_admin.ModelAdmin):
model = NotifyReleasePerson
listFields = ("full_name", "email", "mobile_number",)
editFields = ("full_name", "email", "mobile_number",)
listGql = 'order by full_name asc'
class AdminSuggestedTitlePerson(appengine_admin.ModelAdmin):
model = SuggestedTitlePerson
listFields = ("full_name", "email", "mobile_number", "suggested_title")
editFields = ("full_name", "email", "mobile_number", "suggested_title")
listGql = 'order by full_name asc'
class AdminSuggestedTitle(appengine_admin.ModelAdmin):
model = SuggestedTitle
listFields = ('title', 'people', 'vote_count')
editFields = ('title', )
readonlyFields = ('slug', 'people', 'vote_count', 'when_created', 'when_modified')
class AdminStory(appengine_admin.ModelAdmin):
model = Story
listFields = ('title', 'author')
editFields = ('title', 'content')
readonlyFields = ('author', 'when_created', 'when_modified')
listGql = 'order by when_created desc'
class AdminStoryDocument(appengine_admin.ModelAdmin):
model = StoryDocument
listFields = ('path', 'story', 'name', 'document')
editFields = ('path', 'story', 'name')
readonlyFields = ('path', 'name', 'when_created', 'when_modified')
listGql = 'order by when_created desc'
class AdminStaticContent(appengine_admin.ModelAdmin):
from static import StaticContent
model = StaticContent
listFields = ('body', 'content_type', 'status',)
editFields = ('body', 'content_type',)
readonlyFields = ('status', 'last_modified', 'headers', 'etag')
listGql = 'order by last_modified desc'
appengine_admin.register(
AdminStory,
AdminStoryDocument,
AdminCelebrity,
AdminStoryAuthor,
AdminNotifyReleasePerson,
AdminSuggestedTitlePerson,
AdminSuggestedTitle,
AdminStaticContent
)
| mit | -3,802,891,922,520,883,000 | 31.931915 | 111 | 0.671663 | false |
msimacek/koschei | alembic/versions/14ef9d47d314_split_dependency_changes_table.py | 1 | 1760 | """Split dependency changes table
Revision ID: 14ef9d47d314
Revises: 31d647dbc4c5
Create Date: 2015-09-07 16:23:42.789628
"""
# revision identifiers, used by Alembic.
revision = '14ef9d47d314'
down_revision = '31d647dbc4c5'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table('unapplied_change',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('dep_name', sa.String(), nullable=False),
sa.Column('prev_epoch', sa.Integer(), nullable=True),
sa.Column('prev_version', sa.String(), nullable=True),
sa.Column('prev_release', sa.String(), nullable=True),
sa.Column('curr_epoch', sa.Integer(), nullable=True),
sa.Column('curr_version', sa.String(), nullable=True),
sa.Column('curr_release', sa.String(), nullable=True),
sa.Column('distance', sa.Integer(), nullable=True),
sa.Column('package_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['package_id'], ['package.id'], ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_unapplied_change_package_id'), 'unapplied_change', ['package_id'], unique=False)
op.execute("""
ALTER TABLE dependency_change RENAME TO applied_change;
DELETE FROM applied_change WHERE applied_in_id IS NULL;
ALTER TABLE applied_change RENAME COLUMN applied_in_id TO build_id;
ALTER TABLE applied_change ALTER COLUMN build_id SET NOT NULL;
ALTER TABLE applied_change DROP COLUMN package_id;
DROP INDEX ix_dependency_change_applied_in_id;
""")
op.create_index(op.f('ix_applied_change_build_id'), 'applied_change', ['build_id'], unique=False)
def downgrade():
raise NotImplementedError()
| gpl-2.0 | 2,047,308,295,171,628,800 | 38.111111 | 109 | 0.667614 | false |
alokjani/contrail-datapipeline | tools/dummy-http-receiver.py | 1 | 1358 | #!/usr/bin/env python
"""
Very simple HTTP server in python.
Usage::
./dummy-web-server.py [<port>]
Send a GET request::
curl http://localhost
Send a HEAD request::
curl -I http://localhost
Send a POST request::
curl -d "foo=bar&bin=baz" http://localhost
"""
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import SocketServer
class DummyReceiver(BaseHTTPRequestHandler):
def _set_headers(self):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
def do_GET(self):
self._set_headers()
self.wfile.write("<html><body><h1>hi!</h1></body></html>")
def do_HEAD(self):
self._set_headers()
def do_POST(self):
content_length = int(self.headers['Content-Length']) # <--- Gets the size of data
post_data = self.rfile.read(content_length) # <--- Gets the data itself
print post_data # <-- Print post data
self._set_headers()
def run(server_class=HTTPServer, handler_class=DummyReceiver, port=80):
server_address = ('', port)
httpd = server_class(server_address, handler_class)
print 'Starting httpd...'
httpd.serve_forever()
if __name__ == "__main__":
from sys import argv
if len(argv) == 2:
run(port=int(argv[1]))
else:
run()
| apache-2.0 | -7,341,904,458,939,571,000 | 24.148148 | 89 | 0.616348 | false |
blamarvt/quark | quark/drivers/nvp_driver.py | 1 | 23423 | # Copyright 2013 Openstack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
NVP client driver for Quark
"""
from oslo.config import cfg
import aiclib
from neutron.extensions import securitygroup as sg_ext
from neutron.openstack.common import log as logging
from quark.drivers import base
from quark import exceptions
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
nvp_opts = [
cfg.IntOpt('max_ports_per_switch',
default=0,
help=_('Maximum amount of NVP ports on an NVP lswitch')),
cfg.StrOpt('default_tz_type',
help=_('The type of connector to use for the default tz'),
default="stt"),
cfg.StrOpt('default_tz',
help=_('The default transport zone UUID')),
cfg.MultiStrOpt('controller_connection',
default=[],
help=_('NVP Controller connection string')),
cfg.IntOpt('max_rules_per_group',
default=30,
               help=_('Maximum size of NVP SecurityRule list per group')),
cfg.IntOpt('max_rules_per_port',
default=30,
help=_('Maximum rules per NVP lport across all groups')),
]
physical_net_type_map = {
"stt": "stt",
"gre": "gre",
"flat": "bridge",
"bridge": "bridge",
"vlan": "bridge",
"local": "local"
}
CONF.register_opts(nvp_opts, "NVP")
def _tag_roll(tags):
return [{'scope': k, 'tag': v} for k, v in tags]
def _tag_unroll(tags):
return dict((t['scope'], t['tag']) for t in tags)
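# Shape example (illustrative): _tag_roll([("os_tid", "t1")]) returns
# [{'scope': 'os_tid', 'tag': 't1'}]; _tag_unroll inverts that shape.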
class NVPDriver(base.BaseDriver):
def __init__(self):
self.nvp_connections = []
self.conn_index = 0
self.limits = {'max_ports_per_switch': 0,
'max_rules_per_group': 0,
'max_rules_per_port': 0}
super(NVPDriver, self).__init__()
@classmethod
def get_name(klass):
return "NVP"
def load_config(self):
#NOTE(mdietz): What does default_tz actually mean?
# We don't have one default.
default_tz = CONF.NVP.default_tz
LOG.info("Loading NVP settings " + str(default_tz))
connections = CONF.NVP.controller_connection
self.limits.update({
'max_ports_per_switch': CONF.NVP.max_ports_per_switch,
'max_rules_per_group': CONF.NVP.max_rules_per_group,
'max_rules_per_port': CONF.NVP.max_rules_per_port})
LOG.info("Loading NVP settings " + str(connections))
for conn in connections:
(ip, port, user, pw, req_timeout,
http_timeout, retries, redirects) = conn.split(":")
self.nvp_connections.append(dict(ip_address=ip,
port=port,
username=user,
password=pw,
req_timeout=req_timeout,
http_timeout=http_timeout,
retries=retries,
redirects=redirects,
default_tz=default_tz))
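    # Connection-string sketch (illustrative values): each controller_connection
    # entry packs the eight colon-separated fields parsed above, e.g.
    #   10.0.0.2:443:admin:secret:30:10:2:2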
def get_connection(self):
conn = self.nvp_connections[self.conn_index]
if "connection" not in conn:
scheme = conn["port"] == "443" and "https" or "http"
uri = "%s://%s:%s" % (scheme, conn["ip_address"], conn["port"])
user = conn['username']
passwd = conn['password']
conn["connection"] = aiclib.nvp.Connection(uri,
username=user,
password=passwd)
return conn["connection"]
def create_network(self, context, network_name, tags=None,
network_id=None, **kwargs):
return self._lswitch_create(context, network_name, tags,
network_id, **kwargs)
def delete_network(self, context, network_id):
lswitches = self._lswitches_for_network(context, network_id).results()
connection = self.get_connection()
for switch in lswitches["results"]:
LOG.debug("Deleting lswitch %s" % switch["uuid"])
connection.lswitch(switch["uuid"]).delete()
def _collect_lswitch_info(self, lswitch, get_status):
info = {
'port_isolation_enabled': lswitch['port_isolation_enabled'],
'display_name': lswitch['display_name'],
'uuid': lswitch['uuid'],
'transport_zones': lswitch['transport_zones'],
}
info.update(_tag_unroll(lswitch['tags']))
if get_status:
status = lswitch.pop('_relations')['LogicalSwitchStatus']
info.update({
'lport_stats': {
'fabric_up': status['lport_fabric_up_count'],
'admin_up': status['lport_admin_up_count'],
'link_up': status['lport_link_up_count'],
'count': status['lport_count'],
}, 'fabric_status': status['fabric_status'],
})
return info
def diag_network(self, context, network_id, get_status):
switches = self._lswitch_status_query(context, network_id)['results']
return {'logical_switches': [self._collect_lswitch_info(s, get_status)
for s in switches]}
def create_port(self, context, network_id, port_id,
status=True, security_groups=[], allowed_pairs=[]):
tenant_id = context.tenant_id
lswitch = self._create_or_choose_lswitch(context, network_id)
connection = self.get_connection()
port = connection.lswitch_port(lswitch)
port.admin_status_enabled(status)
port.allowed_address_pairs(allowed_pairs)
nvp_group_ids = self._get_security_groups_for_port(context,
security_groups)
port.security_profiles(nvp_group_ids)
tags = [dict(tag=network_id, scope="neutron_net_id"),
dict(tag=port_id, scope="neutron_port_id"),
dict(tag=tenant_id, scope="os_tid")]
LOG.debug("Creating port on switch %s" % lswitch)
port.tags(tags)
res = port.create()
res["lswitch"] = lswitch
port = connection.lswitch_port(lswitch)
port.uuid = res["uuid"]
port.attachment_vif(port_id)
return res
def update_port(self, context, port_id, status=True,
security_groups=[], allowed_pairs=[]):
connection = self.get_connection()
lswitch_id = self._lswitch_from_port(context, port_id)
port = connection.lswitch_port(lswitch_id, port_id)
nvp_group_ids = self._get_security_groups_for_port(context,
security_groups)
if nvp_group_ids:
port.security_profiles(nvp_group_ids)
if allowed_pairs:
port.allowed_address_pairs(allowed_pairs)
port.admin_status_enabled(status)
return port.update()
def delete_port(self, context, port_id, **kwargs):
connection = self.get_connection()
lswitch_uuid = kwargs.get('lswitch_uuid', None)
if not lswitch_uuid:
lswitch_uuid = self._lswitch_from_port(context, port_id)
LOG.debug("Deleting port %s from lswitch %s" % (port_id, lswitch_uuid))
connection.lswitch_port(lswitch_uuid, port_id).delete()
def _collect_lport_info(self, lport, get_status):
info = {
'mirror_targets': lport['mirror_targets'],
'display_name': lport['display_name'],
'portno': lport['portno'],
'allowed_address_pairs': lport['allowed_address_pairs'],
'nvp_security_groups': lport['security_profiles'],
'uuid': lport['uuid'],
'admin_status_enabled': lport['admin_status_enabled'],
'queue_uuid': lport['queue_uuid'],
}
if get_status:
stats = lport['statistics']
status = lport['status']
lswitch = {
'uuid': status['lswitch']['uuid'],
'display_name': status['lswitch']['display_name'],
}
lswitch.update(_tag_unroll(status['lswitch']['tags']))
info.update({
'statistics': {
'recieved': {
'packets': stats['rx_packets'],
'bytes': stats['rx_bytes'],
'errors': stats['rx_errors']
},
'transmitted': {
'packets': stats['tx_packets'],
'bytes': stats['tx_bytes'],
'errors': stats['tx_errors']
},
},
'status': {
'link_status_up': status['link_status_up'],
'admin_status_up': status['admin_status_up'],
'fabric_status_up': status['fabric_status_up'],
},
'lswitch': lswitch,
})
info.update(_tag_unroll(lport['tags']))
return info
def diag_port(self, context, port_id, get_status=False):
connection = self.get_connection()
lswitch_uuid = self._lswitch_from_port(context, port_id)
lswitch_port = connection.lswitch_port(lswitch_uuid, port_id)
query = lswitch_port.query()
query.relations("LogicalPortAttachment")
results = query.results()
if results['result_count'] == 0:
return {'lport': "Logical port not found."}
config = results['results'][0]
relations = config.pop('_relations')
config['attachment'] = relations['LogicalPortAttachment']['type']
if get_status:
config['status'] = lswitch_port.status()
config['statistics'] = lswitch_port.statistics()
return {'lport': self._collect_lport_info(config, get_status)}
def _get_network_details(self, context, network_id, switches):
name, phys_net, phys_type, segment_id = None, None, None, None
for res in switches["results"]:
name = res["display_name"]
for zone in res["transport_zones"]:
phys_net = zone["zone_uuid"]
phys_type = zone["transport_type"]
if "binding_config" in zone:
binding = zone["binding_config"]
segment_id = binding["vlan_translation"][0]["transport"]
break
            return dict(network_name=name, phys_net=phys_net,
                        phys_type=phys_type, segment_id=segment_id)
        return {}
def create_security_group(self, context, group_name, **group):
tenant_id = context.tenant_id
connection = self.get_connection()
group_id = group.get('group_id')
profile = connection.securityprofile()
if group_name:
profile.display_name(group_name)
ingress_rules = group.get('port_ingress_rules', [])
egress_rules = group.get('port_egress_rules', [])
if (len(ingress_rules) + len(egress_rules) >
self.limits['max_rules_per_group']):
raise exceptions.DriverLimitReached(limit="rules per group")
if egress_rules:
profile.port_egress_rules(egress_rules)
if ingress_rules:
profile.port_ingress_rules(ingress_rules)
tags = [dict(tag=group_id, scope="neutron_group_id"),
dict(tag=tenant_id, scope="os_tid")]
LOG.debug("Creating security profile %s" % group_name)
profile.tags(tags)
return profile.create()
def delete_security_group(self, context, group_id):
guuid = self._get_security_group_id(context, group_id)
connection = self.get_connection()
LOG.debug("Deleting security profile %s" % group_id)
connection.securityprofile(guuid).delete()
def update_security_group(self, context, group_id, **group):
query = self._get_security_group(context, group_id)
connection = self.get_connection()
profile = connection.securityprofile(query.get('uuid'))
ingress_rules = group.get('port_ingress_rules',
query.get('logical_port_ingress_rules'))
egress_rules = group.get('port_egress_rules',
query.get('logical_port_egress_rules'))
if (len(ingress_rules) + len(egress_rules) >
self.limits['max_rules_per_group']):
raise exceptions.DriverLimitReached(limit="rules per group")
if group.get('name', None):
profile.display_name(group['name'])
if group.get('port_ingress_rules', None) is not None:
profile.port_ingress_rules(ingress_rules)
if group.get('port_egress_rules', None) is not None:
profile.port_egress_rules(egress_rules)
return profile.update()
def _update_security_group_rules(self, context, group_id, rule, operation,
checks):
groupd = self._get_security_group(context, group_id)
direction, secrule = self._get_security_group_rule_object(context,
rule)
rulelist = groupd['logical_port_%s_rules' % direction]
for check in checks:
if not check(secrule, rulelist):
raise checks[check]
getattr(rulelist, operation)(secrule)
LOG.debug("%s rule on security group %s" % (operation, groupd['uuid']))
group = {'port_%s_rules' % direction: rulelist}
return self.update_security_group(context, group_id, **group)
def create_security_group_rule(self, context, group_id, rule):
return self._update_security_group_rules(
context, group_id, rule, 'append',
{(lambda x, y: x not in y):
sg_ext.SecurityGroupRuleExists(id=group_id),
(lambda x, y:
self._check_rule_count_per_port(context, group_id) <
self.limits['max_rules_per_port']):
exceptions.DriverLimitReached(limit="rules per port")})
def delete_security_group_rule(self, context, group_id, rule):
return self._update_security_group_rules(
context, group_id, rule, 'remove',
{(lambda x, y: x in y):
sg_ext.SecurityGroupRuleNotFound(id="with group_id %s" %
group_id)})
def _create_or_choose_lswitch(self, context, network_id):
switches = self._lswitch_status_query(context, network_id)
switch = self._lswitch_select_open(context, network_id=network_id,
switches=switches)
if switch:
LOG.debug("Found open switch %s" % switch)
return switch
switch_details = self._get_network_details(context, network_id,
switches)
if not switch_details:
raise exceptions.BadNVPState(net_id=network_id)
return self._lswitch_create(context, network_id=network_id,
**switch_details)
def _lswitch_status_query(self, context, network_id):
query = self._lswitches_for_network(context, network_id)
query.relations("LogicalSwitchStatus")
results = query.results()
LOG.debug("Query results: %s" % results)
return results
def _lswitch_select_open(self, context, switches=None, **kwargs):
"""Selects an open lswitch for a network. Note that it does not select
the most full switch, but merely one with ports available.
"""
if switches is not None:
for res in switches["results"]:
count = res["_relations"]["LogicalSwitchStatus"]["lport_count"]
if self.limits['max_ports_per_switch'] == 0 or \
count < self.limits['max_ports_per_switch']:
return res["uuid"]
return None
def _lswitch_delete(self, context, lswitch_uuid):
connection = self.get_connection()
LOG.debug("Deleting lswitch %s" % lswitch_uuid)
connection.lswitch(lswitch_uuid).delete()
def _config_provider_attrs(self, connection, switch, phys_net,
net_type, segment_id):
if not (phys_net or net_type):
return
if not phys_net and net_type:
raise exceptions.ProvidernetParamError(
msg="provider:physical_network parameter required")
if phys_net and not net_type:
raise exceptions.ProvidernetParamError(
msg="provider:network_type parameter required")
if not net_type in ("bridge", "vlan") and segment_id:
raise exceptions.SegmentIdUnsupported(net_type=net_type)
if net_type == "vlan" and not segment_id:
raise exceptions.SegmentIdRequired(net_type=net_type)
phys_type = physical_net_type_map.get(net_type.lower())
if not phys_type:
raise exceptions.InvalidPhysicalNetworkType(net_type=net_type)
tz_query = connection.transportzone(phys_net).query()
transport_zone = tz_query.results()
if transport_zone["result_count"] == 0:
raise exceptions.PhysicalNetworkNotFound(phys_net=phys_net)
switch.transport_zone(zone_uuid=phys_net,
transport_type=phys_type,
vlan_id=segment_id)
def _lswitch_create(self, context, network_name=None, tags=None,
network_id=None, phys_net=None,
phys_type=None, segment_id=None,
**kwargs):
# NOTE(mdietz): physical net uuid maps to the transport zone uuid
# physical net type maps to the transport/connector type
# if type maps to 'bridge', then segment_id, which maps
# to vlan_id, is conditionally provided
LOG.debug("Creating new lswitch for %s network %s" %
(context.tenant_id, network_name))
tenant_id = context.tenant_id
connection = self.get_connection()
switch = connection.lswitch()
if network_name is None:
network_name = network_id
switch.display_name(network_name)
tags = tags or []
tags.append({"tag": tenant_id, "scope": "os_tid"})
if network_id:
tags.append({"tag": network_id, "scope": "neutron_net_id"})
switch.tags(tags)
pnet = phys_net or CONF.NVP.default_tz
ptype = phys_type or CONF.NVP.default_tz_type
switch.transport_zone(pnet, ptype)
LOG.debug("Creating lswitch for network %s" % network_id)
# When connecting to public or snet, we need switches that are
# connected to their respective public/private transport zones
# using a "bridge" connector. Public uses no VLAN, whereas private
# uses VLAN 122 in netdev. Probably need this to be configurable
self._config_provider_attrs(connection, switch, phys_net, phys_type,
segment_id)
res = switch.create()
return res["uuid"]
def _lswitches_for_network(self, context, network_id):
connection = self.get_connection()
query = connection.lswitch().query()
query.tagscopes(['os_tid', 'neutron_net_id'])
query.tags([context.tenant_id, network_id])
return query
def _lswitch_from_port(self, context, port_id):
connection = self.get_connection()
query = connection.lswitch_port("*").query()
query.relations("LogicalSwitchConfig")
query.uuid(port_id)
port = query.results()
if port['result_count'] > 1:
raise Exception("Could not identify lswitch for port %s" % port_id)
if port['result_count'] < 1:
raise Exception("No lswitch found for port %s" % port_id)
return port['results'][0]["_relations"]["LogicalSwitchConfig"]["uuid"]
def _get_security_group(self, context, group_id):
connection = self.get_connection()
query = connection.securityprofile().query()
query.tagscopes(['os_tid', 'neutron_group_id'])
query.tags([context.tenant_id, group_id])
query = query.results()
if query['result_count'] != 1:
raise sg_ext.SecurityGroupNotFound(id=group_id)
return query['results'][0]
def _get_security_group_id(self, context, group_id):
return self._get_security_group(context, group_id)['uuid']
def _get_security_group_rule_object(self, context, rule):
ethertype = rule.get('ethertype', None)
rule_clone = {}
ip_prefix = rule.get('remote_ip_prefix', None)
if ip_prefix:
rule_clone['ip_prefix'] = ip_prefix
profile_uuid = rule.get('remote_group_id', None)
if profile_uuid:
rule_clone['profile_uuid'] = profile_uuid
for key in ['protocol', 'port_range_min', 'port_range_max']:
if rule.get(key):
rule_clone[key] = rule[key]
connection = self.get_connection()
secrule = connection.securityrule(ethertype, **rule_clone)
direction = rule.get('direction', '')
if direction not in ['ingress', 'egress']:
raise AttributeError(
"Direction not specified as 'ingress' or 'egress'.")
return (direction, secrule)
def _check_rule_count_per_port(self, context, group_id):
connection = self.get_connection()
ports = connection.lswitch_port("*").query().security_profile_uuid(
'=', self._get_security_group_id(
context, group_id)).results().get('results', [])
groups = (port.get('security_profiles', []) for port in ports)
return max([self._check_rule_count_for_groups(
context, (connection.securityprofile(gp).read() for gp in group))
for group in groups] or [0])
def _check_rule_count_for_groups(self, context, groups):
return sum(len(group['logical_port_ingress_rules']) +
len(group['logical_port_egress_rules'])
for group in groups)
def _get_security_groups_for_port(self, context, groups):
if (self._check_rule_count_for_groups(
context,
(self._get_security_group(context, g) for g in groups))
> self.limits['max_rules_per_port']):
raise exceptions.DriverLimitReached(limit="rules per port")
return [self._get_security_group(context, group)['uuid']
for group in groups]
| apache-2.0 | 3,469,498,176,643,507,700 | 41.664845 | 79 | 0.564915 | false |
PermutaTriangle/PermStruct | examples/classical_3_4/321_2134.py | 1 | 1261 | from __future__ import print_function
from permuta import *
import permstruct
import permstruct.dag
from permstruct import *
from permstruct.dag import taylor_dag
import sys
is_classical = True
# -- Wilf-class 2 in http://wikipedia.org/wiki/Enumerations_of_specific_permutation_classes -- #
# STATUS ================================================ > SUCCESS!
patts = [Permutation([3,2,1]), Permutation([2,1,3,4])]
perm_bound = 8
verify_bound = 12
ignored = 0
# The dag
max_len_patt = None
upper_bound = None
remove = False # True (3, 3) 4 works
# Grids
max_rule_size = (6, 6)
max_non_empty = 6
max_rules = None
# ------------------------------------------------------------------------------
settings = StructSettings(
perm_bound=perm_bound,
verify_bound=verify_bound,
max_rule_size=max_rule_size,
max_non_empty=max_non_empty,
max_rules=max_rules,
verbosity=StructLogger.INFO)
# settings.set_input(StructInput.from_avoidance(settings, patts))
settings.set_input(AvoiderInput(settings, patts))
settings.set_dag(taylor_dag(settings,
max_len_patt=max_len_patt,
remove=remove,
upper_bound=upper_bound))
exhaustive(settings)
| bsd-3-clause | 245,150,646,829,064,480 | 25.270833 | 96 | 0.59318 | false |
CZ-NIC/foris | foris/config_handlers/profile.py | 1 | 2032 | # coding=utf-8
# Foris - web administration interface
# Copyright (C) 2018 CZ.NIC, z.s.p.o. <http://www.nic.cz>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from .base import BaseConfigHandler
from foris import fapi
from foris.form import Hidden
from foris.state import current_state
from foris.utils.translators import gettext_dummy as gettext, _
class ProfileHandler(BaseConfigHandler):
""" Profile settings handler
"""
userfriendly_title = gettext("Guide workflow")
def __init__(self, *args, **kwargs):
self.load_backend_data()
super(ProfileHandler, self).__init__(*args, **kwargs)
def load_backend_data(self):
self.backend_data = current_state.backend.perform("web", "get_guide")
def get_form(self):
data = {"workflow": self.backend_data["current_workflow"]}
if self.data:
data.update(self.data)
profile_form = fapi.ForisForm("profile", data)
main = profile_form.add_section(name="set_profile", title=_(self.userfriendly_title))
main.add_field(Hidden, name="workflow", value=self.backend_data["current_workflow"])
def profile_form_cb(data):
result = current_state.backend.perform(
"web", "update_guide", {"enabled": True, "workflow": data["workflow"]}
)
return "save_result", result
profile_form.add_callback(profile_form_cb)
return profile_form
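# Illustrative flow (hypothetical values; assumes a running Foris backend):
#   handler = ProfileHandler(data={"workflow": "router"})
#   form = handler.get_form()
#   # submitting the form triggers profile_form_cb, which performs:
#   #   current_state.backend.perform("web", "update_guide",
#   #                                 {"enabled": True, "workflow": "router"})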
| gpl-3.0 | 8,376,547,051,335,723,000 | 33.440678 | 93 | 0.682579 | false |
DarthMaulware/EquationGroupLeaks | Leak #5 - Lost In Translation/windows/Resources/Ops/PyScripts/lib/ops/cmd/drivers.py | 1 | 3371 |
import ops.cmd
import ops
import ops.env
import ops.cmd.safetychecks
from ops.cmd import getBoolOption, setBoolOption, getValueOption, setListOption, setStringOption
OpsCommandException = ops.cmd.OpsCommandException
VALID_OPTIONS = ['minimal', 'load', 'unload', 'list', 'minimal', 'nosignature', 'noversion']
class DriversCommand(ops.cmd.DszCommand, ):
optgroups = {'operation': ['load', 'unload', 'list']}
reqgroups = ['operation']
    rejects = {'load': ['minimal', 'nosignature', 'noversion'], 'unload': ['minimal', 'nosignature', 'noversion']}
reqopts = []
defopts = {}
def __init__(self, plugin='drivers', autominimal=False, **optdict):
self.autominimal = autominimal
ops.cmd.DszCommand.__init__(self, plugin, **optdict)
def validateInput(self):
for opt in self.optdict:
if (opt not in VALID_OPTIONS):
return False
if ((not self.driver_list) and (self.load is None) and (self.unload is None)):
return False
if (((self.load is not None) or (self.unload is not None)) and (self.minimal or self.nosignature or self.noversion)):
return False
return True
def __getAutoMinimal(self):
return self.__autoMinimal
def __setAutoMinimal(self, val):
self.__autoMinimal = val
autominimal = property(__getAutoMinimal, __setAutoMinimal)
minimal = property((lambda x: getBoolOption(x, 'minimal')), (lambda x, y: setBoolOption(x, y, 'minimal')))
nosignature = property((lambda x: getBoolOption(x, 'nosignature')), (lambda x, y: setBoolOption(x, y, 'nosignature')))
noversion = property((lambda x: getBoolOption(x, 'noversion')), (lambda x, y: setBoolOption(x, y, 'noversion')))
driver_list = property((lambda x: getBoolOption(x, 'list')), (lambda x, y: setBoolOption(x, y, 'list')))
load = property((lambda x: getValueOption(x, 'load')), (lambda x, y: setStringOption(x, y, 'load')))
unload = property((lambda x: getValueOption(x, 'unload')), (lambda x, y: setStringOption(x, y, 'unload')))
ops.cmd.command_classes['drivers'] = DriversCommand
ops.cmd.aliasoptions['drivers'] = VALID_OPTIONS
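# Illustrative usage (hypothetical driver name; assumes the dsz/ops runtime):
#   cmd = DriversCommand(list=True, minimal=True)   # safe listing
#   cmd = DriversCommand(load='mydriver')           # load path rejects -minimal et al.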
def mySafetyCheck(self):
good = True
msgparts = []
if ((ops.env.get('OPS_NODRIVER').upper() == 'TRUE') and ((self.load is not None) or (self.unload is not None))):
good = False
msgparts.append('OPS_NODRIVER is set to TRUE, you should probably not load or unload drivers')
if ((ops.env.get('OPS_DRIVERLIST_MINIMAL').upper() == 'TRUE') and (not self.minimal) and self.driver_list):
if self.autominimal:
self.minimal = True
else:
good = False
msgparts.append('OPS_DRIVERLIST_MINIMAL is set to TRUE, you should not run a drivers -list without -minimal')
if ((ops.env.get('OPS_NODRIVERLIST').upper() == 'TRUE') and self.driver_list):
good = False
msgparts.append('OPS_NODRIVERLIST is set to true, you probably should not run a drivers -list')
if (not self.validateInput()):
good = False
msgparts.append('Your command did not pass input validation')
msg = ''
if (len(msgparts) > 0):
msg = msgparts[0]
for msgpart in msgparts[1:]:
msg += ('\n\t' + msgpart)
return (good, msg)
ops.cmd.safetychecks.addSafetyHandler('drivers', 'ops.cmd.drivers.mySafetyCheck') | unlicense | -1,865,745,882,127,163,600 | 47.171429 | 125 | 0.646099 | false |
cs-chan/FuzzyComputerVision | FCVT.py | 1 | 11861 | # -*- coding: utf-8 -*-
"""
@author: ChernHong Lim
"""
import numpy as np
import copy
import os
import matplotlib.pyplot as plt
import cv2
from scipy import stats
from skimage.feature import local_binary_pattern
#from skimage import io
from sklearn.cluster import KMeans
from sklearn.svm import SVC
import FQRC
"""""""""""""""""""""
Image Acquisition
"""""""""""""""""""""
#Read Source
def IA_readSource(sourceDir, display):
image = cv2.imread(sourceDir)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
if display:
show(image)
return image
"""""""""""""""""""""
Image Preprocessing
"""""""""""""""""""""
#Convert Gray
def IP_convertGray(image, display):
grayImage = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
if display:
show(grayImage)
return grayImage
#Convert Binary
def IP_convertBinary(image, display):
grayImage = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
(thresh, binaryImage) = cv2.threshold(grayImage, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
if display:
show(binaryImage)
return binaryImage
#Image Resize
def IP_resize(image, sx, sy, display):
resizedImage = cv2.resize(image,None,fx=sx, fy=sy)
if display:
show(resizedImage)
return resizedImage
#Image Filtering
def IP_imageFilt(image, method, kernel, display):
if method == 'average':
filteredImage = cv2.blur(image,kernel)
elif method == 'gaussian':
filteredImage = cv2.GaussianBlur(image,kernel,0)
    elif method == 'median':
        filteredImage = cv2.medianBlur(image,kernel[0])
    else:
        raise ValueError("unknown filter method: %s" % method)
if display:
show(filteredImage)
return filteredImage
#Image Morphological Operation
def IP_imageMorph(image, method, kernelSize, display):
kernel = np.ones((kernelSize[0],kernelSize[1]),np.uint8)
if method == 'erosion':
morphImage = cv2.erode(image,kernel,iterations = 1)
elif method == 'dilation':
morphImage = cv2.dilate(image,kernel,iterations = 1)
elif method == 'opening':
morphImage = cv2.morphologyEx(image, cv2.MORPH_OPEN, kernel)
    elif method == 'closing':
        morphImage = cv2.morphologyEx(image, cv2.MORPH_CLOSE, kernel)
    else:
        raise ValueError("unknown morphology method: %s" % method)
if display:
show(morphImage)
return morphImage
"""""""""""""""""""""
Feature Extraction
"""""""""""""""""""""
#Color detection
def FE_colorDetection(image, lowerbound, upperbound, display):
lower = np.array([lowerbound[0],lowerbound[1],lowerbound[2]]) #lower boundary of RGB value
upper = np.array([upperbound[0],upperbound[1],upperbound[2]]) #upper boundary of RGB value
mask_image = cv2.inRange(image, lower, upper)
colorImage = cv2.bitwise_and(image, image, mask = mask_image)
if(display):
show(colorImage)
return colorImage
#Edge detection
def FE_edgeDetection(image, display):
edgesImage = cv2.Canny(image,100,200)
if display:
show(edgesImage)
return edgesImage
#Corner detection
def FE_cornerDetection(image, display):
gray = cv2.cvtColor(image,cv2.COLOR_RGB2GRAY)
gray = np.float32(gray)
dst = cv2.cornerHarris(gray,2,3,0.04)
# dst = IP_imageMorph(dst, 'dilation', (10,10), False) #result is dilated for marking the corners, not important
image[dst>0.01*dst.max()]=[255,0,0] # Threshold for an optimal value, it may vary depending on the image.
if display:
show(image)
return dst
#Keypoint SIFT, SURF
def FE_keypointDetection(image, method, display):
gray = cv2.cvtColor(image,cv2.COLOR_RGB2GRAY)
if method == 'SIFT':
points = cv2.SIFT()
elif method == 'SURF':
points = cv2.SURF()
kp, des = points.detectAndCompute(gray,None)
if display:
img=cv2.drawKeypoints(gray,kp)
show(img)
return kp,des
#LBP
def FE_LBPDetection(image, display):
# settings for LBP
radius = 3
n_points = 8 * radius
METHOD = 'uniform'
gray = cv2.cvtColor(image,cv2.COLOR_RGB2GRAY)
lbp = local_binary_pattern(gray, n_points, radius, METHOD)
# Calculate the histogram
x = stats.itemfreq(lbp.ravel())
# Normalize the histogram
hist = x[:, 1]/sum(x[:, 1])
if display:
print 'imageLBP = [' + " ".join(['%0.4f'%i for i in hist]) + "]"
# print hist
return hist
#HOG
"""""""""""""""""""""
Feature Representation - Clustering and Quantisation to support Bag of Feature
"""""""""""""""""""""
#Feature clustering
def FE_Clustering(feaMat, clusterNo):
cluster = KMeans(n_clusters=clusterNo)
cluster.fit(feaMat)
cluster_labels = cluster.labels_
cluster_centers = cluster.cluster_centers_
cluster_labels_unique = np.unique(cluster_labels)
return cluster,cluster_labels,cluster_centers,cluster_labels_unique
#Feature quantisation
def FE_Quantisation(feaMat, cluster):
cluster_membership = cluster.predict(feaMat)
descriptor = np.bincount(cluster_membership, minlength=len(cluster.cluster_centers_))
return descriptor
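# Illustrative bag-of-features flow (sketch; random stand-in descriptors):
#   feats = np.random.rand(200, 64)                   # e.g. stacked SURF descriptors
#   cluster, labels, centers, uniq = FE_Clustering(feats, clusterNo=5)
#   bof = FE_Quantisation(feats[:20], cluster)        # histogram over 5 visual words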
"""""""""""""""""""""
Classification
"""""""""""""""""""""
def CL_Train(X_train, y_train, method, visualize):
if method == 'Crisp':
Classifier = SVC(kernel="linear", C=0.025)
Classifier.fit(X_train, y_train)
elif method == 'Fuzzy':
Classifier = FQRC.CL_FQRC_Train(X_train, y_train, binNum=5, visualize=visualize)
return Classifier
def CL_Predict(X_test, classifier, method, visualize):
if method == 'Crisp':
Predict = classifier.predict(X_test)
elif method == 'Fuzzy':
Predict = FQRC.CL_FQRC_Predict(X_test, classifier, visualize=visualize)
return Predict
"""""""""""""""""""""
Application: Image classification
"""""""""""""""""""""
def Image_Classification(trainingFolder, testingFolder, feature, classification_method):
"""
Setting
"""
path = os.getcwd()
#display
display = False
visualize = True
"""
Training
"""
pathTraining = path + '\\' + trainingFolder
dirsTraining = os.listdir(pathTraining)
noOfClass = len(dirsTraining)
groundTruth = np.arange(0,noOfClass)
counter2 = 0
if(feature == 'SIFT' or feature == 'SURF'):
# Keypoint detection for all files in each folder
for ind in range(0,noOfClass):
pathTrainingClass = pathTraining + '\\' + dirsTraining[ind]
dirsTF = os.listdir(pathTrainingClass)
listofGT = [ind] * len(dirsTF)
counter1 = 0
for indFile in range(0,len(dirsTF)):
image = IA_readSource(pathTrainingClass + '\\' + dirsTF[indFile], display)
imageKeyPoint = FE_keypointDetection(image, feature, display)
if counter1 == 0:
imageKeyPoint_perImage = [imageKeyPoint[1]]
counter1 = counter1 + 1
else:
imageKeyPoint_perImage.append(imageKeyPoint[1])
if counter2 == 0:
data = [dirsTF,listofGT,imageKeyPoint_perImage]
counter2 = counter2 + 1
else:
list.extend(data[0],dirsTF)
list.extend(data[1],listofGT)
list.extend(data[2],imageKeyPoint_perImage)
# Keypoint clustering
counter3 = 0
for item in data[2]:
if counter3 == 0:
imageKeyPoint_all = item
counter3 = counter3 + 1
else:
imageKeyPoint_all = np.concatenate((imageKeyPoint_all, item), axis=0)
cluster = FE_Clustering(imageKeyPoint_all, 5)
#Keypoint quantisation
        counter4 = 0
for indData in range(0,len(data[1])):
quantisation = FE_Quantisation(data[2][indData], cluster[0])
if counter4==0:
desc = copy.copy(quantisation)
counter4 = counter4 + 1
else:
desc = np.vstack((desc,quantisation))
elif(feature == 'LBP'):
for ind in range(0,noOfClass):
pathTrainingClass = pathTraining + '\\' + dirsTraining[ind]
dirsTF = os.listdir(pathTrainingClass)
listofGT = [ind] * len(dirsTF)
counter1 = 0
for indFile in range(0,len(dirsTF)):
image = IA_readSource(pathTrainingClass + '\\' + dirsTF[indFile], display)
imageFea = FE_LBPDetection(image, display)
if counter1 == 0:
imageFea_perImage = [imageFea]
counter1 = counter1 + 1
else:
imageFea_perImage.append(imageFea)
if counter2 == 0:
data = [dirsTF,listofGT,imageFea_perImage]
counter2 = counter2 + 1
else:
list.extend(data[0],dirsTF)
list.extend(data[1],listofGT)
list.extend(data[2],imageFea_perImage)
counter4 = 0
for indData in range(0,len(data[1])):
if counter4==0:
desc = copy.copy(data[2][indData])
counter4 = counter4 + 1
else:
desc = np.vstack((desc,data[2][indData]))
#Classification
trainDes = desc
trainGT = np.array(data[1])
classifier = CL_Train(trainDes, trainGT, classification_method, visualize)
"""
Testing
"""
pathTesting = path + '\\' + testingFolder
dirsTesting = os.listdir(pathTesting)
noOfClass = len(dirsTesting)
output_overall = []
for ind in range(0,noOfClass):
pathTestingClass = pathTesting + '\\' + dirsTesting[ind]
dirsTest = os.listdir(pathTestingClass)
listofGTTest = [ind] * len(dirsTest)
counter5 = 0
for indFile in range(0,len(dirsTest)):
image = IA_readSource(pathTestingClass + '\\' + dirsTest[indFile], display)
if(feature == 'SIFT' or feature == 'SURF'):
imageKeyPoint = FE_keypointDetection(image, feature, display)
desctest = FE_Quantisation(imageKeyPoint[1], cluster[0])
elif(feature == 'LBP'):
desctest = FE_LBPDetection(image, display)
answer = CL_Predict(desctest, classifier, classification_method, visualize=True)
if counter5 == 0:
output = answer
counter5 = counter5 + 1
else:
                output = np.vstack((output,answer))
fig = plt.figure()
plt.imshow(image)
plt.title('Classification Results: ' + str(np.around(answer,decimals=2)))
plt.axis('off')
output_overall.append(output)
return output_overall
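# Example invocation (sketch; 'Training'/'Testing' are hypothetical folders
# containing one subfolder per class):
#   results = Image_Classification('Training', 'Testing', 'LBP', 'Fuzzy')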
"""""""""""""""""""""
Utility
"""""""""""""""""""""
#Image visualization
def show(image):
if(len(image.shape)>2):
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
cv2.imshow('Image',image)
cv2.waitKey(0)
cv2.destroyAllWindows()
# io.imshow(image)
| gpl-3.0 | -4,393,226,043,624,216,600 | 28.257653 | 115 | 0.548942 | false |
cosurgi/trunk | examples/mpi/testMPI_3D_bisection.py | 1 | 2164 |
# Possible executions of this script
# ./yadempi script.py #interactive will spawn additional workers
# mpiexec -n 4 ./yadempi script.py #non interactive
NSTEPS=100 #set it >0 to see time iterations, else only initialization TODO!HACK
import os
from yade import mpy as mp
numThreads = 6
#add spheres
young = 5e6
compFricDegree = 0.0
O.materials.append(FrictMat(young=young, poisson=0.5, frictionAngle = radians(compFricDegree), density= 2600, label='sphereMat'))
O.materials.append(FrictMat(young=young*100, poisson = 0.5, frictionAngle = compFricDegree, density =2600, label='wallMat'))
mn,mx=Vector3(0,0,0),Vector3(150,150,100)
pred = pack.inAlignedBox(mn,mx)
O.bodies.append(pack.regularHexa(pred,radius=3,gap=0, material='sphereMat'))
walls=aabbWalls([Vector3(-mx[0]*2,-1,-mx[2]*2),Vector3(mx[0]*3,mx[1],mx[2]*3)], oversizeFactor=1, material='wallMat',wire=False)
for w in walls: w.shape.wire=False
O.bodies.append(walls[:3]+walls[4:]) #don't insert top wall
collider.verletDist = 2
newton.gravity=(0.05,-0.5,0.05) #else nothing would move
tsIdx=O.engines.index(timeStepper) #remove the automatic timestepper. Very important: we don't want subdomains to use many different timesteps...
O.engines=O.engines[0:tsIdx]+O.engines[tsIdx+1:]
O.dt=0.01
######### RUN ##########
# NB: collectTiming() references globals N and M, which are never defined in
# this script; the helper is kept for reference and is not called below.
def collectTiming():
created = os.path.isfile("collect.dat")
f=open('collect.dat','a')
if not created: f.write("numThreads mpi omp Nspheres N M runtime \n")
from yade import timing
f.write(str(numThreads)+" "+str(os.getenv('OMPI_COMM_WORLD_SIZE'))+" "+os.getenv('OMP_NUM_THREADS')+" "+str(N*M*(numThreads-1))+" "+str(N)+" "+str(M)+" "+str(timing.runtime())+"\n")
f.close()
# customize mpy
mp.VERBOSE_OUTPUT=False
mp.YADE_TIMING=False
mp.DOMAIN_DECOMPOSITION= True
#mp.MERGE_W_INTERACTIONS=True
#mp.ERASE_REMOTE_MASTER=True
mp.REALLOCATE_FREQUENCY=2
mp.mpirun(NSTEPS,numThreads,True)
#def animate():
#for k in range(600):
# single-thread vtk output from merged scene
#if mp.rank == 0:
#from yade import export
#v=export.VTKExporter("mpi3d")
#for k in range(600):
#mp.mpirun(15,4,True)
#if mp.rank == 0:
#v.exportSpheres(what=dict(subdomain='b.subdomain'))
| gpl-2.0 | 2,623,087,043,722,613,000 | 30.823529 | 182 | 0.719963 | false |
phlax/translate | translate/storage/test_aresource.py | 1 | 17503 | # -*- coding: utf-8 -*-
from __future__ import print_function
from lxml import etree
from translate.storage import aresource, test_monolingual
from translate.misc.multistring import multistring
from translate.storage.base import TranslationStore
class TestAndroidResourceUnit(test_monolingual.TestMonolingualUnit):
UnitClass = aresource.AndroidResourceUnit
def __check_escape(self, string, xml, target_language=None):
"""Helper that checks that a string is output with the right escape."""
unit = self.UnitClass("teststring")
if (target_language is not None):
store = TranslationStore()
store.settargetlanguage(target_language)
unit._store = store
unit.target = string
print("unit.target:", repr(unit.target))
print("xml:", repr(xml))
assert str(unit) == xml
def __check_parse(self, string, xml):
"""Helper that checks that a string is parsed correctly."""
parser = etree.XMLParser(strip_cdata=False)
translatable = 'translatable="false"' not in xml
et = etree.fromstring(xml, parser)
unit = self.UnitClass.createfromxmlElement(et)
print("unit.target:", repr(unit.target))
print("string:", string)
print("translatable:", repr(unit.istranslatable()))
assert unit.target == string
assert unit.istranslatable() == translatable
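    # Escaping rules exercised by the cases below (summary):
    #   newline -> "\\n", apostrophe -> "\\'", leading '@' -> "\\@",
    #   runs of spaces / leading or trailing spaces -> double-quoted,
    #   '<' and '&' in text -> XML entities; inline HTML markup is preserved.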
############################ Check string escape ##########################
def test_escape_message_with_newline(self):
string = 'message\nwith newline'
xml = '<string name="teststring">message\n\\nwith newline</string>\n\n'
self.__check_escape(string, xml)
def test_escape_quotes_with_newline(self):
string = '\'message\'\nwith newline'
xml = '<string name="teststring">\\\'message\\\'\n\\nwith newline</string>\n\n'
self.__check_escape(string, xml)
def test_escape_message_with_newline_in_xml(self):
string = 'message\n\nwith newline in xml\n'
xml = ('<string name="teststring">message\n\\n\n\\nwith newline in xml\n\\n'
'</string>\n\n')
self.__check_escape(string, xml)
def test_escape_twitter(self):
string = '@twitterescape'
xml = '<string name="teststring">\\@twitterescape</string>\n\n'
self.__check_escape(string, xml)
def test_escape_quote(self):
string = 'quote \'escape\''
xml = '<string name="teststring">quote \\\'escape\\\'</string>\n\n'
self.__check_escape(string, xml)
def test_escape_double_space(self):
string = 'double space'
xml = '<string name="teststring">"double space"</string>\n\n'
self.__check_escape(string, xml)
def test_escape_leading_space(self):
string = ' leading space'
xml = '<string name="teststring">" leading space"</string>\n\n'
self.__check_escape(string, xml)
def test_escape_tailing_space(self):
string = 'tailing space '
xml = '<string name="teststring">"tailing space "</string>\n\n'
self.__check_escape(string, xml)
def test_escape_xml_entities(self):
string = '>xml&entities'
xml = '<string name="teststring">>xml&entities</string>\n\n'
self.__check_escape(string, xml)
def test_escape_html_code(self):
string = 'some <b>html code</b> here'
xml = ('<string name="teststring">some <b>html code</b> here'
'</string>\n\n')
self.__check_escape(string, xml)
def test_escape_html_code_quote(self):
string = 'some <b>html code</b> \'here\''
xml = ('<string name="teststring">some <b>html code</b> \\\'here\\\''
'</string>\n\n')
self.__check_escape(string, xml)
def test_escape_html_code_quote_newline(self):
string = 'some \n<b>html code</b> \'here\''
xml = ('<string name="teststring">some \n\\n<b>html code</b> \\\'here\\\''
'</string>\n\n')
self.__check_escape(string, xml)
def test_escape_arrows(self):
string = '<<< arrow'
xml = '<string name="teststring"><<< arrow</string>\n\n'
self.__check_escape(string, xml)
def test_escape_link(self):
string = '<a href="http://example.net">link</a>'
xml = ('<string name="teststring">\n'
' <a href="http://example.net">link</a>\n'
'</string>\n\n')
self.__check_escape(string, xml)
def test_escape_link_and_text(self):
string = '<a href="http://example.net">link</a> and text'
xml = ('<string name="teststring"><a href="http://example.net">link'
'</a> and text</string>\n\n')
self.__check_escape(string, xml)
def test_escape_blank_string(self):
string = ''
xml = '<string name="teststring"></string>\n\n'
self.__check_escape(string, xml)
def test_plural_escape_message_with_newline(self):
mString = multistring(['one message\nwith newline', 'other message\nwith newline'])
xml = ('<plurals name="teststring">\n\t'
'<item quantity="one">one message\n\\nwith newline</item>\n\t'
'<item quantity="other">other message\n\\nwith newline</item>\n'
'</plurals>\n\n')
self.__check_escape(mString, xml, 'en')
def test_plural_invalid_lang(self):
mString = multistring(['one message', 'other message'])
xml = ('<plurals name="teststring">\n\t'
'<item quantity="one">one message</item>\n\t'
'<item quantity="other">other message</item>\n'
'</plurals>\n\n')
self.__check_escape(mString, xml, 'invalid')
def test_escape_html_quote(self):
string = 'start \'here\' <b>html code \'to escape\'</b> also \'here\''
xml = ('<string name="teststring">start \\\'here\\\' <b>html code \\\'to escape\\\'</b> also \\\'here\\\''
'</string>\n\n')
self.__check_escape(string, xml)
def test_escape_html_leading_space(self):
string = ' <b>html code \'to escape\'</b> some \'here\''
xml = ('<string name="teststring"> <b>html code \\\'to escape\\\'</b> some \\\'here\\\''
'</string>\n\n')
self.__check_escape(string, xml)
def test_escape_html_trailing_space(self):
string = '<b>html code \'to escape\'</b> some \'here\' '
xml = ('<string name="teststring"><b>html code \\\'to escape\\\'</b> some \\\'here\\\' '
'</string>\n\n')
self.__check_escape(string, xml)
def test_escape_html_with_ampersand(self):
string = '<b>html code \'to escape\'</b> some \'here\' with & char'
xml = ('<string name="teststring"><b>html code \\\'to escape\\\'</b> some \\\'here\\\' with & char'
'</string>\n\n')
self.__check_escape(string, xml)
def test_escape_html_double_space(self):
string = '<b>html code \'to escape\'</b> some \'here\''
xml = ('<string name="teststring"><b>"html code \\\'to escape\\\'"</b> some \\\'here\\\''
'</string>\n\n')
self.__check_escape(string, xml)
def test_escape_html_deep_double_space(self):
string = '<b>html code \'to <i>escape</i>\'</b> some \'here\''
xml = ('<string name="teststring"><b>"html code \\\'to "<i>escape</i>\\\'</b> some \\\'here\\\''
'</string>\n\n')
self.__check_escape(string, xml)
def test_escape_complex_xml(self):
string = '<g:test xmlns:g="ttt" g:somevalue="aaaa " aaa">value</g:test> & outer > <br/>text'
xml = ('<string name="teststring">'
'<g:test xmlns:g="ttt" g:somevalue="aaaa " aaa">value</g:test> & outer > <br/>text'
'</string>\n\n')
self.__check_escape(string, xml)
############################ Check string parse ###########################
def test_parse_message_with_newline(self):
string = 'message\nwith newline'
xml = '<string name="teststring">message\\nwith newline</string>\n\n'
self.__check_parse(string, xml)
def test_parse_message_with_newline_in_xml(self):
string = 'message \nwith\n newline\n in xml'
xml = ('<string name="teststring">message\n\\nwith\\n\nnewline\\n\nin xml'
'</string>\n\n')
self.__check_parse(string, xml)
def test_parse_twitter(self):
string = '@twitterescape'
xml = '<string name="teststring">\\@twitterescape</string>\n\n'
self.__check_parse(string, xml)
def test_parse_quote(self):
string = 'quote \'escape\''
xml = '<string name="teststring">quote \\\'escape\\\'</string>\n\n'
self.__check_parse(string, xml)
def test_parse_double_space(self):
string = 'double space'
xml = '<string name="teststring">"double space"</string>\n\n'
self.__check_parse(string, xml)
def test_parse_leading_space(self):
string = ' leading space'
xml = '<string name="teststring">" leading space"</string>\n\n'
self.__check_parse(string, xml)
def test_parse_xml_entities(self):
string = '>xml&entities'
xml = '<string name="teststring">>xml&entities</string>\n\n'
self.__check_parse(string, xml)
def test_parse_html_code(self):
string = 'some <b>html code</b> here'
xml = ('<string name="teststring">some <b>html code</b> here'
'</string>\n\n')
self.__check_parse(string, xml)
def test_parse_arrows(self):
string = '<<< arrow'
xml = '<string name="teststring"><<< arrow</string>\n\n'
self.__check_parse(string, xml)
def test_parse_link(self):
string = '<a href="http://example.net">link</a>'
xml = ('<string name="teststring"><a href="http://example.net">link'
'</a></string>\n\n')
self.__check_parse(string, xml)
def test_parse_link_and_text(self):
string = '<a href="http://example.net">link</a> and text'
xml = ('<string name="teststring"><a href="http://example.net">link'
'</a> and text</string>\n\n')
self.__check_parse(string, xml)
def test_parse_blank_string(self):
string = ''
xml = '<string name="teststring"></string>\n\n'
self.__check_parse(string, xml)
def test_parse_trailing_space(self):
string = 'test'
xml = '<string name="teststring">test </string>\n\n'
self.__check_parse(string, xml)
def test_parse_trailing_spaces(self):
string = 'test'
xml = '<string name="teststring">test </string>\n\n'
self.__check_parse(string, xml)
def test_parse_leading_spaces(self):
string = 'test'
xml = '<string name="teststring"> test</string>\n\n'
self.__check_parse(string, xml)
def test_parse_trailing_newline(self):
string = 'test'
xml = '<string name="teststring">test\n</string>\n\n'
self.__check_parse(string, xml)
def test_parse_many_quotes(self):
string = 'test'
xml = '<string name="teststring">""""""""""test"""""""</string>\n\n'
self.__check_parse(string, xml)
def test_parse_blank_string_again(self):
string = ''
xml = '<string name="teststring"/>\n\n'
self.__check_parse(string, xml)
def test_parse_double_quotes_string(self):
"""Check that double quotes got removed."""
string = 'double quoted text'
xml = '<string name="teststring">"double quoted text"</string>\n\n'
self.__check_parse(string, xml)
def test_parse_newline_in_string(self):
"""Check that newline is read as space.
At least it seems to be what Android does.
"""
string = 'newline\nin string'
xml = '<string name="teststring">newline\\nin string</string>\n\n'
self.__check_parse(string, xml)
def test_parse_not_translatable_string(self):
string = 'string'
xml = ('<string name="teststring" translatable="false">string'
'</string>\n\n')
self.__check_parse(string, xml)
def test_plural_parse_message_with_newline(self):
mString = multistring(['one message\nwith newline', 'other message\nwith newline'])
xml = ('<plurals name="teststring">\n\t'
'<item quantity="one">one message\\nwith newline</item>\n\t'
'<item quantity="other">other message\\nwith newline</item>\n\n'
'</plurals>\n\n')
self.__check_parse(mString, xml)
def test_parse_html_quote(self):
string = 'start \'here\' <b>html code \'to escape\'</b> also \'here\''
xml = ('<string name="teststring">start \\\'here\\\' <b>html code \\\'to escape\\\'</b> also \\\'here\\\''
'</string>\n\n')
self.__check_parse(string, xml)
def test_parse_html_leading_space(self):
string = ' <b>html code \'to escape\'</b> some \'here\''
xml = ('<string name="teststring"> <b>html code \\\'to escape\\\'</b> some \\\'here\\\''
'</string>\n\n')
self.__check_parse(string, xml)
def test_parse_html_leading_space_quoted(self):
string = ' <b>html code \'to escape\'</b> some \'here\''
xml = ('<string name="teststring">" "<b>"html code \'to escape\'"</b>" some \'here\'"'
'</string>\n\n')
self.__check_parse(string, xml)
def test_parse_html_trailing_space(self):
string = '<b>html code \'to escape\'</b> some \'here\' '
xml = ('<string name="teststring"><b>html code \\\'to escape\\\'</b> some \\\'here\\\' '
'</string>\n\n')
self.__check_parse(string, xml)
def test_parse_html_trailing_space_quoted(self):
string = '<b>html code \'to escape\'</b> some \'here\' '
xml = ('<string name="teststring"><b>"html code \'to escape\'"</b>" some \'here\' "'
'</string>\n\n')
self.__check_parse(string, xml)
def test_parse_html_with_ampersand(self):
string = '<b>html code \'to escape\'</b> some \'here\' with & char'
xml = ('<string name="teststring"><b>html code \\\'to escape\\\'</b> some \\\'here\\\' with & char'
'</string>\n\n')
self.__check_parse(string, xml)
def test_parse_html_double_space_quoted(self):
string = '<b>html code \'to escape\'</b> some \'here\''
xml = ('<string name="teststring"><b>"html code \'to escape\'"</b>" some \'here\'"'
'</string>\n\n')
self.__check_parse(string, xml)
def test_parse_html_deep_double_space_quoted(self):
string = '<b>html code \'to <i> escape</i>\'</b> some \'here\''
xml = ('<string name="teststring"><b>"html code \'to "<i>" escape"</i>\\\'</b> some \\\'here\\\''
'</string>\n\n')
self.__check_parse(string, xml)
def test_parse_complex_xml(self):
string = '<g:test xmlns:g="ttt" g:somevalue="aaaa " aaa">value</g:test> outer & text'
xml = ('<string name="teststring">'
'<g:test xmlns:g="ttt" g:somevalue="aaaa " aaa">value</g:test> outer & text'
'</string>\n\n')
self.__check_parse(string, xml)
class TestAndroidResourceFile(test_monolingual.TestMonolingualStore):
StoreClass = aresource.AndroidResourceFile
def test_targetlanguage_default_handlings(self):
store = self.StoreClass()
# Initial value is None
assert store.gettargetlanguage() is None
# sourcelanguage shouldn't change the targetlanguage
store.setsourcelanguage('en')
assert store.gettargetlanguage() is None
# targetlanguage setter works correctly
store.settargetlanguage('de')
assert store.gettargetlanguage() == 'de'
# explicit targetlanguage wins over filename
store.filename = 'dommy/values-it/res.xml'
assert store.gettargetlanguage() == 'de'
def test_targetlanguage_auto_detection_filename(self):
store = self.StoreClass()
# Check language auto_detection
store.filename = 'project/values-it/res.xml'
assert store.gettargetlanguage() == 'it'
def test_targetlanguage_auto_detection_filename_default_language(self):
store = self.StoreClass()
store.setsourcelanguage('en')
# Check language auto_detection
store.filename = 'project/values/res.xml'
assert store.gettargetlanguage() == 'en'
def test_targetlanguage_auto_detection_invalid_filename(self):
store = self.StoreClass()
store.setsourcelanguage('en')
store.filename = 'project/invalid_directory/res.xml'
assert store.gettargetlanguage() is None
store.filename = 'invalid_directory'
assert store.gettargetlanguage() is None
def test_namespaces(self):
content = '''<resources xmlns:tools="http://schemas.android.com/tools">
<string name="string1" tools:ignore="PluralsCandidate">string1</string>
<string name="string2">string2</string>
</resources>'''
store = self.StoreClass()
store.parse(content)
newstore = self.StoreClass()
newstore.addunit(store.units[0], new=True)
print(newstore)
assert b'<resources xmlns:tools="http://schemas.android.com/tools">' in bytes(newstore)
| gpl-2.0 | -5,462,796,748,261,070,000 | 39.422633 | 114 | 0.575273 | false |
cloudxaas/cloudauth | lib/libauthz.py | 1 | 3763 | #!/usr/bin/python
import os
import sys
import pwd
import uuid
import struct
import socket
import logging
import base64
import urlparse
import time
import datetime
import json
import grp, pwd
import libauthn
from M2Crypto import EVP, EC, util
logger = logging.getLogger("libauthz")
def assert_authz(qstr, authn_cert, authz_keypem):
#qstr: ttype=qst&tval=b64urlsafe&srvs=foo&srvs=bar
    #qstr: ttype=jwt&tval=jwt-token&srvs=foo&srvs=bar
logger.info(qstr)
attrs = urlparse.parse_qs(qstr)
ttype = attrs["ttype"][0]
token = attrs["tval"][0]
if (ttype == "qst"):
token = libauthn.base64url_decode(token)
try:
services = attrs["srvs"]
except KeyError:
services = ["OMNI"]
if (libauthn.verify_authn(ttype, token, authn_cert) == False):
return qstr
if (ttype == "qst"):
return assert_authz_qst(token, services, authn_cert, authz_keypem)
elif (ttype == "qsb"):
token = libauthn.qsb2qst(token)
token = assert_authz_qst(token, services, authn_cert, authz_keypem)
btkns = ""
tkns = token.split("\r\n")
for token in tkns :
if (token == None or len(token.strip()) <= 0): break
btkns += libauthn.qst2qsb(token, "authz") + "\r\n"
return btkns
elif (ttype == "jwt"):
return assert_authz_jwt(token, services, authn_cert, authz_keypem)
else:
logger.error("unsupported authn token: %s", qstr)
return qstr
def assert_authz_jwt(token, services, authn_cert, authz_keypem):
hdr, bdy, sig = token.split(".", 2)
hdr = libauthn.base64url_decode(hdr).strip()
bdy = libauthn.base64url_decode(bdy).strip()
logger.info("hdr=%s", hdr)
logger.info("bdy=%s", bdy)
hdr_obj = json.loads(hdr)
bdy_obj = json.loads(bdy)
subject = bdy_obj["s"]
authz_tokens = ""
for srvs in services :
bd = bdy[:-1] if bdy.endswith('}') else bdy
bd += ', "sv":"' + srvs + '"'
roles = assert_roles(subject, srvs)
logger.info("roles for %s %s: %s", subject, srvs, roles)
if (len(roles) > 0):
bd += ', "rl" : ["' + roles[0] + '"'
for i in range(1, len(roles)):
bd += ', "' + roles[i] + '"'
bd += "]}"
bd = json.loads(bd)
bd = json.dumps(bd)
logger.info("body=%s", bd)
        # hdr is already serialized JSON; wrapping it in json.dumps() again
        # would double-encode it as a quoted string
        stkn = base64.urlsafe_b64encode(hdr) + "." + base64.urlsafe_b64encode(bd).rstrip("=")
sig = libauthn.hash_n_sign(stkn, "sha1", authz_keypem)
stkn = stkn + "." + base64.urlsafe_b64encode(sig).rstrip("=")
authz_tokens += stkn + "\r\n"
logger.info(authz_tokens)
return authz_tokens
def assert_authz_qst(token, services, authn_cert, authz_keypem):
    token = token[0:token.find("&h=")] #strip off the authn sig
tkn_attrs = urlparse.parse_qs(token)
subject = tkn_attrs["s"][0]
authz_tokens = ""
for srvs in services :
stkn = token + "&sv=" + srvs
roles = assert_roles(subject, srvs)
logger.info("roles for %s %s: %s", subject, srvs, roles)
for role in roles:
stkn += "&rl=" + role
sig = libauthn.hash_n_sign(stkn, "sha1", authz_keypem)
stkn = stkn + "&h=" + base64.urlsafe_b64encode(sig).rstrip("=")
authz_tokens += stkn + "\r\n"
logger.info(authz_tokens)
return authz_tokens
def assert_roles(subject, service = None):
# test function to get user's group info
app, host, user = subject.split("~", 2)
groups = [g.gr_name for g in grp.getgrall() if user in g.gr_mem]
gid = pwd.getpwnam(user).pw_gid
groups.append(grp.getgrgid(gid).gr_name)
return groups
| apache-2.0 | 6,111,352,499,256,282,000 | 21.806061 | 105 | 0.577199 | false |
miku/siskin | siskin/task.py | 1 | 5779 | # coding: utf-8
# pylint: disable=C0103,W0232,C0301,W0703
# Copyright 2015 by Leipzig University Library, http://ub.uni-leipzig.de
# The Finc Authors, http://finc.info
# Martin Czygan, <[email protected]>
#
# This file is part of some open source application.
#
# Some open source application is free software: you can redistribute
# it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# Some open source application is distributed in the hope that it will
# be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Foobar. If not, see <http://www.gnu.org/licenses/>.
#
# @license GPL-3.0+ <http://spdx.org/licenses/GPL-3.0+>
"""
Define a siskin wide task with artifacts under core.home directory.
[core]
smtp = server.example.com
default-sender = [email protected]
default-replyto = [email protected]
error-email = [email protected], [email protected]
home = /path/to/dir
[amsl]
write-url = https://live.abc.technology/w/i/write
"""
import datetime
import logging
import os
import re
import socket
import tempfile
import traceback
import luigi
from gluish.task import BaseTask
from gluish.utils import shellout
from siskin import __version__
from siskin.configuration import Config
from siskin.mail import send_mail
config = Config.instance()
class DefaultTask(BaseTask):
"""
Base task for all siskin tasks.
It sets the base directory, where all task artifacts will be stored. It
also provides shortcuts to config, assets and logging objects.
On failure, an email is sent to configured addresses.
A command line parameter named --stamp is used to optionally update
timestamps in AMSL electronic resource management system.
"""
BASE = config.get('core', 'home', fallback=os.path.join(tempfile.gettempdir(), 'siskin-data'))
stamp = luigi.BoolParameter(default=False, description="update processing time of source via AMSL API", significant=False)
@classmethod
def assets(cls, path):
"""
Return the absolute path to the asset. `path` is the relative path
below the assets root dir.
"""
return os.path.join(os.path.dirname(__file__), 'assets', path)
@property
def config(self):
"""
Return the config instance.
"""
return config
@property
def logger(self):
"""
Return the logger. Module logging uses singleton internally, so no worries.
"""
return logging.getLogger('siskin')
def on_failure(self, exception):
"""
If a task fails, try to send an email.
"""
try:
tolist = self.config.get("core", "error-email").split(",")
subject = "%s %s" % (self, datetime.datetime.today().strftime("%Y-%m-%d %H:%M"))
message = """
This is siskin {version} on {host}.
An error occured in Task {name}, this is the error message:
{exc}
Stacktrace:
{tb}
""".format(
version=__version__,
name=self,
exc=exception,
tb=traceback.format_exc(),
host=socket.gethostname(),
)
message = message.encode("utf-8")
send_mail(tolist=tolist, subject=subject, message=message)
self.logger.debug("sent error emails to %s", ", ".join(tolist))
except TypeError as err:
self.logger.debug("error-email may not be configured, not sending mail: %s", err)
except Exception as err:
self.logger.debug("failed to send error email: %s", err)
def on_success(self):
"""
Try to send a datestamp to AMSL, but only if a couple of prerequisites are met:
All subclasses inherit a --stamp boolean flag, which must be set. If
the TAG of the source is not numeric, we won't do anything. If
"amsl.write-url" configuration is not set, we will log the error, but
do not stop processing. Finally, even if the HTTP request fails, it
won't be fatal.
On success, the API returns:
< HTTP/1.1 200 OK
< Content-Type: text/html; charset=UTF-8
< Content-Length: 2
...
OK
Note that if a subclass overwrites `on_success` this method is not
called, so you have to call it manually.
"""
if not self.stamp:
return
if not hasattr(self, 'TAG'):
self.logger.warn("no tag defined, skip stamping")
return
if not re.match(r"^[\d]+$", self.TAG):
self.logger.warn("non-integer source id: %s, skip stamping", self.TAG)
return
sid = self.TAG.lstrip("0") # Otherwise: Parameter 'sid' ... not a positive integer.
try:
write_url = config.get("amsl", "write-url")
if write_url is None:
self.logger.warn("missing amsl.write-url configuration, skip stamping")
return
except Exception as err:
self.logger.warn("could not stamp: %s", err)
return
try:
shellout("""curl --fail -XPOST "{write_url}?do=updatetime&sid={sid}" > /dev/null """, write_url=write_url, sid=sid)
except RuntimeError as err:
self.logger.warn(err)
return
else:
self.logger.debug("successfully stamped: %s", sid)
| gpl-3.0 | -1,744,374,796,712,234,000 | 31.649718 | 127 | 0.6181 | false |
PYPIT/PYPIT | pypeit/tests/test_bpmimage.py | 1 | 2180 | # Module to run tests on BPMImage class
# Requires files in Development suite and an Environmental variable
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
# TEST_UNICODE_LITERALS
import os
import pytest
import glob
import numpy as np
from pypeit.tests.tstutils import dev_suite_required
from pypeit.spectrographs import util
from pypeit.core import procimg
def data_path(filename):
data_dir = os.path.join(os.path.dirname(__file__), 'files')
return os.path.join(data_dir, filename)
def test_dummy_image():
# Simple
shape=(2048,2048)
spectrograph = util.load_spectrograph('shane_kast_blue')
bpm = spectrograph.bpm(shape=shape)#, trim=False)
assert isinstance(bpm, np.ndarray)
assert bpm.shape == shape
assert np.sum(bpm) == 0
@dev_suite_required
def test_keck_lris_red():
# Spectrograph
spectrograph = util.load_spectrograph('keck_lris_red')
#
example_file = os.path.join(os.getenv('PYPEIT_DEV'), 'RAW_DATA', 'Keck_LRIS_red',
'long_600_7500_d560', 'LR.20160216.05529.fits.gz')
# Get the shape
dsec_img = spectrograph.get_datasec_img(example_file, det=2)
shape = procimg.trim_frame(dsec_img, dsec_img < 1).shape
# Simple
bpm = spectrograph.bpm(shape=shape, filename=example_file, det=2)
assert np.sum(bpm) > 0
@dev_suite_required
def test_keck_deimos():
spectrograph = util.load_spectrograph('keck_deimos')
example_file = os.path.join(os.getenv('PYPEIT_DEV'), 'RAW_DATA', 'Keck_DEIMOS', '830G_L_8400',
'd0914_0002.fits.gz')
# Get the shape
dsec_img = spectrograph.get_datasec_img(example_file, det=2)
shape = procimg.trim_frame(dsec_img, dsec_img < 1).shape
# Simple
bpm = spectrograph.bpm(shape=shape,det=4)
assert bpm[0,0] == 1
# This is too experimental
'''
def test_bpm_from_bias():
bias = np.full((1024,1024), 1000, dtype=float)
bias[512,512] += 50.
bpmImage = bpmimage.BPMImage(msbias=bias)
bpm = bpmImage.build()
# Test
assert np.isclose(bpm[512,512],1)
'''
| gpl-3.0 | 5,104,313,725,190,529,000 | 27.684211 | 98 | 0.668349 | false |
thtrieu/essence | src/optimizer.py | 1 | 2211 | import numpy as np
from .utils import extract
class Optimizer(object):
def __init__(self, lr = 1e-3, *args, **kwargs):
minimize, kwargs = extract('minimize', True, **kwargs)
self._lr = lr * (2. * np.float64(minimize) - 1.)
self._construct(*args, **kwargs)
def apply(self, var_slot):
self._current = var_slot
var_slot.apply_grad(self._rule)
self._current = None
def finalize_step(self): pass
    def _construct(self, *args, **kwargs): pass
class StochasticDescentOptimizer(Optimizer):
def _construct(self, decay = 1.):
self._decay = decay
def _rule(self, v, g):
return v - self._lr * g
def finalize_step(self):
self._lr *= self._decay
class RMSPropOptimizer(Optimizer):
def _construct(self, p = .975):
self._p = p
self._moments = dict()
def _rule(self, v, g):
c = self._current
m = self._moments
if c not in m:
m[c] = 0
r = m[c]
r = self._p * r + (1. - self._p) * g * g
m[c] = r
dv = self._lr * np.divide(g, np.sqrt(1e-8 + r))
return v - dv
class AdamOptimizer(Optimizer):
def _construct(self, p1 = .9, p2 = .999):
self._p1, self._p2 = p1, p2
self._moments = dict()
def _rule(self, v, g):
c = self._current
m = self._moments
if c not in m:
m[c] = dict({'s': 0, 'r': 0, 't': 0})
s, r, t = m[c]['s'], m[c]['r'], m[c]['t']
s = s * self._p1 + (1. - self._p1) * g
r = r * self._p2 + (1. - self._p2) * g * g
m[c]['s'], m[c]['r'], m[c]['t'] = s, r, (t + 1)
s_ = np.divide(s, 1. - np.power(self._p1, t + 1))
r_ = np.divide(r, 1. - np.power(self._p2, t + 1))
dv = self._lr * np.divide(s_, np.sqrt(r_) + 1e-8)
return v - dv
"""
Optimizer factory
"""
_optimizer_factory = dict({
'sgd' : StochasticDescentOptimizer,
'adam': AdamOptimizer,
'rmsprop': RMSPropOptimizer
})
def optimizer_factory(name, *args, **kwargs):
assert name in _optimizer_factory, \
'Optimizer {} not found'.format(name)
return _optimizer_factory[name](*args, **kwargs) | gpl-3.0 | -509,026,370,414,560,960 | 25.97561 | 62 | 0.509272 | false |
kwoodhouse93/astro-bomber | source/bomber.py | 1 | 5440 | import pygame
from pygame.locals import *
import pymunk
from source import game
from source.constants import *
from source.utilities import *
from source.weapon import *
class Bomber:
def __init__(self):
self.width = width = BOMBER_WIDTH
self.height = height = BOMBER_HEIGHT
vertices = [
(-(width/2), -(height/2)),
( 0, (height/2)),
( (width/2), -(height/2))
]
radius = 5
mass = 5
moment = pymunk.moment_for_poly(mass, vertices, radius=radius)
self.body = pymunk.Body(mass, moment)
self.body.position = SCREEN_CENTER
self.shape = pymunk.Poly(self.body, vertices, radius=radius)
self.shape.collision_type = CT_BOMBER
# Object constants
self.turn_torque = BOMBER_TORQUE
self.engine_thrust = BOMBER_MAIN_ENGINE_THRUST
self.braking_force = BOMBER_BRAKE_FORCE
self.reverse_thrust = BOMBER_REVERSE_ENGINE_THRUST
self.ang_vel_limit = BOMBER_ANG_VEL_LIMIT
# State variables
self.strength = BOMBER_STRENGTH
self.turning_left = False
self.turning_right = False
self.thrusting = False
self.braking = False
# Register callback functors
event_manager = game.event_manager
event_manager.register_keydown(K_LEFT, self.cb_left_turn_on)
event_manager.register_keydown(K_RIGHT, self.cb_right_turn_on)
event_manager.register_keydown(K_UP, self.cb_thrust_forwards_on)
event_manager.register_keydown(K_DOWN, self.cb_thrust_backwards_on)
event_manager.register_keydown(K_LSHIFT, self.cb_fire_primary_weapon)
event_manager.register_keydown(K_RSHIFT, self.cb_fire_primary_weapon)
event_manager.register_keydown(K_LCTRL, self.cb_fire_secondary_weapon)
event_manager.register_keydown(K_RCTRL, self.cb_fire_secondary_weapon)
event_manager.register_keyup(K_LEFT, self.cb_left_turn_off)
event_manager.register_keyup(K_RIGHT, self.cb_right_turn_off)
event_manager.register_keyup(K_UP, self.cb_thrust_forwards_off)
event_manager.register_keyup(K_DOWN, self.cb_thrust_backwards_off)
# Add to space
game.space.add(self.body, self.shape)
# Add components
self.components = []
self.primary_weapon = PrimaryCannon(self)
self.components.append(self.primary_weapon)
self.secondary_weapon = SecondaryBombLauncher(self)
self.components.append(self.secondary_weapon)
def cb_left_turn_on(self, event):
self.turning_left = True
def cb_right_turn_on(self, event):
self.turning_right = True
def cb_thrust_forwards_on(self, event):
self.thrusting = True
def cb_thrust_backwards_on(self, event):
self.braking = True
def cb_left_turn_off(self, event):
self.turning_left = False
def cb_right_turn_off(self, event):
self.turning_right = False
def cb_thrust_forwards_off(self, event):
self.thrusting = False
def cb_thrust_backwards_off(self, event):
self.braking = False
def cb_fire_primary_weapon(self, event):
self.primary_weapon.activate()
def cb_fire_secondary_weapon(self, event):
self.secondary_weapon.activate()
def hit(self, damage):
self.strength -= damage
if self.strength < 0:
print("SHIP DESTROYED")
# self.strength = BOMBER_STRENGTH
game.object_manager.unregister_player(self)
def delete(self):
game.space.remove(self.body, self.shape)
for component in self.components:
game.object_manager.unregister(component)
def update(self):
        Utils.wrap_body(self.body, radius=(self.width / 2))
        # steer: apply turn torque while below the angular velocity limit,
        # otherwise damp any residual spin back to zero
        # print(str(self.body.angular_velocity))
if self.turning_left and not self.turning_right and self.body.angular_velocity < self.ang_vel_limit:
self.body.torque = self.turn_torque
elif self.turning_right and not self.turning_left and self.body.angular_velocity > -self.ang_vel_limit:
self.body.torque = -self.turn_torque
elif self.body.angular_velocity > 0.1:
self.body.torque = -self.turn_torque
elif self.body.angular_velocity < -0.1:
self.body.torque = self.turn_torque
else:
self.body.angular_velocity = 0
self.body.torque = 0
# forward_vel = self.body.velocity.rotated(-self.body.angle).y
if self.thrusting:
force = (0, self.engine_thrust)
point = (0, 0)
self.body.apply_force_at_local_point(force, point)
elif self.braking:# and forward_vel > 1:
# force = (0, -self.braking_force)
force = (0, -self.reverse_thrust)
point = (0, 0)
self.body.apply_force_at_local_point(force, point)
# print(str(self.body.torque))
def draw(self):
screen = game.screen
# width, height = Utils.get_screen_size()
# print (str(self.body.position))
# position = int(self.body.position.x), \
# height - int(self.body.position.y)
# pygame.draw.circle(screen, (0, 0, 255), position, int(self.radius), 2)
# def draw_bomber(screen, bomber):
# p = int(bomber.body.position.x), 600 - int(bomber.body.position.y)
# pygame.draw.circle(screen, (0, 0, 255), p, int(bomber.radius), 2)
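# Construction sketch (hypothetical; requires game.space, game.event_manager
# and game.object_manager to be initialised first):
#   player = Bomber()
#   # per frame: player.update(); player.draw()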
| mit | -653,415,825,549,484,000 | 36.777778 | 111 | 0.627206 | false |
PetukhovVictor/compiler | src/VM/conf.py | 1 | 1054 | # -*- coding: utf-8 -*-
from .commands import *
# Mapping: string representation of a VM command -> VM command class
commands_map = {
'PUSH': Push,
'POP': Pop,
'NOP': Nop,
'DUP': Dup,
'LOAD': Load,
'PLOAD': PLoad,
'BLOAD': BLoad,
'BPLOAD': BPLoad,
'DLOAD': DLoad,
'DBLOAD': DBLoad,
'STORE': Store,
'PSTORE': PStore,
'BSTORE': BStore,
'BPSTORE': BPStore,
'DSTORE': DStore,
'DBSTORE': DBStore,
'ADD': Add,
'MUL': Mul,
'SUB': Sub,
'DIV': Div,
'MOD': Mod,
'INVERT': Invert,
'COMPARE': Compare,
'LABEL': Label,
'JUMP': Jump,
'JZ': Jz,
'JNZ': Jnz,
'READ': Read,
'WRITE': Write,
'ENTER': Enter,
'CALL': Call,
'FUNCTION': Function,
'RETURN': Return,
'MALLOC': Malloc,
'DMALLOC': DMalloc,
'LOG': Log
}
# Separator between VM commands
COMMAND_SEPARATOR = '\n'
# Separator between arguments of a VM command
ARGS_SEPARATOR = ' '
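# Decoding sketch (hypothetical serialized program):
#   program = "PUSH 1" + COMMAND_SEPARATOR + "PUSH 2" + COMMAND_SEPARATOR + "ADD"
#   for line in program.split(COMMAND_SEPARATOR):
#       name, _, args = line.partition(ARGS_SEPARATOR)
#       command_class = commands_map[name]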
| mit | -3,168,778,966,710,100,500 | 18.06 | 74 | 0.545645 | false |
rebase-helper/rebase-helper | rebasehelper/tests/functional/test_rebase.py | 1 | 10429 | # -*- coding: utf-8 -*-
#
# This tool helps you rebase your package to the latest version
# Copyright (C) 2013-2019 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Authors: Petr Hráček <[email protected]>
# Tomáš Hozza <[email protected]>
# Nikola Forró <[email protected]>
# František Nečas <[email protected]>
import json
import os
import git # type: ignore
import pytest # type: ignore
import unidiff # type: ignore
from typing import List
from rebasehelper.cli import CLI
from rebasehelper.config import Config
from rebasehelper.application import Application
from rebasehelper.constants import RESULTS_DIR, CHANGES_PATCH
from rebasehelper.helpers.git_helper import GitHelper
@pytest.fixture
def initialized_git_repo(workdir):
repo = git.Repo.init(workdir)
# Configure user otherwise app.apply_changes() will fail
repo.git.config('user.name', GitHelper.get_user(), local=True)
repo.git.config('user.email', GitHelper.get_email(), local=True)
repo.git.add(all=True)
repo.index.commit('Initial commit', skip_hooks=True)
return repo
class TestRebase:
TEST_FILES: List[str] = [
'rebase/test.spec',
'rebase/applicable.patch',
'rebase/backported.patch',
'rebase/conflicting.patch',
'rebase/renamed-0.1.patch',
]
@pytest.mark.xfail(reason='''
the test fails from time to time due to RPM macros not being expanded,
see https://github.com/rebase-helper/rebase-helper/issues/811
''')
@pytest.mark.parametrize('buildtool', [
pytest.param('rpmbuild', marks=pytest.mark.skipif(
os.geteuid() != 0,
reason='requires superuser privileges')),
pytest.param('mock', marks=pytest.mark.long_running),
])
@pytest.mark.parametrize('favor_on_conflict', ['upstream', 'downstream', 'off'])
@pytest.mark.integration
@pytest.mark.usefixtures('initialized_git_repo')
def test_rebase(self, buildtool, favor_on_conflict):
new_version = '0.2'
cli = CLI([
'--non-interactive',
'--disable-inapplicable-patches',
'--buildtool', buildtool,
'--favor-on-conflict', favor_on_conflict,
'--outputtool', 'json',
'--pkgcomparetool', 'rpmdiff,pkgdiff,abipkgdiff,licensecheck,sonamecheck',
'--color=always',
'--apply-changes',
new_version,
])
config = Config()
config.merge(cli)
execution_dir, results_dir = Application.setup(config)
app = Application(config, os.getcwd(), execution_dir, results_dir)
app.run()
changes = os.path.join(RESULTS_DIR, CHANGES_PATCH)
patch = unidiff.PatchSet.from_filename(changes, encoding='UTF-8')
if favor_on_conflict == 'upstream':
backported_patch, conflicting_patch, renamed_patch, spec_file = patch
assert conflicting_patch.is_removed_file # conflicting.patch
elif favor_on_conflict == 'downstream':
backported_patch, conflicting_patch, renamed_patch, spec_file = patch
assert conflicting_patch.is_modified_file # conflicting.patch
else:
backported_patch, renamed_patch, spec_file = patch
# Non interactive mode - inapplicable patches are only commented out.
assert [h for h in spec_file if '+#Patch1: conflicting.patch\n' in h.target]
assert [h for h in spec_file if '+#%%patch1 -p1\n' in h.target]
assert renamed_patch.is_rename # renamed patch 0.1.patch to 0.2.patch
assert os.path.basename(renamed_patch.source_file) == 'renamed-0.1.patch'
assert os.path.basename(renamed_patch.target_file) == 'renamed-0.2.patch'
# Check that the renamed patch path is unchanged
assert not [h for h in spec_file if '-Patch3: renamed-%{version}.patch\n' in h.source]
assert backported_patch.is_removed_file # backported.patch
assert spec_file.is_modified_file # test.spec
if favor_on_conflict != 'downstream':
assert [h for h in spec_file if '-Patch1: conflicting.patch\n' in h.source]
assert [h for h in spec_file if '-%patch1 -p1\n' in h.source]
assert [h for h in spec_file if '-Patch2: backported.patch\n' in h.source]
assert [h for h in spec_file if '-%patch2 -p1\n' in h.source]
assert [h for h in spec_file if '+- New upstream release {}\n'.format(new_version) in h.target]
with open(os.path.join(RESULTS_DIR, 'report.json')) as f:
report = json.load(f)
assert 'success' in report['result']
# patches
assert 'applicable.patch' in report['patches']['untouched']
if favor_on_conflict == 'upstream':
# In case of conflict, upstream code is favored, therefore conflicting patch is unused.
assert 'conflicting.patch' in report['patches']['deleted']
elif favor_on_conflict == 'downstream':
assert 'conflicting.patch' in report['patches']['modified']
else:
# Non interactive mode - skipping conflicting patches
assert 'conflicting.patch' in report['patches']['inapplicable']
assert 'backported.patch' in report['patches']['deleted']
# licensecheck
assert report['checkers']['licensecheck']['license_changes']
assert len(report['checkers']['licensecheck']['disappeared_licenses']) == 1
assert len(report['checkers']['licensecheck']['new_licenses']) == 1
# rpmdiff
assert report['checkers']['rpmdiff']['files_changes']['added'] == 1
assert report['checkers']['rpmdiff']['files_changes']['changed'] == 3
assert report['checkers']['rpmdiff']['files_changes']['removed'] == 1
# abipkgdiff
assert report['checkers']['abipkgdiff']['abi_changes']
lib = report['checkers']['abipkgdiff']['packages']['test']['libtest1.so']
if 'Function symbols changes summary' in lib:
assert lib['Function symbols changes summary']['Added']['count'] == 1
elif 'Functions changes summary' in lib:
assert lib['Functions changes summary']['Added']['count'] == 1
if favor_on_conflict != 'downstream':
if 'Variable symbols changes summary' in lib:
assert lib['Variable symbols changes summary']['Removed']['count'] == 1
elif 'Variables changes summary' in lib:
assert lib['Variables changes summary']['Removed']['count'] == 1
# sonamecheck
change = report['checkers']['sonamecheck']['soname_changes']['test']['changed'][0]
assert change['from'] == 'libtest2.so.0.1'
assert change['to'] == 'libtest2.so.0.2'
repo = git.Repo(execution_dir)
assert '- New upstream release {}'.format(new_version) in repo.commit().summary
@pytest.mark.parametrize('buildtool', [
pytest.param('rpmbuild', marks=pytest.mark.skipif(
os.geteuid() != 0,
reason='requires superuser privileges')),
pytest.param('mock', marks=pytest.mark.long_running),
])
@pytest.mark.integration
@pytest.mark.usefixtures('initialized_git_repo')
def test_files_build_log_hook(self, buildtool):
new_version = '0.3'
cli = CLI([
'--non-interactive',
'--disable-inapplicable-patches',
'--force-build-log-hooks',
'--buildtool', buildtool,
'--outputtool', 'json',
'--pkgcomparetool', '',
'--color=always',
new_version,
])
config = Config()
config.merge(cli)
execution_dir, results_dir = Application.setup(config)
app = Application(config, os.getcwd(), execution_dir, results_dir)
app.run()
changes = os.path.join(RESULTS_DIR, CHANGES_PATCH)
patch = unidiff.PatchSet.from_filename(changes, encoding='UTF-8')
_, _, spec_file = patch
assert spec_file.is_modified_file
# removed files
assert [h for h in spec_file if '-%doc README.md CHANGELOG.md\n' in h.source]
assert [h for h in spec_file if '+%doc README.md\n' in h.target]
assert [h for h in spec_file if '-%doc %{_docdir}/%{name}/notes.txt\n' in h.source]
assert [h for h in spec_file if '-%{_datadir}/%{name}/1.dat\n' in h.source]
assert [h for h in spec_file if '-%{_datadir}/%{name}/extra/C.dat\n' in h.source]
assert [h for h in spec_file if '-%doc data/extra/README.extra\n' in h.source]
# added files
assert [h for h in spec_file if '+%{_datadir}/%{name}/2.dat\n' in h.target]
assert [h for h in spec_file if '+%{_datadir}/%{name}/extra/D.dat\n' in h.target]
with open(os.path.join(RESULTS_DIR, 'report.json')) as f:
report = json.load(f)
assert 'success' in report['result']
# files build log hook
added = report['build_log_hooks']['files']['added']
assert '%{_datadir}/%{name}/2.dat' in added['%files']
assert '%{_datadir}/%{name}/extra/D.dat' in added['%files extra']
removed = report['build_log_hooks']['files']['removed']
assert 'CHANGELOG.md' in removed['%files']
assert '%{_docdir}/%{name}/notes.txt' in removed['%files']
assert '%{_datadir}/%{name}/1.dat' in removed['%files']
assert '%{_datadir}/%{name}/extra/C.dat' in removed['%files extra']
assert 'data/extra/README.extra' in removed['%files extra']
| gpl-2.0 | 1,191,313,108,908,472,000 | 46.807339 | 103 | 0.613702 | false |
benallard/pythoncard | test/testMessageDigest.py | 1 | 2295 | import unittest
from pythoncard.security import MessageDigest
from pythoncard.security.key import _binaryToarray
class testMessageDigest(unittest.TestCase):
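    """SHA-1 tests for the pythoncard MessageDigest wrapper.

    The expected digests are the published FIPS 180-1 SHA-1 test vectors.
    """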
def testEmptySHA1(self):
md = MessageDigest.getInstance(MessageDigest.ALG_SHA, False)
self.assertEqual(MessageDigest.ALG_SHA, md.getAlgorithm())
self.assertEqual(MessageDigest.LENGTH_SHA, md.getLength())
res = [0]*20
self.assertEqual(MessageDigest.LENGTH_SHA, md.doFinal([],0,0,res,0))
        # expected digest: da39a3ee5e6b4b0d3255bfef95601890afd80709 (SHA-1 of the empty message)
self.assertEqual(_binaryToarray(bytes.fromhex('da39a3ee5e6b4b0d3255bfef95601890afd80709')), res)
def testComplexSHA1(self):
md = MessageDigest.getInstance(MessageDigest.ALG_SHA, False)
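        # FIPS 180-1 test vectors: "abc", the two-block 448-bit message,
        # one million repetitions of "a", and 10 copies of a 64-byte pattern.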
testarray = [ b"abc", b"abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq", b"a", b"0123456701234567012345670123456701234567012345670123456701234567"]
repeatcount = [1, 1, 1000000, 10]
resultarray = [_binaryToarray(bytes.fromhex('A9993E364706816ABA3E25717850C26C9CD0D89D')),
_binaryToarray(bytes.fromhex('84983E441C3BD26EBAAE4AA1F95129E5E54670F1')),
_binaryToarray(bytes.fromhex('34AA973CD4C4DAA4F61EEB2BDBAD27316534016F')),
_binaryToarray(bytes.fromhex('DEA356A2CDDD90C7A7ECEDC5EBB563934F460452'))]
for i in range(4):
md.reset()
res = [0]*20
for j in range(repeatcount[i]-1):
md.update(testarray[i], 0, len(testarray[i]))
md.doFinal(testarray[i], 0, len(testarray[i]), res, 0)
self.assertEqual(resultarray[i], res)
def testLengths(self):
for algo, length in [(MessageDigest.ALG_SHA_512, MessageDigest.LENGTH_SHA_512),
(MessageDigest.ALG_MD5, MessageDigest.LENGTH_MD5),
(MessageDigest.ALG_RIPEMD160, MessageDigest.LENGTH_RIPEMD160),
(MessageDigest.ALG_SHA_256, MessageDigest.LENGTH_SHA_256),
(MessageDigest.ALG_SHA_384, MessageDigest.LENGTH_SHA_384),
(MessageDigest.ALG_SHA, MessageDigest.LENGTH_SHA)]:
md = MessageDigest.getInstance(algo, False)
self.assertEqual(algo, md.getAlgorithm())
self.assertEqual(length, md.getLength())
| lgpl-3.0 | 3,411,043,323,126,917,000 | 53.642857 | 165 | 0.672331 | false |
dssg/wikienergy | proto/pylearn2/create_appliance_detection_dataset.py | 1 | 1872 | import sys
import os.path
sys.path.append(os.path.abspath(os.path.join(os.pardir,os.pardir)))
import disaggregator as da
import disaggregator.PecanStreetDatasetAdapter as psda
import pylearn2.datasets as ds
import pickle
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='create appliance detection datasets for pylearn2.')
parser.add_argument('appliance',type=str,
help='appliance to make the datasets around')
parser.add_argument('data_dir',type=str,
help='directory in which to store data')
parser.add_argument('prefix',type=str,
help='prefix for dataset files')
args = parser.parse_args()
schema = 'shared'
tables = [u'validated_01_2014',
u'validated_02_2014',
u'validated_03_2014',
u'validated_04_2014',
u'validated_05_2014',]
    db_url = "postgresql://USERNAME:PASSWORD@HOST:5432/postgres"  # placeholder credentials and host
psda.set_url(db_url)
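    # Pecan Street readings are 15-minute samples: one-week windows
    # (24*4*7 samples) advanced one day (24*4 samples) at a time.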
window_length=24*4*7
window_stride=24*4
train,valid,test = psda.get_appliance_detection_arrays(
schema,tables,args.appliance,window_length,window_stride,10)
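    # Wrap each (X, y) split in a pylearn2 DenseDesignMatrix dataset.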
train_dataset = ds.DenseDesignMatrix(X=train[0],y=train[1])
valid_dataset = ds.DenseDesignMatrix(X=valid[0],y=valid[1])
test_dataset = ds.DenseDesignMatrix(X=test[0],y=test[1])
with open('{data_dir}/{prefix}_train.pkl'
.format(data_dir=args.data_dir,prefix=args.prefix), 'w') as f:
pickle.dump(train_dataset,f)
with open('{data_dir}/{prefix}_valid.pkl'
.format(data_dir=args.data_dir,prefix=args.prefix), 'w') as f:
pickle.dump(valid_dataset,f)
with open('{data_dir}/{prefix}_test.pkl'
.format(data_dir=args.data_dir,prefix=args.prefix), 'w') as f:
pickle.dump(test_dataset,f)
    import pdb; pdb.set_trace()  # drop into the debugger to inspect the generated datasets
| mit | -4,833,271,291,454,625,000 | 36.44 | 101 | 0.657585 | false |
pikpok/calibre-helion | __init__.py | 1 | 4162 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (unicode_literals, division, absolute_import,
print_function)
import time
from urllib import quote
from lxml.html import fromstring, tostring
from calibre.ebooks.metadata.sources.base import Source
from calibre import browser, url_slash_cleaner
from calibre.utils.cleantext import clean_ascii_chars
class Helion(Source):
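    """Calibre metadata source plugin for helion.pl, a Polish ebook store
    (the description string reads "fetches metadata from helion.pl")."""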
name = 'Helion'
description = _('Pobiera metadane z helion.pl')
author = 'pikpok'
supported_platforms = ['windows', 'osx', 'linux']
version = (0, 0, 4)
minimum_calibre_version = (0, 8, 0)
capabilities = frozenset(['identify', 'cover'])
touched_fields = frozenset(['title', 'authors', 'identifier:helion',
'identifier:isbn', 'rating', 'publisher', 'pubdate', 'languages'])
supports_gzip_transfer_encoding = True
def identify(self, log, result_queue, abort, title=None, authors=None, identifiers={}, timeout=30):
matches = []
br = self.browser
q = ''
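        # Build a '+'-joined search query from title and author tokens,
        # encoded as iso-8859-2 as helion.pl's search form expects.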
title_tokens = list(self.get_title_tokens(title, strip_joiners=False, strip_subtitle=True))
if title_tokens:
tokens = [quote(t.encode('iso-8859-2')) for t in title_tokens]
q += '+'.join(tokens)
if authors:
authors_tokens = self.get_author_tokens(authors, only_first_author=True)
if authors_tokens:
q += '+'
tokens = [quote(t.encode('iso-8859-2')) for t in authors_tokens]
q += '+'.join(tokens)
query = 'http://helion.pl/search?qa=&szukaj=%s&sortby=wd&wsprzed=1&wprzyg=1&wyczerp=1&sent=1'%(q)
response = br.open_novisit(query, timeout=timeout)
raw = response.read().strip()
root = fromstring(clean_ascii_chars(raw))
results = root.xpath('*//div[contains(@class,"search-helion")]')
for result in results:
book_url = result.xpath('./a[contains(@href,"ksiazki")]/@href')
            matches.extend(book_url)  # xpath returns a list of href strings
from calibre_plugins.helion.worker import Worker
workers = [Worker(url, result_queue, br, log, i, self) for i, url in enumerate(matches) if url]
for w in workers:
w.start()
time.sleep(0.1)
while not abort.is_set():
a_worker_is_alive = False
for w in workers:
w.join(0.2)
if abort.is_set():
break
if w.is_alive():
a_worker_is_alive = True
if not a_worker_is_alive:
break
return None
def download_cover(self, log, result_queue, abort, title = None, authors = None, identifiers = {}, timeout = 30, get_best_cover = False):
url = self.get_cached_cover_url(identifiers = identifiers)
br = self.browser
try:
cdata = br.open_novisit(url, timeout=timeout).read()
result_queue.put((self, cdata))
except:
log.exception('Failed to download cover from:', url)
def get_cached_cover_url(self, identifiers):
url = None
helion_id = identifiers.get('helion')
if helion_id is not None:
url = 'http://helion.pl/okladki/326x466/%s.jpg'%(helion_id)
return url
if __name__ == '__main__':
'''
Tests
'''
from calibre.ebooks.metadata.sources.test import (test_identify_plugin, title_test, authors_test)
test_identify_plugin(Helion.name,
[
(
{
'title':'Ruby on Rails. Wprowadzenie',
'authors':['Bruce A. Tate & Curt Hibbs']
},
[
title_test('Ruby on Rails. Wprowadzenie'),
authors_test(['Bruce A. Tate', 'Curt Hibbs'])
]
)
]
)
test_identify_plugin(Helion.name,
[
(
{
'title':u'Jak pozostać anonimowym w sieci',
'authors':[u'Radosław Sokół']
},
[
title_test(u'Jak pozostać anonimowym w sieci'),
authors_test([u'Radosław Sokół'])
]
)
]
)
| mit | -1,299,868,961,320,810,200 | 32.5 | 141 | 0.557053 | false |
OpenISA/riscv-sbt | scripts/auto/genmake.py | 1 | 14624 | #!/usr/bin/env python3
from auto.config import ARM, GOPTS, RV32, SBT, TOOLS, X86
from auto.utils import cat, path, unique
class ArchAndMode:
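    """A (foreign arch, native arch, register mode) combination.

    Native builds set only narch; translated binaries set both archs plus
    an SBT register mode (the value later passed as -regs=<mode>), and
    together they determine the output binary's name.
    """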
def __init__(self, farch, narch, mode=None):
self.farch = farch
self.narch = narch
self.mode = mode
# get arch prefix string
def prefix(self):
if not self.farch:
return self.narch.prefix
elif not self.narch:
return self.farch.prefix
else:
return self.farch.add_prefix(self.narch.prefix)
# get output file name for this ArchAndMode
def bin(self, name):
if not self.prefix():
raise Exception("No prefix")
if not name:
raise Exception("No name")
return (self.prefix() + "-" + name +
("-" + self.mode if self.mode else ""));
@staticmethod
def sfmt(templ, prefix, mode):
return templ.format(**{
"prefix": prefix,
"mode": mode,
})
def fmt(self, templ):
prefix = self.prefix()
if prefix:
prefix = prefix + '-'
else:
prefix = ''
mode = self.mode
if mode:
mode = '-' + mode
else:
mode = ''
return self.sfmt(templ, prefix, mode)
# arguments for a given run
class Run:
def __init__(self, args, id=None, rflags=None, outidx=None, stdin=None):
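        """outidx is the index in args of the output file, or None to add
        an implicit "-o <bin>.out"; stdin optionally names a file to
        redirect as the program's standard input."""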
self.args = args
self.rflags_arg = rflags
self.id = id
self.outidx = outidx
self.stdin = stdin
@staticmethod
def out_suffix():
return ".out"
@staticmethod
def build_name(am, base, id, suffix):
if am:
r = am.bin(base)
else:
r = base
if id:
r = r + '-' + id
if suffix:
r = r + suffix
return r
def bin(self, am, name):
return am.bin(name)
def out(self, am, name):
        if self.outidx is None:
if am:
return self.build_name(am, name, self.id, self.out_suffix())
else:
return None
return am.fmt(self.args[self.outidx])
def build(self, am):
if am:
return [am.fmt(arg) for arg in self.args]
return [ArchAndMode.sfmt(arg, '', '') for arg in self.args]
@staticmethod
def _escape(args):
args2 = []
for arg in args:
if len(arg) > 0 and arg[0] == '-':
arg = '" {}"'.format(arg)
args2.append(arg)
return args2
def str(self, am):
args = self.build(am)
if not args:
return ''
args2 = ["--args"]
args2.extend(self._escape(args))
return " ".join(args2)
def rflags(self, am, name):
args_str = self.str(am)
out = self.out(am, name)
        if self.outidx is None:
args_str = cat('-o', out, args_str)
if self.stdin:
args_str = cat(args_str, "<", self.stdin)
return cat(self.rflags_arg, args_str)
class Runs:
def __init__(self, runs=[], name=None):
self.runs = runs
self.name = name
def add(self, run):
self.runs.append(run)
def __iter__(self):
return self.runs.__iter__()
class GenMake:
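    """Accumulates Makefile text (in self.txt) with rules to build,
    translate, copy, run, test and measure a benchmark for every native
    arch and every (foreign, native, mode) translation combination.
    """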
def __init__(self, narchs, xarchs,
srcdir, dstdir, name,
xflags, bflags, mflags, sbtflags=[],
cc=None, rvcc=None, modes=None):
self.narchs = narchs
self.xarchs = xarchs
self.srcdir = srcdir
self.dstdir = dstdir
self.name = name
self.xflags = xflags
self.bflags = bflags
self.mflags = mflags
self.sbtflags = sbtflags
self.cc = cc
self.rvcc = rvcc
self.modes = modes if modes else SBT.modes
#
self.out_filter = None
#
self.txt = "### {} ###\n\n".format(name)
def append(self, txt):
self.txt = self.txt + txt
def append_cc(self, arch, flags):
s = "--cc="
if arch.is_rv32():
s = s + (self.rvcc if self.rvcc else GOPTS.rvcc)
else:
s = s + (self.cc if self.cc else GOPTS.cc)
return cat(flags, s)
def bld(self, arch, ins, out):
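        # Emit a rule that compiles sources 'ins' into 'out' (an object
        # file or a linked binary) for 'arch' using the build tool.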
out_is_obj = out.endswith(".o")
objs = []
aobjs = []
if len(ins) == 1:
if not out_is_obj:
objs = [arch.out2objname(out)]
aobjs = [path(self.dstdir, objs[0])]
else:
for src in ins:
obj = arch.src2objname(src)
if not out_is_obj or obj != out:
objs.append(obj)
aobjs = [path(self.dstdir, obj) for obj in objs]
ains = [path(self.srcdir, i) for i in ins]
bflags = self.append_cc(arch, self.bflags)
fmtdata = {
"arch": arch.name,
"aobjs": " ".join(aobjs),
"srcdir": self.srcdir,
"dstdir": self.dstdir,
"ins": " ".join(ins),
"ains": " ".join(ains),
"out": out,
"bflags": " " + bflags if bflags else "",
"build": TOOLS.build,
}
self.append("""\
.PHONY: {out}
{out}: {dstdir}/{out}
{dstdir}/{out} {aobjs}: {ains}
\t{build} --arch {arch} --srcdir {srcdir} --dstdir {dstdir} {ins} -o {out}{bflags}
""".format(**fmtdata))
def _ssh_copy(self, fmtdata):
self.append("""\
.PHONY: {tgt}
{tgt}: {out}
\tscp {src} {rem}:{dst}
""".format(**fmtdata))
def _adb_copy(self, fmtdata):
self.append("""\
.PHONY: {tgt}
{tgt}: {out}
\t{rem} push {src} {dst}
""".format(**fmtdata))
def copy(self, am, name):
# don't copy if we're not on an x86 host OR
# if 'out' is a native binary OR
# if 'out' is a RISC-V binary (we can emulate it)
if not X86.is_native() or am.narch.is_native() or am.narch.is_rv32():
return ''
out = am.bin(name)
tgt = out + self.copy_suffix()
srcdir = self.dstdir
src = path(srcdir, out)
dstdir = am.narch.get_remote_path(srcdir)
dst = path(dstdir, out)
fmtdata = {
"out": out,
"tgt": tgt,
"src": src,
"rem": am.narch.rem_host,
"dst": dst,
}
if GOPTS.ssh_copy():
self._ssh_copy(fmtdata)
else:
self._adb_copy(fmtdata)
return tgt
@staticmethod
def mk_arm_dstdir_static(dstdir):
return ("ssh {} mkdir -p {}" if GOPTS.ssh_copy()
else "{} shell mkdir -p {}").format(
ARM.rem_host, ARM.get_remote_path(dstdir))
def mk_arm_dstdir(self, name):
tgt = name + "-arm-dstdir"
self.append("""\
.PHONY: {0}
{0}:
\t{1}
""".format( tgt,
self.mk_arm_dstdir_static(self.dstdir)))
return tgt
def run(self, name, robj, am, dep_bin=True):
dir = self.dstdir
bin = robj.bin(am, name)
suffix = "-" + robj.id if robj.id else ""
rflags = robj.rflags(am, name)
narch = am.narch
fmtdata = {
"arch": narch.name,
"dir": dir,
"bin": bin,
"suffix": suffix,
"rflags": " " + rflags if rflags else "",
"run": TOOLS.run,
"dep": " " + bin if dep_bin else "",
}
self.append("""\
.PHONY: {bin}{suffix}-run
{bin}{suffix}-run:{dep}
\t{run} --arch {arch} --dir {dir} {bin}{rflags}
""".format(**fmtdata))
def xlate(self, am, _in, out):
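        # Emit a rule that translates the RISC-V binary '_in' to the native
        # arch with the SBT, selecting the register mode via -regs=<mode>.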
flags = '--sbtflags " -regs={}"'.format(am.mode)
for flag in self.sbtflags:
flags = flags + ' " {}"'.format(flag)
xflags = self.append_cc(am.narch, self.xflags)
fmtdata = {
"arch": am.narch.name,
"srcdir": self.srcdir,
"dstdir": self.dstdir,
"in": _in,
"out": out,
"xflags": " " + xflags if xflags else "",
"flags": flags,
"xlate": TOOLS.xlate,
}
self.append("""\
.PHONY: {out}
{out}: {dstdir}/{out}
{dstdir}/{out}: {dstdir}/{in}
\t{xlate} --arch {arch} --srcdir {srcdir} --dstdir {dstdir} {in} -o {out}{xflags} {flags}
""".format(**fmtdata))
def _diff(self, f0, f1):
if self.out_filter:
return (
"\tcat {0} | {2} >{0}.filt\n" +
"\tcat {1} | {2} >{1}.filt\n" +
"\tdiff {0}.filt {1}.filt").format(
f0, f1, self.out_filter)
else:
return "\tdiff {0} {1}".format(f0, f1)
def test1(self, run):
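        # Emit a "<name>-test" rule that performs every run and diffs each
        # translated output against the corresponding native and foreign ones.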
id = run.id
if run.outidx:
name = lambda am: run.out(am, name=self.name)
else:
name = lambda am: path(self.dstdir,
Run.build_name(am, self.name, id, Run.out_suffix()))
# gen diffs
diffs = []
def diff(f0, f1):
diffs.append(self._diff(f0, f1))
xams = self._xams()
for xam in xams:
if not xam.narch.is_native():
continue
xout = name(xam)
# foreign
# skip rv32 if on arm
if not xam.narch.is_arm():
fam = ArchAndMode(None, xam.farch)
fout = name(fam)
diff(fout, xout)
# native
if xam.narch in self.narchs:
nam = ArchAndMode(None, xam.narch)
nout = name(nam)
diff(nout, xout)
if GOPTS.rv32 == "rv8":
fam = ArchAndMode(RV32, None)
nam = ArchAndMode(None, X86)
fout = name(fam)
nout = name(nam)
diff(fout, nout)
tname = Run.build_name(None, self.name, id, None)
fmtdata = {
"name": tname,
"runs": " ".join(self.get_all_runs(Runs([run]), self.name)),
"diffs": "\n".join(diffs)
}
self.append("""\
.PHONY: {name}-test
{name}-test: {runs}
{diffs}
""".format(**fmtdata))
return tname + self.test_suffix()
def test(self, runs):
tests = []
for run in runs:
tests.append(self.test1(run))
if len(tests) > 1:
tsuf = self.test_suffix()
self.alias(self.name + tsuf, tests)
def measure(self, robj, dep_bin=True, rv32=False):
args_str = robj.str(None)
suffix = robj.id
mflags = self.mflags
if suffix:
if not mflags:
mflags = []
mflags.extend(["--id", self.name + '-' + suffix])
fmtdata = {
"measure": TOOLS.measure,
"dstdir": self.dstdir,
"name": self.name,
"rv32": " --rv32" if rv32 else "",
"suffix": "-" + suffix if suffix else "",
"args": " " + args_str if args_str else "",
"stdin": " --stdin=" + robj.stdin if robj.stdin else "",
"mflags": " " + " ".join(mflags) if mflags else "",
"dep": " " + self.name if dep_bin else "",
}
self.append("""\
.PHONY: {name}{suffix}-measure
{name}{suffix}-measure:{dep}
\t{measure} {dstdir} {name}{rv32}{args}{stdin}{mflags}
""".format(**fmtdata))
def alias(self, name, aliasees):
fmtdata = {
"name": name,
"aliasees": " ".join(aliasees),
}
self.append("""\
.PHONY: {name}
{name}: {aliasees}
""".format(**fmtdata))
def alias_build_all(self):
mod = self.name
nmods = [arch.add_prefix(mod) for arch in self.narchs]
xmods = [farch.add_prefix(narch.add_prefix(mod)) + "-" + mode
for (farch, narch) in self.xarchs
for mode in self.modes]
fmtdata = {
"mod": mod,
"nmods": " ".join(nmods),
"xmods": " ".join(xmods)
}
self.append("""\
.PHONY: {mod}
{mod}: {nmods} {xmods}
""".format(**fmtdata))
def _farchs(self):
return unique([farch for (farch, narch) in self.xarchs])
def _nams(self):
return [ArchAndMode(None, narch) for narch in self.narchs]
def _xams(self):
return [ArchAndMode(farch, narch, mode)
for (farch, narch) in self.xarchs
for mode in self.modes]
def _ufams(self):
farchs = unique([am.farch for am in self._xams() if am.farch])
return [ArchAndMode(farch, None) for farch in farchs]
def _unams(self):
narchs = unique([narch for narch in self.narchs])
return [ArchAndMode(None, narch) for narch in narchs]
def ams(self):
return self._nams() + self._xams()
def _ntgts(self, name):
return [am.bin(name) for am in self._nams()]
def _xtgts(self, name):
return [am.bin(name) for am in self._xams()]
def tgts(self, name):
return self._ntgts(name) + self._xtgts(name)
def apply_suffixes(self, tgts, suffixes, gsuf=None):
a = []
gsuf = gsuf if gsuf else ''
for tgt in tgts:
for suffix in suffixes:
suf = "-" + suffix if suffix else ""
a.append(tgt + suf + gsuf)
return a
@staticmethod
def run_suffix():
return "-run"
@staticmethod
def test_suffix():
return "-test"
@staticmethod
def copy_suffix():
return "-copy"
@staticmethod
def run_suffixes(runs):
return [r.id if r.id else '' for r in runs]
def get_runs(self, runs, am, name):
return self.apply_suffixes([am.bin(name)],
self.run_suffixes(runs),
self.run_suffix())
def get_all_runs(self, runs, name):
return self.apply_suffixes(self.tgts(name),
self.run_suffixes(runs),
self.run_suffix())
def get_all_tests(self, runs, name):
return self.apply_suffixes(self.tgts(name),
self.run_suffixes(runs),
self.test_suffix())
def alias_run_all(self, runs):
fmtdata = {
"name": self.name,
"runs": " ".join(self.get_all_runs(runs, self.name)),
}
self.append("""\
.PHONY: {name}-run
{name}-run: {runs}
""".format(**fmtdata))
def clean(self):
self.append("""\
.PHONY: {name}-clean
{name}-clean:
\trm -rf {dstdir}
""".format(**{
"name": self.name,
"dstdir": self.dstdir}))
| mit | 7,877,738,385,158,393,000 | 24.213793 | 89 | 0.484546 | false |