# Cleanup
del uproot_file
def test_histogram1D_to_from_existing_histogram(logging_mixin: Any) -> None:
""" Test passing a Histogram1D to ``from_existing_histogram``. It should return the same object. """
h_input = histogram.Histogram1D(
bin_edges=np.array([0, 1, 2]),
y=np.array([2.3, 5.4]),
errors_squared=np.array([2.3, 5.4]),
)
h_output = histogram.Histogram1D.from_existing_hist(h_input)
# Use an explicit identity check because they really are the same histogram object.
assert h_output is h_input
@pytest.mark.ROOT # type: ignore
def test_derived_properties(logging_mixin: Any, test_root_hists: Any) -> None:
""" Test derived histogram properties (mean, std. dev, variance, etc). """
# Setup
h_root = test_root_hists.hist1D
h = histogram.Histogram1D.from_existing_hist(h_root)
# Mean
assert np.isclose(h.mean, h_root.GetMean())
# Standard deviation
assert np.isclose(h.std_dev, h_root.GetStdDev())
# Variance
assert np.isclose(h.variance, h_root.GetStdDev() ** 2)
@pytest.mark.ROOT # type: ignore
def test_recalculated_derived_properties(logging_mixin: Any, test_root_hists: Any) -> None:
""" Test derived histogram properties (mean, std. dev, variance, etc). """
# Setup
h_root = test_root_hists.hist1D
h = histogram.Histogram1D.from_existing_hist(h_root)
stats = histogram.calculate_binned_stats(h.bin_edges, h.y, h.errors_squared)
# Need to reset the stats to force ROOT to recalculate them.
# We do it after converting just to be clear that we're not taking advantage of the ROOT
# calculation (although we couldn't anyway given the current way that histogram is built).
h_root.ResetStats()
# Now check the results.
# Mean
assert np.isclose(histogram.binned_mean(stats), h_root.GetMean())
# Standard deviation
assert np.isclose(histogram.binned_standard_deviation(stats), h_root.GetStdDev())
# Variance
assert np.isclose(histogram.binned_variance(stats), h_root.GetStdDev() ** 2)
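# For reference, the binned statistics checked above follow the usual weighted
# definitions (a sketch, not necessarily the library's exact implementation):
# with bin centers x_i and bin contents w_i,
#   mean     = sum(w_i * x_i) / sum(w_i)
#   variance = sum(w_i * x_i**2) / sum(w_i) - mean**2
#   std_dev  = sqrt(variance)
# which is what ROOT's GetMean()/GetStdDev() reduce to after ResetStats().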
@pytest.mark.parametrize(
"bin_edges, y, errors_squared",
[ # type: ignore
(np.array([1, 2, 3]), np.array([1, 2, 3]), np.array([1, 2, 3])),
(np.array([1, 2, 3]), np.array([1, 2, 3]), np.array([1, 2])),
(np.array([1, 2, 3]), np.array([1, 2]), np.array([1, 2, 3])),
],
ids=["Too short bin edges", "Too long y", "Too long errors squared"],
)
def test_hist_input_length_validation(
logging_mixin: Any, bin_edges: np.ndarray, y: np.ndarray, errors_squared: np.ndarray
) -> None:
""" Check histogram input length validation. """
with pytest.raises(ValueError) as exception_info:
histogram.Histogram1D(bin_edges=bin_edges, y=y, errors_squared=errors_squared)
assert "Length of input arrays doesn't match!" in exception_info.value.args[0]
@pytest.mark.parametrize(
"bin_edges, y, errors_squared, expect_corrected_types",
[ # type: ignore
(np.array([1, 2, 3]), np.array([1, 2]), np.array([1, 2]), True),
([1, 2, 3], np.array([1, 2]), np.array([1, 2]), True),
([1, 2, 3], [1, 2], [1, 2], True),
# Apparently ``np.array`` can take pretty much anything I can throw at it. So we skip
# the nonconvertible test.
# (((12, 23), 12), ("something totally wrong", "wrong"), "for sure", False),
],
ids=["All correct", "Mixed inputs", "All wrong (convertible)"],
)
def test_hist_input_type_validation(
logging_mixin: Any, bin_edges: np.ndarray, y: np.ndarray, errors_squared: np.ndarray, expect_corrected_types: bool
) -> None:
""" Check histogram input type validation. """
if expect_corrected_types:
h = histogram.Histogram1D(bin_edges=bin_edges, y=y, errors_squared=errors_squared)
for arr in [h.bin_edges, h.y, h.errors_squared]:
assert isinstance(arr, np.ndarray)
else:
with pytest.raises(ValueError) as exception_info:
h = histogram.Histogram1D(bin_edges=bin_edges, y=y, errors_squared=errors_squared)
assert "Arrays must be numpy arrays" in exception_info.value.args[0]
def test_hist_identical_arrays(logging_mixin: Any) -> None:
""" Test handling receiving identical numpy arrays. """
bin_edges = np.array([1, 2, 3])
y = np.array([1, 2])
h = histogram.Histogram1D(bin_edges=bin_edges, y=y, errors_squared=y)
# Even though we passed the same array, they should be copied by the validation.
assert np.may_share_memory(h.y, h.errors_squared) is False
@pytest.mark.ROOT
class TestWithRootHists:
def test_get_array_from_hist(self, logging_mixin: Any, test_root_hists: Any) -> None:
"""Test getting numpy arrays from a 1D hist.
Note:
This test is from the legacy get_array_from_hist(...) function. This functionality is
superseded by Histogram1D.from_existing_hist(...), but we leave this test for good measure.
"""
hist = test_root_hists.hist1D
hist_array = histogram.Histogram1D.from_existing_hist(hist)
# Determine expected values
x_bins = range(1, hist.GetXaxis().GetNbins() + 1)
expected_bin_edges = np.empty(len(x_bins) + 1)
expected_bin_edges[:-1] = [hist.GetXaxis().GetBinLowEdge(i) for i in x_bins]
expected_bin_edges[-1] = hist.GetXaxis().GetBinUpEdge(hist.GetXaxis().GetNbins())
expected_hist_array = histogram.Histogram1D(
bin_edges=expected_bin_edges,
y=np.array([hist.GetBinContent(i) for i in x_bins]),
errors_squared=np.array([hist.GetBinError(i) for i in x_bins]) ** 2,
)
logger.debug(f"sumw2: {len(hist.GetSumw2())}")
logger.debug(f"sumw2: {hist.GetSumw2N()}")
assert check_hist(hist_array, expected_hist_array) is True
def test_non_uniform_binning(self, logging_mixin: Any, setup_non_uniform_binning: Any) -> None:
""" Test non-uniform binning in Histogram1D. """
hist = setup_non_uniform_binning
# Determine expected values.
x_bins = range(1, hist.GetXaxis().GetNbins() + 1)
expected_bin_edges = np.empty(len(x_bins) + 1)
expected_bin_edges[:-1] = [hist.GetXaxis().GetBinLowEdge(i) for i in x_bins]
expected_bin_edges[-1] = hist.GetXaxis().GetBinUpEdge(hist.GetXaxis().GetNbins())
expected_hist = histogram.Histogram1D.from_existing_hist(hist)
# The naming is a bit confusing here, but basically we want to compare the
# non-uniform binning in a ROOT hist vs a Histogram1D. We also then extract the bin
# edges here as an extra cross-check.
assert np.allclose(expected_hist.bin_edges, expected_bin_edges)
# Check the calculated bin widths
assert np.allclose(expected_hist.bin_widths, expected_bin_edges[1:] - expected_bin_edges[:-1])
# Then we check all of the fields to be safe.
# (This is a bit redundant because both objects will use Histogram1D, but it doesn't hurt).
assert check_hist(hist, expected_hist)
# This uses uniform binning and it _shouldn't_ agree.
uniform_bins = np.linspace(expected_bin_edges[0], expected_bin_edges[-1], hist.GetXaxis().GetNbins() + 1)
logger.info(f"expected_bin_edges: {expected_bin_edges}")
logger.info(f"uniform_bins: {uniform_bins}")
assert not np.allclose(expected_hist.bin_edges, uniform_bins)
def test_Histogram1D_from_profile(self, logging_mixin: Any) -> None:
"""Test creating a Histogram1D from a TProfile.
The errors are retrieved differently than for a TH1.
"""
# Setup
import ROOT
profile = ROOT.TProfile("test", "test", 10, 0, 100)
for x in range(1000):
profile.Fill(x % 100, 3)
bin_edges = histogram.get_bin_edges_from_axis(profile.GetXaxis())
y = np.array([profile.GetBinContent(i) for i in range(1, profile.GetXaxis().GetNbins() + 1)])
errors = np.array([profile.GetBinError(i) for i in range(1, profile.GetXaxis().GetNbins() + 1)])
sumw2 = np.array(profile.GetSumw2())
# Check the histogram.
h = histogram.Histogram1D.from_existing_hist(profile)
np.testing.assert_allclose(bin_edges, h.bin_edges)
np.testing.assert_allclose(y, h.y)
np.testing.assert_allclose(errors, h.errors)
# Check that the errors aren't equal to the Sumw2 errors; they shouldn't be for a TProfile.
assert not np.allclose(h.errors, sumw2[1:-1])
@pytest.mark.parametrize("use_bin_edges", [False, True], ids=["Use bin centers", "Use bin edges"])
@pytest.mark.parametrize("set_zero_to_NaN", [False, True], ids=["Keep zeroes as zeroes", "Set zeroes to NaN"])
def test_get_array_from_hist2D(self, logging_mixin, use_bin_edges, set_zero_to_NaN, test_root_hists): # type: ignore
""" Test getting numpy arrays from a 2D hist. """
hist = test_root_hists.hist2D
x, y, hist_array = histogram.get_array_from_hist2D(
hist=hist, set_zero_to_NaN=set_zero_to_NaN, return_bin_edges=use_bin_edges
)
# Determine expected values
if use_bin_edges:
epsilon = 1e-9
x_bin_edges = np.empty(hist.GetXaxis().GetNbins() + 1)
x_bin_edges[:-1] = [hist.GetXaxis().GetBinLowEdge(i) for i in range(1, hist.GetXaxis().GetNbins() + 1)]
x_bin_edges[-1] = hist.GetXaxis().GetBinUpEdge(hist.GetXaxis().GetNbins())
y_bin_edges = np.empty(hist.GetYaxis().GetNbins() + 1)
y_bin_edges[:-1] = [hist.GetYaxis().GetBinLowEdge(i) for i in range(1, hist.GetYaxis().GetNbins() + 1)]
y_bin_edges[-1] = hist.GetYaxis().GetBinUpEdge(hist.GetYaxis().GetNbins())
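# Pad the upper bound with epsilon below so that np.arange includes the final
# (upper) bin edge despite floating-point rounding of the step.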
x_mesh = np.arange(np.amin(x_bin_edges), np.amax(x_bin_edges) + epsilon, hist.GetXaxis().GetBinWidth(1))
y_mesh = np.arange(np.amin(y_bin_edges), np.amax(y_bin_edges) + epsilon, hist.GetYaxis().GetBinWidth(1))
else:
x_mesh = np.array([hist.GetXaxis().GetBinCenter(i) for i in range(1, hist.GetXaxis().GetNbins() + 1)])
y_mesh = np.array([hist.GetYaxis().GetBinCenter(i) for i in range(1, hist.GetYaxis().GetNbins() + 1)])
x_range = x_mesh
y_range = y_mesh
expected_x, expected_y = np.meshgrid(x_range, y_range)
expected_hist_array = np.array(
[
hist.GetBinContent(x, y)
for x in range(1, hist.GetXaxis().GetNbins() + 1)
for y in range(1, hist.GetYaxis().GetNbins() + 1)
],
dtype=np.float32,
).reshape(hist.GetYaxis().GetNbins(), hist.GetXaxis().GetNbins())
if set_zero_to_NaN:
expected_hist_array[expected_hist_array == 0] = np.nan
assert np.allclose(x, expected_x)
assert np.allclose(y, expected_y)
assert np.allclose(hist_array, expected_hist_array, equal_nan=True)
# Check particular values for good measure.
assert np.isclose(hist_array[1][0], 1.0)
if set_zero_to_NaN:
assert np.isnan(hist_array[0][1])
else:
assert np.isclose(hist_array[0][1], 0.0)
@pytest.fixture # type: ignore
def setup_basic_hist(logging_mixin: Any) -> Tuple[histogram.Histogram1D, np.ndarray, np.ndarray, np.ndarray]:
"""Setup a basic `Histogram1D` for basic tests.
This histogram contains 4 bins, with edges of [0, 1, 2, 3, 5], values of [2, 2, 3, 0], with
errors squared of [4, 2, 3, 0], simulating the first bin being filled once with a weight of 2, and the
rest being filled normally. It could be reproduced in ROOT with:
>>> bins = np.array([0, 1, 2, 3, 5], dtype = np.float64)
>>> hist = ROOT.TH1F("test", "test", 4, bins)
>>> hist.Fill(0, 2)
>>> hist.Fill(1)
>>> hist.Fill(1)
>>> hist.Fill(2)
>>> hist.Fill(2)
>>> hist.Fill(2)
Args:
None.
Returns:
hist, bin_edges, y, errors_squared
"""
bin_edges = np.array([0, 1, 2, 3, 5])
y = np.array([2, 2, 3, 0])
# As if the first bin was filled with weight of 2.
errors_squared = np.array([4, 2, 3, 0])
h = histogram.Histogram1D(bin_edges=bin_edges, y=y, errors_squared=errors_squared)
return h, bin_edges, y, errors_squared
@pytest.mark.parametrize(
"value, expected_bin",
[
(0, 0), # type: ignore
(0.5, 0),
(1, 1),
(1.0, 1),
(1.5, 1),
(1.99, 1),
(2, 2),
(3, 3),
(4.5, 3),
],
ids=[
"start bin 0",
"mid bin 0",
"start bin 1",
"float start bin 1",
"mid bin 1",
"end bin 1",
"bin 2",
"bin 3",
"upper bin 3",
],
)
def test_find_bin(logging_mixin, setup_basic_hist, value, expected_bin):
""" Test for finding the bin based on a given value. """
h, _, _, _ = setup_basic_hist
found_bin = h.find_bin(value)
assert found_bin == expected_bin
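# Note: the expected bins above assume lower-edge-inclusive binning. A minimal
# equivalent lookup (an illustrative sketch, not necessarily the library's code) is:
#   np.searchsorted(h.bin_edges, value, side="right") - 1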
@pytest.mark.parametrize(
"test_equality",
[
False,
True, # type: ignore
],
ids=["Test inequality", "Test equality"],
)
@pytest.mark.parametrize(
"access_attributes_which_are_stored",
[False, True],
ids=["Do not access other attributes", "Access other attributes which are stored"],
)
def test_histogram1D_equality(logging_mixin, setup_basic_hist, test_equality, access_attributes_which_are_stored):
""" Test for Histogram1D equality. """
h, bin_edges, y, errors_squared = setup_basic_hist
h1 = histogram.Histogram1D(bin_edges=bin_edges, y=y, errors_squared=errors_squared)
h2 = histogram.Histogram1D(bin_edges=bin_edges, y=y, errors_squared=errors_squared)
MA'},
'1508881':{'en': 'Ashland, MA'},
'1508885':{'en': 'Spencer, MA'},
'1508886':{'en': 'Rutland, MA'},
'1508892':{'en': 'Leicester, MA'},
'1508894':{'en': 'Brockton, MA'},
'1508896':{'en': 'Brewster, MA'},
'1508897':{'en': 'Brockton, MA'},
'1508898':{'en': 'Westborough, MA'},
'1508923':{'en': 'Middleborough, MA'},
'1508926':{'en': 'Worcester, MA'},
'1508941':{'en': 'Brockton, MA'},
'1508943':{'en': 'Webster, MA'},
'1508945':{'en': 'Chatham, MA'},
'1508946':{'en': 'Middleborough, MA'},
'1508947':{'en': 'Middleborough, MA'},
'1508949':{'en': 'Webster, MA'},
'1508961':{'en': 'New Bedford, MA'},
'1508966':{'en': 'Bellingham, MA'},
'1508977':{'en': 'Taunton, MA'},
'1508979':{'en': 'New Bedford, MA'},
'1508984':{'en': 'New Bedford, MA'},
'1508985':{'en': 'New Bedford, MA'},
'1508987':{'en': 'Oxford, MA'},
'150899':{'en': 'New Bedford, MA'},
'1509':{'en': 'Washington State'},
'1509216':{'en': 'Spokane, WA'},
'1509225':{'en': 'Yakima, WA'},
'1509226':{'en': 'Newman Lake, WA'},
'1509232':{'en': 'Spokane, WA'},
'1509234':{'en': 'Connell, WA'},
'1509235':{'en': 'Cheney, WA'},
'1509242':{'en': 'Spokane, WA'},
'1509244':{'en': 'Airway Heights, WA'},
'1509248':{'en': 'Yakima, WA'},
'1509249':{'en': 'Yakima, WA'},
'1509252':{'en': 'Spokane, WA'},
'1509276':{'en': 'Deer Park, WA'},
'1509279':{'en': 'Spokane, WA'},
'1509290':{'en': 'Spokane, WA'},
'1509292':{'en': 'Elk, WA'},
'1509293':{'en': 'Wenatchee, WA'},
'1509299':{'en': 'Medical Lake, WA'},
'1509301':{'en': 'Walla Walla, WA'},
'150932':{'en': 'Spokane, WA'},
'1509332':{'en': 'Pullman, WA'},
'1509334':{'en': 'Pullman, WA'},
'1509340':{'en': 'Spokane, WA'},
'1509346':{'en': 'Royal City, WA'},
'1509349':{'en': 'Warden, WA'},
'1509353':{'en': 'Spokane, WA'},
'1509363':{'en': 'Spokane, WA'},
'1509374':{'en': 'Kennewick, WA'},
'1509375':{'en': 'Richland, WA'},
'1509382':{'en': 'Dayton, WA'},
'1509396':{'en': 'Kennewick, WA'},
'1509397':{'en': 'Colfax, WA'},
'1509413':{'en': 'Spokane, WA'},
'1509422':{'en': 'Okanogan, WA'},
'1509427':{'en': 'Stevenson, WA'},
'1509434':{'en': 'Spokane, WA'},
'1509443':{'en': 'Spokane, WA'},
'1509444':{'en': 'Spokane, WA'},
'1509447':{'en': 'Newport, WA'},
'1509448':{'en': 'Spokane, WA'},
'1509452':{'en': 'Yakima, WA'},
'1509453':{'en': 'Yakima, WA'},
'1509454':{'en': 'Yakima, WA'},
'1509455':{'en': 'Spokane, WA'},
'1509456':{'en': 'Spokane, WA'},
'1509457':{'en': 'Yakima, WA'},
'1509458':{'en': 'Spokane, WA'},
'1509459':{'en': 'Spokane, WA'},
'1509464':{'en': 'Spokane, WA'},
'1509465':{'en': 'Spokane, WA'},
'1509466':{'en': 'Spokane, WA'},
'1509467':{'en': 'Spokane, WA'},
'1509468':{'en': 'Spokane, WA'},
'1509469':{'en': 'Yakima, WA'},
'1509473':{'en': 'Spokane, WA'},
'1509474':{'en': 'Spokane, WA'},
'1509475':{'en': 'Spokane, WA'},
'1509476':{'en': 'Oroville, WA'},
'1509477':{'en': 'Spokane, WA'},
'1509482':{'en': 'Spokane, WA'},
'1509483':{'en': 'Spokane, WA'},
'1509484':{'en': 'Spokane, WA'},
'1509486':{'en': 'Tonasket, WA'},
'1509487':{'en': 'Spokane, WA'},
'1509488':{'en': 'Othello, WA'},
'1509489':{'en': 'Spokane, WA'},
'1509493':{'en': 'White Salmon, WA'},
'150952':{'en': 'Walla Walla, WA'},
'1509532':{'en': 'Spokane, WA'},
'1509533':{'en': 'Spokane, WA'},
'1509534':{'en': 'Spokane, WA'},
'1509535':{'en': 'Spokane, WA'},
'1509536':{'en': 'Spokane, WA'},
'150954':{'en': 'Pasco, WA'},
'1509548':{'en': 'Leavenworth, WA'},
'1509573':{'en': 'Yakima, WA'},
'1509574':{'en': 'Yakima, WA'},
'1509575':{'en': 'Yakima, WA'},
'1509576':{'en': 'Yakima, WA'},
'1509582':{'en': 'Kennewick, WA'},
'1509585':{'en': 'Kennewick, WA'},
'1509586':{'en': 'Kennewick, WA'},
'1509588':{'en': 'Benton City, WA'},
'1509590':{'en': 'Spokane, WA'},
'1509624':{'en': 'Spokane, WA'},
'1509625':{'en': 'Spokane, WA'},
'1509627':{'en': 'Richland, WA'},
'1509628':{'en': 'Richland, WA'},
'1509633':{'en': 'Grand Coulee, WA'},
'1509647':{'en': 'Wilbur, WA'},
'1509649':{'en': 'Roslyn, WA'},
'1509659':{'en': 'Ritzville, WA'},
'1509662':{'en': 'Wenatchee, WA'},
'1509663':{'en': 'Wenatchee, WA'},
'1509664':{'en': 'Wenatchee, WA'},
'1509665':{'en': 'Wenatchee, WA'},
'1509667':{'en': 'Wenatchee, WA'},
'1509674':{'en': 'Cle Elum, WA'},
'1509682':{'en': 'Chelan, WA'},
'1509684':{'en': 'Colville, WA'},
'1509685':{'en': 'Colville, WA'},
'1509687':{'en': 'Manson, WA'},
'1509689':{'en': 'Brewster, WA'},
'1509697':{'en': 'Selah, WA'},
'1509698':{'en': 'Selah, WA'},
'1509725':{'en': 'Davenport, WA'},
'1509734':{'en': 'Kennewick, WA'},
'1509735':{'en': 'Kennewick, WA'},
'1509736':{'en': 'Kennewick, WA'},
'1509737':{'en': 'Kennewick, WA'},
'1509738':{'en': 'Kettle Falls, WA'},
'1509744':{'en': 'Spokane, WA'},
'1509747':{'en': 'Spokane, WA'},
'1509750':{'en': 'Moses Lake, WA'},
'1509751':{'en': 'Clarkston, WA'},
'1509754':{'en': 'Ephrata, WA'},
'1509755':{'en': 'Spokane, WA'},
'1509758':{'en': 'Clarkston, WA'},
'1509760':{'en': 'Moses Lake, WA'},
'1509762':{'en': 'Moses Lake, WA'},
'1509764':{'en': 'Moses Lake, WA'},
'1509765':{'en': 'Moses Lake, WA'},
'1509766':{'en': 'Moses Lake, WA'},
'1509773':{'en': 'Goldendale, WA'},
'1509775':{'en': 'Republic, WA'},
'1509782':{'en': 'Cashmere, WA'},
'1509783':{'en': 'Kennewick, WA'},
'1509784':{'en': 'Entiat, WA'},
'1509786':{'en': 'Prosser, WA'},
'1509787':{'en': 'Quincy, WA'},
'1509826':{'en': 'Omak, WA'},
'1509829':{'en': 'Zillah, WA'},
'1509837':{'en': 'Sunnyside, WA'},
'1509838':{'en': 'Spokane, WA'},
'1509839':{'en': 'Sunnyside, WA'},
'1509843':{'en': 'Pomeroy, WA'},
'1509863':{'en': 'Spokane, WA'},
'1509865':{'en': 'Toppenish, WA'},
'1509868':{'en': 'Spokane, WA'},
'1509877':{'en': 'Wapato, WA'},
'1509882':{'en': 'Grandview, WA'},
'1509884':{'en': 'East Wenatchee, WA'},
'1509886':{'en': 'East Wenatchee, WA'},
'1509888':{'en': 'Wenatchee, WA'},
'1509891':{'en': 'Spokane Valley, WA'},
'1509892':{'en': 'Spokane Valley, WA'},
'150992':{'en': 'Spokane Valley, WA'},
'1509925':{'en': 'Ellensburg, WA'},
'1509932':{'en': 'Mattawa, WA'},
'1509933':{'en': 'Ellensburg, WA'},
'1509935':{'en': 'Chewelah, WA'},
'1509939':{'en': 'Spokane, WA'},
'1509942':{'en': 'Richland, WA'},
'1509943':{'en': 'Richland, WA'},
'1509946':{'en': 'Richland, WA'},
'1509949':{'en': 'Yakima, WA'},
'1509953':{'en': 'Spokane, WA'},
'1509962':{'en': 'Ellensburg, WA'},
'1509965':{'en': 'Yakima, WA'},
'1509966':{'en': 'Yakima, WA'},
'1509967':{'en': 'West Richland, WA'},
'1509968':{'en': 'Ellensburg, WA'},
'1509972':{'en': 'Yakima, WA'},
'1509982':{'en': 'Odessa, WA'},
'1509996':{'en': 'Winthrop, WA'},
'1509997':{'en': 'Twisp, WA'},
'1510':{'en': 'California'},
'1510204':{'en': 'Berkeley, CA'},
'1510208':{'en': 'Oakland, CA'},
'1510226':{'en': 'Fremont, CA'},
'151023':{'en': 'Richmond, CA'},
'1510238':{'en': 'Oakland, CA'},
'1510245':{'en': 'Hercules, CA'},
'1510248':{'en': 'Fremont, CA'},
'1510251':{'en': 'Oakland, CA'},
'1510252':{'en': 'Fremont, CA'},
'1510259':{'en': 'Hayward, CA'},
'1510261':{'en': 'Oakland, CA'},
'1510264':{'en': 'Hayward, CA'},
'1510265':{'en': 'Hayward, CA'},
'1510266':{'en': 'Hayward, CA'},
'1510268':{'en': 'Oakland, CA'},
'1510271':{'en': 'Oakland, CA'},
'1510272':{'en': 'Oakland, CA'},
'1510273':{'en': 'Oakland, CA'},
'1510293':{'en': 'Hayward, CA'},
'1510307':{'en': 'Richmond, CA'},
'1510317':{'en': 'San Leandro, CA'},
'1510324':{'en': 'Union City, CA'},
'1510336':{'en': 'Oakland, CA'},
'1510337':{'en': 'Alameda, CA'},
'1510339':{'en': 'Oakland, CA'},
'1510346':{'en': 'San Leandro, CA'},
'1510351':{'en': 'San Leandro, CA'},
'1510352':{'en': 'San Leandro, CA'},
'1510353':{'en': 'Fremont, CA'},
'1510357':{'en': 'San Leandro, CA'},
'1510383':{'en': 'Oakland, CA'},
'1510397':{'en': 'Hayward, CA'},
'1510412':{'en': 'Richmond, CA'},
'1510428':{'en': 'Oakland, CA'},
'1510429':{'en': 'Union City, CA'},
'1510430':{'en': 'Oakland, CA'},
'1510433':{'en': 'Oakland, CA'},
'1510434':{'en': 'Oakland, CA'},
'1510436':{'en': 'Oakland, CA'},
'1510437':{'en': 'Oakland, CA'},
'1510438':{'en': 'Fremont, CA'},
'1510440':{'en': 'Fremont, CA'},
'1510441':{'en': 'Union City, CA'},
'1510444':{'en': 'Oakland, CA'},
'1510445':{'en': 'Fremont, CA'},
'1510451':{'en': 'Oakland, CA'},
'1510452':{'en': 'Oakland, CA'},
'1510465':{'en': 'Oakland, CA'},
'1510471':{'en': 'Union City, CA'},
'1510475':{'en': 'Union City, CA'},
'1510477':{'en': 'Union City, CA'},
'1510481':{'en': 'San Leandro, CA'},
'1510482':{'en': 'Oakland, CA'},
'1510483':{'en': 'San Leandro, CA'},
'1510486':{'en': 'Berkeley, CA'},
'1510487':{'en': 'Union City, CA'},
'1510489':{'en': 'Union City, CA'},
'1510490':{'en': 'Fremont, CA'},
'1510494':{'en': 'Fremont, CA'},
'1510498':{'en': 'Fremont, CA'},
'1510505':{'en': 'Fremont, CA'},
'1510521':{'en': 'Alameda, CA'},
'1510522':{'en': 'Alameda, CA'},
'1510523':{'en': 'Alameda, CA'},
'151053':{'en': 'Oakland, CA'},
'1510537':{'en': 'Castro Valley, CA'},
'1510540':{'en': 'Berkeley, CA'},
'1510547':{'en': 'Oakland, CA'},
'1510548':{'en': 'Berkeley, CA'},
'1510549':{'en': 'Berkeley, CA'},
'1510553':{'en': 'Oakland, CA'},
'1510562':{'en': 'Oakland, CA'},
'1510567':{'en': 'Oakland, CA'},
'1510568':{'en': 'Oakland, CA'},
'1510569':{'en': 'Oakland, CA'},
'1510573':{'en': 'Fremont, CA'},
'1510574':{'en': 'Fremont, CA'},
'1510583':{'en': 'Hayward, CA'},
'1510596':{'en': 'Emeryville, CA'},
'1510614':{'en': 'San Leandro, CA'},
'1510620':{'en': 'Richmond, CA'},
'1510623':{'en': 'Fremont, CA'},
'1510625':{'en': 'Oakland, CA'},
'151063':{'en': 'Oakland, CA'},
'1510642':{'en': 'Berkeley, CA'},
'1510644':{'en': 'Berkeley, CA'},
'1510647':{'en': 'Berkeley, CA'},
'1510649':{'en': 'Berkeley, CA'},
'1510651':{'en': 'Fremont, CA'},
'1510652':{'en': 'Oakland, CA'},
'1510653':{'en': 'Oakland, CA'},
'1510654':{'en': 'Oakland, CA'},
'1510655':{'en': 'Oakland, CA'},
'1510656':{'en': 'Fremont, CA'},
'1510657':{'en': 'Fremont, CA'},
'1510659':{'en': 'Fremont, CA'},
'1510663':{'en': 'Oakland, CA'},
'1510665':{'en': 'Berkeley, CA'},
'1510666':{'en': 'Berkeley, CA'},
'1510667':{'en': 'San Leandro, CA'},
'1510668':{'en': 'Fremont, CA'},
'1510670':{'en': 'Hayward, CA'},
'1510675':{'en': 'Union City, CA'},
'1510683':{'en': 'Fremont, CA'},
'1510690':{'en': 'Hayward, CA'},
'1510704':{'en': 'Berkeley, CA'},
'1510713':{'en': 'Fremont, CA'},
'1510724':{'en': 'Pinole, CA'},
'1510728':{'en': 'Hayward, CA'},
'1510732':{'en': 'Hayward, CA'},
'1510733':{'en': 'Hayward, CA'},
'1510739':{'en': 'Fremont, CA'},
'1510742':{'en': 'Fremont, CA'},
'1510744':{'en': 'Fremont, CA'},
'1510745':{'en': 'Fremont, CA'},
'1510748':{'en': 'Alameda, CA'},
'1510749':{'en': 'Alameda, CA'},
'1510752':{'en': 'Oakland, CA'},
'1510763':{'en': 'Oakland, CA'},
'1510769':{'en': 'Alameda, CA'},
'1510770':{'en': 'Fremont, CA'},
'1510777':{'en': 'Oakland, CA'},
'151078':{'en': 'Hayward, CA'},
'1510787':{'en': 'Crockett, CA'},
'151079':{'en': 'Fremont, CA'},
'1510814':{'en': 'Alameda, CA'},
'1510818':{'en': 'Fremont, CA'},
'1510832':{'en': 'Oakland, CA'},
'1510834':{'en': 'Oakland, CA'},
'1510835':{'en': 'Oakland, CA'},
'1510836':{'en': 'Oakland, CA'},
'1510839':{'en': 'Oakland, CA'},
'1510841':{'en': 'Berkeley, CA'},
'1510842':{'en': 'Oakland, CA'},
'1510843':{'en': 'Berkeley, CA'},
'1510845':{'en': 'Berkeley, CA'},
'1510848':{'en': 'Berkeley, CA'},
'1510849':{'en': 'Berkeley, CA'},
'1510864':{'en': 'Alameda, CA'},
'1510865':{'en': 'Alameda, CA'},
'1510869':{'en': 'Oakland, CA'},
'1510879':{'en': 'Oakland, CA'},
'1510883':{'en': 'Berkeley, CA'},
'1510885':{'en': 'Hayward, CA'},
'1510886':{'en': 'Hayward, CA'},
'1510887':{'en': 'Hayward, CA'},
'1510888':{'en': 'Hayward, CA'},
'1510891':{'en': 'Oakland, CA'},
'1510893':{'en': 'Oakland, CA'},
'1510895':{'en': 'San Leandro, CA'},
'1510922':{'en': 'Oakland, CA'},
'1510923':{'en': 'Oakland, CA'},
'1510979':{'en': 'Fremont, CA'},
'1510981':{'en': 'Berkeley, CA'},
'1510986':{'en': 'Oakland, CA'},
'1512':{'en': 'Texas'},
'1512206':{'en': 'Austin, TX'},
'1512218':{'en': 'Round Rock, TX'},
'1512231':{'en': 'Austin, TX'},
'1512236':{'en': 'Austin, TX'},
'1512237':{'en': 'Smithville, TX'},
'1512241':{'en': 'Austin, TX'},
'1512243':{'en': 'Austin, TX'},
'1512244':{'en': 'Round Rock, TX'},
'1512246':{'en': 'Round Rock, TX'},
'1512247':{'en': 'Del Valle, TX'},
'1512250':{'en': 'Austin, TX'},
'1512255':{'en': 'Round Rock, TX'},
'1512258':{'en': 'Austin, TX'},
'1512261':{'en': 'Lakeway, TX'},
'1512264':{'en': 'Spicewood, TX'},
'1512266':{'en': 'Austin, TX'},
'1512268':{'en': 'Kyle, TX'},
'1512272':{'en': 'Manor, TX'},
'1512276':{'en': 'Austin, TX'},
'1512278':{'en': 'Manor, TX'},
'1512280':{'en': 'Austin, TX'},
'1512281':{'en': 'Elgin, TX'},
'1512282':{'en': 'Austin, TX'},
'1512284':{'en': 'Austin, TX'},
'1512285':{'en': 'Elgin, TX'},
'1512288':{'en': 'Austin, TX'},
'1512291':{'en': 'Austin, TX'},
'1512292':{'en': 'Austin, TX'},
'1512295':{'en': 'Buda, TX'},
'1512296':{'en': 'Austin, TX'},
'1512301':{'en': 'Austin, TX'},
'1512302':{'en': 'Austin, TX'},
'1512303':{'en': 'Bastrop, TX'},
'1512306':{'en': 'Austin, TX'},
'1512308':{'en': 'Bastrop, TX'},
'1512310':{'en': 'Round Rock, TX'},
'1512312':{'en': 'Buda, TX'},
'151232':{'en': 'Austin, TX'},
'1512321':{'en': 'Bastrop, TX'},
'151233':{'en': 'Austin, TX'},
'1512332':{'en': 'Bastrop, TX'},
'151234':{'en': 'Austin, TX'},
'1512341':{'en': 'Round Rock, TX'},
'1512351':{'en': 'Austin, TX'},
'1512352':{'en': 'Taylor, TX'},
'1512353':{'en': 'San Marcos, TX'},
'1512355':{'en': 'Bertram, TX'},
'1512358':{'en': 'Austin, TX'},
'1512360':{'en': 'Smithville, TX'},
'1512365':{'en': 'Taylor, TX'},
'151237':{'en': 'Austin, TX'},
'1512376':{'en': 'Lockhart, TX'},
'151238':{'en': 'Austin, TX'},
'1512388':{'en': 'Round Rock, TX'},
'1512391':{'en': 'Austin, TX'},
'1512392':{'en': 'San Marcos, TX'},
'1512393':{'en': 'San Marcos, TX'},
'1512394':{'en': 'Austin, TX'},
'1512396':{'en': 'San Marcos, TX'},
'1512398':{'en': 'Lockhart, TX'},
# Add some text for labels, title and axes ticks
ax.set_ylabel('Scores')
ax.set_title('Min and Max grain size '+str(th)+' threads')
ax.set_xticks(ind + width / 2)
ax.set_xticklabels(('G1', 'G2', 'G3', 'G4', 'G5'))
ax.legend((rects1[0], rects2[0]), ('Min', 'Max'))
def autolabel(rects):
"""
Attach a text label above each bar displaying its height
"""
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2., 1.05*height,
'%d' % int(height),
ha='center', va='bottom')
#
# autolabel(rects1)
# autolabel(rects2)
plt.show()
i=i+1
#convert -delay 50 step_*.png animated.gif
#################################################################
#cache-miss based on block_size and matrix_size for chunk_size=1
#################################################################
c=1
animation=0
p3d=0
#plot number of cache misses based on matrix size for a chunk size and a block size
for benchmark in benchmarks:
for th in d_hpx[node][benchmark].keys():
# pp = PdfPages(perf_directory+'/bath_tub_'+benchmark+'_different_matrix_sizes_'+str(th)+'.pdf')
results=[]
bl=1
block_sizes=[]
m_sizes=[]
avg_l2_miss_rate=[]
real_block_sizes=[]
for b in d_hpx[node][benchmark][th].keys():
for m in mat_sizes[benchmark]:
k=d_hpx[node][benchmark][th][b][c]['size'].index(m)
if 'mflops' in d_hpx[node][benchmark][th][b][c].keys() and d_hpx[node][benchmark][th][b][c]['mflops'][k]:
b_r=int(b.split('-')[0])
b_c=int(b.split('-')[1])
rest1=b_r%simdsize
rest2=b_c%simdsize
if b_r>m:
b_r=m
if b_c>m:
b_c=m
if b_c%simdsize!=0:
b_c=b_c+simdsize-b_c%simdsize
equalshare1=math.ceil(m/b_r)
equalshare2=math.ceil(m/b_c)
chunk_sizes.append(c)
mflop=0
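# Rough per-block work estimate: one operation per element for the addition
# benchmarks, ~2*m operations per element (multiply + accumulate over m terms)
# for the multiplication benchmark.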
if 'add' in benchmark:
mflop=b_r*b_c
else:
mflop=b_r*b_c*(2*m)
m_sizes.append(m)
block_sizes.append(bl)
real_block_sizes.append(b)
results.append(d_hpx[node][benchmark][th][b][c]['mflops'][k])
l2_cm.append(d_hpx[node][benchmark][th][b][c]['counters'][k]['avg']['papi_tcm'])
l2_ch.append([d_hpx[node][benchmark][th][b][c]['counters'][k]['avg']['papi_tca'][l]-d_hpx[node][benchmark][th][b][c]['counters'][k]['avg']['papi_tcm'][l] for l in range(th)])
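# Per-core L2 miss rate below: PAPI total cache misses (papi_tcm) divided by
# total cache accesses (papi_tca), computed separately for each of the th threads.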
ind_miss=[d_hpx[node][benchmark][th][b][c]['counters'][k]['avg']['papi_tcm'][j]/d_hpx[node][benchmark][th][b][c]['counters'][k]['avg']['papi_tca'][j] for j in range(th)]
avg_l2_miss_rate.append(sum(ind_miss)/th)
bl=bl+1
y=block_sizes
x=m_sizes
z=avg_l2_miss_rate
if p3d:
if not animation:
fig = plt.figure(i)
ax = fig.add_subplot(1,1,1, projection='3d')
triang = mtri.Triangulation(x, y)
ax.plot_trisurf(triang, z, cmap='jet')
ax.scatter(x,y,z, marker='.', s=10, c="black", alpha=0.5)
ax.view_init(elev=10, azim=110)
ax.set_xlabel('Matrix size')
ax.set_ylabel('Block size')
ax.set_zlabel('L2 cache miss rate')
plt.title(benchmark+' matrix size:'+str(m)+' '+str(th)+' threads')
# plt.savefig(pp, format='pdf',bbox_inches='tight')
print('')
i=i+1
else:
# surf=ax.plot_trisurf(y, x, z, cmap=plt.cm.viridis, linewidth=0.2)
# fig.colorbar( surf, shrink=0.5, aspect=5)
# ax.view_init(10, 60)
# plt.show()
for angle in range(0,360,10):
fig = plt.figure(i)
ax = fig.gca(projection='3d')
triang = mtri.Triangulation(x, y)
ax.plot_trisurf(triang, z, cmap='jet')
ax.scatter(x,y,z, marker='.', s=10, c="black", alpha=0.5)
ax.view_init(elev=10, azim=angle)
ax.set_xlabel('Grain size')
ax.set_ylabel('Block size')
ax.set_zlabel('L2 cache miss rate')
plt.title(benchmark+' chunk size:1 '+str(th)+' threads')
filename='/home/shahrzad/repos/Blazemark/results/png/step_'+str(angle)+'.png'
plt.savefig(filename, dpi=96)
plt.gca()
if not animation:
plt.show()
pp.close()
else:
plt.figure(i)
plt.plot(real_block_sizes,z, label=str(th)+' threads matrix_size:'+str(m))
plt.ylabel('l2_cache_miss rate')
plt.xlabel('block size')
plt.title(benchmark+' '+str(th)+' threads')
plt.grid(True, 'both')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
#################################################################
#cache-miss based on block_size and matrix_size for chunk_size=1
#################################################################
c=1
b='4-1024'
node='marvin'
benchmark='dmatdmatadd'
th=4
#plot number of cache misses based on matrix size for a chunk size and a block size
for benchmark in benchmarks:
for th in d_hpx[node][benchmark].keys():
# pp = PdfPages(perf_directory+'/bath_tub_'+benchmark+'_different_matrix_sizes_'+str(th)+'.pdf')
results=[]
for b in d_hpx[node][benchmark][th].keys():
l2_cm=[]
l2_ch=[]
l2_miss_rate=[]
for m in mat_sizes[benchmark]:
chunk_sizes=[]
grain_sizes=[]
k=d_hpx[node][benchmark][th][b][c]['size'].index(m)
if 'mflops' in d_hpx[node][benchmark][th][b][c].keys() and d_hpx[node][benchmark][th][b][c]['mflops'][k]:
b_r=int(b.split('-')[0])
b_c=int(b.split('-')[1])
rest1=b_r%simdsize
rest2=b_c%simdsize
if b_r>m:
b_r=m
if b_c>m:
b_c=m
if b_c%simdsize!=0:
b_c=b_c+simdsize-b_c%simdsize
equalshare1=math.ceil(m/b_r)
equalshare2=math.ceil(m/b_c)
chunk_sizes.append(c)
num_blocks=equalshare1*equalshare2
num_elements_uncomplete=0
if b_c<m:
num_elements_uncomplete=(m%b_c)*b_r
mflop=0
if benchmark=='dmatdmatadd':
mflop=b_r*b_c
elif benchmark=='dmatdmatdmatadd':
mflop=b_r*b_c*2
else:
mflop=b_r*b_c*(2*m)
num_elements=[mflop]*num_blocks
if num_elements_uncomplete:
for j in range(1,equalshare1+1):
num_elements[j*equalshare2-1]=num_elements_uncomplete
data_type=8
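# Grain size = work handed to one HPX task: the summed element counts of the
# first c (chunk_size) blocks; trailing blocks of a block-row may be smaller
# (num_elements_uncomplete) when b_c does not divide the matrix size evenly.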
grain_size=sum(num_elements[0:c])
num_mat=3
if benchmark=='dmatdmatdmatadd':
num_mat=4
cost=c*mflop*num_mat/data_type
grain_sizes.append(grain_size)
results.append(d_hpx[node][benchmark][th][b][c]['mflops'][k])
l2_cm.append(d_hpx[node][benchmark][th][b][c]['counters'][k]['avg']['papi_tcm'])
l2_ch.append([d_hpx[node][benchmark][th][b][c]['counters'][k]['avg']['papi_tca'][l]-d_hpx[node][benchmark][th][b][c]['counters'][k]['avg']['papi_tcm'][l] for l in range(th)])
l2_miss_rate.append([d_hpx[node][benchmark][th][b][c]['counters'][k]['avg']['papi_tcm'][j]/d_hpx[node][benchmark][th][b][c]['counters'][k]['avg']['papi_tca'][j] for j in range(th)])
if len(chunk_sizes)!=0:
for t in range(th):
tl=[l[t] for l in l2_miss_rate]
plt.figure(i+t)
plt.plot(mat_sizes[benchmark], tl, label=str(th)+' threads block_size:'+str(b_r)+'-'+str(b_c)+' num_blocks:'+str(equalshare1*equalshare2)+' block size '+str(b)+' core '+str(t))
plt.ylabel('l2_cache_miss rate')
plt.xlabel('matrix size')
plt.xscale('log')
plt.title(benchmark)
plt.grid(True, 'both')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
i=i+th
#################################################################
#cache-miss based on block_size and matrix_size for all chunk_sizes
#################################################################
for node in d_hpx.keys():
#plot number of cache misses based on matrix size for a chunk size and a block size
for benchmark in d_hpx[node].keys():
for th in d_hpx[node][benchmark].keys():
# pp = PdfPages(perf_directory+'/bath_tub_'+benchmark+'_different_matrix_sizes_'+str(th)+'.pdf')
results=[]
for m in mat_sizes[benchmark]:
l2_cm=[]
l2_ch=[]
l2_miss_rate=[]
grain_sizes=[]
avg_l2_miss_rate=[]
for b in d_hpx[node][benchmark][th].keys():
for c in d_hpx[node][benchmark][th][b].keys():
k=d_hpx[node][benchmark][th][b][c]['size'].index(m)
if 'mflops' in d_hpx[node][benchmark][th][b][c].keys() and d_hpx[node][benchmark][th][b][c]['mflops'][k]:
b_r=int(b.split('-')[0])
b_c=int(b.split('-')[1])
rest1=b_r%simdsize
rest2=b_c%simdsize
if b_r>m:
b_r=m
if b_c>m:
b_c=m
if b_c%simdsize!=0:
b_c=b_c+simdsize-b_c%simdsize
equalshare1=math.ceil(m/b_r)
equalshare2=math.ceil(m/b_c)
chunk_sizes.append(c)
num_blocks=equalshare1*equalshare2
num_elements_uncomplete=0
if b_c<m:
num_elements_uncomplete=(m%b_c)*b_r
mflop=0
if benchmark=='dmatdmatadd':
mflop=b_r*b_c
elif benchmark=='dmatdmatdmatadd':
mflop=b_r*b_c*2
else:
mflop=b_r*b_c*(2*m)
num_elements=[mflop]*num_blocks
if num_elements_uncomplete:
for j in range(1,equalshare1+1):
num_elements[j*equalshare2-1]=num_elements_uncomplete
data_type=8
grain_size=sum(num_elements[0:c])
num_mat=3
if benchmark=='dmatdmatdmatadd':
num_mat=4
cost=c*mflop*num_mat/data_type
grain_sizes.append(grain_size)
results.append(d_hpx[node][benchmark][th][b][c]['mflops'][k])
l2_cm.append(d_hpx[node][benchmark][th][b][c]['counters'][k]['avg']['papi_tcm'])
l2_ch.append([d_hpx[node][benchmark][th][b][c]['counters'][k]['avg']['papi_tca'][l]-d_hpx[node][benchmark][th][b][c]['counters'][k]['avg']['papi_tcm'][l] for l in range(th)])
l2_miss_rate.append([d_hpx[node][benchmark][th][b][c]['counters'][k]['avg']['papi_tcm'][j]/d_hpx[node][benchmark][th][b][c]['counters'][k]['avg']['papi_tca'][j] for j in range(th)])
ind_miss=[d_hpx[node][benchmark][th][b][c]['counters'][k]['avg']['papi_tcm'][j]/d_hpx[node][benchmark][th][b][c]['counters'][k]['avg']['papi_tca'][j] for j in range(th)]
avg_l2_miss_rate.append(sum(ind_miss)/th)
plt.figure(i)
indices=np.argsort(np.array(grain_sizes))
plt.plot([grain_sizes[i] for i in indices], [avg_l2_miss_rate[i] for i in indices], label=str(th)+' threads matrix_size:'+str(m))
plt.ylabel('l2_cache_miss rate')
plt.xlabel('grain size')
plt.xscale('log')
plt.title(benchmark)
plt.grid(True, 'both')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
i=i+1
(d_hpx, chunk_sizes, block_sizes, thr, benchmarks, mat_sizes)=create_dict_relative_norepeat_counters_onebyone(papi_directory)
perf_directory='/home/shahrzad/repos/Blazemark/data/performance_plots/matrix/08-07-2019/performance_counters'
c=1
node='marvin'
benchmark='dmatdmatadd'
th=4
m=912
#plot number of cache misses based on block size for a chunk size and a matrix size
for node in d_hpx.keys():
for benchmark in benchmarks:
# pp = PdfPages(perf_directory+'/cache_miss_rate_'+benchmark+'_'+node+'.pdf')
for th in d_hpx[node][benchmark].keys():
for c in [1,2]:
results=[]
for m in mat_sizes[benchmark]:
l2_cm=[]
l2_ch=[]
l2_miss_rate=[]
avg_l2_miss_rate=[]
block_sizes=[]
grain_sizes=[]
chunk_sizes=[]
for b in d_hpx[node][benchmark][th].keys():
k=d_hpx[node][benchmark][th][b][c]['size'].index(m)
if 'mflops' in d_hpx[node][benchmark][th][b][c].keys() and d_hpx[node][benchmark][th][b][c]['mflops'][k]:
b_r=int(b.split('-')[0])
b_c=int(b.split('-')[1])
rest1=b_r%simdsize
rest2=b_c%simdsize
if b_r>m:
b_r=m
if b_c>m:
b_c=m
if b_c%simdsize!=0:
b_c=b_c+simdsize-b_c%simdsize
equalshare1=math.ceil(m/b_r)
equalshare2=math.ceil(m/b_c)
chunk_sizes.append(c)
num_blocks=equalshare1*equalshare2
num_elements_uncomplete=0
if b_c<m:
num_elements_uncomplete=(m%b_c)*b_r
block_sizes.append(b)
mflop=0
if benchmark=='dmatdmatadd':
mflop=b_r*b_c
elif benchmark=='dmatdmatdmatadd':
mflop=b_r*b_c*2
else:
mflop=b_r*b_c*(2*m)
num_elements=[mflop]*num_blocks
if num_elements_uncomplete:
for j in range(1,equalshare1+1):
num_elements[j*equalshare2-1]=num_elements_uncomplete
data_type=8
grain_size=sum(num_elements[0:c])
num_mat=3
if benchmark=='dmatdmatdmatadd':
num_mat=4
cost=c*mflop*num_mat/data_type
grain_sizes.append(grain_size)
# block_sizes.append(str(b_r)+'-'+str(b_c))
results.append(d_hpx[node][benchmark][th][b][c]['mflops'][k])
l2_cm.append(d_hpx[node][benchmark][th][b][c]['counters'][k]['avg']['papi_tcm'])
l2_ch.append([d_hpx[node][benchmark][th][b][c]['counters'][k]['avg']['papi_tca'][l]-d_hpx[node][benchmark][th][b][c]['counters'][k]['avg']['papi_tcm'][l] for l in range(th)])
ind_miss=[d_hpx[node][benchmark][th][b][c]['counters'][k]['avg']['papi_tcm'][j]/d_hpx[node][benchmark][th][b][c]['counters'][k]['avg']['papi_tca'][j] for j in range(th)]
l2_miss_rate.append(ind_miss)
avg_l2_miss_rate.append(sum(ind_miss)/th)
if len(chunk_sizes)!=0:
plt.figure(i)
plt.axes([0, 0, 2, 1])
plt.plot(block_sizes, avg_l2_miss_rate, label='matrix size:'+str(m))
plt.ylabel('L2_cache_miss rate')
plt.xlabel('block size')
plt.title(node+' '+benchmark+' chunk_size: '+str(c)+' '+str(th)+' threads')
plt.grid(True, 'both')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
G=np.argsort(np.asarray(grain_sizes))
plt.figure(i+1)
plt.ylabel('L2_cache_miss rate')
plt.xlabel('grain size')
plt.title(node+' '+benchmark+' chunk_size: '+str(c)+' '+str(th)+' threads')
plt.grid(True, 'both')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.plot([grain_sizes[g] for g in G], [avg_l2_miss_rate[g] for g in G], label='matrix size:'+str(m), marker='+')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
xs=[grain_sizes[g] for g in G]
ys=[avg_l2_miss_rate[g] for g in G]
zs=[block_sizes[g] for g in G]
for x,y,z in zip(xs,ys,zs):
label = (z)
plt.annotate(label, # this is the text
(x,y), # this is the point to label
textcoords="offset points", # how to position the text
xytext=(0,10), # distance from text to points (x,y)
ha='center') # horizontal alignment can be left, right or center
# plt.savefig(pp, format='pdf',bbox_inches='tight')
# print('')
i=i+2
plt.show()
pp.close()
c=1
node='marvin'
benchmark='dmatdmatadd'
th=4
m=912
#plot number of cache misses based on block size for a matrix size and a chunk size
for node in d_hpx.keys():
for benchmark in benchmarks:
# pp = PdfPages(perf_directory+'/cache_miss_rate_'+benchmark+'_'+node+'.pdf')
for th in d_hpx[node][benchmark].keys():
for m in mat_sizes[benchmark]:
results=[]
for c in d_hpx[node][benchmark][1]['4-1024'].keys():
l2_cm=[]
l2_ch=[]
l2_miss_rate=[]
avg_l2_miss_rate=[]
block_sizes=[]
grain_sizes=[]
chunk_sizes=[]
for b in d_hpx[node][benchmark][th].keys():
k=d_hpx[node][benchmark][th][b][c]['size'].index(m)
if 'mflops' in d_hpx[node][benchmark][th][b][c].keys() and d_hpx[node][benchmark][th][b][c]['mflops'][k]:
b_r=int(b.split('-')[0])
b_c=int(b.split('-')[1])
rest1=b_r%simdsize
rest2=b_c%simdsize
if b_r>m:
b_r=m
if b_c>m:
b_c=m
if b_c%simdsize!=0:
b_c=b_c+simdsize-b_c%simdsize
equalshare1=math.ceil(m/b_r)
equalshare2=math.ceil(m/b_c)
chunk_sizes.append(c)
num_blocks=equalshare1*equalshare2
num_elements_uncomplete=0
if b_c<m:
num_elements_uncomplete=(m%b_c)*b_r
block_sizes.append(b)
mflop=0
if benchmark=='dmatdmatadd':
mflop=b_r*b_c
elif benchmark=='dmatdmatdmatadd':
mflop=b_r*b_c*2
else:
mflop=b_r*b_c*(2*m)
num_elements=[mflop]*num_blocks
if num_elements_uncomplete:
for j in range(1,equalshare1+1):
num_elements[j*equalshare2-1]=num_elements_uncomplete
data_type=8
grain_size=sum(num_elements[0:c])
num_mat=3
if benchmark=='dmatdmatdmatadd':
num_mat=4
cost=c*mflop*num_mat/data_type
grain_sizes.append(grain_size)
# block_sizes.append(str(b_r)+'-'+str(b_c))
results.append(d_hpx[node][benchmark][th][b][c]['mflops'][k])
l2_cm.append(d_hpx[node][benchmark][th][b][c]['counters'][k]['avg']['papi_tcm'])
l2_ch.append([d_hpx[node][benchmark][th][b][c]['counters'][k]['avg']['papi_tca'][l]-d_hpx[node][benchmark][th][b][c]['counters'][k]['avg']['papi_tcm'][l] for l in range(th)])
ind_miss=[d_hpx[node][benchmark][th][b][c]['counters'][k]['avg']['papi_tcm'][j]/d_hpx[node][benchmark][th][b][c]['counters'][k]['avg']['papi_tca'][j] for j in range(th)]
l2_miss_rate.append(ind_miss)
avg_l2_miss_rate.append(sum(ind_miss)/th)
if len(chunk_sizes)!=0:
plt.figure(i)
plt.axes([0, 0, 2, 1])
plt.plot(block_sizes, avg_l2_miss_rate, label='chunk size:'+str(c))
plt.ylabel('L2_cache_miss rate')
plt.xlabel('block size')
plt.title(node+' '+benchmark+' matrix_size: '+str(m)+' '+str(th)+' threads')
plt.grid(True, 'both')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
G=np.argsort(np.asarray(grain_sizes))
plt.figure(i+1)
plt.ylabel('L2_cache_miss rate')
plt.xlabel('grain size')
plt.title(node+' '+benchmark+' matrix_size: '+str(m)+' '+str(th)+' threads')
plt.grid(True, 'both')
plt.plot([grain_sizes[g] for g in G], [avg_l2_miss_rate[g] for g in G], label='matrix size:'+str(m), marker='+')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
xs=[grain_sizes[g] for g in G]
ys=[avg_l2_miss_rate[g] for g in G]
zs=[block_sizes[g] for g in G]
for x,y,z in zip(xs,ys,zs):
label = (z)
plt.annotate(label, # this is the text
(x,y), # this is the point to label
textcoords="offset points", # how to position the text
xytext=(0,10), # distance from text to points (x,y)
ha='center') # horizontal alignment can be left, right or center
# plt.savefig(pp, format='pdf',bbox_inches='tight')
# print('')
i=i+2
plt.show()
pp.close()
#plot number of cache misses based on chunk size for a matrix size and a block size
for node in d_hpx.keys():
for benchmark in benchmarks:
pp = PdfPages(perf_directory+'/cache_miss_rate_grain_size_'+benchmark+'_'+node+'.pdf')
for th in d_hpx[node][benchmark].keys():
for m in mat_sizes[benchmark]:
results=[]
overall_avg_l2_miss_rate=[]
overall_grain_sizes=[]
for b in d_hpx[node][benchmark][th].keys():
l2_cm=[]
l2_ch=[]
l2_miss_rate=[]
avg_l2_miss_rate=[]
block_sizes=[]
grain_sizes=[]
chunk_sizes=[]
for c in d_hpx[node][benchmark][1]['4-1024'].keys():
k=d_hpx[node][benchmark][th][b][c]['size'].index(m)
if 'mflops' in d_hpx[node][benchmark][th][b][c].keys() and d_hpx[node][benchmark][th][b][c]['mflops'][k]:
b_r=int(b.split('-')[0])
b_c=int(b.split('-')[1])
rest1=b_r%simdsize
rest2=b_c%simdsize
if b_r>m:
b_r=m
if b_c>m:
b_c=m
if b_c%simdsize!=0:
b_c=b_c+simdsize-b_c%simdsize
equalshare1=math.ceil(m/b_r)
equalshare2=math.ceil(m/b_c)
chunk_sizes.append(c)
num_blocks=equalshare1*equalshare2
num_elements_uncomplete=0
if b_c<m:
num_elements_uncomplete=(m%b_c)*b_r
block_sizes.append(b)
mflop=0
if benchmark=='dmatdmatadd':
mflop=b_r*b_c
elif benchmark=='dmatdmatdmatadd':
mflop=b_r*b_c*2
else:
mflop=b_r*b_c*(2*m)
num_elements=[mflop]*num_blocks
if num_elements_uncomplete:
for j in range(1,equalshare1+1):
num_elements[j*equalshare2-1]=num_elements_uncomplete
data_type=8
grain_size=sum(num_elements[0:c])
num_mat=3
if benchmark=='dmatdmatdmatadd':
num_mat=4
| |
bzImage build command: " + argv_str)
tokens = argv_str.split()
if len(tokens) < 5:
verbose("Warning: not well-formated bzImage build command", LEVEL_0)
return ''
outfile = os.path.abspath(os.path.join(pwddir, tokens[-1]))
infiles = tokens[1 : len(tokens)-1]
infiles = [os.path.abspath(os.path.join(pwddir, afile)) for afile in infiles]
record_raw_info(outfile, infiles, pwddir, argv_str)
return outfile
def process_ar_command(prog, pwddir, argv_str):
'''
Process the gcc command
:param prog: the program binary
:param pwddir: the present working directory for the command
:param argv_str: the full command with all its command line options/parameters
'''
verbose("\npwd: " + pwddir + " Found one ar command: " + argv_str)
(outfile, infiles) = get_all_subfiles_in_ar_cmdline(argv_str, pwddir)
if infiles: # if empty infiles, no need to record
record_raw_info(outfile, infiles, pwddir, argv_str)
return outfile
# found out that java compiler like maven can compile .java to .class in memory without creating new process
# so this javac process hookup will not work, same reason for jar command hookup.
def process_javac_command(prog, pwddir, argv_str):
'''
Process the javac command
:param prog: the program binary
:param pwddir: the present working directory for the command
:param argv_str: the full command with all its command line options/parameters
'''
verbose("\npwd: " + pwddir + " Found one javac command: " + argv_str)
if ".java " not in argv_str and argv_str[-5:] != ".java":
verbose("Warning: no input .java file for javac line: " + argv_str)
return
tokens = argv_str.split()
for token in tokens:
if token[-5:] == ".java":
java_file = token
outfile = token[:-5] + ".class"
break
if java_file[0] != '/':
java_file = os.path.join(pwddir, java_file)
java_file = os.path.abspath(java_file)
if outfile[0] != '/':
outfile = os.path.join(pwddir, outfile)
outfile = os.path.abspath(outfile)
# The .java source file found above is the input for this compile step.
record_raw_info(outfile, [java_file], pwddir, argv_str)
return outfile
def process_jar_command(prog, pwddir, argv_str):
'''
Process the jar command
:param prog: the program binary
:param pwddir: the present working directory for the command
:param argv_str: the full command with all its command line options/parameters
'''
verbose("\npwd: " + pwddir + " Found one jar command: " + argv_str)
(outfile, infiles) = get_all_subfiles_in_jar_cmdline(argv_str, pwddir)
record_raw_info(outfile, infiles, pwddir, argv_str)
return outfile
def find_bom_outfile_in_bomdir(outfile, bomdir):
'''
Try to find the .bom-embedded outfile in the bomdir
:param outfile: the output file of the shell command
:param bomdir: the directory to find the bom-embedded outfile
'''
if not os.path.exists(outfile):
return ''
checksum = get_git_file_hash(outfile)
afiles = find_specific_file(bomdir, checksum + "*" + os.path.basename(outfile))
# afiles = find_specific_file_in_modification_time_order(bomdir, checksum + "*" + os.path.basename(outfile))
if not afiles:
return ''
if len(afiles) > 1:
verbose("Warning: multiple with_bom files found: " + str(afiles));
return afiles[0]
def embed_bom_after_cmd(prog, pwddir, argv_str, outfile):
'''
Embed .bom section into outfile and overwrite original outfile
:param prog: the program binary
:param pwddir: the present working directory for the command
:param argv_str: the full command with all its command line options/parameters
:param outfile: the output file of the shell command
Returns nothing; outfile is overwritten in place when a bom-embedded copy is found.
'''
if not outfile or not os.path.exists(outfile):
return
# Use /tmp/embed_bomdir instead of the default ${PWD}/.gitbom directory as gitBOM repo dir
bomdir = os.path.join(g_tmpdir, "embed_bomdir")
lseek_lines_file = os.path.join(g_tmpdir, "bomsh_hook_lseek_lines")
# Invoke the bomsh_create_bom script to generate hash-tree and gitBOM docs
cmd = g_create_bom_script + ' -r ' + cmd_quote(g_raw_logfile) + ' --tmpdir ' + g_tmpdir + ' -b ' + bomdir + ' --lseek_lines_file ' + lseek_lines_file + ' || true'
#cmd = g_create_bom_script + ' --new_gitbom_doc_for_unary_transform -r ' + cmd_quote(g_raw_logfile) + ' --tmpdir ' + g_tmpdir + ' -b ' + bomdir + ' --lseek_lines_file ' + lseek_lines_file + ' || true'
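# The trailing '|| true' keeps a failure in bom-document creation from
# propagating and breaking the hooked build command.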
get_shell_cmd_output(cmd)
# find the bom-embedded outfile in bomdir
with_bom_dir = os.path.join(bomdir, "metadata", "bomsh", "with_bom_files")
embed_outfile = find_bom_outfile_in_bomdir(outfile, with_bom_dir)
if not embed_outfile:
return
# record this operation as a binary converting command. This is required in order to create hash-tree from bomsh_hook_raw_logfile later.
checksum = get_git_file_hash(embed_outfile)
infiles = [outfile,]
record_raw_info(embed_outfile, infiles, pwddir, "embed_bom_after_cmd for " + outfile + " orig_build_cmd: " + argv_str, outfile_checksum=checksum)
# overwrite the outfile and keep a copy of the original outfile
embed_outfile_orig = embed_outfile + ".orig"
os.system("cp " + outfile + " " + embed_outfile_orig + " ; cp " + embed_outfile + " " + outfile)
verbose("After " + prog + " command, overwrite with bom-embedded outfile: " + outfile)
afile = os.path.join(g_tmpdir, "bomsh_hook_embed_bom_file")
write_text_file(afile, checksum)
#verbose("embed_bom_after_cmd, writing embed_outfile_checksum: " + checksum + " to file: " + afile)
def read_hook_embed_bom_file():
'''
Read the saved outfile checksum from the hook_embed_bom_file
'''
afile = os.path.join(g_tmpdir, "bomsh_hook_embed_bom_file")
if not os.path.exists(afile):
return ''
return read_text_file(afile).strip()
def process_shell_command(prog, pwddir, argv_str, pid_str):
'''
Process the shell command that we are interested in.
:param prog: the program binary
:param pwddir: the present working directory for the command
:param argv_str: the full command with all its command line options/parameters
:param pid_str: the string with process ID, in format of "PID: pid XXX YYY"
'''
pid = ''
tokens = pid_str.split()
if len(tokens) > 1:
pid = tokens[1]
if args.pre_exec:
verbose("pre_exec run")
global g_last_embed_outfile_checksum
if g_embed_bom_after_commands and prog in g_embed_bom_after_commands:
# read saved embed_outfile_checksum to later check if this shell command is redundant command
g_last_embed_outfile_checksum = read_hook_embed_bom_file()
# Process the shell command, to record the raw info
if is_cc_compiler(prog):
outfile = process_gcc_command(prog, pwddir, argv_str)
elif prog == "/usr/bin/ar":
outfile = process_ar_command(prog, pwddir, argv_str)
elif is_cc_linker(prog):
outfile = process_ld_command(prog, pwddir, argv_str)
elif prog == "/usr/bin/objcopy":
outfile = process_objcopy_command(prog, pwddir, argv_str, pid)
elif prog == "arch/x86/boot/tools/build":
outfile = process_bzImage_build_command(prog, pwddir, argv_str)
elif prog in g_samefile_converters:
outfile = process_samefile_converter_command(prog, pwddir, argv_str, pid)
elif prog == "/usr/bin/install":
outfile = process_install_command(prog, pwddir, argv_str)
elif prog == "/usr/bin/rustc":
outfile = process_rustc_command(prog, pwddir, argv_str)
elif prog == "bomsh_openat_file":
outfile = process_samefile_converter_command(prog, pwddir, argv_str, pid)
elif prog == "/usr/bin/javac":
outfile = process_javac_command(prog, pwddir, argv_str)
elif prog == "/usr/bin/jar":
outfile = process_jar_command(prog, pwddir, argv_str)
elif is_golang_prog(prog):
outfile = process_golang_command(prog, pwddir, argv_str)
# if user wants to embed .bom into binaries for some commands
if not g_not_embed_bom_flag and not args.pre_exec and prog in g_embed_bom_after_commands:
# only if this command is not redundant, not pre-exec mode
embed_bom_after_cmd(prog, pwddir, argv_str, outfile)
############################################################
#### End of shell command handling routines ####
############################################################
def rtd_parse_options():
"""
Parse command options.
"""
parser = argparse.ArgumentParser(
description = "This tool parses the command and records raw info of input/output file checksums")
parser.add_argument("--version",
action = "version",
version=VERSION)
parser.add_argument('-s', '--shell_cmd_file',
help = "the shell command file to analyze the command")
parser.add_argument('-r', '--raw_logfile',
help = "the raw log file, to store input/output file checksums")
parser.add_argument('-l', '--logfile',
help = "the log file, must be absolute path, not relative path")
parser.add_argument('-w', '--watched_programs',
help = "the comma-separated list of programs to watch")
parser.add_argument('--watched_pre_exec_programs',
help = "the comma-separated list of pre_exec programs to watch")
parser.add_argument('-t', '--trace_logfile',
help = "the verbose trace log file")
parser.add_argument('--create_bom_script',
help = "the bomsh_create_bom script file")
parser.add_argument('--tmpdir',
help = "tmp directory, which is /tmp by default")
parser.add_argument('--cc_compilers',
help = "the comma-separated C compiler paths, like /usr/bin/gcc,/usr/bin/clang")
parser.add_argument('--cc_linkers',
help = "the comma-separated C linker paths, like /usr/bin/ld,/usr/bin/llvm-ld")
parser.add_argument("--embed_bom_after_commands",
help = "embed .bom ELF section after a command on an ELF binary, which is a list of comma-separated programs")
parser.add_argument("--pre_exec",
action = "store_true",
help = "pre-exec mode, invoked before executing the process")
parser.add_argument("-n", "--no_auto_embed_bom_for_compiler_linker",
action = "store_true",
help = "not automatically embed bom-id to ELF binary for cc/ld commands")
parser.add_argument("--no_dependent_headers",
action = "store_true",
help = "not include C header files for hash tree dependency")
parser.add_argument("-v", "--verbose",
action = "count",
default = 0,
help = "verbose output, can be supplied multiple times"
" to increase verbosity")
# Parse the command line arguments
args = parser.parse_args()
if not (args.shell_cmd_file):
print ("Please specify the shell command file with -s option!")
print ("")
parser.print_help()
sys.exit()
global g_create_bom_script
if args.create_bom_script:
g_create_bom_script = args.create_bom_script
global g_logfile
global g_trace_logfile
global g_raw_logfile
global g_tmpdir
if args.tmpdir:
g_tmpdir = args.tmpdir
g_logfile = os.path.join(g_tmpdir, "bomsh_hook_logfile")
g_trace_logfile = os.path.join(g_tmpdir, "bomsh_hook_trace_logfile")
g_raw_logfile = os.path.join(g_tmpdir, "bomsh_hook_raw_logfile")
if args.logfile:
g_logfile = args.logfile
if args.trace_logfile:
g_trace_logfile = args.trace_logfile
if args.raw_logfile:
g_raw_logfile = args.raw_logfile
if args.no_auto_embed_bom_for_compiler_linker:
g_embed_bom_after_commands.clear()
if args.embed_bom_after_commands:
g_embed_bom_after_commands.extend(args.embed_bom_after_commands.split(","))
if args.cc_compilers:
g_cc_compilers.extend(args.cc_compilers.split(","))
if args.cc_linkers:
g_cc_linkers.extend(args.cc_linkers.split(","))
return args
def main():
global args
# parse | |
# sdk/python/pulumi_azure/media/asset.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['AssetArgs', 'Asset']
@pulumi.input_type
class AssetArgs:
def __init__(__self__, *,
media_services_account_name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
alternate_id: Optional[pulumi.Input[str]] = None,
container: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
storage_account_name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing an Asset resource.
:param pulumi.Input[str] media_services_account_name: Specifies the name of the Media Services Account. Changing this forces a new Media Asset to be created.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group where the Media Asset should exist. Changing this forces a new Media Asset to be created.
:param pulumi.Input[str] alternate_id: The alternate ID of the Asset.
:param pulumi.Input[str] container: The name of the asset blob container. Changing this forces a new Media Asset to be created.
:param pulumi.Input[str] description: The Asset description.
:param pulumi.Input[str] name: The name which should be used for this Media Asset. Changing this forces a new Media Asset to be created.
:param pulumi.Input[str] storage_account_name: The name of the storage account where to store the media asset. Changing this forces a new Media Asset to be created.
"""
pulumi.set(__self__, "media_services_account_name", media_services_account_name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if alternate_id is not None:
pulumi.set(__self__, "alternate_id", alternate_id)
if container is not None:
pulumi.set(__self__, "container", container)
if description is not None:
pulumi.set(__self__, "description", description)
if name is not None:
pulumi.set(__self__, "name", name)
if storage_account_name is not None:
pulumi.set(__self__, "storage_account_name", storage_account_name)
@property
@pulumi.getter(name="mediaServicesAccountName")
def media_services_account_name(self) -> pulumi.Input[str]:
"""
Specifies the name of the Media Services Account. Changing this forces a new Media Asset to be created.
"""
return pulumi.get(self, "media_services_account_name")
@media_services_account_name.setter
def media_services_account_name(self, value: pulumi.Input[str]):
pulumi.set(self, "media_services_account_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the Resource Group where the Media Asset should exist. Changing this forces a new Media Asset to be created.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="alternateId")
def alternate_id(self) -> Optional[pulumi.Input[str]]:
"""
The alternate ID of the Asset.
"""
return pulumi.get(self, "alternate_id")
@alternate_id.setter
def alternate_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "alternate_id", value)
@property
@pulumi.getter
def container(self) -> Optional[pulumi.Input[str]]:
"""
The name of the asset blob container. Changing this forces a new Media Asset to be created.
"""
return pulumi.get(self, "container")
@container.setter
def container(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "container", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
The Asset description.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name which should be used for this Media Asset. Changing this forces a new Media Asset to be created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="storageAccountName")
def storage_account_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the storage account where the Media Asset should be stored. Changing this forces a new Media Asset to be created.
"""
return pulumi.get(self, "storage_account_name")
@storage_account_name.setter
def storage_account_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "storage_account_name", value)
@pulumi.input_type
class _AssetState:
def __init__(__self__, *,
alternate_id: Optional[pulumi.Input[str]] = None,
container: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
media_services_account_name: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
storage_account_name: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering Asset resources.
:param pulumi.Input[str] alternate_id: The alternate ID of the Asset.
:param pulumi.Input[str] container: The name of the asset blob container. Changing this forces a new Media Asset to be created.
:param pulumi.Input[str] description: The Asset description.
:param pulumi.Input[str] media_services_account_name: Specifies the name of the Media Services Account. Changing this forces a new Media Asset to be created.
:param pulumi.Input[str] name: The name which should be used for this Media Asset. Changing this forces a new Media Asset to be created.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group where the Media Asset should exist. Changing this forces a new Media Asset to be created.
:param pulumi.Input[str] storage_account_name: The name of the storage account where the Media Asset should be stored. Changing this forces a new Media Asset to be created.
"""
if alternate_id is not None:
pulumi.set(__self__, "alternate_id", alternate_id)
if container is not None:
pulumi.set(__self__, "container", container)
if description is not None:
pulumi.set(__self__, "description", description)
if media_services_account_name is not None:
pulumi.set(__self__, "media_services_account_name", media_services_account_name)
if name is not None:
pulumi.set(__self__, "name", name)
if resource_group_name is not None:
pulumi.set(__self__, "resource_group_name", resource_group_name)
if storage_account_name is not None:
pulumi.set(__self__, "storage_account_name", storage_account_name)
@property
@pulumi.getter(name="alternateId")
def alternate_id(self) -> Optional[pulumi.Input[str]]:
"""
The alternate ID of the Asset.
"""
return pulumi.get(self, "alternate_id")
@alternate_id.setter
def alternate_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "alternate_id", value)
@property
@pulumi.getter
def container(self) -> Optional[pulumi.Input[str]]:
"""
The name of the asset blob container. Changing this forces a new Media Asset to be created.
"""
return pulumi.get(self, "container")
@container.setter
def container(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "container", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
The Asset description.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="mediaServicesAccountName")
def media_services_account_name(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the name of the Media Services Account. Changing this forces a new Media Asset to be created.
"""
return pulumi.get(self, "media_services_account_name")
@media_services_account_name.setter
def media_services_account_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "media_services_account_name", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name which should be used for this Media Asset. Changing this forces a new Media Asset to be created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Resource Group where the Media Asset should exist. Changing this forces a new Media Asset to be created.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="storageAccountName")
def storage_account_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the storage account where the Media Asset should be stored. Changing this forces a new Media Asset to be created.
"""
return pulumi.get(self, "storage_account_name")
@storage_account_name.setter
def storage_account_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "storage_account_name", value)
class Asset(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
alternate_id: Optional[pulumi.Input[str]] = None,
container: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
media_services_account_name: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
storage_account_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Manages a Media Asset.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_account = azure.storage.Account("exampleAccount",
resource_group_name=example_resource_group.name,
location=example_resource_group.location,
account_tier="Standard",
account_replication_type="GRS")
example_service_account = azure.media.ServiceAccount("exampleServiceAccount",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
storage_accounts=[azure.media.ServiceAccountStorageAccountArgs(
id=example_account.id,
is_primary=True,
)])
example_asset = azure.media.Asset("exampleAsset",
resource_group_name=example_resource_group.name,
media_services_account_name=example_service_account.name,
description="Asset description")
```
## Import
Media Assets can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:media/asset:Asset example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Media/mediaservices/account1/assets/asset1
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] alternate_id: The alternate ID of the Asset.
:param pulumi.Input[str] container: The name of the asset blob container. Changing this forces a new Media Asset to be created.
:param pulumi.Input[str] description: The Asset description.
:param pulumi.Input[str] media_services_account_name: Specifies the name of the Media Services Account. Changing this forces a new Media Asset to be created.
:param pulumi.Input[str] name: The name which should be used for this Media Asset. Changing this forces a new Media Asset to be created.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group where the Media Asset should exist. Changing this forces a new Media Asset to be created.
:param pulumi.Input[str] storage_account_name: The name of the storage account where the Media Asset should be stored. Changing this forces a new Media Asset to be created.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: AssetArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages a Media Asset.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_account = azure.storage.Account("exampleAccount",
resource_group_name=example_resource_group.name,
location=example_resource_group.location,
account_tier="Standard",
account_replication_type="GRS")
example_service_account = azure.media.ServiceAccount("exampleServiceAccount",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
storage_accounts=[azure.media.ServiceAccountStorageAccountArgs(
id=example_account.id,
is_primary=True,
)])
example_asset = azure.media.Asset("exampleAsset",
resource_group_name=example_resource_group.name,
media_services_account_name=example_service_account.name,
description="Asset description")
```
## Import
x, pars: 0]
counter = -1
for p in product(*possibles):
## Combinations
counter += 1
## print p, counter
## Sequential parameters
types = pos_types[np.random.randint(0, len(pos_types))]
# types = 'object'
auto_excluded = pos_auto_excluded[np.random.randint(0, 3)]
if types in ['object', 'listobject']:
typeret = ''
else:
typeret = pos_typeret[np.random.randint(0, len(pos_typeret))]
const = pos_constantneighs[np.random.randint(0, 3)]
inforet = pos_inforet[np.random.randint(0, len(pos_inforet))]
infof = pos_infof[np.random.randint(0, len(pos_infof))]
rel_pos = pos_relativepos[np.random.randint(0, len(pos_relativepos))]
auto_excl = pos_autoexclude[np.random.randint(0, len(pos_autoexclude))]
## Non exhaustive
# if np.random.random() < 0.25:
# continue
## Forbidden combinations
if types in ['list', 'object', 'listobject'] and p[8]:
continue
if p[6] is False and type(inforet) == np.ndarray:
continue
## Instantiation
ret = DummyRetriever(n, autodata=p[0], input_map=p[1], output_map=p[2],
info_ret=inforet, info_f=infof,
constant_info=p[3], perturbations=p[4],
autoexclude=auto_excl, ifdistance=p[5],
relative_pos=rel_pos, bool_input_idx=p[6],
typeret=typeret, preferable_input_idx=p[7],
constant_neighs=const, bool_listind=p[8],
auto_excluded=auto_excluded, types=types)
## Selecting point_i
if p[6] is False:
if types == 'listobject':
i = DummyLocObject(np.array([0]))
j = [i, DummyLocObject(np.array([1]))]
else:
i = np.array([0])
j = [np.array([0]), np.array([1])]
else:
i = 0
j = [0, 1]
## Testing functions standards
## Get Information
################
# Assert information getting
info_i, info_i2 = ret._get_info_i(i, 0), ret._get_info_i(j, 0)
# print info_i, info_i2
assert(info_i == 0)
assert(np.all(info_i2 == 0))
## Get locations
################
# print p[6], types
if p[6]:
loc_i = ret.get_loc_i([0])
if types == 'listobject':
loc_i = [e.location for e in loc_i]
else:
if types == 'listobject':
retloc = DummyLocObject(np.array([0]))
loc_i = ret.get_loc_i([retloc])
loc_i = [e.location for e in loc_i]
else:
loc_i = ret.get_loc_i([np.array([0])])
# print loc_i, counter, ret._get_loc_from_idxs, p[6], p[7]
assert(len(loc_i) == 1)
# assert(type(loc_i) == type(ret.data_input))
# print loc_i, types
assert(type(loc_i[0]) == np.ndarray)
assert(len(loc_i[0].shape) == 1)
assert(all(loc_i[0] == np.array([0])))
if p[6]:
loc_i = ret.get_loc_i([0, 1])
if types == 'listobject':
loc_i = [e.location for e in loc_i]
else:
if types == 'listobject':
aux = DummyLocObject(np.array([0]))
loc_i = ret.get_loc_i([aux, DummyLocObject(np.array([1]))])
loc_i = [e.location for e in loc_i]
else:
loc_i = ret.get_loc_i([np.array([0]), np.array([1])])
# print loc_i, ret.get_loc_i, p[6]
assert(len(loc_i) == 2)
# assert(type(loc_i) == type(ret.data_input))
assert(type(loc_i[0]) == np.ndarray)
assert(type(loc_i[1]) == np.ndarray)
assert(len(loc_i[0].shape) == 1)
assert(len(loc_i[1].shape) == 1)
assert(all(loc_i[0] == np.array([0])))
assert(all(loc_i[1] == np.array([1])))
## Get indices
################
if p[6]:
loc_i = [0]
else:
if types == 'listobject':
loc_i = [DummyLocObject(np.array([0]))]
else:
loc_i = [np.array([0])]
i_loc = ret.get_indice_i(loc_i, 0)
# print i_loc, loc_i, counter, ret.get_indice_i, ret._get_idxs_from_locs
# print list(ret.data_input)
assert(len(i_loc) == 1)
assert(type(i_loc) == list)
assert(type(i_loc[0]) in inttypes)
if p[6]:
i_loc = ret.get_indice_i([0, 1])
else:
if types == 'listobject':
loc_i = [DummyLocObject(np.array([0]))]
loc_i += [DummyLocObject(np.array([1]))]
else:
loc_i = [np.array([0]), np.array([1])]
i_loc = ret.get_indice_i(loc_i, 0)
# print i_loc, ret.get_indice_i, p[6]
assert(len(i_loc) == 2)
assert(type(i_loc) == list)
assert(type(i_loc[0]) in inttypes)
assert(type(i_loc[1]) in inttypes)
assert(i_loc[0] == 0)
assert(i_loc[1] == 1)
## Preparing input
##################
ret._general_prepare_input([i], kr=0)
ret._general_prepare_input(j, kr=0)
# Assert element getting
# print i, j, p[6], p[7], ret._prepare_input
e1, e2 = ret._prepare_input(i, 0), ret._prepare_input(j, 0)
# print i, j, e1, e2, p[6], p[7], ret._prepare_input, ret.get_indice_i
# print ret.preferable_input_idx
if types == 'listobject' and p[7] is not True:
e1, e2 = [e.location for e in e1], [e.location for e in e2]
if p[7]:
# print e1, e2, type(e1), type(e2)
assert(e1 == [0])
assert(e2 == [0, 1])
else:
# print e1, e2, ret._prepare_input, p[6]
assert(e1 == [np.array([0])])
# print e1, ret._prepare_input, p[6], p[7]
assert(all([type(e) == np.ndarray for e in e1]))
# print e2, type(e2[0]), ret._prepare_input, p[6], p[7], counter
assert(np.all([e2 == [np.array([0]), np.array([1])]]))
assert(all([type(e) == np.ndarray for e in e2]))
## Retrieve and output
######################
# Assert correct retrieving
## Retrieve individual
neighs, dists = ret._retrieve_neighs_general_spec(i, 0, p[5])
# print dists, type(dists), p[5]
assert(type(neighs[0][0]) in inttypes)
assert(dists is None or not p[5] is False)
# print dists, type(dists), p[5]
## Output map
neighs2, dists2 = ret._output_map[0](ret, i, (neighs, dists))
assert(type(neighs2[0][0]) in inttypes)
assert(dists2 is None or not p[5] is False)
## Output
# print neighs, dists
neighs, dists = ret._format_output(i, neighs, dists)
if auto_excl and not auto_excluded:
# print neighs, dists, ret._exclude_auto, i, counter
assert(len(neighs) == 1)
assert(len(neighs[0]) == 0)
# assert(type(neighs[0][0]) in inttypes)
assert(dists is None or not p[5] is False)
else:
assert(type(neighs[0][0]) in inttypes)
assert(dists is None or not p[5] is False)
neighs, dists = ret._retrieve_neighs_general_spec(i, 0, p[5])
# print dists, type(dists), p[5]
assert(type(neighs[0][0]) in inttypes)
assert(dists is None or not p[5] is False)
# print dists, type(dists), p[5]
## Output map
neighs2, dists2 = ret._output_map[0](ret, i, (neighs, dists))
assert(type(neighs2[0][0]) in inttypes)
assert(dists2 is None or not p[5] is False)
## Output
# print neighs, dists
neighs, dists = ret._format_output(i, neighs, dists)
if auto_excl and not auto_excluded:
# print neighs, dists, ret._exclude_auto, i, counter
assert(len(neighs) == 1)
assert(len(neighs[0]) == 0)
# assert(type(neighs[0][0]) in inttypes)
assert(dists is None or not p[5] is False)
else:
assert(type(neighs[0][0]) in inttypes)
assert(dists is None or not p[5] is False)
## Retrieve multiple
neighs, dists = ret._retrieve_neighs_general_spec(j, 0, p[5])
assert(type(neighs[0][0]) in inttypes)
assert(dists is None or not p[5] is False)
# print neighs, p, counter
# print ret.staticneighs, type(neighs[0][0])
## Output map
neighs2, dists2 = ret._output_map[0](ret, i, (neighs, dists))
assert(type(neighs2[0][0]) in inttypes)
assert(dists2 is None or not p[5] is False)
## Output
neighs, dists = ret._format_output(j, neighs, dists)
if auto_excl and not auto_excluded:
assert(len(neighs) == 2)
assert(len(neighs[0]) == 0)
assert(len(neighs[1]) == 0)
# assert(type(neighs[0][0]) in inttypes)
assert(dists is None or not p[5] is False)
else:
assert(type(neighs[0][0]) in inttypes)
assert(dists is None or not p[5] is False)
if p[3]:
neighs_info = ret.retrieve_neighs(i)
neighs_info.get_information()
neighs_info = ret[i]
neighs_info.get_information()
else:
neighs_info = ret.retrieve_neighs(i, p[3])
neighs_info.get_information()
if np.random.random() < 0.1:
len(ret)
ret.export_neighs_info()
if not ret._heterogenous_input:
ret._n0
if not ret._heterogenous_output:
ret._n1
ret.shape
ret.data_input
ret.data_output
if ret.k_perturb == 0:
k_option = 1
else:
k_options = [-1, 100]
k_option = k_options[np.random.randint(0, 2)]
if np.random.random() < 0.1:
## Iterations
ret.set_iter()
for iss, nei in ret:
break
if not ret._constant_ret:
## The k has to be between 0 and k_perturb+1
try:
boolean = False
ret.retrieve_neighs(0, k=k_option)
boolean = True
raise Exception("It has to halt here.")
except:
if boolean:
raise Exception("It has to halt here.")
try:
boolean = False
ret._map_perturb(k_option)
boolean = True
raise Exception("It has to halt here.")
except:
if boolean:
raise Exception("It has to halt here.")
#### Special cases
ret = DummyRetriever(n, constant_info=True, bool_input_idx=True,
preferable_input_idx=True)
net = ret.compute_neighnet(mapper=0)
net = ret.compute_neighnet(mapper=0, datavalue=1.)
ret = DummyRetriever(n, constant_info=True, bool_input_idx=True,
preferable_input_idx=True, ifdistance=False)
net = ret.compute_neighnet(mapper=0)
###########################################################################
######### Preparation parameters for general testing
## Perturbations
k_perturb1, k_perturb2, k_perturb3 = 5, 10, 3
k_perturb4 = k_perturb1+k_perturb2+k_perturb3
## Create perturbations
reind = np.vstack([np.random.permutation(n) for i in range(k_perturb1)])
perturbation1 = PermutationPerturbation(reind.T)
perturbation2 = NonePerturbation(k_perturb2)
perturbation3 = JitterLocations(0.2, k_perturb3)
perturbation4 = [perturbation1, perturbation2, perturbation3]
pos_perturbations = [None, perturbation1, perturbation2, perturbation3,
perturbation4]
_input_map = lambda s, i: i
_output_map = [lambda s, i, x: x]
pos_ifdistance = [True, False]
pos_inmap = [None, _input_map]
pos_constantinfo = [True, False, None]
pos_boolinidx = [True, False]
def assert_correctneighs(neighs_info, ifdistance, constant, staticneighs,
ks, iss):
iss = [iss] if type(iss) == int else iss
assert(type(neighs_info.iss) == list)
assert(neighs_info.staticneighs == staticneighs)
if not staticneighs:
# print neighs_info.ks, ks
assert(type(neighs_info.ks) == list)
assert(neighs_info.ks == ks)
if ifdistance:
assert(neighs_info.sp_relative_pos is not None)
else:
assert(neighs_info.sp_relative_pos is None)
# print neighs_info.iss, iss, neighs_info.staticneighs
assert(neighs_info.iss == iss)
###########################################################################
#### KRetriever
##################
pos_inforet = [1, 2, 5, 10]
pos_outmap = [None, _output_map]
pos_autoexclude = [False, True]
pos_pars_ret = [None, 1000]
pos = [pos_inforet, pos_ifdistance, pos_inmap, pos_outmap,
pos_constantinfo, pos_boolinidx, pos_perturbations, pos_autoexclude]
for p in product(*pos):
## Random
pret = pos_pars_ret[np.random.randint(0, len(pos_pars_ret))]
## Instantiation
ret = KRetriever(locs, info_ret=p[0], ifdistance=p[1], input_map=p[2],
output_map=p[3], constant_info=p[4], autoexclude=p[7],
bool_input_idx=p[5], perturbations=p[6],
pars_ret=pret)
# print p
## Selecting point_i
if p[5] is False:
i = locs[0]
j = [locs[0], locs[1]]
else:
i = 0
j = [0, 1]
## Get Information
################
# Assert information getting
info_i, info_i2 = ret._get_info_i(i, {}), ret._get_info_i(j, {})
# print info_i, info_i2, ret._default_ret_val, p[0], p, ret._get_info_i
if p[0] is None:
assert(info_i == ret._default_ret_val)
assert(info_i2 == ret._default_ret_val)
else:
assert(info_i == p[0])
assert(info_i2 == p[0])
## Get locations
################
loc_i = ret.get_loc_i([i])
assert(len(loc_i) == 1)
assert(type(loc_i[0]) == np.ndarray)
assert(len(loc_i[0].shape) == 1)
assert(all(loc_i[0] == locs[0]))
loc_i = ret.get_loc_i(j)
assert(len(loc_i) == 2)
assert(type(loc_i[0]) == np.ndarray)
assert(type(loc_i[1]) == np.ndarray)
assert(len(loc_i[0].shape) == 1)
assert(len(loc_i[1].shape) == 1)
assert(all(loc_i[0] == locs[0]))
assert(all(loc_i[1] == locs[1]))
## Get indices
################
i_loc = ret.get_indice_i([i], 0)
assert(len(i_loc) == 1)
assert(type(i_loc) == list)
assert(type(i_loc[0]) in inttypes)
assert(i_loc[0] == 0)
i_loc = ret.get_indice_i(j, 0)
import re
import config
import logging
import sqlite3
from time import mktime
from os.path import isfile
from importlib import reload
from markov import CorpusModel
from telethon import TelegramClient, events
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
USER_RIGHT_LEVEL_BANNED = -1
USER_RIGHT_LEVEL_RESTRICTED = 1
USER_RIGHT_LEVEL_NORMAL = 2
USER_RIGHT_LEVEL_TRUSTED = 3
USER_RIGHT_LEVEL_ADMIN = 4
USER_RIGHT_LEVEL_ROOT = 5
DEFAULT_USER_RIGHT_LEVEL = 2
USER_RIGHT_LEVEL_NAME = {
USER_RIGHT_LEVEL_BANNED: '被封禁用户',
USER_RIGHT_LEVEL_RESTRICTED: '受限用户',
USER_RIGHT_LEVEL_NORMAL: '一般用户',
USER_RIGHT_LEVEL_TRUSTED: '受信任用户',
USER_RIGHT_LEVEL_ADMIN: '管理员',
USER_RIGHT_LEVEL_ROOT: 'root',
}
COMMAND_LIST = (
'/addword_cn',
'/addword_tw',
'/ban',
'/clddbg',
'/cutdbg',
'/erase',
'/grantadmin',
'/grantnormal',
'/granttrusted',
'/policy',
'/reload',
'/reload_config',
'/restrict',
'/rmword_cn',
'/rmword_tw',
'/source',
'/start',
'/userweight',
)
conn = sqlite3.connect(config.dbfile)
cursor = conn.cursor()
if config.proxy:
import socks
bot = TelegramClient(config.session_name, config.api_id, config.api_hash,
proxy=(socks.SOCKS5, config.proxy_ip, config.proxy_port)).start(bot_token=config.bot_token)
else:
bot = TelegramClient(config.session_name, config.api_id, config.api_hash).start(bot_token=config.bot_token)
bot_name = config.bot_name
escaped_bot_name = re.escape(bot_name)
logging.info('Initializing corpus model...')
model = CorpusModel()
if isfile(config.dbfile):
logging.info('Loading corpora from db file...')
model.load_db(config.dbfile)
elif isfile('./lines.txt'):
logging.info('Loading corpora from txt file...')
model.load('./lines.txt')
elif isfile('./corpora.json'):
logging.info('Loading corpora from json file...')
model.load_json('./corpora.json')
else:
logging.info('Corpora file not found. Starting from scratch.')
get_line_weight = None
try:
get_line_weight = config.get_line_weight
except AttributeError:
logging.info('`get_line_weight` not found in config, so weights are set to 1.0')
get_line_weight = lambda line: 1.0
def add_user(user_tgid, user_name='', user_right=DEFAULT_USER_RIGHT_LEVEL, user_weight=1.):
cursor.execute("""
INSERT OR IGNORE INTO user (user_tgid, user_name, user_right, user_weight)
VALUES (?,?,?,?)
""", (user_tgid, user_name, user_right, user_weight))
conn.commit()
def find_user(user_tgid, user_name='', user_right=DEFAULT_USER_RIGHT_LEVEL, user_weight=1.):
# return: user_id, will insert if not exist
add_user(user_tgid, user_name, user_right, user_weight)
cursor.execute("SELECT user_id FROM user WHERE user_tgid = ?", (user_tgid,))
rst, = cursor.fetchone()
return rst
def update_user(user_tgid, user_name='', user_right=DEFAULT_USER_RIGHT_LEVEL, user_weight=1.):
user_id = find_user(user_tgid, user_name, user_right, user_weight)
cursor.execute("""
UPDATE user SET user_name = ?, user_right = ?, user_weight = ?
WHERE user_id = ?
""", (user_name, user_right, user_weight, user_id))
conn.commit()
def get_user_name(user_tgid):
user_id = find_user(user_tgid)
cursor.execute("SELECT user_name FROM user WHERE user_id = ?", (user_id,))
rst, = cursor.fetchone()
return rst or ''
def get_user_right(user_tgid):
user_id = find_user(user_tgid)
cursor.execute("SELECT user_right FROM user WHERE user_id = ?", (user_id,))
rst, = cursor.fetchone()
return rst or DEFAULT_USER_RIGHT_LEVEL
def set_user_right(user_tgid, new_right):
user_id = find_user(user_tgid)
cursor.execute("UPDATE user SET user_right = ? WHERE user_id = ?", (new_right, user_id))
conn.commit()
def get_user_weight(user_tgid):
user_id = find_user(user_tgid)
cursor.execute("SELECT user_weight FROM user WHERE user_id = ?", (user_id,))
rst, = cursor.fetchone()
return rst or 1.
def set_user_weight(user_tgid, new_weight):
user_id = find_user(user_tgid)
cursor.execute("UPDATE user SET user_weight = ? WHERE user_id = ?", (new_weight, user_id))
conn.commit()
def is_banned(user_tgid):
return get_user_right(user_tgid) <= USER_RIGHT_LEVEL_BANNED
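# Illustrative sketch (not used by the bot): the helpers above rely on a
# `user` table whose schema is not shown in this file. The CREATE TABLE below
# is an assumption inferred from the columns they reference; it only
# demonstrates the "INSERT OR IGNORE, then SELECT" pattern behind find_user().
def _demo_user_table():
    import sqlite3 as _sqlite3
    demo = _sqlite3.connect(':memory:')
    demo.execute("CREATE TABLE user ("
                 " user_id INTEGER PRIMARY KEY AUTOINCREMENT,"
                 " user_tgid INTEGER UNIQUE,"
                 " user_name TEXT,"
                 " user_right INTEGER,"
                 " user_weight REAL)")
    # The second insert is a no-op because user_tgid is UNIQUE, which is what
    # makes find_user() safe to call repeatedly for the same Telegram id.
    for _ in range(2):
        demo.execute("INSERT OR IGNORE INTO user (user_tgid, user_name, user_right, user_weight)"
                     " VALUES (?,?,?,?)", (12345, 'alice', DEFAULT_USER_RIGHT_LEVEL, 1.0))
    row = demo.execute("SELECT user_id, user_right FROM user WHERE user_tgid = ?",
                       (12345,)).fetchone()
    demo.close()
    return row  # -> (1, DEFAULT_USER_RIGHT_LEVEL)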
def chat_is_allowed(chat_id):
# we allow all PMs here
return chat_id > 0 or chat_id in config.chat_ids
def add_chat(chat_tgid, chat_name=''):
cursor.execute("""
INSERT OR IGNORE INTO chat (chat_tgid, chat_name)
VALUES (?,?)
""", (chat_tgid, chat_name))
conn.commit()
def find_chat(chat_tgid, chat_name=''):
# return: chat_id, will insert if not exist
add_chat(chat_tgid, chat_name)
cursor.execute("SELECT chat_id FROM chat WHERE chat_tgid = ?", (chat_tgid,))
rst, = cursor.fetchone()
return rst
LOG_TEMPLATES = {
'pm': '[{userid}](tg://user?id={userid}) ({username}) sent a pm.',
'erase': '[{userid}](tg://user?id={userid}) ({username}) erased {linecount} line(s) in [{chatid}](https://t.me/c/{chatid}/{msgid}):\n{lines}',
'right': '[{userid}](tg://user?id={userid}) ({username}) changed rights of [{targetid}](tg://user?id={targetid}) ({targetname}) from {right_old} to {right_new} in [{chatid}](https://t.me/c/{chatid}/{msgid}).',
'userweight': '[{userid}](tg://user?id={userid}) ({username}) changed weight of [{targetid}](tg://user?id={targetid}) ({targetname}) from {weight_old} to {weight_new} in [{chatid}](https://t.me/c/{chatid}/{msgid}).',
'lineweight': '[{userid}](tg://user?id={userid}) ({username}) changed weight of the following line(s) from {weight_old} to {weight_new} in [{chatid}](https://t.me/c/{chatid}/{msgid}).\n{lines}',
'addword': '[{userid}](tg://user?id={userid}) ({username}) added the following word(s) for {lang} in [{chatid}](https://t.me/c/{chatid}/{msgid}):\n{words}',
'rmword': '[{userid}](tg://user?id={userid}) ({username}) removed the following word(s) for {lang} in [{chatid}](https://t.me/c/{chatid}/{msgid}):\n{words}',
}
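# Illustrative sketch of how a template above expands; the ids and names are
# made up for the example. log_in_chat() below performs the same .format()
# call and prefixes the '#<log_type>' hashtag.
def _demo_log_text():
    kwargs = dict(userid=12345, username='alice', targetid=67890, targetname='bob',
                  right_old=USER_RIGHT_LEVEL_NORMAL, right_new=USER_RIGHT_LEVEL_TRUSTED,
                  chatid=1234567890, msgid=42)
    return '#right\n' + LOG_TEMPLATES['right'].format(**kwargs)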
async def log_in_chat(log_type, fwd_msgs=None, **kwargs):
'''
log_type: pm, erase, right, userweight, lineweight, addword
fwd_msgs: telethon Message(s) object
'''
try:
log_chat_id = config.log_chat_id
if not log_chat_id:
return
except AttributeError:
return
# for some reason, message links with a chat id like -100xxxxxx do not work
if kwargs.get('chatid'):
chatid = str(kwargs['chatid'])
if chatid.startswith('-100'):
kwargs['chatid'] = int(chatid[4:])
try:
log_text = (f'#{log_type}\n'
f'{LOG_TEMPLATES.get(log_type, "").format(**kwargs)}')
except KeyError:
log_text = (f'#{log_type}\n'
f'An error occurred when trying to log. See the following kwargs:\n'
f'{str(kwargs)}')
await bot.send_message(log_chat_id, log_text, parse_mode='md')
if fwd_msgs:
await bot.forward_messages(log_chat_id, fwd_msgs)
async def parse(event, cmd='', use_reply=False):
# parse the command from messages
text = ''
if use_reply and event.message.reply_to_msg_id:
# Use the replied message first
reply_to_msg = await event.message.get_reply_message()
# For stickers: use the emoji
if reply_to_msg.sticker:
try:
text = reply_to_msg.media.document.attributes[1].alt or ''
except (AttributeError, IndexError) as e:
text = ''
# keep the sticker emoji if one was extracted above
text = text or reply_to_msg.message or reply_to_msg.raw_text
if not text:
# Don't use replied message
text = event.message.message or event.raw_text
else:
# Sticker emoji
if event.message.sticker:
text = event.message.file.emoji
else:
# Text and image caption
text = event.message.message or event.raw_text
if cmd and text[:len(cmd)] == cmd:
# strip the command from the text (based on prefix match)
try:
text = text.split(' ', 1)[1]
except IndexError:
text = ''
return text
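# Illustrative sketch of the command-stripping step above, detached from
# telethon: '/userweight 123 0.5' with cmd='/userweight' yields '123 0.5'.
def _demo_strip_command(text='/userweight 123 0.5', cmd='/userweight'):
    if cmd and text[:len(cmd)] == cmd:
        try:
            text = text.split(' ', 1)[1]
        except IndexError:
            text = ''
    return text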
@bot.on(events.NewMessage(incoming=True, pattern=rf'^/reload_config($|\s|@{escaped_bot_name})'))
async def reload_config(event):
global get_line_weight
if not chat_is_allowed(event.chat_id) or is_banned(event.sender_id):
return
sender_id = event.sender_id
user_right = get_user_right(sender_id)
if user_right < USER_RIGHT_LEVEL_ROOT:
await event.respond(f'❌ 此操作需要 {USER_RIGHT_LEVEL_NAME[USER_RIGHT_LEVEL_ROOT]} 权限,'
f'您的权限是 {USER_RIGHT_LEVEL_NAME[user_right]}。')
return
reload(config)
try:
get_line_weight = config.get_line_weight
except AttributeError:
logging.info('`get_line_weight` not found in config, so weights are set to 1.0')
get_line_weight = lambda line: 1.0
await event.respond('✅ 已重新载入配置文件。')
@bot.on(events.NewMessage(incoming=True, pattern=rf'^/reload($|\s|@{escaped_bot_name})'))
async def reload_right(event):
if not chat_is_allowed(event.chat_id) or is_banned(event.sender_id):
return
chat_id = event.chat_id
sender_id = event.sender_id
logging.info(f'chat_id: {chat_id}, sender_id: {sender_id}')
chat = await event.get_chat()
sender = await event.get_sender()
if not sender:
return
cursor.execute("SELECT user_name, user_right, user_weight FROM user WHERE user_tgid = ?", (sender_id,))
rst = cursor.fetchone()
cur_name, cur_right, cur_weight = rst or ('', DEFAULT_USER_RIGHT_LEVEL, 1.0)
user_name = cur_name
# we prefer first name + last name, if None we use username
try:
user_name = sender.first_name
if user_name and sender.last_name:
user_name += (' ' + sender.last_name)
elif sender.last_name:
user_name = sender.last_name
else:
user_name = sender.username or ''
except AttributeError:
# maybe sender is indeed a Channel
pass
# determine user right
# once a user is promoted to group admin, they will get bot admin right
## even if they are demoted later
user_right = cur_right
if chat_id in config.admin_chat_ids and cur_right < USER_RIGHT_LEVEL_ADMIN:
permissions = await bot.get_permissions(chat, sender)
if permissions.is_admin:
user_right = USER_RIGHT_LEVEL_ADMIN
if config.user_right_override and config.user_right_override.get(sender_id):
user_right = config.user_right_override[sender_id]
# update results if changed
if cur_name != user_name or cur_right != user_right:
update_user(sender_id, user_name=user_name, user_right=user_right, user_weight=cur_weight)
await event.respond(f'您好 [{user_name or sender_id}](tg://user?id={sender_id}),'
f'您当前的权限是 {USER_RIGHT_LEVEL_NAME[user_right]}。', parse_mode='md')
async def handle_set_right(event, new_right):
chat_id = event.chat_id
sender_id = event.sender_id
# only usable in groups
if event.chat_id > 0 or not chat_is_allowed(event.chat_id) or is_banned(event.sender_id):
return
user_right = get_user_right(sender_id)
if user_right < USER_RIGHT_LEVEL_ADMIN:
await event.respond(f'❌ 此操作需要 {USER_RIGHT_LEVEL_NAME[USER_RIGHT_LEVEL_ADMIN]} 权限,'
f'您的权限是 {USER_RIGHT_LEVEL_NAME[user_right]}。\n'
f'如果您已成为特定群的群管,可使用 /reload 指令刷新权限。')
return
target_tgid = 0
if event.message.reply_to_msg_id:
# Use the replied user as target first
reply_to_msg = await event.message.get_reply_message()
try:
target_tgid = reply_to_msg.from_id.user_id
except:
pass
if not target_tgid:
text = event.message.message or event.raw_text
try:
target_tgid = int(text.split(' ', 1)[1])
except (IndexError, ValueError):
pass
if not target_tgid:
await event.respond(f'❌ 未找到目标 id。')
return
target_right = get_user_right(target_tgid)
if (new_right == USER_RIGHT_LEVEL_ROOT or target_right == USER_RIGHT_LEVEL_ROOT) and user_right < USER_RIGHT_LEVEL_ROOT:
await event.respond(f'❌ 此操作需要 {USER_RIGHT_LEVEL_NAME[USER_RIGHT_LEVEL_ROOT]} 权限,'
f'您的权限是 {USER_RIGHT_LEVEL_NAME[user_right]}。')
return
if new_right == target_right:
await event.respond('目标用户已经是该权限,无事发生。')
return
if target_right == USER_RIGHT_LEVEL_ADMIN:
await event.respond(f'⚠️ 目标用户的权限是 {USER_RIGHT_LEVEL_NAME[USER_RIGHT_LEVEL_ADMIN]},'
f'希望您不是在打管理战。管理操作均留有日志,如有滥权行为,请向操作者报告。')
set_user_right(target_tgid, new_right)
user_name = get_user_name(sender_id) or sender_id
target_name = get_user_name(target_tgid) or target_tgid
await log_in_chat('right', fwd_msgs=event.message, username=user_name, userid=sender_id,
targetname=target_name, targetid=target_tgid, right_old=target_right, right_new=new_right,
chatid=chat_id, msgid=event.message.id)
await event.respond(f'✅ [{target_tgid}](tg://user?id={target_tgid}) 的权限已从 {USER_RIGHT_LEVEL_NAME[target_right]} 变更为 {USER_RIGHT_LEVEL_NAME[new_right]}。')
@bot.on(events.NewMessage(incoming=True, pattern=rf'^/ban($|\s|@{escaped_bot_name})'))
async def ban(event):
await handle_set_right(event, USER_RIGHT_LEVEL_BANNED)
@bot.on(events.NewMessage(incoming=True, pattern=rf'^/restrict($|\s|@{escaped_bot_name})'))
async def restrict(event):
await handle_set_right(event, USER_RIGHT_LEVEL_RESTRICTED)
@bot.on(events.NewMessage(incoming=True, pattern=rf'^/grantnormal($|\s|@{escaped_bot_name})'))
async def grantnormal(event):
await handle_set_right(event, USER_RIGHT_LEVEL_NORMAL)
@bot.on(events.NewMessage(incoming=True, pattern=rf'^/granttrusted($|\s|@{escaped_bot_name})'))
async def granttrusted(event):
await handle_set_right(event, USER_RIGHT_LEVEL_TRUSTED)
@bot.on(events.NewMessage(incoming=True, pattern=rf'^/grantadmin($|\s|@{escaped_bot_name})'))
async def grantadmin(event):
await handle_set_right(event, USER_RIGHT_LEVEL_ADMIN)
@bot.on(events.NewMessage(incoming=True, pattern=rf'^/userweight($|\s|@{escaped_bot_name})'))
async def userweight(event):
chat_id = event.chat_id
sender_id = event.sender_id
# only usable in groups
if event.chat_id > 0 or not chat_is_allowed(event.chat_id) or is_banned(event.sender_id):
return
user_right = get_user_right(sender_id)
if user_right < USER_RIGHT_LEVEL_ADMIN:
await event.respond(f'❌ 此操作需要 {USER_RIGHT_LEVEL_NAME[USER_RIGHT_LEVEL_ADMIN]} 权限,'
f'您的权限是 {USER_RIGHT_LEVEL_NAME[user_right]}。\n'
f'如果您已成为特定群的群管,可使用 /reload 指令刷新权限。')
return
target_tgid, new_weight = 0, None
text = await parse(event, cmd='/userweight')
if event.message.reply_to_msg_id:
# Use the replied user as target first
reply_to_msg = await event.message.get_reply_message()
try:
target_tgid = reply_to_msg.from_id.user_id
new_weight = float(text)
except:
pass
if not target_tgid or new_weight is None:
try:
target_tgid_str, new_weight_str = text.split(' ', 1)
target_tgid = int(target_tgid_str)
new_weight = float(new_weight_str)
except (IndexError, ValueError):
pass
if not target_tgid or new_weight is None:
await event.respond(f'❌ 未找到目标 id 或指定的权重无效。用法:/userweight <用户id> <新的权重>,或者回复目标并使用 /userweight <新的权重>')
return
target_right = get_user_right(target_tgid)
if target_right == USER_RIGHT_LEVEL_ROOT and user_right < USER_RIGHT_LEVEL_ROOT:
await event.respond(f'❌ 此操作需要 {USER_RIGHT_LEVEL_NAME[USER_RIGHT_LEVEL_ROOT]} 权限,'
f'您的权限是 {USER_RIGHT_LEVEL_NAME[user_right]}。')
return
if target_right == USER_RIGHT_LEVEL_ADMIN:
await event.respond(f'⚠️ 目标用户的权限是 {USER_RIGHT_LEVEL_NAME[USER_RIGHT_LEVEL_ADMIN]},'
f'希望您不是在打管理战。管理操作均留有日志,如有滥权行为,请向操作者报告。')
cur_weight = get_user_weight(target_tgid)
if cur_weight == new_weight:
await event.respond(f'目标用户权重已经是 {cur_weight},无事发生。')
return
set_user_weight(target_tgid, new_weight)
user_name = get_user_name(sender_id) or sender_id
target_name = get_user_name(target_tgid) or target_tgid
await log_in_chat('userweight', fwd_msgs=event.message, username=user_name, userid=sender_id,
targetname=target_name, targetid=target_tgid, weight_old=cur_weight,
weight_new=new_weight, chatid=chat_id, msgid=event.message.id)
= np.linalg.inv(H_body_lb3)
# Get gt files and days
gt_files = np.sort([gt_f for gt_f in listdir(join(data_path, gt_folder)) if gt_f[-4:] == '.csv'])
cov_files = np.sort([cov_f for cov_f in listdir(join(data_path, cov_folder)) if cov_f[-4:] == '.csv'])
days = [d[:-4].split('_')[1] for d in gt_files]
# Load all gt poses
print('\nLoading days groundtruth poses...')
t0 = time.time()
gt_H = []
gt_t = []
for d, gt_f in enumerate(gt_files):
t1 = time.time()
gt_pkl_file = join(data_path, gt_folder, gt_f[:-4] + '.pkl')
if exists(gt_pkl_file):
# Read pkl
with open(gt_pkl_file, 'rb') as f:
day_gt_t, day_gt_H = pickle.load(f)
else:
# File paths
gt_csv = join(data_path, gt_folder, gt_f)
# Load gt
gt = np.loadtxt(gt_csv, delimiter=',')
# Convert gt to homogenous rotation/translation matrix
day_gt_t = gt[:, 0]
day_gt_H = ssc_to_homo(gt[:, 1:])
# Save pickle
with open(gt_pkl_file, 'wb') as f:
pickle.dump([day_gt_t, day_gt_H], f)
t2 = time.time()
print('{:s} {:d}/{:d} Done in {:.1f}s'.format(gt_f, d, gt_files.shape[0], t2 - t1))
gt_t += [day_gt_t]
gt_H += [day_gt_H]
if show_day_trajectory:
cov_csv = join(data_path, cov_folder, cov_files[d])
cov = np.loadtxt(cov_csv, delimiter=',')
t_cov = cov[:, 0]
t_cov_bool = np.logical_and(t_cov > np.min(day_gt_t), t_cov < np.max(day_gt_t))
t_cov = t_cov[t_cov_bool]
# Note: Interpolation is not needed, this is done as a convenience
interp = scipy.interpolate.interp1d(day_gt_t, day_gt_H[:, :3, 3], kind='nearest', axis=0)
node_poses = interp(t_cov)
plt.figure()
plt.scatter(day_gt_H[:, 1, 3], day_gt_H[:, 0, 3], 1, c=-day_gt_H[:, 2, 3], linewidth=0)
plt.scatter(node_poses[:, 1], node_poses[:, 0], 1, c=-node_poses[:, 2], linewidth=5)
plt.axis('equal')
plt.title('Ground Truth Position of Nodes in SLAM Graph')
plt.xlabel('East (m)')
plt.ylabel('North (m)')
plt.colorbar()
plt.show()
t2 = time.time()
print('Done in {:.1f}s\n'.format(t2 - t0))
# Out files
out_folder = join(data_path, 'day_ply')
if not exists(out_folder):
makedirs(out_folder)
# Focus on a particular point
p0 = np.array([-220, -527, 12])
center_radius = 10.0
point_radius = 50.0
# Loop on days
for d, day in enumerate(days):
#if day != '2012-02-05':
# continue
day_min_t = gt_t[d][0]
day_max_t = gt_t[d][-1]
frames_folder = join(data_path, 'frames_ply', day)
f_times = np.sort([float(f[:-4]) for f in listdir(frames_folder) if f[-4:] == '.ply'])
# If we want, load only SLAM nodes
if only_SLAM_nodes:
# Load node timestamps
cov_csv = join(data_path, cov_folder, cov_files[d])
cov = np.loadtxt(cov_csv, delimiter=',')
t_cov = cov[:, 0]
t_cov_bool = np.logical_and(t_cov > day_min_t, t_cov < day_max_t)
t_cov = t_cov[t_cov_bool]
# Find closest lidar frames
t_cov = np.expand_dims(t_cov, 1)
diffs = np.abs(t_cov - f_times)
inds = np.argmin(diffs, axis=1)
f_times = f_times[inds]
# Is this frame in gt
f_t_bool = np.logical_and(f_times > day_min_t, f_times < day_max_t)
f_times = f_times[f_t_bool]
# Interpolation gt poses to frame timestamps
interp = scipy.interpolate.interp1d(gt_t[d], gt_H[d], kind='nearest', axis=0)
frame_poses = interp(f_times)
N = len(f_times)
world_points = []
world_frames = []
world_frames_c = []
print('Reading', day, ' => ', N, 'files')
for f_i, f_t in enumerate(f_times):
t1 = time.time()
#########
# GT pose
#########
H = frame_poses[f_i].astype(np.float32)
# s = '\n'
# for cc in H:
# for c in cc:
# s += '{:5.2f} '.format(c)
# s += '\n'
# print(s)
#############
# Focus check
#############
if np.linalg.norm(H[:3, 3] - p0) > center_radius:
continue
###################################
# Local frame coordinates for debug
###################################
# Create artificial frames
x = np.linspace(0, 1, 50, dtype=np.float32)
points = np.hstack((np.vstack((x, x*0, x*0)), np.vstack((x*0, x, x*0)), np.vstack((x*0, x*0, x)))).T
colors = ((points > 0.1).astype(np.float32) * 255).astype(np.uint8)
hpoints = np.hstack((points, np.ones_like(points[:, :1])))
hpoints = np.matmul(hpoints, H.T)
hpoints[:, 3] *= 0
world_frames += [hpoints[:, :3]]
world_frames_c += [colors]
#######################
# Load velo point cloud
#######################
# Load frame ply file
f_name = '{:.0f}.ply'.format(f_t)
data = read_ply(join(frames_folder, f_name))
points = np.vstack((data['x'], data['y'], data['z'])).T
#intensity = data['intensity']
hpoints = np.hstack((points, np.ones_like(points[:, :1])))
hpoints = np.matmul(hpoints, H.T)
hpoints[:, 3] *= 0
hpoints[:, 3] += np.sqrt(f_t - f_times[0])
# focus check
focus_bool = np.linalg.norm(hpoints[:, :3] - p0, axis=1) < point_radius
hpoints = hpoints[focus_bool, :]
world_points += [hpoints]
t2 = time.time()
print('File {:s} {:d}/{:d} Done in {:.1f}s'.format(f_name, f_i, N, t2 - t1))
if len(world_points) < 2:
continue
world_points = np.vstack(world_points)
###### DEBUG
world_frames = np.vstack(world_frames)
world_frames_c = np.vstack(world_frames_c)
write_ply('testf.ply',
[world_frames, world_frames_c],
['x', 'y', 'z', 'red', 'green', 'blue'])
###### DEBUG
print(world_points.shape, world_points.dtype)
# Subsample merged frames
# world_points, features = grid_subsampling(world_points[:, :3],
# features=world_points[:, 3:],
# sampleDl=0.1)
features = world_points[:, 3:]
world_points = world_points[:, :3]
print(world_points.shape, world_points.dtype)
write_ply('test' + day + '.ply',
[world_points, features],
['x', 'y', 'z', 't'])
# Generate gt annotations
# Subsample day ply (for visualization)
# Save day ply
# a = 1/0
def local_PCA(points):
# Compute the barycenter
center = np.mean(points, axis=0)
# Centered clouds
points_c = points - center
# Covariance matrix
C = (points_c.T).dot(points_c) / points.shape[0]
# Eigenvalues
return np.linalg.eigh(C)
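# Small sanity-check sketch (not called anywhere): np.linalg.eigh returns the
# eigenvalues in ascending order, so for a roughly planar patch the surface
# normal is the eigenvector in the *first* column (smallest eigenvalue), which
# is exactly how estimate_normals_planarity_debug() reads it below.
def _demo_local_pca_normal():
    rng = np.random.RandomState(0)
    pts = np.zeros((200, 3), dtype=np.float32)
    pts[:, :2] = rng.uniform(-1.0, 1.0, size=(200, 2))   # noisy patch of z = 0
    pts[:, 2] = 0.01 * rng.randn(200)
    eigenvalues, eigenvectors = local_PCA(pts)
    normal = eigenvectors[:, 0]   # eigenvector of the smallest eigenvalue
    return eigenvalues, normal    # normal is approximately (0, 0, +-1)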
def estimate_normals_planarity_debug(cloud):
"""
Custom function that estimates normals and planarity of lidar frames, using polar-coordinate neighborhoods.
:param cloud: (N, 3) array of lidar points.
:return: sphericity scores (normals and debug features are written to ply files).
"""
# Rescale for numerical stability
#
t = [time.time()]
# Get point cloud
points = cloud.astype(np.float32)
normals0, planarity, linearity = polar_normals(points.astype(np.float32), verbose=1)
scores0 = planarity + linearity
t += [time.time()]
print(normals0.dtype, normals0.shape)
print(scores0.dtype, scores0.shape)
# Transform to polar coordinates
polar_points = cart2pol(points)
t += [time.time()]
# Define search radius in l1 metric. Vertical angular resolution of HDL32 is 1.29
angular_res = 1.29 * np.pi / 180
polar_r = 1.5 * angular_res
# Define horizontal scale (smaller distance for the neighbor in horizontal direction)
horizontal_scale = 0.5
# Use log of range so that neighbor radius is proportional to the range.
range_scale = 4.0
polar_points[:, 0] = np.log(polar_points[:, 0]) * polar_r / (np.log((1 + polar_r) / (1 - polar_r)) * range_scale)
# Apply horizontal scale
polar_points[:, 2] *= 1 / horizontal_scale
t += [time.time()]
# Create 2d KDTree to search lidar neighborhoods
polar_tree = KDTree(polar_points, metric='l2')
t += [time.time()]
# Find neighbors
all_neighb_inds = polar_tree.query_radius(polar_points, polar_r)
t += [time.time()]
# Rescale everything
# polar_points[:, 2] *= horizontal_scale
# polar_points[:, 0] = np.exp(polar_points[:, 0] * np.log((1 + polar_r) / (1 - polar_r)) * range_scale / polar_r)
# Compute covariance matrices
all_eigenvalues = np.empty(polar_points.shape, dtype=np.float32)
all_eigenvectors = np.empty((polar_points.shape[0], 3, 3), dtype=np.float32)
for i, neighb_inds in enumerate(all_neighb_inds):
all_eigenvalues[i, :], all_eigenvectors[i, :, :] = local_PCA(points[neighb_inds, :])
t += [time.time()]
# Compute normals and planarity
normals = all_eigenvectors[:, :, 0]
sphericity = 1 -all_eigenvalues[:, 0] / (all_eigenvalues[:, 2] + 1e-9)
t += [time.time()]
# Choose random point for showing
rand_inds = np.random.randint(polar_points.shape[0], size=100)
features = np.zeros_like(polar_points[:, 2])
for ri, rand_id in enumerate(rand_inds):
features[all_neighb_inds[rand_id]] = ri
features[rand_id] = 2 * len(rand_inds)
write_ply('ttt_xyz.ply',
[points, normals, features, sphericity, scores0],
['x', 'y', 'z', 'nx', 'ny', 'nz', 'f', 'score', 'cpp_score'])
# polar_points[:, 1] *= 180 / np.pi
# polar_points[:, 2] *= 180 / np.pi
#polar_points[:, 0] = np.exp(polar_points[:, 0] * np.log((1 + polar_r) / (1 - polar_r)) * range_scale / polar_r)
polar_points = polar_points[:, [2, 1, 0]]
write_ply('ttt_rtp.ply',
[polar_points, polar_points[:, 1] * 0, features],
['x', 'y', 'z', 'i', 'f'])
# Filter outlier from ray/edges
# Assign normals to pointcloud structure
#cloud.normals = o3d.utility.Vector3dVector(normals)
t += [time.time()]
# Display timings
print('\n*****************\n')
print('Validation timings:')
i = 0
print('C++ ....... {:.1f}ms'.format(1000 * (t[i + 1] - t[i])))
i += 1
print('polar ..... {:.1f}ms'.format(1000 * (t[i + 1] - t[i])))
i += 1
print('scale ..... {:.1f}ms'.format(1000 * (t[i + 1] - t[i])))
i += 1
print('Tree ...... {:.1f}ms'.format(1000 * (t[i + 1] - t[i])))
i += 1
print('neighb .... {:.1f}ms'.format(1000 * (t[i + 1] - t[i])))
i += 1
print('PCA ....... {:.1f}ms'.format(1000 * (t[i + 1] - t[i])))
i += 1
print('features .. {:.1f}ms'.format(1000 * (t[i + 1] - t[i])))
i += 1
print('Assign .... {:.1f}ms'.format(1000 * (t[i + 1] - t[i])))
print('\n*****************\n')
return sphericity
def estimate_normals_planarity(cloud):
"""
Custom function that estimates normals and planarity of lidar frames, using polar-coordinate neighborhoods.
:param cloud: Open3D PointCloud.
:return: planarity + linearity scores (normals are modified in place).
"""
# Get point cloud
points = np.asarray(cloud.points)
normals, planarity, linearity = polar_normals(points.astype(np.float32), verbose=1)
scores = planarity + linearity
# Assign normals to pointcloud structure
cloud.normals = o3d.utility.Vector3dVector(normals)
return scores
def gaussian_conv_filter(dimension=3, size=5):
# Sigma according to size
sig = (size/2 - 0.5) / 2
eps = 1e-6
# Get coordinates
coords = np.arange(-size/2 + 0.5, size/2, 1.0)
if dimension == 2:
x, y = | |
# Repository: bpinsard/nipy
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Utilities for extracting masks from EPI images and applying them to time
series.
"""
from __future__ import absolute_import
import math
# Major scientific libraries imports
import numpy as np
from scipy import ndimage
# Neuroimaging libraries imports
from nibabel import load, nifti1, save
from ..io.nibcompat import get_header, get_affine, get_unscaled_data
from ..externals.six import string_types
###############################################################################
# Operating on connect component
###############################################################################
def largest_cc(mask):
""" Return the largest connected component of a 3D mask array.
Parameters
-----------
mask: 3D boolean array
3D array indicating a mask.
Returns
--------
mask: 3D boolean array
3D array indicating a mask, with only one connected component.
"""
# We use asarray to be able to work with masked arrays.
mask = np.asarray(mask)
labels, label_nb = ndimage.label(mask)
if not label_nb:
raise ValueError('No non-zero values: no connected components')
if label_nb == 1:
return mask.astype(np.bool)
label_count = np.bincount(labels.ravel().astype(np.int))
# discard 0 the 0 label
label_count[0] = 0
return labels == label_count.argmax()
def threshold_connect_components(map, threshold, copy=True):
""" Given a map with some coefficients set to zero, segment the
connect components with number of voxels smaller than the
threshold and set them to 0.
Parameters
----------
map: ndarray,
The spatial map to segment
threshold: scalar,
The minimum number of voxels to keep a cluster.
copy: bool, optional
If copy is false, the input array is modified inplace
Returns
-------
map: ndarray,
the map with connected components removed
"""
labels, _ = ndimage.label(map)
weights = np.bincount(labels.ravel())
if copy:
map = map.copy()
for label, weight in enumerate(weights):
if label == 0:
continue
if weight < threshold:
map[labels == label] = 0
return map
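# Minimal illustrative sketch of the two helpers above on a toy volume with
# two blobs of different sizes: largest_cc() keeps the bigger one, while
# threshold_connect_components() zeroes out components below a voxel count.
def _demo_connected_components():
    vol = np.zeros((10, 10, 10), dtype=bool)
    vol[1:5, 1:5, 1:5] = True     # 64-voxel component
    vol[7:9, 7:9, 7:9] = True     # 8-voxel component
    biggest = largest_cc(vol)
    cleaned = threshold_connect_components(vol.astype(np.int8), threshold=10)
    return biggest.sum(), (cleaned > 0).sum()   # -> (64, 64)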
###############################################################################
# Utilities to calculate masks
###############################################################################
def compute_mask_files(input_filename, output_filename=None,
return_mean=False, m=0.2, M=0.9, cc=1,
exclude_zeros=False, opening=2):
"""
Compute a mask file from fMRI nifti file(s)
Compute and write the mask of an image based on the grey level
This is based on a heuristic proposed by T. Nichols:
find the least dense point of the histogram, between fractions
m and M of the total image histogram.
In case of failure, it is usually advisable to increase m.
Parameters
----------
input_filename : string
nifti filename (4D) or list of filenames (3D).
output_filename : string or None, optional
path to save the output nifti image (if not None).
return_mean : boolean, optional
if True, and output_filename is None, return the mean image also, as
a 3D array (2nd return argument).
m : float, optional
lower fraction of the histogram to be discarded.
M: float, optional
upper fraction of the histogram to be discarded.
cc: boolean, optional
if cc is True, only the largest connect component is kept.
exclude_zeros: boolean, optional
Consider zeros as missing values for the computation of the
threshold. This option is useful if the images have been
resliced with a large padding of zeros.
opening: int, optional
Size of the morphological opening performed as post-processing
Returns
-------
mask : 3D boolean array
The brain mask
mean_image : 3d ndarray, optional
The mean of all the images used to estimate the mask. Only
provided if `return_mean` is True.
"""
if isinstance(input_filename, string_types):
# One single filename or image
nim = load(input_filename) # load the image from the path
vol_arr = get_unscaled_data(nim)
header = get_header(nim)
affine = get_affine(nim)
if vol_arr.ndim == 4:
if isinstance(vol_arr, np.memmap):
# Get rid of memmapping: it is faster.
mean_volume = np.array(vol_arr, copy=True).mean(axis=-1)
else:
mean_volume = vol_arr.mean(axis=-1)
# Make a copy, to avoid holding a reference on the full array,
# and thus polluting the memory.
first_volume = vol_arr[..., 0].copy()
elif vol_arr.ndim == 3:
mean_volume = first_volume = vol_arr
else:
raise ValueError('Need 4D file for mask')
del vol_arr
else:
# List of filenames
if len(list(input_filename)) == 0:
raise ValueError('input_filename should be a non-empty '
'list of file names')
# We have several images, we do mean on the fly,
# to avoid loading all the data in the memory
# We do not use the unscaled data here?:
# if the scalefactor is being used to record real
# differences in intensity over the run this would break
for index, filename in enumerate(input_filename):
nim = load(filename)
if index == 0:
first_volume = nim.get_data().squeeze()
mean_volume = first_volume.copy().astype(np.float32)
header = get_header(nim)
affine = get_affine(nim)
else:
mean_volume += nim.get_data().squeeze()
mean_volume /= float(len(list(input_filename)))
del nim
if np.isnan(mean_volume).any():
tmp = mean_volume.copy()
tmp[np.isnan(tmp)] = 0
mean_volume = tmp
mask = compute_mask(mean_volume, first_volume, m, M, cc,
opening=opening, exclude_zeros=exclude_zeros)
if output_filename is not None:
header['descrip'] = 'mask'
output_image = nifti1.Nifti1Image(mask.astype(np.uint8),
affine=affine,
header=header)
save(output_image, output_filename)
if not return_mean:
return mask
else:
return mask, mean_volume
def compute_mask(mean_volume, reference_volume=None, m=0.2, M=0.9,
cc=True, opening=2, exclude_zeros=False):
"""
Compute a mask file from fMRI data in 3D or 4D ndarrays.
Compute and write the mask of an image based on the grey level
This is based on a heuristic proposed by T. Nichols:
find the least dense point of the histogram, between fractions
m and M of the total image histogram.
In case of failure, it is usually advisable to increase m.
Parameters
----------
mean_volume : 3D ndarray
mean EPI image, used to compute the threshold for the mask.
reference_volume: 3D ndarray, optional
reference volume used to compute the mask. If none is give, the
mean volume is used.
m : float, optional
lower fraction of the histogram to be discarded.
M: float, optional
upper fraction of the histogram to be discarded.
cc: boolean, optional
if cc is True, only the largest connect component is kept.
opening: int, optional
if opening is larger than 0, a morphological opening is performed,
to keep only large structures. This step is useful to remove parts of
the skull that might have been included.
exclude_zeros: boolean, optional
Consider zeros as missing values for the computation of the
threshold. This option is useful if the images have been
resliced with a large padding of zeros.
Returns
-------
mask : 3D boolean ndarray
The brain mask
"""
if reference_volume is None:
reference_volume = mean_volume
sorted_input = np.sort(mean_volume.reshape(-1))
if exclude_zeros:
sorted_input = sorted_input[sorted_input != 0]
limiteinf = int(math.floor(m * len(sorted_input)))
limitesup = int(math.floor(M * len(sorted_input)))
delta = sorted_input[limiteinf + 1:limitesup + 1] \
- sorted_input[limiteinf:limitesup]
ia = delta.argmax()
threshold = 0.5 * (sorted_input[ia + limiteinf]
+ sorted_input[ia + limiteinf + 1])
mask = (reference_volume >= threshold)
if cc:
mask = largest_cc(mask)
if opening > 0:
mask = ndimage.binary_opening(mask.astype(np.int),
iterations=opening)
return mask.astype(bool)
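# Minimal illustrative sketch (not part of the library): compute_mask() on a
# synthetic volume -- a bright ball in a dim, noisy background. The histogram
# heuristic described above finds the valley between background and object,
# so the mask should roughly recover the ball. Note that in this sketch the
# ball must occupy more than the discarded upper fraction (1 - M) of the
# voxels, otherwise the valley falls outside the inspected histogram window.
def _demo_compute_mask():
    shape = (40, 40, 40)
    grid = np.indices(shape)
    dist = np.sqrt(((grid - 20.0) ** 2).sum(axis=0))
    mean_volume = np.where(dist < 14, 100.0, 10.0)
    mean_volume = mean_volume + np.random.RandomState(0).randn(*shape)
    mask = compute_mask(mean_volume, m=0.2, M=0.9, cc=True, opening=2)
    return mask.sum()   # roughly the number of voxels inside the ball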
def compute_mask_sessions(session_images, m=0.2, M=0.9, cc=1, threshold=0.5,
exclude_zeros=False, return_mean=False, opening=2):
""" Compute a common mask for several sessions of fMRI data.
Uses the mask-finding algorithm to extract a mask for each
session, keeps the voxels selected in at least a given fraction of the
session masks, and then keeps only the main connected component.
Parameters
----------
session_images : list of (list of strings) or nipy image objects
A list of images/list of nifti filenames. Each inner list/image
represents a session.
m : float, optional
lower fraction of the histogram to be discarded.
M: float, optional
upper fraction of the histogram to be discarded.
cc: boolean, optional
if cc is True, only the largest connect component is kept.
threshold : float, optional
the inter-session threshold: the fraction of the
total number of sessions for which a voxel must be in the
mask to be kept in the common mask.
threshold=1 corresponds to keeping the intersection of all
masks, whereas threshold=0 is the union of all masks.
exclude_zeros: boolean, optional
Consider zeros as missing values for the computation of the
threshold. This option is useful if the images have been
resliced with a large padding of zeros.
return_mean: boolean, optional
if return_mean is True, the mean image across subjects is
returned.
opening: int, optional,
size of the morphological opening
Returns
-------
mask : 3D boolean ndarray
The brain mask
mean : 3D float array
The mean image
"""
mask, mean = None, None
for index, session in enumerate(session_images):
if hasattr(session, 'get_data'):
mean = session.get_data()
if mean.ndim > 3:
mean = mean.mean(-1)
not in oval] for B in BIBD]
# We relabel the points to 0,1,2,...
V = [x for x in range(73) if x not in oval]
rel = dict(zip(V,range(len(V))))
PBD = [[rel[x] for x in B] for B in PBD]
return OA_from_PBD(7,66,PBD,check=False)
def OA_7_68():
r"""
Return an OA(7,68)
Construction shared by <NAME>.
.. SEEALSO::
:func:`sage.combinat.designs.orthogonal_arrays.OA_from_PBD`
EXAMPLES::
sage: from sage.combinat.designs.orthogonal_arrays import is_orthogonal_array
sage: from sage.combinat.designs.database import OA_7_68
sage: OA = OA_7_68()
sage: is_orthogonal_array(OA,7,68,2)
True
The design is available from the general constructor::
sage: designs.orthogonal_arrays.is_available(7,68)
True
"""
# base block of a (73,9,1) BIBD
B = [0, 19, 26, 14, 63, 15, 32, 35, 65]
# The corresponding BIBD
BIBD = [[(x+i)%73 for x in B] for i in range(73)]
# the first 5 elements of an oval
#
# (this is the only difference with the OA(7,66) construction)
oval = [(-x)%73 for x in B][:5]
# PBD minus the oval
PBD = [[x for x in B if x not in oval] for B in BIBD]
# We relabel the points to 0,1,2,...
V = [x for x in range(73) if x not in oval]
rel = dict(zip(V,range(len(V))))
PBD = [[rel[x] for x in B] for B in PBD]
return OA_from_PBD(7,68,PBD,check=False)
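# Small verification sketch in plain Python (independent of Sage): developing
# the base block used above modulo 73 should give a (73,9,1) BIBD, i.e. every
# unordered pair of points is covered by exactly one block. This only sanity
# checks the claim in the comments; it is not part of the construction.
def _check_cyclic_bibd(base_block=(0, 19, 26, 14, 63, 15, 32, 35, 65), v=73):
    from itertools import combinations
    blocks = [[(x + i) % v for x in base_block] for i in range(v)]
    counts = {}
    for block in blocks:
        for pair in combinations(sorted(block), 2):
            counts[pair] = counts.get(pair, 0) + 1
    return all(c == 1 for c in counts.values()) and len(counts) == v * (v - 1) // 2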
def OA_8_69():
r"""
Return an OA(8,69)
Construction shared by <NAME>.
.. SEEALSO::
:func:`sage.combinat.designs.orthogonal_arrays.OA_from_PBD`
EXAMPLES::
sage: from sage.combinat.designs.orthogonal_arrays import is_orthogonal_array
sage: from sage.combinat.designs.database import OA_8_69
sage: OA = OA_8_69()
sage: is_orthogonal_array(OA,8,69,2)
True
The design is available from the general constructor::
sage: designs.orthogonal_arrays.is_available(8,69)
True
"""
# base block of a (73,9,1) BIBD
B = [1,2,4,8,16,32,37,55,64]
# The corresponding BIBD
BIBD = [[(x+i)%73 for x in B] for i in range(73)]
oval = [72,71,69,65]
# PBD minus the oval
PBD = [[x for x in B if x not in oval] for B in BIBD]
sets_of_size_seven = [R for R in PBD if len(R) == 7]
others = [R for R in PBD if len(R) != 7]
# 68, 27, and 52 are the only elements appearing twice in the rows of
# sets_of_size_seven, and each row contains exactly one of them.
# We split them into "balanced" halves.
O1 = sets_of_size_seven[:3]
O2 = sets_of_size_seven[-3:]
assert all(x in sum(O1,[]) for x in (68,27,52))
assert all(x in sum(O2,[]) for x in (68,27,52))
# Blocks of "others", without the 0..0,1..1,2..2 ... rows
OA = OA_from_PBD(8,69,others,check=False)[:-69]
# Blocks of O1
OA_8_7 = orthogonal_array(8,7,check=False)
for B in O1:
for BB in OA_8_7:
OA.append([B[i] for i in BB])
# Blocks of O2
    # Remove the all-zero row. Note that OA_8_7_minus_TD_8_1 is the same list object
    # as OA_8_7, so the loop over OA_8_7 below also skips that row.
    OA_8_7_minus_TD_8_1 = OA_8_7
    OA_8_7_minus_TD_8_1.remove([0]*8)
for B in O2:
# Making sure the double element is the first one
B.sort(key=lambda x: int(bool(x not in (68,27,52))))
for BB in OA_8_7:
OA.append([B[i] for i in BB])
# Adding the missing 0..0,1..1,... rows
done = sum(O1,[])+sum(O2,[])
missing = [x for x in range(73) if x not in done and x not in oval]
for x in missing:
OA.append([x]*8)
# Relabelling everything to 0..68
relabel = dict(zip([x for x in range(73) if x not in oval],range(69)))
OA = [[relabel[x] for x in B] for B in OA]
return OA
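# Exploratory sketch (hypothetical helper, not used by the construction above): rebuild
# the PBD from OA_8_69 and list the points occurring more than once among its blocks of
# size 7; according to the comment in OA_8_69, this should single out 27, 52 and 68.
def _repeated_points_in_size_seven_blocks():
    from collections import Counter
    B = [1, 2, 4, 8, 16, 32, 37, 55, 64]
    oval = [72, 71, 69, 65]
    BIBD = [[(x + i) % 73 for x in B] for i in range(73)]
    PBD = [[x for x in block if x not in oval] for block in BIBD]
    counts = Counter(x for block in PBD if len(block) == 7 for x in block)
    return sorted(x for x, c in counts.items() if c > 1)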
def OA_7_74():
r"""
Return an OA(7,74)
Construction shared by <NAME>.
.. SEEALSO::
:func:`sage.combinat.designs.orthogonal_arrays.OA_from_PBD`
EXAMPLES::
sage: from sage.combinat.designs.orthogonal_arrays import is_orthogonal_array
sage: from sage.combinat.designs.database import OA_7_74
sage: OA = OA_7_74()
sage: is_orthogonal_array(OA,7,74,2)
True
The design is available from the general constructor::
sage: designs.orthogonal_arrays.is_available(7,74)
True
"""
# base block of a (91,10,1) BIBD
B = [0,1,3,9,27,81,61,49,56,77]
# The corresponding BIBD
BIBD = [[(x+i)%91 for x in B] for i in range(91)]
# an oval
oval = [(-x)%91 for x in B][-7:]
# PBD minus the oval+B
to_delete = oval + B
PBD = [[x for x in B if x not in to_delete] for B in BIBD]
PBD.remove([])
# We relabel the points to 0,1,2,...
V = [x for x in range(91) if x not in to_delete]
rel = dict(zip(V,range(len(V))))
PBD = [[rel[x] for x in B] for B in PBD]
return OA_from_PBD(7,74,PBD,check=False)
def OA_8_76():
r"""
Return an OA(8,76)
Construction shared by <NAME>.
.. SEEALSO::
:func:`sage.combinat.designs.orthogonal_arrays.OA_from_PBD`
EXAMPLES::
sage: from sage.combinat.designs.orthogonal_arrays import is_orthogonal_array
sage: from sage.combinat.designs.database import OA_8_76
sage: OA = OA_8_76()
sage: is_orthogonal_array(OA,8,76,2)
True
The design is available from the general constructor::
sage: designs.orthogonal_arrays.is_available(8,76)
True
"""
# base block of a (91,10,1) BIBD
B = [0,1,3,9,27,81,61,49,56,77]
# The corresponding BIBD
BIBD = [[(x+i)%91 for x in B] for i in range(91)]
oval = [2,4,5,12,24]
to_remove = oval + B
# PBD minus the oval
PBD = [[x for x in B if x not in to_remove] for B in BIBD]
PBD.remove([])
sets_of_size_seven = [R for R in PBD if len(R) == 7]
others = [R for R in PBD if len(R) != 7]
# critical_points are the 10 elements appearing twice in the rows of the 10
# sets_of_size_seven, and each row contains exactly two of them
critical_points = [57,83,52,13,15,64,37,50,63,31]
# We reorder the rows such that every element of critical_points is exactly
# once the first element of a row.
    for i,x in zip(critical_points,sets_of_size_seven):
        x.sort(key=lambda e: -int(e == i))
        assert x[0] == i
# Blocks of "others", without the 0..0,1..1,2..2 ... rows
OA = OA_from_PBD(8,76,others,check=False)[:-76]
OA_8_7 = orthogonal_array(8,7,check=False)
    # Remove the all-zero row; OA_8_7_minus_TD_8_1 aliases OA_8_7, so the loop over
    # OA_8_7 below also skips that row.
    OA_8_7_minus_TD_8_1 = OA_8_7
    OA_8_7_minus_TD_8_1.remove([0]*8)
for B in sets_of_size_seven:
for BB in OA_8_7:
OA.append([B[i] for i in BB])
# Adding the missing 0..0,1..1,... rows
done = sum(sets_of_size_seven,[])
missing = [x for x in range(91) if x not in done and x not in to_remove]
for x in missing:
OA.append([x]*8)
    # Relabelling everything to 0..75
relabel = dict(zip([x for x in range(91) if x not in to_remove],range(91)))
OA = [[relabel[x] for x in B] for B in OA]
return OA
def OA_11_80():
r"""
Return an OA(11,80)
As explained in the Handbook III.3.76 [DesignHandbook]_. Uses the fact that
`80 = 2^4 \times 5` and that `5` is prime.
.. SEEALSO::
:func:`sage.combinat.designs.orthogonal_arrays.OA_n_times_2_pow_c_from_matrix`
EXAMPLES::
sage: from sage.combinat.designs.designs_pyx import is_orthogonal_array
sage: from sage.combinat.designs.database import OA_11_80
sage: OA = OA_11_80()
sage: is_orthogonal_array(OA,11,80,2)
True
The design is available from the general constructor::
sage: designs.orthogonal_arrays.is_available(11,80)
True
"""
from sage.rings.finite_rings.finite_field_constructor import FiniteField
A = [
[(0,None), (0,None), (0,None), (0,None), (0,None), (0,None), (0,None), (0,None), (0,None), (0,None)],
[(0,None), (1,None), (2,3), (3,None), (4,3), (2,None), (3,3), (4,None), (0,3), (1,3)],
[(0,None), (2,8), (4,6), (1,3), (3,3), (3,13), (0,13), (2,6), (4,14), (1,12)],
[(0,None), (3,11), (1,0), (4,9), (2,0), (3,7), (1,8), (4,10), (2,10), (0,11)],
[(0,None), (4,8), (3,14), (2,14), (1,12), (2,10), (1,10), (0,3), (4,5), (3,8)],
[(0,None), (1,8), (4,14), (4,12), (1,1), (0,1), (2,8), (3,12), (3,6), (2,1)],
[(1,None), (0,6), (1,1), (4,4), (4,13), (2,6), (0,14), (2,9), (3,0), (3,3)],
[(4,None), (1,9), (0,7), (1,1), (4,8), (3,5), (2,14), (0,0), (2,None), (3,0)],
[(4,None), (4,6), (1,2), (0,None), (1,13), (3,8), (3,2), (2,0), (0,14), (2,None)],
[(1,None), (4,9), (4,1), (1,0), (0,4), (2,5), (3,None), (3,5), (2,None), (0,None)]
]
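    # Each entry of A above is a pair (x, y): x is an element of GF(5), while y (an
    # integer exponent or None) encodes the `2^4` part of the construction; see the
    # documentation of OA_n_times_2_pow_c_from_matrix for the exact semantics of the
    # pairs and of Y.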
Y = [None, 0, 1, 14, 12, 7, 2, 11, 3, 6]
return OA_n_times_2_pow_c_from_matrix(11,4,FiniteField(5),A,Y,check=False)
def OA_15_112():
r"""
    Return an OA(15,112)
    Published by <NAME> in [Ab1995]_. Uses the fact that `112 = 2^4
    \times 7` and that `7` is prime.
.. SEEALSO::
:func:`sage.combinat.designs.orthogonal_arrays.OA_n_times_2_pow_c_from_matrix`
EXAMPLES::
sage: from sage.combinat.designs.designs_pyx import is_orthogonal_array
sage: from sage.combinat.designs.database import OA_15_112
sage: OA = OA_15_112()
sage: is_orthogonal_array(OA,15,112,2)
True
The design is available from the general constructor::
sage: designs.orthogonal_arrays.is_available(15,112)
True
"""
from sage.rings.finite_rings.finite_field_constructor import FiniteField
A = [
[(0,None), (0,None), (0,None), (0,None), (0,None), (0,None), (0,None), (0,None), (1,None), (4,None), (2,None), (2,None), (4,None), (1,None)],
[(0,None), (1,None), (2,None), (3, 5), (4, 9), (5, 11), (6, 12), (1, 10), (0, 10), (1, 11), (4, 13), (2, 6), (2, 2), (4, 1)],
[(0,None), (2, 3), (4, 6), (6, 0), (1, 1), (3, 12), (5, 6), (4, 2), (1, 9), (0, 3), (1, 7), (4, 7), (2, 8), (2, 5)],
[(0,None), (3, 3), (6, 2), (2, 3), (5, 2), (1, 9), (4, 13), (2, 8), (4, 12), (1, 12), (0, 7), (1, 10), (4, 11), (2, 14)],
[(0,None), (4,None), (1, 0), (5, 1), (2, 0), (6, 7), (3, 4), (2, 11), (2, 9), (4, 13), (1, 3), (0, 7), (1, 11), (4, 2)],
[(0,None), (5,None), (3, 14), (1, 7), (6, 5), (4, 3), (2, 1), (4, | |
with_file')
try:
#remove file from binary, whether having written it to dst_fullpath or not. To prevent bloating
binary = binary[:binary_offset]
if len(binary) == 0:
if MessageFields.WITH_BINARY in transport_json:
del transport_json[MessageFields.WITH_BINARY]
except Exception:
self.logger.exception(f'{self.connector.source_id} from peer {self.peername} '
'handle_incoming_connection with_file removal')
else:
if dst_dirpath is not False:
self.logger.warning(f'{self.connector.source_id} handle_incoming_connection from peer '
f'{self.peername} tried to create file in non existing directory '
f'{dst_dirpath} for type {with_file.get("dst_type")}, ignoring...')
if binary:
binary_offset = with_file.get('binary_offset', 0)
#remove file from binary, whether having written it to dst_fullpath or not. To prevent bloating
binary = binary[:binary_offset]
if len(binary) == 0:
if MessageFields.WITH_BINARY in transport_json:
del transport_json[MessageFields.WITH_BINARY]
#check if this message is a response to an awaiting request, and update put_msg_to_queue_recv
response_id = transport_json.get(MessageFields.RESPONSE_ID)
if response_id is not None:
if response_id not in self.connector.messages_awaiting_response[message_type].get(self.peername, {}):
self.logger.warning(f'{self.connector.source_id} handle_incoming_connection from peer '
f'{self.peername} got response_id {response_id} not existing in '
f'messages_awaiting_response for type {message_type}. '
'Forwarding to queue_recv anyway...')
else:
#set the response in case this is the response to a request that came with AWAIT_RESPONSE true
self.logger.debug(f'{self.connector.source_id} handle_incoming_connection from peer '
f'{self.peername} got response_id {response_id} in '
f'messages_awaiting_response for type {message_type}')
self.connector.messages_awaiting_response[message_type][self.peername][response_id][1] = \
(transport_json, data, binary)
self.connector.messages_awaiting_response[message_type][self.peername][response_id][0].set()
put_msg_to_queue_recv = False
if put_msg_to_queue_recv:
# send the message to queue
self.logger.debug(f'{self.connector.source_id} handle_incoming_connection from '
f'peer {self.peername} putting message to queue_recv')
try:
self.connector.queue_recv.put_nowait((transport_json, data, binary))
except Exception:
self.logger.exception('queue_recv.put_nowait')
transport_json, data, binary = None, None, None
except asyncio.CancelledError:
raise
except Exception as exc:
if isinstance(exc, asyncio.IncompleteReadError):
if self.connector.is_server:
self.logger.warning(f'{self.connector.source_id} handle_incoming_connection from '
f'peer {self.peername} Client disconnected')
else:
self.logger.warning(f'{self.connector.source_id} handle_incoming_connection from '
f'peer {self.peername} Server disconnected')
elif isinstance(exc, ConnectionResetError):
self.logger.warning(f'{self.connector.source_id} handle_incoming_connection from '
f'peer {self.peername} ConnectionResetError : {exc}')
elif isinstance(exc, ssl.SSLError):
self.logger.warning(f'{self.connector.source_id} handle_incoming_connection from '
f'peer {self.peername} SSLError : {exc}')
else:
self.logger.exception(f'{self.connector.source_id} handle_incoming_connection '
f'from peer {self.peername}')
if not self.is_stopping:
if self.connector.disk_persistence:
self.stop_nowait_for_persistence()
self.stop_task(client_wait_for_reconnect=True)
return
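    # Outgoing side: if disk persistence is enabled and a persistence file exists for
    # this peer, messages stored while disconnected are first drained through
    # queue_send_transition_to_connect, after which the regular queue_send is consumed
    # until the task stops.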
async def handle_outgoing_connection(self):
if self.connector.disk_persistence:
if self.peername not in self.connector.queue_send_transition_to_connect:
if self.connector.persistence_existence_check(self.peername):
self.logger.info(f'{self.connector.source_id} Creating queue_send_transition_to_connect '
f'for peer {self.peername}')
self.connector.queue_send_transition_to_connect[self.peername] = asyncio.Queue(maxsize=\
self.connector.MAX_QUEUE_SIZE)
if self.peername in self.connector.queue_send_transition_to_connect:
#loading persistence messages into queue_send_transition_to_connect
await self.connector.load_messages_from_persistence(self.peername)
queue_send = self.connector.queue_send_transition_to_connect[self.peername]
self.logger.info(f'{self.connector.source_id} Entering handle_outgoing_connection_queue for peer '
f'{self.peername} with queue_send_transition_to_connect of length {queue_send.qsize()}')
await self.handle_outgoing_connection_queue(queue_send, lambda :not queue_send.empty())
del self.connector.queue_send_transition_to_connect[self.peername]
queue_send = self.connector.queue_send[self.peername]
self.logger.info(f'{self.connector.source_id} Entering handle_outgoing_connection_queue for peer '
f'{self.peername} with queue_send of length '+str(queue_send.qsize()))
await self.handle_outgoing_connection_queue(queue_send, lambda :True)
async def handle_outgoing_connection_queue(self, queue_send, condition):
while condition():
try:
self.logger.debug(self.connector.source_id+' handle_outgoing_connection wait for queue_send')
if self.connector.SUPPORT_PRIORITIES:
priority, the_time, (transport_json, data, binary) = message_tuple = await queue_send.get()
message_tuple = message_tuple[2]
else:
transport_json, data, binary = message_tuple = await queue_send.get()
#self.connector.msg_counts['queue_sent']+=1
self.logger.debug(self.connector.source_id+' Received message from queue_send : ' + str(transport_json))
if DEBUG_SHOW_DATA:
self.logger.info('With data : '+str(data))
queue_send.task_done() #if someone uses 'join'
with_file_dict = transport_json.get(MessageFields.WITH_FILE)
#embed file if WITH_FILE
file_src_path = None
if with_file_dict:
file_src_path = str(with_file_dict['src_path'])#, with_file_dict['dst_name'], with_file_dict['dst_type'], with_file_dict['binary_offset']=0
binary_file = None
try:
with open(file_src_path, 'rb') as fd:
binary_file = fd.read()
if len(binary_file) > self.connector.max_size_file_upload_send:
raise Exception(f'{self.connector.source_id} cannot send too large file of size {len(binary_file)}')
except Exception as exc:
self.logger.exception('handle_outgoing_connection handling file : '+str(file_src_path))
#del transport_json[MessageFields.WITH_FILE]
#send the error msg to peer application, without file
transport_json[MessageFields.WITH_FILE]['file_error'] = str(exc)
else:
if binary_file:
#append the file byte content to "binary"
if binary:
len_binary = len(binary)
self.logger.info('handle_outgoing_connection prepare message with both binary and '
f'binary file at offset {len_binary}')
with_file_dict['binary_offset'] = len_binary
binary = binary + binary_file
else:
binary = binary_file
transport_json[MessageFields.WITH_BINARY] = True
binary_file = None
#use_ack = self.use_ack
#if self.use_ack_is_list:
# use_ack = (transport_json[MessageFields.MESSAGE_TYPE] in self.connector.use_ack)
#if use_ack or transport_json.get(MessageFields.WAIT_FOR_ACK):
if transport_json.get(MessageFields.WAIT_FOR_ACK):
#if this message has wait_for_ack true, we add the transport_id field to tell the peer that ACK is expected
#then we send message, and wait for ACK from peer before going on (ACK is expected in handle_incoming_connection)
self.transport_id += 1
if self.transport_id > self.MAX_TRANSPORT_ID:
self.transport_id = 0
self.ack_dict[self.transport_id] = asyncio.Event()
transport_json[MessageFields.TRANSPORT_ID] = self.transport_id
retry = 0
while retry <= self.MAX_RETRIES_BEFORE_ACK:
self.logger.debug(f'handle_outgoing_connection {self.connector.source_id} send message with '
f'transport_id {self.transport_id} expecting ACK')
await self.send_message(transport_json=transport_json, data=data, binary=binary)
transport_json, data, binary, message_tuple = None, None, None, None
try:
await asyncio.wait_for(self.ack_dict[self.transport_id].wait(), timeout=self.connector.ASYNC_TIMEOUT)
self.logger.info(f'handle_outgoing_connection {self.connector.source_id} received ACK for '
f'transport id {self.transport_id}')
del self.ack_dict[self.transport_id]
break
except asyncio.TimeoutError:
self.logger.warning(f'handle_outgoing_connection timed out waiting for ACK for '
f'transport id {self.transport_id} at retry {retry}')
retry += 1
else:
msg = f'handle_outgoing_connection ACK was not received for transport id {self.transport_id}'
self.logger.warning(msg)
#do we want to just go on, or restart ?
#raise Exception(msg)
else:
#send the message
self.logger.debug(f'handle_outgoing_connection sending message')
await self.send_message(transport_json=transport_json, data=data, binary=binary)
transport_json, data, binary, message_tuple = None, None, None, None
if file_src_path:
if with_file_dict.get('delete',True):
#delete file by default, unless specified False
try:
self.logger.info(f'handle_outgoing_connection Removing file {file_src_path} after upload')
os.remove(file_src_path)
except Exception:
self.logger.exception(f'handle_outgoing_connection trying to remove file {file_src_path}')
except asyncio.CancelledError:
raise
except Exception:
self.logger.exception(self.connector.source_id+' handle_outgoing_connection')
if not self.is_stopping:
if self.connector.disk_persistence:
self.connector.full_duplex_connections.pop(self.peername, None)
if queue_send != self.connector.queue_send.pop(self.peername, None):
self.logger.info(f'Special case : disconnection happens during transition. '
f'Transferring {queue_send.qsize()} messages')
#we should copy queue_send_transition_to_connect content into a new recreated persistent file
count = 0
disk_persistence_is_list = isinstance(self.connector.disk_persistence, list)
while not queue_send.empty():
if self.connector.SUPPORT_PRIORITIES:
priority, the_time, (transport_json, data, binary) = queue_send.get_nowait()
else:
transport_json, data, binary = queue_send.get_nowait()
disk_persistence = True
if disk_persistence_is_list:
#disk_persistence can be a list of message types
disk_persistence = (transport_json.get(MessageFields.MESSAGE_TYPE) in \
self.connector.disk_persistence)
if disk_persistence:
count += 1
message = self.connector.pack_message(transport_json=transport_json,
data=data, binary=binary)
self.logger.info(f'Emptying transition queue_send, Storing message '
f'number {count} to persistence to peername {self.peername}')
self.connector.store_message_to_persistence(self.peername, message, ignore_count=True)
else:
#regular case of disconnection
self.stop_nowait_for_persistence(message_tuple=message_tuple)
self.stop_task(client_wait_for_reconnect=True)
return
#4|2|json|4|data|4|binary
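    # i.e., as packed by connector.pack_message and read back in recv_message below:
    # a 4-byte length prefix for the whole message, then (per the shorthand above) a
    # 2-byte-length-prefixed JSON transport header and optional 4-byte-length-prefixed
    # data and binary payloads.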
async def send_message(self, message=None, message_type=None, source_id=None, destination_id=None, request_id=None,
response_id=None, transport_json=None, data=None, binary=None, message_type_publish=None):
try:
# async with self.lock_connection:
update_msg_counts = True
if message:
self.logger.debug(self.connector.source_id+' send_message of length {}'.format(len(message)))
if DEBUG_SHOW_DATA:
self.logger.info('and with data {}'.format(message))
else:
if transport_json:
self.logger.debug(f'{self.connector.source_id} send_message {message_type or transport_json} '
f'with data length {len(data or "")}')
if DEBUG_SHOW_DATA:
self.logger.info('and with data {}'.format(data))
if binary:
self.logger.info('and with binary {}'.format(binary))
#fill source_id (which is mandatory) if not supplied
if MessageFields.SOURCE_ID not in transport_json:
transport_json[MessageFields.SOURCE_ID] = self.connector.source_id
else:
                    #internal cases only like ssl/handshake_no_ssl, ack
if not source_id:
source_id = self.connector.source_id
message = self.connector.pack_message(message_type=message_type, source_id=source_id,
destination_id=destination_id, request_id=request_id,
response_id=response_id, transport_json=transport_json,
data=data, binary=binary, message_type_publish=message_type_publish)
if DEBUG_SHOW_DATA:
self.logger.info('send_message full message ready to send : '+str(message))
if transport_json and transport_json[MessageFields.MESSAGE_TYPE] == '_ack':
update_msg_counts = False
# send the length to be sent next
self.writer.write(message[:Structures.MSG_4_STRUCT.size])
self.writer.write(message[Structures.MSG_4_STRUCT.size:])
try:
await asyncio.wait_for(self.writer.drain(), timeout=self.connector.ASYNC_TIMEOUT)
except asyncio.TimeoutError as exc:
self.logger.warning('send_message TimeoutError : '+str(exc))
if self.connector.debug_msg_counts and update_msg_counts:
self.connector.msg_counts['queue_sent']+=1
self.logger.debug(self.connector.source_id+' send_message Finished sending message')
except asyncio.CancelledError:
raise
except ConnectionResetError as exc:
self.logger.warning(self.connector.source_id+' ConnectionResetError : '+str(exc)+' with peer '+self.peername)
raise
except Exception:
self.logger.exception(self.connector.source_id+' send_message with peer '+self.peername)
#4|2|json|4|data|4|binary
async def recv_message(self):
try:
self.logger.debug(self.connector.source_id+ ' recv_message')
next_length_bytes = await self.reader.readexactly(Structures.MSG_4_STRUCT.size)
#async with self.lock_connection:
message_size = Structures.MSG_4_STRUCT.unpack(next_length_bytes)[0]
self.logger.debug('recv_message got message of length : '+str(message_size))
message = await self.reader.readexactly(message_size)
#message = await asyncio.wait_for(self.reader.readexactly(message_size), timeout=self.connector.ASYNC_TIMEOUT)
transport_json, data, binary = self.connector.unpack_message(next_length_bytes+message)
if self.connector.debug_msg_counts:
if transport_json.get(MessageFields.MESSAGE_TYPE) != '_ack':
self.connector.msg_counts['queue_recv']+=1
self.logger.debug('recv_message with : '+str(transport_json)+', and data length : '+str(len(data)))
if DEBUG_SHOW_DATA:
self.logger.info('and with data {}'.format(data))
if binary:
self.logger.info('and with binary {}'.format(binary))
return [transport_json, data, binary]
except asyncio.CancelledError:
raise
except asyncio.IncompleteReadError:
self.logger.warning(f'{self.connector.source_id} recv_message : peer {self.peername} disconnected')
raise
except ConnectionResetError as exc:
self.logger.warning(f'{self.connector.source_id} recv_message ConnectionResetError : {exc} with '
f'peer {self.peername}')
raise
except ssl.SSLError as exc:
self.logger.warning(f'{self.connector.source_id} recv_message SSLError : {exc} with peer {self.peername}')
raise
except Exception:
self.logger.exception(f'{self.connector.source_id} recv_message with peer {self.peername}')
raise
async def handle_ssl_messages_server(self, data=None, transport_json=None):
try:
if not self.connector.ssl_allow_all:
data_json = json.loads(data.decode())
if data_json.get('cmd') == 'get_new_certificate':
if self.client_certificate_serial != self.connector.ssl_helper.default_client_serial:
self.logger.warning(f'handle_ssl_messages_server Client '
f'{self.connector.ssl_helper.source_id_2_cert["cert_2_source_id"].get(self.client_certificate_serial)}'
' tried to get_new_certificate with private certificate. Stopping...')
self.stop_task()
return
self.logger.info(f'handle_ssl_messages_server receiving get_new_certificate for {self.extra_info}, '
'and calling create_client_certificate')
                    #we could have checked whether the client's current certificate is the default one, but
"""
This file deals with 'model parameters' issues regarding the Cluster Class (e.g. GNFW parameteres)
"""
import astropy.units as u
import numpy as np
from ClusterModel.ClusterTools import cluster_global
from ClusterModel.ClusterTools import cluster_profile
from ClusterModel.ClusterTools import cluster_spectra
#==================================================
# Admin class
#==================================================
class Modpar(object):
""" Modpar class
    This class serves as a parser to the main Cluster class, to
include the subclass Modpar in this other file. All the definitions of the
model parameters should be here.
Profile models are now: ['GNFW', 'SVM', 'beta', 'doublebeta']
Spectral models are now: ['PowerLaw', 'ExponentialCutoffPowerLaw']
Attributes
----------
The attributes are the same as the Cluster class, see model.py
Methods
----------
- _validate_profile_model_parameters(self, inpar, unit): dedicated to check and validate the parameters
of profile models
- _validate_spectrum_model_parameters(self, inpar, unit): dedicated to check and validate the parameters
of spectral models
- set_pressure_gas_gNFW_param(self, pressure_model='P13UPP'): set the gas pressure profile parameters to the
universal value from different results
- set_pressure_gas_isoT_param(self, kBT): set gas pressure profile parameters so that the cluster is isothermal
- set_density_gas_isoT_param(self, kBT): set gas density profile parameters so that the cluster is isothermal
    - set_density_crp_isobaric_scal_param(self, scal=1.0): set CRp density profile parameters to have isobaric scaling
    - set_density_crp_isodens_scal_param(self, scal=1.0): set CRp density profile parameters to have isodensity scaling
- set_magfield_isobaric_scal_param(self, Bnorm, scal=0.5): set mag field profile parameters to have isobaric scaling
- set_magfield_isodens_scal_param(self, Bnorm, scal=0.5): set mag field profile parameters to have isodensity scaling
    - _get_generic_profile(self, radius, model, derivative=False): get any profile based on model type
"""
#==================================================
# Validate profile model parameters
#==================================================
def _validate_profile_model_parameters(self, inpar, unit):
"""
Check the profile parameters.
Parameters
----------
- inpar (dict): a dictionary containing the input parameters
- unit (str): contain the unit of the profile, e.g. keV/cm-3 for pressure
Outputs
----------
- outpar (dict): a dictionary with output parameters
"""
# List of available authorized models
model_list = ['GNFW', 'SVM', 'beta', 'doublebeta']
# Deal with unit
        if unit == '' or unit is None:
hasunit = False
else:
hasunit = True
# Check that the input is a dictionary
if type(inpar) != dict :
raise TypeError("The model should be a dictionary containing the name key and relevant parameters")
# Check that input contains a name
if 'name' not in inpar.keys() :
raise ValueError("The model dictionary should contain a 'name' field")
# Check that the name is in the acceptable name list
        if inpar['name'] not in model_list:
print('The profile model can be:')
print(model_list)
raise ValueError("The requested model is not available")
#---------- Deal with the case of GNFW
if inpar['name'] == 'GNFW':
# Check the content of the dictionary
cond1 = 'P_0' in inpar.keys() and 'a' in inpar.keys() and 'b' in inpar.keys() and 'c' in inpar.keys()
cond2 = 'c500' in inpar.keys() or 'r_p' in inpar.keys()
cond3 = not('c500' in inpar.keys() and 'r_p' in inpar.keys())
if not (cond1 and cond2 and cond3):
raise ValueError("The GNFW model should contain: {'P_0','c500' or 'r_p' (not both), 'a', 'b', 'c'}.")
# Check units and values
if hasunit:
try:
test = inpar['P_0'].to(unit)
except:
raise TypeError("P_0 should be homogeneous to "+unit)
if inpar['P_0'] < 0:
raise ValueError("P_0 should be >=0")
if 'r_p' in inpar.keys():
try:
test = inpar['r_p'].to('kpc')
except:
raise TypeError("r_p should be homogeneous to kpc")
if inpar['r_p'] <= 0:
raise ValueError("r_p should be >0")
if 'c500' in inpar.keys():
if inpar['c500'] <=0:
raise ValueError("c500 should be > 0")
# All good at this stage, setting parameters
if 'c500' in inpar.keys():
c500 = inpar['c500']
r_p = self._R500/inpar['c500']
if 'r_p' in inpar.keys():
c500 = (self._R500/inpar['r_p']).to_value('')
r_p = inpar['r_p']
if hasunit:
P0 = inpar['P_0'].to(unit)
else:
P0 = inpar['P_0']*u.adu
outpar = {"name": 'GNFW',
"P_0" : P0,
"c500": c500,
"r_p" : r_p.to('kpc'),
"a" : inpar['a'],
"b" : inpar['b'],
"c" : inpar['c']}
#---------- Deal with the case of SVM
if inpar['name'] == 'SVM':
# Check the content of the dictionary
cond1 = 'n_0' in inpar.keys() and 'r_c' in inpar.keys() and 'beta' in inpar.keys()
cond2 = 'r_s' in inpar.keys() and 'alpha' in inpar.keys() and 'epsilon' in inpar.keys() and 'gamma' in inpar.keys()
if not (cond1 and cond2):
raise ValueError("The SVM model should contain: {'n_0','beta','r_c','r_s', 'alpha', 'gamma', 'epsilon'}.")
# Check units
if hasunit:
try:
test = inpar['n_0'].to(unit)
except:
raise TypeError("n_0 should be homogeneous to "+unit)
try:
test = inpar['r_c'].to('kpc')
except:
raise TypeError("r_c should be homogeneous to kpc")
try:
test = inpar['r_s'].to('kpc')
except:
raise TypeError("r_s should be homogeneous to kpc")
# Check values
if inpar['n_0'] < 0:
raise ValueError("n_0 should be >= 0")
if inpar['r_c'] <= 0:
raise ValueError("r_c should be larger than 0")
if inpar['r_s'] <= 0:
raise ValueError("r_s should be larger than 0")
if hasunit:
n0 = inpar['n_0'].to(unit)
else:
n0 = inpar['n_0']*u.adu
# All good at this stage, setting parameters
outpar = {"name" : 'SVM',
"n_0" : n0,
"r_c" : inpar['r_c'].to('kpc'),
"r_s" : inpar['r_s'].to('kpc'),
"alpha" : inpar['alpha'],
"beta" : inpar['beta'],
"gamma" : inpar['gamma'],
"epsilon": inpar['epsilon']}
#---------- Deal with the case of beta
if inpar['name'] == 'beta':
# Check the content of the dictionary
cond1 = 'n_0' in inpar.keys() and 'r_c' in inpar.keys() and 'beta' in inpar.keys()
if not cond1:
raise ValueError("The beta model should contain: {'n_0','beta','r_c'}.")
# Check units
if hasunit:
try:
test = inpar['n_0'].to(unit)
except:
raise TypeError("n_0 should be homogeneous to "+unit)
try:
test = inpar['r_c'].to('kpc')
except:
raise TypeError("r_c should be homogeneous to kpc")
# Check values
if inpar['n_0'] < 0:
raise ValueError("n_0 should be >= 0")
if inpar['r_c'] <= 0:
raise ValueError("r_c should be larger than 0")
if hasunit:
n0 = inpar['n_0'].to(unit)
else:
n0 = inpar['n_0']*u.adu
# All good at this stage, setting parameters
outpar = {"name" : 'beta',
"n_0" : n0,
"r_c" : inpar['r_c'].to('kpc'),
"beta" : inpar['beta']}
#---------- Deal with the case of doublebeta
if inpar['name'] == 'doublebeta':
# Check the content of the dictionary
cond1 = 'n_01' in inpar.keys() and 'r_c1' in inpar.keys() and 'beta1' in inpar.keys()
cond2 = 'n_02' in inpar.keys() and 'r_c2' in inpar.keys() and 'beta2' in inpar.keys()
if not (cond1 and cond2):
raise ValueError("The double beta model should contain: {'n_01','beta1','r_c1','n_02','beta2','r_c2'}.")
# Check units
if hasunit:
try:
test = inpar['n_01'].to(unit)
except:
raise TypeError("n_01 should be homogeneous to "+unit)
try:
test = inpar['n_02'].to(unit)
except:
raise TypeError("n_02 should be homogeneous to "+unit)
try:
test = inpar['r_c1'].to('kpc')
except:
raise TypeError("r_c1 should be homogeneous to kpc")
try:
test = inpar['r_c2'].to('kpc')
except:
raise TypeError("r_c2 should be homogeneous to kpc")
# Check values
if inpar['n_01'] < 0:
raise ValueError("n_01 should be >= 0")
if inpar['r_c1'] <= 0:
raise ValueError("r_c1 should be larger than 0")
if inpar['n_02'] < 0:
raise ValueError("n_02 should be >= 0")
if inpar['r_c2'] <= 0:
raise ValueError("r_c2 should be larger than 0")
if hasunit:
n01 = inpar['n_01'].to(unit)
n02 = inpar['n_02'].to(unit)
else:
n01 = inpar['n_01']*u.adu
n02 = inpar['n_02']*u.adu
# All good at this stage, setting parameters
outpar = {"name" : 'doublebeta',
"n_01" : n01,
"r_c1" : inpar['r_c1'].to('kpc'),
"beta1" : inpar['beta1'],
"n_02" : n02,
"r_c2" : inpar['r_c2'].to('kpc'),
"beta2" : inpar['beta2']}
return outpar
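    # Example (illustrative values only): a GNFW pressure profile would be described by
    #   {'name': 'GNFW', 'P_0': 1.0e-2*u.keV*u.cm**-3, 'c500': 1.81,
    #    'a': 1.33, 'b': 4.13, 'c': 0.31}
    # with either 'c500' or 'r_p' given, but not both, as enforced above.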
#==================================================
# Validate spectrum model parameters
#==================================================
def _validate_spectrum_model_parameters(self, inpar, unit):
"""
Check the spectrum parameters.
Parameters
----------
- inpar (dict): a dictionary containing the input parameters
- unit (str): contain the unit of the spectrum, e.g. GeV-1 cm-3 for proton
Outputs
----------
- outpar (dict): a dictionary with output parameters
"""
# List of available authorized models
model_list = ['PowerLaw', 'ExponentialCutoffPowerLaw']
# Deal with unit
        if unit == '' or unit is None:
hasunit = False
else:
hasunit = True
# Check that the input is a dictionary
if type(inpar) != dict :
raise TypeError("The model should be a dictionary containing the name key and relevant parameters")
# Check that input contains a name
if 'name' not in inpar.keys() :
raise ValueError("The model dictionary should contain a 'name' field")
# Check that the name is in the acceptable name list
        if inpar['name'] not in model_list:
["2003-12-24", 98],
["2003-12-25", 96],
["2003-12-26", 48],
["2003-12-27", 89],
["2003-12-28", 130],
["2003-12-29", 90],
["2003-12-30", 106],
["2003-12-31", 111],
["2004-01-01", 128],
["2004-01-02", 83],
["2004-01-03", 60],
["2004-01-04", 109],
["2004-01-05", 137],
["2004-01-06", 147],
["2004-01-07", 99],
["2004-01-08", 73],
["2004-01-09", 126],
["2004-01-10", 73],
["2004-01-11", 72],
["2004-01-12", 87],
["2004-01-13", 85],
["2004-01-14", 115],
["2004-01-15", 121],
["2004-01-16", 97],
["2004-01-17", 109],
["2004-01-18", 74],
["2004-01-19", 52],
["2004-01-20", 49],
["2004-01-21", 41],
["2004-01-22", 64],
["2004-01-23", 80],
["2004-01-24", 38],
["2004-01-25", 58],
["2004-01-26", 106],
["2004-01-27", 57],
["2004-01-28", 106],
["2004-01-29", 111],
["2004-01-31", 118],
["2004-02-01", 109],
["2004-02-02", 53],
["2004-02-03", 50],
["2004-02-04", 59],
["2004-02-06", 56],
["2004-02-07", 68],
["2004-02-08", 52],
["2004-02-09", 68],
["2004-02-10", 130],
["2004-02-11", 95],
["2004-02-12", 103],
["2004-02-13", 124],
["2004-02-14", 95],
["2004-02-15", 92],
["2004-02-16", 95],
["2004-02-17", 135],
["2004-02-18", 242],
["2004-02-19", 451],
["2004-02-20", 140],
["2004-02-21", 109],
["2004-02-23", 88],
["2004-02-24", 164],
["2004-02-25", 145],
["2004-02-26", 46],
["2004-02-27", 85],
["2004-02-28", 125],
["2004-02-29", 54],
["2004-03-01", 83],
["2004-03-02", 73],
["2004-03-03", 60],
["2004-03-04", 85],
["2004-03-05", 73],
["2004-03-06", 51],
["2004-03-07", 56],
["2004-03-08", 108],
["2004-03-09", 179],
["2004-03-10", 446],
["2004-03-11", 84],
["2004-03-13", 104],
["2004-03-14", 87],
["2004-03-15", 143],
["2004-03-16", 206],
["2004-03-17", 77],
["2004-03-19", 114],
["2004-03-20", 87],
["2004-03-21", 92],
["2004-03-22", 165],
["2004-03-23", 104],
["2004-03-24", 33],
["2004-03-25", 88],
["2004-03-26", 137],
["2004-03-27", 151],
["2004-03-28", 338],
["2004-03-29", 239],
["2004-03-30", 139],
["2004-03-31", 79],
["2004-04-01", 123],
["2004-04-02", 64],
["2004-04-03", 51],
["2004-04-05", 133],
["2004-04-06", 93],
["2004-04-07", 39],
["2004-04-08", 111],
["2004-04-09", 145],
["2004-04-10", 193],
["2004-04-11", 131],
["2004-04-12", 131],
["2004-04-13", 108],
["2004-04-14", 95],
["2004-04-15", 141],
["2004-04-16", 186],
["2004-04-17", 156],
["2004-04-18", 260],
["2004-04-19", 138],
["2004-04-20", 133],
["2004-04-21", 107],
["2004-04-22", 143],
["2004-04-23", 61],
["2004-04-24", 109],
["2004-04-25", 151],
["2004-04-26", 63],
["2004-04-27", 63],
["2004-04-28", 79],
["2004-04-29", 138],
["2004-04-30", 47],
["2004-05-01", 67],
["2004-05-02", 84],
["2004-05-03", 95],
["2004-05-04", 73],
["2004-05-05", 89],
["2004-05-06", 91],
["2004-05-07", 152],
["2004-05-08", 189],
["2004-05-09", 92],
["2004-05-10", 97],
["2004-05-11", 107],
["2004-05-12", 81],
["2004-05-13", 89],
["2004-05-14", 93],
["2004-05-15", 92],
["2004-05-16", 50],
["2004-05-17", 61],
["2004-05-18", 66],
["2004-05-19", 77],
["2004-05-21", 56],
["2004-05-22", 65],
["2004-05-23", 86],
["2004-05-24", 134],
["2004-05-25", 141],
["2004-05-26", 30],
["2004-05-27", 83],
["2004-05-28", 111],
["2004-05-29", 56],
["2004-05-30", 66],
["2004-05-31", 56],
["2004-06-01", 100],
["2004-06-02", 109],
["2004-06-03", 118],
["2004-06-04", 107],
["2004-06-05", 74],
["2004-06-06", 58],
["2004-06-07", 88],
["2004-06-08", 100],
["2004-06-09", 109],
["2004-06-10", 125],
["2004-06-11", 114],
["2004-06-12", 110],
["2004-06-13", 118],
["2004-06-14", 135],
["2004-06-15", 147],
["2004-06-16", 99],
["2004-06-17", 29],
["2004-06-18", 75],
["2004-06-19", 73],
["2004-06-20", 97],
["2004-06-21", 102],
["2004-06-22", 93],
["2004-06-23", 78],
["2004-06-24", 58],
["2004-06-25", 61],
["2004-06-26", 100],
["2004-06-27", 106],
["2004-06-28", 139],
["2004-06-29", 152],
["2004-06-30", 49],
["2004-07-01", 46],
["2004-07-02", 85],
["2004-07-03", 97],
["2004-07-04", 58],
["2004-07-05", 56],
["2004-07-06", 59],
["2004-07-07", 74],
["2004-07-08", 63],
["2004-07-09", 59],
["2004-07-10", 91],
["2004-07-11", 70],
["2004-07-12", 53],
["2004-07-13", 55],
["2004-07-14", 67],
["2004-07-15", 97],
["2004-07-16", 123],
["2004-07-17", 118],
["2004-07-18", 100],
["2004-07-19", 80],
["2004-07-20", 135],
["2004-07-21", 67],
["2004-07-22", 70],
["2004-07-23", 105],
["2004-07-24", 55],
["2004-07-25", 78],
["2004-07-26", 78],
["2004-07-27", 59],
["2004-07-28", 111],
["2004-07-29", 78],
["2004-07-30", 30],
["2004-07-31", 78],
["2004-08-01", 91],
["2004-08-02", 119],
["2004-08-03", 95],
["2004-08-04", 73],
["2004-08-05", 76],
["2004-08-06", 89],
["2004-08-07", 117],
["2004-08-08", 145],
["2004-08-09", 143],
["2004-08-10", 84],
["2004-08-11", 84],
["2004-08-12", 51],
["2004-08-13", 31],
["2004-08-14", 83],
["2004-08-15", 76],
["2004-08-16", 51],
["2004-08-17", 67],
["2004-08-18", 75],
["2004-08-19", 68],
["2004-08-20", 80],
["2004-08-21", 99],
["2004-08-22", 70],
["2004-08-23", 60],
["2004-08-24", 105],
["2004-08-25", 122],
["2004-08-26", 100],
["2004-08-27", 125],
["2004-08-28", 70],
["2004-08-29", 57],
["2004-08-30", 79],
["2004-08-31", 68],
["2004-09-01", 61],
["2004-09-02", 67],
["2004-09-03", 77],
["2004-09-04", 64],
["2004-09-05", 96],
["2004-09-06", 101],
["2004-09-07", 24],
["2004-09-08", 61],
["2004-09-09", 80],
["2004-09-10", 85],
["2004-09-11", 88],
["2004-09-12", 95],
["2004-09-13", 101],
["2004-09-14", 140],
["2004-09-15", 34],
["2004-09-16", 81],
["2004-09-17", 89],
["2004-09-18", 86],
["2004-09-19", 71],
["2004-09-20", 94],
["2004-09-21", 40],
["2004-09-22", 84],
["2004-09-23", 122],
["2004-09-24", 197],
["2004-09-25", 179],
["2004-09-26", 111],
["2004-09-27", 114],
["2004-09-29", 134],
["2004-09-30", 141],
["2004-10-01", 17],
["2004-10-02", 59],
["2004-10-03", 83],
["2004-10-04", 118],
["2004-10-05", 153],
["2004-10-06", 166],
["2004-10-07", 325],
["2004-10-08", 402],
["2004-10-09", 263],
["2004-10-10", 374],
["2004-10-11", 127],
["2004-10-12", 37],
["2004-10-13", 62],
["2004-10-14", 67],
["2004-10-15", 99],
["2004-10-16", 116],
["2004-10-17", 110],
["2004-10-18", 126],
["2004-10-19", 149],
["2004-10-20", 110],
["2004-10-21", 56],
["2004-10-22", 59],
["2004-10-23", 97],
["2004-10-24", 146],
["2004-10-25", 142],
["2004-10-26", 34],
["2004-10-27", 79],
["2004-10-28", 154],
["2004-10-29", 191],
["2004-10-30", 219],
["2004-10-31", 157],
["2004-11-01", 35],
["2004-11-02", 39],
["2004-11-03", 124],
["2004-11-04", 164],
["2004-11-05", 56],
["2004-11-06", 92],
["2004-11-07", 133],
["2004-11-08", 173],
["2004-11-09", 86],
["2004-11-10", 77],
["2004-11-11", 62],
["2004-11-12", 45],
["2004-11-13", 93],
["2004-11-14", 160],
["2004-11-15", 54],
["2004-11-16", 67],
["2004-11-17", 65],
["2004-11-18", 99],
["2004-11-19", 97],
["2004-11-20", 47],
["2004-11-21", 93],
["2004-11-22", 165],
["2004-11-23", 156],
["2004-11-24", 89],
["2004-11-25", 41],
["2004-11-26", 53],
["2004-11-27", 89],
["2004-11-28", 99],
["2004-11-29", 81],
["2004-11-30", 139],
["2004-12-01", 275],
["2004-12-02", 270],
["2004-12-03", 330],
["2004-12-04", 97],
["2004-12-05", 37],
["2004-12-06", 97],
["2004-12-07", 89],
["2004-12-08", 170],
["2004-12-09", 248],
["2004-12-10", 97],
["2004-12-11", 181],
["2004-12-12", 123],
["2004-12-13", 89],
["2004-12-14", 198],
["2004-12-15", 305],
["2004-12-16", 86],
["2004-12-17", 92],
["2004-12-18", 143],
["2004-12-19", 82],
["2004-12-20", 23],
["2004-12-21", 81],
["2004-12-22", 88],
["2004-12-23", 75],
["2004-12-24", 99],
["2004-12-25", 150],
["2004-12-26", 97],
["2004-12-27", 44],
["2004-12-28", 49],
["2004-12-29", 61],
["2004-12-30", 80],
["2004-12-31", 45],
["2005-01-01", 63],
["2005-01-02", 118],
["2005-01-03", 100],
["2005-01-04", 52],
["2005-01-05", 104],
["2005-01-06", 147],
["2005-01-07", 48],
["2005-01-08", 56],
["2005-01-09", 44],
["2005-01-10", 96],
["2005-01-11", 67],
["2005-01-12", 52],
["2005-01-13", 83],
["2005-01-14", 65],
["2005-01-15", 67],
["2005-01-16", 87],
["2005-01-17", 111],
["2005-01-18", 47],
["2005-01-19", 55],
["2005-01-20", 57],
["2005-01-21", 85],
["2005-01-22", 119],
["2005-01-23", 174],
["2005-01-24", 143],
["2005-01-25", 95],
["2005-01-26", 115],
["2005-01-27", 173],
["2005-01-28", 163],
["2005-01-29", 95],
["2005-01-30", 50],
["2005-01-31", 69],
["2005-02-01", 69],
["2005-02-02", 47],
["2005-02-03", 96],
["2005-02-04", 79],
["2005-02-05", 46],
["2005-02-06", 68],
["2005-02-07", 71],
["2005-02-08", 68],
["2005-02-09", 84],
["2005-02-10", 38],
["2005-02-11", 71],
["2005-02-12", 102],
["2005-02-13", 122],
["2005-02-14", 153],
["2005-02-15", 150],
["2005-02-16", 69],
["2005-02-17", 105],
["2005-02-18", 60],
["2005-02-19", 42],
["2005-02-20", 47],
["2005-02-21", 87],
["2005-02-22", 102],
["2005-02-23", 30],
["2005-02-24", 55],
["2005-02-25", 46],
["2005-02-26", 64],
["2005-02-27", 95],
["2005-02-28", 61],
["2005-03-01", 64],
["2005-03-02", 74],
["2005-03-03", 57],
["2005-03-04", 46],
["2005-03-05", 58],
["2005-03-06", 114],
["2005-03-07", 108],
["2005-03-08", 82],
["2005-03-09", 80],
["2005-03-10", 110],
["2005-03-11", 67],
["2005-03-12", 59],
["2005-03-13", 36],
["2005-03-14", 69],
["2005-03-15", 99],
["2005-03-16", 120],
["2005-03-17", 109],
["2005-03-18", 52],
["2005-03-19", 96],
["2005-03-20", 119],
["2005-03-21", 94],
["2005-03-22", 151],
["2005-03-23", 90],
["2005-03-24", 63],
["2005-03-25", 99],
["2005-03-26", 133],
["2005-03-27", 161],
["2005-03-28", 141],
["2005-03-29", 48],
["2005-03-30", 122],
["2005-03-31", 113],
["2005-04-01", 83],
["2005-04-02", 82],
["2005-04-03", 82],
["2005-04-04", 116],
["2005-04-05", 332],
["2005-04-06", 352],
["2005-04-07", 156],
["2005-04-08", 100],
["2005-04-09", 64],
["2005-04-10", 64],
["2005-04-11", 95],
["2005-04-12", 92],
["2005-04-13", 90],
["2005-04-14", 179],
["2005-04-15", 88],
["2005-04-16", 213],
["2005-04-17", 143],
["2005-04-18", 159],
["2005-04-19", 132],
["2005-04-20", 173],
["2005-04-21", 69],
["2005-04-22", 58],
["2005-04-23", 107],
["2005-04-24", 106],
["2005-04-25", 73],
["2005-04-26", 115],
["2005-04-27", 122],
["2005-04-28", 418],
["2005-04-29", 98],
["2005-04-30", 138],
["2005-05-01", 183],
["2005-05-02", 122],
["2005-05-03", 139],
["2005-05-04", 160],
["2005-05-05", 97],
["2005-05-06", 48],
["2005-05-07", 80],
["2005-05-08", 130],
["2005-05-09", 63],
["2005-05-10", 62],
["2005-05-11", 86],
["2005-05-12", 110],
["2005-05-13", 81],
["2005-05-14", 85],
["2005-05-15", 113],
["2005-05-16", 83],
["2005-05-17", 49],
["2005-05-18", 51],
["2005-05-19", 53],
["2005-05-20", 80],
["2005-05-21", 120],
["2005-05-22", 46],
["2005-05-23", 59],
["2005-05-24", 82],
["2005-05-25", 88],
["2005-05-26", 107],
["2005-05-27", 83],
["2005-05-28", 120],
["2005-05-29", 100],
["2005-05-30", 109],
["2005-05-31", 95],
["2005-06-01", 93],
["2005-06-02", 54],
["2005-06-03", 58],
["2005-06-04", 77],
["2005-06-05", 75],
["2005-06-06", 53],
["2005-06-07", 86],
["2005-06-08", 96],
["2005-06-09", 81],
["2005-06-10", 85],
["2005-06-11", 136],
["2005-06-12", 106],
["2005-06-13", 94],
["2005-06-14", 69],
["2005-06-15", 56],
["2005-06-16", 83],
["2005-06-17", 79],
["2005-06-18", 92],
["2005-06-19", 116],
["2005-06-20", 131],
["2005-06-21", 113],
["2005-06-22", 116],
["2005-06-23", 120],
["2005-06-24", 148],
["2005-06-25", 141],
["2005-06-26", 79],
["2005-06-27", 52],
["2005-06-28", 84],
["2005-06-29", 86],
["2005-06-30", 100],
["2005-07-01", 97],
["2005-07-02", 76],
["2005-07-03", 87],
["2005-07-04", 64],
["2005-07-05", 63],
["2005-07-06", 70],
["2005-07-07", 89],
["2005-07-08", 98],
["2005-07-09", 91],
["2005-07-10", 79],
["2005-07-11", 69],
["2005-07-12", 81],
["2005-07-13", 93],
["2005-07-14", 93],
["2005-07-15", 97],
["2005-07-17", 150],
["2005-07-18", 103],
["2005-07-19", 114],
["2005-07-20", 125],
["2005-07-21", 104],
["2005-07-22", 79],
["2005-07-23", 51],
["2005-07-24", 23],
["2005-07-25", 75],
["2005-07-26", 109],
["2005-07-27", 73],
["2005-07-28", 63],
["2005-07-29", 57],
["2005-07-30", 95],
["2005-07-31", 79],
["2005-08-01", 81],
["2005-08-02", 68],
["2005-08-03", 72],
["2005-08-04", 46],
["2005-08-05", 63],
["2005-08-06", 86],
["2005-08-07", 71],
["2005-08-08", 72],
["2005-08-09", 62],
["2005-08-10", 60],
["2005-08-11", 146],
["2005-08-12", 141],
["2005-08-13", 63],
["2005-08-14", 98],
["2005-08-15", 100],
["2005-08-16", 46],
["2005-08-17", 26],
["2005-08-18", 53],
["2005-08-19", 59],
["2005-08-20", 79],
["2005-08-21", 110],
["2005-08-22", 91],
["2005-08-23", 97],
["2005-08-24", 90],
["2005-08-25", 85],
["2005-08-26", 110],
["2005-08-27", 94],
["2005-08-28", 154],
["2005-08-29", 136],
["2005-08-30", 113],
["2005-08-31", 152],
["2005-09-01", 118],
["2005-09-02", 42],
["2005-09-03", 68],
["2005-09-04", 80],
["2005-09-05", 90],
["2005-09-06", 99],
["2005-09-07", 98],
["2005-09-08", 83],
["2005-09-09", 141],
["2005-09-10", 164],
["2005-09-11", 182],
["2005-09-12", 107],
["2005-09-13", 76],
["2005-09-14", 62],
["2005-09-15", 104],
["2005-09-16", 78],
["2005-09-17", 73],
["2005-09-18", 66],
["2005-09-19", 99],
["2005-09-20", 92],
["2005-09-21", 71],
["2005-09-22", 60],
["2005-09-23", 110],
["2005-09-24", 112],
["2005-09-25", 134],
["2005-09-26", 168],
["2005-09-27", 97],
["2005-09-28", 115],
["2005-09-29", 100],
["2005-09-30", 47],
["2005-10-01", 88],
["2005-10-02", 72],
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of the python-chess library.
# Copyright (C) 2012-2018 <NAME> <<EMAIL>>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import chess
import chess.polyglot
import chess.pgn
import chess.uci
import chess.xboard
import chess.syzygy
import chess.gaviota
import chess.variant
import chess.svg
import collections
import copy
import os
import os.path
import textwrap
import sys
import time
import threading
import logging
import platform
import unittest
try:
from StringIO import StringIO # Python 2
except ImportError:
from io import StringIO # Python 3
class RaiseLogHandler(logging.StreamHandler):
def handle(self, record):
super(RaiseLogHandler, self).handle(record)
raise RuntimeError("was expecting no log messages")
def catchAndSkip(signature, message=None):
def _decorator(f):
def _wrapper(self):
try:
return f(self)
except signature as err:
raise unittest.SkipTest(message or err)
return _wrapper
return _decorator
class SquareTestCase(unittest.TestCase):
def test_square(self):
for square in chess.SQUARES:
file_index = chess.square_file(square)
rank_index = chess.square_rank(square)
self.assertEqual(chess.square(file_index, rank_index), square, chess.SQUARE_NAMES[square])
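    # Note: chess.square(file_index, rank_index) packs a square as
    # rank_index * 8 + file_index, so A1 == 0 and H8 == 63; the round trip checked
    # above relies on that encoding.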
def test_shifts(self):
shifts = [
chess.shift_down,
chess.shift_2_down,
chess.shift_up,
chess.shift_2_up,
chess.shift_right,
chess.shift_2_right,
chess.shift_left,
chess.shift_2_left,
chess.shift_up_left,
chess.shift_up_right,
chess.shift_down_left,
chess.shift_down_right,
]
for shift in shifts:
for bb_square in chess.BB_SQUARES:
shifted = shift(bb_square)
c = chess.popcount(shifted)
self.assertLessEqual(c, 1)
self.assertEqual(c, chess.popcount(shifted & chess.BB_ALL))
class MoveTestCase(unittest.TestCase):
def test_equality(self):
a = chess.Move(chess.A1, chess.A2)
b = chess.Move(chess.A1, chess.A2)
c = chess.Move(chess.H7, chess.H8, chess.BISHOP)
d1 = chess.Move(chess.H7, chess.H8)
d2 = chess.Move(chess.H7, chess.H8)
self.assertEqual(a, b)
self.assertEqual(b, a)
self.assertEqual(d1, d2)
self.assertNotEqual(a, c)
self.assertNotEqual(c, d1)
self.assertNotEqual(b, d1)
self.assertFalse(d1 != d2)
def test_uci_parsing(self):
self.assertEqual(chess.Move.from_uci("b5c7").uci(), "b5c7")
self.assertEqual(chess.Move.from_uci("e7e8q").uci(), "e7e8q")
self.assertEqual(chess.Move.from_uci("P@e4").uci(), "P@e4")
self.assertEqual(chess.Move.from_uci("B@f4").uci(), "B@f4")
def test_invalid_uci(self):
with self.assertRaises(ValueError):
chess.Move.from_uci("")
with self.assertRaises(ValueError):
chess.Move.from_uci("N")
with self.assertRaises(ValueError):
chess.Move.from_uci("z1g3")
with self.assertRaises(ValueError):
chess.Move.from_uci("Q@g9")
def test_copy(self):
a = chess.Move.from_uci("N@f3")
b = chess.Move.from_uci("a1h8")
c = chess.Move.from_uci("g7g8r")
self.assertEqual(copy.copy(a), a)
self.assertEqual(copy.copy(b), b)
self.assertEqual(copy.copy(c), c)
class PieceTestCase(unittest.TestCase):
def test_equality(self):
a = chess.Piece(chess.BISHOP, chess.WHITE)
b = chess.Piece(chess.KING, chess.BLACK)
c = chess.Piece(chess.KING, chess.WHITE)
d1 = chess.Piece(chess.BISHOP, chess.WHITE)
d2 = chess.Piece(chess.BISHOP, chess.WHITE)
self.assertEqual(a, d1)
self.assertEqual(d1, a)
self.assertEqual(d1, d2)
self.assertEqual(repr(a), repr(d1))
self.assertNotEqual(a, b)
self.assertNotEqual(b, c)
self.assertNotEqual(b, d1)
self.assertNotEqual(a, c)
self.assertFalse(d1 != d2)
self.assertNotEqual(repr(a), repr(b))
self.assertNotEqual(repr(b), repr(c))
self.assertNotEqual(repr(b), repr(d1))
self.assertNotEqual(repr(a), repr(c))
def test_from_symbol(self):
white_knight = chess.Piece.from_symbol("N")
self.assertEqual(white_knight.color, chess.WHITE)
self.assertEqual(white_knight.piece_type, chess.KNIGHT)
self.assertEqual(white_knight.symbol(), "N")
black_queen = chess.Piece.from_symbol("q")
self.assertEqual(black_queen.color, chess.BLACK)
self.assertEqual(black_queen.piece_type, chess.QUEEN)
self.assertEqual(black_queen.symbol(), "q")
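# Illustrative sketch (not part of the original test suite): Piece.from_symbol and
# Piece.symbol round-trip for every standard piece letter, which is what the test
# above relies on for "N" and "q".
def _piece_symbol_roundtrip_demo():
    for symbol in "PNBRQKpnbrqk":
        assert chess.Piece.from_symbol(symbol).symbol() == symbol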
class BoardTestCase(unittest.TestCase):
def test_default_position(self):
board = chess.Board()
self.assertEqual(board.piece_at(chess.B1), chess.Piece.from_symbol("N"))
self.assertEqual(board.fen(), chess.STARTING_FEN)
self.assertEqual(board.turn, chess.WHITE)
def test_empty(self):
board = chess.Board.empty()
self.assertEqual(board.fen(), "8/8/8/8/8/8/8/8 w - - 0 1")
self.assertEqual(board, chess.Board(None))
def test_from_epd(self):
base_epd = "rnbqkb1r/ppp1pppp/5n2/3P4/8/8/PPPP1PPP/RNBQKBNR w KQkq -"
board, ops = chess.Board.from_epd(base_epd + " ce 55;")
self.assertEqual(ops["ce"], 55)
self.assertEqual(board.fen(), base_epd + " 0 1")
def test_move_making(self):
board = chess.Board()
move = chess.Move(chess.E2, chess.E4)
board.push(move)
self.assertEqual(board.peek(), move)
def test_fen(self):
board = chess.Board()
self.assertEqual(board.fen(), chess.STARTING_FEN)
fen = "6k1/pb3pp1/1p2p2p/1Bn1P3/8/5N2/PP1q1PPP/6K1 w - - 0 24"
board.set_fen(fen)
self.assertEqual(board.fen(), fen)
board.push(chess.Move.from_uci("f3d2"))
self.assertEqual(board.fen(), "6k1/pb3pp1/1p2p2p/1Bn1P3/8/8/PP1N1PPP/6K1 b - - 0 24")
def test_xfen(self):
# https://de.wikipedia.org/wiki/Forsyth-Edwards-Notation#Beispiel
xfen = "rn2k1r1/ppp1pp1p/3p2p1/5bn1/P7/2N2B2/1PPPPP2/2BNK1RR w Gkq - 4 11"
board = chess.Board(xfen, chess960=True)
self.assertEqual(board.castling_rights, chess.BB_G1 | chess.BB_A8 | chess.BB_G8)
self.assertEqual(board.clean_castling_rights(), chess.BB_G1 | chess.BB_A8 | chess.BB_G8)
self.assertEqual(board.shredder_fen(), "rn2k1r1/ppp1pp1p/3p2p1/5bn1/P7/2N2B2/1PPPPP2/2BNK1RR w Gga - 4 11")
self.assertEqual(board.fen(), xfen)
self.assertTrue(board.has_castling_rights(chess.WHITE))
self.assertTrue(board.has_castling_rights(chess.BLACK))
self.assertTrue(board.has_kingside_castling_rights(chess.BLACK))
self.assertTrue(board.has_kingside_castling_rights(chess.WHITE))
self.assertTrue(board.has_queenside_castling_rights(chess.BLACK))
self.assertFalse(board.has_queenside_castling_rights(chess.WHITE))
# Chess960 position #284.
board = chess.Board("rkbqrbnn/pppppppp/8/8/8/8/PPPPPPPP/RKBQRBNN w - - 0 1", chess960=True)
board.castling_rights = board.rooks
self.assertTrue(board.clean_castling_rights() & chess.BB_A1)
self.assertEqual(board.fen(), "rkbqrbnn/pppppppp/8/8/8/8/PPPPPPPP/RKBQRBNN w KQkq - 0 1")
self.assertEqual(board.shredder_fen(), "rkbqrbnn/pppppppp/8/8/8/8/PPPPPPPP/RKBQRBNN w EAea - 0 1")
# Valid en passant square on illegal board.
fen = "8/8/8/pP6/8/8/8/8 w - a6 0 1"
board = chess.Board(fen)
self.assertEqual(board.fen(), fen)
# Illegal en passant square in illegal board.
fen = "1r6/8/8/pP6/8/8/8/1K6 w - a6 0 1"
board = chess.Board(fen)
self.assertEqual(board.fen(), "1r6/8/8/pP6/8/8/8/1K6 w - - 0 1")
def test_fen_en_passant(self):
board = chess.Board()
board.push_san("e4")
self.assertEqual(board.fen(en_passant="fen"), "rnbqkbnr/pppppppp/8/8/4P3/8/PPPP1PPP/RNBQKBNR b KQkq e3 0 1")
self.assertEqual(board.fen(en_passant="xfen"), "rnbqkbnr/pppppppp/8/8/4P3/8/PPPP1PPP/RNBQKBNR b KQkq - 0 1")
def test_get_set(self):
board = chess.Board()
self.assertEqual(board.piece_at(chess.B1), chess.Piece.from_symbol("N"))
board.remove_piece_at(chess.E2)
self.assertEqual(board.piece_at(chess.E2), None)
board.set_piece_at(chess.E4, chess.Piece.from_symbol("r"))
self.assertEqual(board.piece_type_at(chess.E4), chess.ROOK)
board.set_piece_at(chess.F1, None)
self.assertEqual(board.piece_at(chess.F1), None)
board.set_piece_at(chess.H7, chess.Piece.from_symbol("Q"), promoted=True)
self.assertEqual(board.promoted, chess.BB_H7)
def test_pawn_captures(self):
board = chess.Board()
        # King's Gambit.
board.push(chess.Move.from_uci("e2e4"))
board.push(chess.Move.from_uci("e7e5"))
board.push(chess.Move.from_uci("f2f4"))
# Accepted.
exf4 = chess.Move.from_uci("e5f4")
self.assertIn(exf4, board.pseudo_legal_moves)
self.assertIn(exf4, board.legal_moves)
board.push(exf4)
board.pop()
def test_pawn_move_generation(self):
board = chess.Board("8/2R1P3/8/2pp4/2k1r3/P7/8/1K6 w - - 1 55")
self.assertEqual(len(list(board.generate_pseudo_legal_moves())), 16)
def test_single_step_pawn_move(self):
board = chess.Board()
a3 = chess.Move.from_uci("a2a3")
self.assertIn(a3, board.pseudo_legal_moves)
self.assertIn(a3, board.legal_moves)
board.push(a3)
board.pop()
self.assertEqual(board.fen(), chess.STARTING_FEN)
def test_castling(self):
board = chess.Board("r3k2r/8/8/8/8/8/8/R3K2R w KQkq - 1 1")
# Let white castle short.
move = board.parse_san("O-O")
self.assertEqual(move, chess.Move.from_uci("e1g1"))
self.assertEqual(board.san(move), "O-O")
self.assertIn(move, board.legal_moves)
board.push(move)
# Let black castle long.
move = board.parse_san("O-O-O")
self.assertEqual(board.san(move), "O-O-O")
self.assertIn(move, board.legal_moves)
board.push(move)
self.assertEqual(board.fen(), "2kr3r/8/8/8/8/8/8/R4RK1 w - - 3 2")
# Undo both castling moves.
board.pop()
board.pop()
self.assertEqual(board.fen(), "r3k2r/8/8/8/8/8/8/R3K2R w KQkq - 1 1")
# Let white castle long.
move = board.parse_san("O-O-O")
self.assertEqual(board.san(move), "O-O-O")
self.assertIn(move, board.legal_moves)
board.push(move)
# Let black castle short.
move = board.parse_san("O-O")
self.assertEqual(board.san(move), "O-O")
self.assertIn(move, board.legal_moves)
board.push(move)
self.assertEqual(board.fen(), "r4rk1/8/8/8/8/8/8/2KR3R w - - 3 2")
# Undo both castling moves.
board.pop()
board.pop()
self.assertEqual(board.fen(), "r3k2r/8/8/8/8/8/8/R3K2R w KQkq - 1 1")
def test_ninesixty_castling(self):
fen = "3r1k1r/4pp2/8/8/8/8/8/4RKR1 w Gd - 1 1"
board = chess.Board(fen, chess960=True)
# Let white do the king side swap.
move = board.parse_san("O-O")
self.assertEqual(board.san(move), "O-O")
self.assertEqual(move.from_square, chess.F1)
self.assertEqual(move.to_square, chess.G1)
self.assertIn(move, board.legal_moves)
board.push(move)
self.assertEqual(board.shredder_fen(), "3r1k1r/4pp2/8/8/8/8/8/4RRK1 b d - 2 1")
# Black can not castle kingside.
self.assertNotIn(chess.Move.from_uci("e8h8"), board.legal_moves)
# Let black castle queenside.
move = board.parse_san("O-O-O")
self.assertEqual(board.san(move), "O-O-O")
self.assertEqual(move.from_square, chess.F8)
self.assertEqual(move.to_square, chess.D8)
self.assertIn(move, board.legal_moves)
board.push(move)
self.assertEqual(board.shredder_fen(), "2kr3r/4pp2/8/8/8/8/8/4RRK1 w - - 3 2")
# Restore initial position.
board.pop()
board.pop()
self.assertEqual(board.shredder_fen(), fen)
fen = "Qr4k1/4pppp/8/8/8/8/8/R5KR w Hb - 0 1"
board = chess.Board(fen, chess960=True)
# White can just hop the rook over.
move = board.parse_san("O-O")
self.assertEqual(board.san(move), "O-O")
self.assertEqual(move.from_square, chess.G1)
self.assertEqual(move.to_square, chess.H1)
self.assertIn(move, board.legal_moves)
board.push(move)
self.assertEqual(board.shredder_fen(), "Qr4k1/4pppp/8/8/8/8/8/R4RK1 b b - 1 1")
# Black can not castle queenside nor kingside.
self.assertFalse(any(board.generate_castling_moves()))
# Restore initial position.
board.pop()
self.assertEqual(board.shredder_fen(), fen)
def test_selective_castling(self):
board = chess.Board("r3k2r/pppppppp/8/8/8/8/PPPPPPPP/R3K2R w KQkq - 0 1")
# King not selected
self.assertFalse(any(board.generate_castling_moves(chess.BB_ALL & ~board.kings)))
# Rook on h1 not selected
moves = board.generate_castling_moves(chess.BB_ALL, chess.BB_ALL & ~chess.BB_H1)
self.assertEqual(len(list(moves)), 1)
def test_castling_right_not_destroyed_bug(self):
        # A rook move from H8 to H1 was only taking away White's possible
        # castling rights.
board = chess.Board("2r1k2r/2qbbpp1/p2pp3/1p3PP1/Pn2P3/1PN1B3/1P3QB1/1K1R3R b k - 0 22")
board.push_san("Rxh1")
self.assertEqual(board.epd(), "2r1k3/2qbbpp1/p2pp3/1p3PP1/Pn2P3/1PN1B3/1P3QB1/1K1R3r w - -")
def test_invalid_castling_rights(self):
# KQkq is not valid in this standard chess position.
board = chess.Board("1r2k3/8/8/8/8/8/8/R3KR2 w KQkq - 0 1")
self.assertEqual(board.status(), chess.STATUS_BAD_CASTLING_RIGHTS)
self.assertEqual(board.fen(), "1r2k3/8/8/8/8/8/8/R3KR2 w Q - 0 1")
self.assertTrue(board.has_queenside_castling_rights(chess.WHITE))
self.assertFalse(board.has_kingside_castling_rights(chess.WHITE))
self.assertFalse(board.has_queenside_castling_rights(chess.BLACK))
self.assertFalse(board.has_kingside_castling_rights(chess.BLACK))
board = chess.Board("4k2r/8/8/8/8/8/8/R1K5 w KQkq - 0 1", chess960=True)
self.assertEqual(board.status(), chess.STATUS_BAD_CASTLING_RIGHTS)
self.assertEqual(board.fen(), "4k2r/8/8/8/8/8/8/R1K5 w Qk - 0 1")
board = chess.Board("1r2k3/8/1p6/8/8/5P2/8/1R2KR2 w KQkq - 0 1", chess960=True)
self.assertEqual(board.status(), chess.STATUS_BAD_CASTLING_RIGHTS)
self.assertEqual(board.fen(), "1r2k3/8/1p6/8/8/5P2/8/1R2KR2 w KQq - 0 1")
def test_ninesixty_different_king_and_rook_file(self):
        # Theoretically this position (with castling rights) cannot be reached
        # with a series of legal moves from one of the 960 starting positions.
        # Decision: We don't care. Neither does Stockfish nor lichess.org.
fen = "1r1k1r2/5p2/8/8/8/8/3N4/R5KR b KQkq - 0 1"
board = chess.Board(fen, chess960=True)
self.assertEqual(board.fen(), fen)
def test_ninesixty_prevented_castle(self):
board = chess.Board("4k3/8/8/1b6/8/8/8/5RKR w KQ - 0 1", chess960=True)
self.assertFalse(board.is_legal(chess.Move.from_uci("g1f1")))
def test_insufficient_material(self):
# Starting position.
board = chess.Board()
self.assertFalse(board.is_insufficient_material())
# King vs. King + 2 bishops of the same color.
board = chess.Board("k1K1B1B1/8/8/8/8/8/8/8 w - - 7 32")
self.assertTrue(board.is_insufficient_material())
# Add bishop of opposite color for the weaker side.
board.set_piece_at(chess.B8, chess.Piece.from_symbol("b"))
self.assertFalse(board.is_insufficient_material())
def test_promotion_with_check(self):
board = chess.Board("8/6P1/2p5/1Pqk4/6P1/2P1RKP1/4P1P1/8 w - - 0 1")
board.push(chess.Move.from_uci("g7g8q"))
self.assertTrue(board.is_check())
self.assertEqual(board.fen(), "6Q1/8/2p5/1Pqk4/6P1/2P1RKP1/4P1P1/8 b - - 0 1")
board = chess.Board("8/8/8/3R1P2/8/2k2K2/3p4/r7 b - - 0 82")
board.push_san("d1=Q+")
self.assertEqual(board.fen(), "8/8/8/3R1P2/8/2k2K2/8/r2q4 w - - 0 83")
def test_scholars_mate(self):
board = chess.Board()
e4 = chess.Move.from_uci("e2e4")
self.assertIn(e4, board.legal_moves)
board.push(e4)
e5 = chess.Move.from_uci("e7e5")
self.assertIn(e5, board.legal_moves)
board.push(e5)
Qf3 = chess.Move.from_uci("d1f3")
self.assertIn(Qf3, board.legal_moves)
board.push(Qf3)
Nc6 = chess.Move.from_uci("b8c6")
self.assertIn(Nc6, board.legal_moves)
board.push(Nc6)
Bc4 = chess.Move.from_uci("f1c4")
self.assertIn(Bc4, board.legal_moves)
board.push(Bc4)
Rb8 = chess.Move.from_uci("a8b8")
self.assertIn(Rb8, board.legal_moves)
board.push(Rb8)
self.assertFalse(board.is_check())
self.assertFalse(board.is_checkmate())
self.assertFalse(board.is_game_over())
self.assertFalse(board.is_stalemate())
Qf7_mate = chess.Move.from_uci("f3f7")
self.assertIn(Qf7_mate, board.legal_moves)
board.push(Qf7_mate)
self.assertTrue(board.is_check())
self.assertTrue(board.is_checkmate())
self.assertTrue(board.is_game_over())
self.assertTrue(board.is_game_over(claim_draw=True))
self.assertFalse(board.is_stalemate())
self.assertEqual(board.fen(), "1rbqkbnr/pppp1Qpp/2n5/4p3/2B1P3/8/PPPP1PPP/RNB1K1NR b KQk - 0 4")
def test_result(self):
# Undetermined.
board = chess.Board()
| |
# Copyright 2019 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import stat
import subprocess
import re
import netaddr
from orderedattrdict import AttrDict
from enum import Enum
from filelock import Timeout, FileLock
from socket import gethostbyname
from time import sleep
from random import random
import lib.logger as logger
from lib.ssh import SSH
from lib.switch_exception import SwitchException
from lib.genesis import get_switch_lock_path
FILE_PATH = os.path.dirname(os.path.abspath(__file__))
SWITCH_LOCK_PATH = get_switch_lock_path()
class SwitchCommon(object):
ENABLE_REMOTE_CONFIG = 'configure terminal ; {} '
IFC_ETH_CFG = 'interface ethernet {} '
IFC_PORT_CH_CFG = 'interface port-channel {} '
NO_IFC_PORT_CH_CFG = 'no interface port-channel {} '
PORT_PREFIX = 'Eth'
SEP = ';'
SHOW_VLANS = 'show vlan'
CREATE_VLAN = 'vlan {}'
DELETE_VLAN = 'no vlan {}'
SHOW_PORT = 'show interface brief'
CLEAR_MAC_ADDRESS_TABLE = 'clear mac address-table dynamic'
SHOW_MAC_ADDRESS_TABLE = 'show mac address-table ;'
ENABLE_LACP = 'feature lacp'
NO_CHANNEL_GROUP = 'no channel-group'
CHANNEL_GROUP_MODE = 'channel-group {} mode {} '
SHOW_PORT_CHANNEL = 'show port-channel summary'
SWITCHPORT_MODE = 'switchport mode {} '
SWITCHPORT_ACCESS_VLAN = 'switchport access vlan {} '
SWITCHPORT_TRUNK_NATIVE_VLAN = 'switchport trunk native vlan {} '
SWITCHPORT_TRUNK_ALLOWED_VLAN = 'switchport trunk allowed vlan {} {}'
SET_MTU = 'mtu {}'
NO_MTU = 'no mtu'
SHUTDOWN = 'shutdown'
NO_SHUTDOWN = 'no shutdown'
FORCE = 'force'
MGMT_INTERFACE_CONFIG = 'interface ip {}'
SET_INTERFACE_IPADDR = ' ;ip address {}'
SET_INTERFACE_MASK = ' ;ip netmask {}'
SET_VLAN = ' ;vlan {}'
SHOW_IP_INTERFACE_BRIEF = 'show ip interface brief'
SHOW_INTERFACE = 'show interface vlan{}'
SET_INTERFACE = ('feature interface-vlan ;'
'interface vlan {} ;'
'ip address {} {} ;'
'management ;'
'no shutdown')
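    # The command templates above are filled in with str.format() and joined
    # with SEP before send_cmd() wraps them in ENABLE_REMOTE_CONFIG. A sketch
    # with hypothetical port/vlan values:
    #   IFC_ETH_CFG.format(18) + SEP + SWITCHPORT_ACCESS_VLAN.format(10)
    #   -> 'interface ethernet 18 ;switchport access vlan 10 '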
    def __init__(self, host=None, userid=None,
                 password=None, mode=None, outfile=None):
        self.log = logger.getlogger()
        self.host = host
        self.userid = userid
        self.password = password
        self.mode = mode  # 'passive' only records commands to outfile
        self.outfile = outfile
class AllowOp(Enum):
ADD = 'add'
ALL = 'all'
EXCEPT = 'except'
NONE = 'none'
REMOVE = 'remove'
class PortMode(Enum):
ACCESS = 'access'
FEX_FABRIC = 'fex-fabric'
TRUNK = 'trunk'
HYBRID = ''
TRUNK_NATIVE = ''
def send_cmd(self, cmd):
if self.mode == 'passive':
f = open(self.outfile, 'a+')
f.write(cmd + '\n')
f.close()
return
host_ip = gethostbyname(self.host)
lockfile = os.path.join(SWITCH_LOCK_PATH, host_ip + '.lock')
if not os.path.isfile(lockfile):
os.mknod(lockfile)
os.chmod(lockfile, stat.S_IRWXO | stat.S_IRWXG | stat.S_IRWXU)
lock = FileLock(lockfile)
cnt = 0
while cnt < 5 and not lock.is_locked:
if cnt > 0:
self.log.info('Waiting to acquire lock for switch {}'.
format(self.host))
cnt += 1
try:
lock.acquire(timeout=5, poll_intervall=0.05) # 5 sec, 50 ms
sleep(0.01) # give switch a chance to close out comms
except Timeout:
pass
if lock.is_locked:
if self.ENABLE_REMOTE_CONFIG:
cmd = self.ENABLE_REMOTE_CONFIG.format(cmd)
self.log.debug(cmd)
ssh = SSH()
__, data, _ = ssh.exec_cmd(
self.host,
self.userid,
self.password,
cmd,
ssh_log=True,
look_for_keys=False)
lock.release()
# sleep 60 ms to give other processes a chance.
sleep(0.06 + random() / 100) # lock acquire polls at 50 ms
if lock.is_locked:
self.log.error('Lock is locked. Should be unlocked')
return data.decode("utf-8")
else:
self.log.error('Unable to acquire lock for switch {}'.format(self.host))
raise SwitchException('Unable to acquire lock for switch {}'.
format(self.host))
def get_enums(self):
return self.PortMode, self.AllowOp
def show_ports(self, format='raw'):
if self.mode == 'passive':
return None
ports = {}
port_info = self.send_cmd(self.SHOW_PORT)
if format == 'raw':
return port_info
elif format == 'std':
port_info = port_info.splitlines()
for line in port_info:
match = re.search(
r'Eth([0-9/]+)\s+(\d+)\s+\w+\s+(access|trunk)', line)
if match:
# mode, avlans = self._get_port_detail(match)
ports[match.group(1)] = {
'mode': match.group(3),
'nvlan': match.group(2),
'avlans': ''}
port_info = self.send_cmd('show interface trunk').split('Port')
for item in port_info:
if 'Vlans Allowed on Trunk' in item:
item = item.splitlines()
for line in item:
match = re.search(
r'Eth((?:\d+/)+\d+)\s+((?:\d+[,-])*\d+)', line)
if match:
ports[match.group(1)]['avlans'] = match.group(2)
return ports
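    # show_ports('std') returns a dict keyed by port id; a hypothetical
    # example of the shape:
    #   {'1/1': {'mode': 'access', 'nvlan': '10', 'avlans': ''},
    #    '1/2': {'mode': 'trunk', 'nvlan': '1', 'avlans': '10,20-30'}}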
def show_vlans(self):
if self.mode == 'passive':
return None
self.log.debug(self.SHOW_VLANS)
vlan_info = self.send_cmd(self.SHOW_VLANS)
return vlan_info
def show_native_vlan(self, port):
if self.mode == 'passive':
return None
port = str(port)
ports = self.show_ports(format='std')
return ports[port]['nvlan']
def set_switchport_mode(self, port, mode, vlan=None):
port = str(port)
cmd = self.IFC_ETH_CFG.format(port) + self.SEP
cmd += self.SWITCHPORT_MODE.format(mode.value)
if vlan:
if mode.value == 'trunk':
cmd += self.SEP + self.SWITCHPORT_TRUNK_NATIVE_VLAN.format(vlan)
if mode.value == 'access':
cmd += self.SEP + self.SWITCHPORT_ACCESS_VLAN.format(vlan)
self.send_cmd(cmd)
ports = self.show_ports(format='std')
if port not in ports:
            msg = 'Unable to verify setting of switchport mode '
            msg += 'for port {}. May already be in a channel group.'
            msg = msg.format(port)
            self.log.debug(msg)
return
if self.mode == 'passive' or ports[port]['mode'] == mode.value:
self.log.debug(
'Port {} is in {} mode'.format(port, mode.value))
else:
raise SwitchException(
'Failed setting port {} to {} mode'.format(port, mode.value))
if vlan:
if self.mode == 'passive' or str(vlan) == ports[port]['nvlan']:
msg = 'PVID/Native vlan {} set on port {}'.format(vlan, port)
self.log.debug(msg)
else:
msg = 'Failed setting PVID/Native vlan {} on port {}'.format(
vlan, port)
self.log.error(msg)
raise SwitchException(msg)
    def is_port_in_trunk_mode(self, port):
        """Determine whether a port is in 'trunk' mode.
        """
if self.mode == 'passive':
return None
port = str(port)
ports = self.show_ports(format='std')
return self.PortMode.TRUNK.value in ports[port]['mode']
def is_port_in_access_mode(self, port):
if self.mode == 'passive':
return None
port = str(port)
ports = self.show_ports('std')
return self.PortMode.ACCESS.value in ports[port]['mode']
    def allowed_vlans_port(self, port, operation, vlans=''):
        """ Configure the allowed vlans on an ethernet port.
        Args:
            port (int or str): port number
            operation (enum of AllowOp): add | all | except | none | remove
            vlans (str or tuple or list): if a string, it can take the
                following forms: '4' or '4,5,8' or '5-10'
        """
        if isinstance(vlans, (tuple, list)):
            vlans = ','.join(str(vlan) for vlan in vlans)
        else:
            vlans = str(vlans)
cmd = self.IFC_ETH_CFG.format(port) + self.SEP + \
self.SWITCHPORT_TRUNK_ALLOWED_VLAN.format(operation.value, vlans)
self.send_cmd(cmd)
res = self.is_vlan_allowed_for_port(vlans, port)
if operation.value == 'add':
if res is None:
return
elif not res:
msg = 'Not all vlans in {} were added to port {}'. \
format(vlans, port)
self.log.error(msg)
else:
self.log.debug('vlans {} were added to port {}'.
format(vlans, port))
if operation.value == 'remove':
if res is None:
return
elif res:
msg = 'Not all vlans in {} were removed from port {}'. \
format(vlans, port)
self.log.error(msg)
else:
self.log.debug('vlans {} were removed from port {}'.
format(vlans, port))
    def is_vlan_allowed_for_port(self, vlans, port):
        """ Test whether all vlans in vlans are allowed for the given port.
        Args:
            vlans: (int or str) string can be of the form 'n', 'n,m,p', 'n-p'
            port: (int or str)
        Returns:
            True if all vlans in the vlans argument are allowed for the port;
            None if mode is passive or the port cannot be verified.
        """
if self.mode == 'passive':
return None
vlans = str(vlans)
vlans = vlans.split(',')
result = True
port = str(port)
ports = self.show_ports('std')
if port not in ports:
msg = 'Unable to verify setting of vlans '
msg += 'for port {}. May already be in a channel group.'
msg = msg.format(port)
self.log.debug(msg)
return
avlans = ports[port]['avlans']
avlans = avlans.split(',')
for vlan in vlans:
res = False
for i, _vlans in enumerate(avlans):
_vlans = _vlans.strip(' ')
if not vlan:
res = True
break
if not _vlans:
break
elif '-' in vlan and vlan == _vlans:
res = True
break
elif int(vlan) >= int(_vlans.split('-')[0]) and \
int(vlan) <= int(_vlans.split('-')[-1]):
res = True
break
else:
pass
result = result and res
return result
def create_vlan(self, vlan):
self.send_cmd(self.CREATE_VLAN.format(vlan))
if self.mode == 'passive' or self.is_vlan_created(vlan):
self.log.debug('Created VLAN {}'.format(vlan))
else:
raise SwitchException('Failed creating VLAN {}'.format(vlan))
def delete_vlan(self, vlan):
self.send_cmd(self.DELETE_VLAN.format(vlan))
if self.mode == 'active' and self.is_vlan_created(vlan):
self.log.warning(
'Failed deleting VLAN {}'.format(vlan))
raise SwitchException(
'Failed deleting VLAN {}'.format(vlan))
self.log.info('vlan {} deleted.'.format(vlan))
return
def is_vlan_created(self, vlan):
if self.mode == 'passive':
return None
if re.search(
r'^' + str(vlan),
self.send_cmd(self.SHOW_VLANS),
re.MULTILINE):
return True
return False
def set_mtu_for_port(self, port, mtu):
# Bring port down
self.send_cmd(
self.IFC_ETH_CFG.format(port) + self.SEP + self.SHUTDOWN)
# Set MTU
if mtu == 0:
self.send_cmd(
self.IFC_ETH_CFG.format(port) + self.SEP + self.NO_MTU)
else:
self.send_cmd(
self.IFC_ETH_CFG.format(port) + self.SEP + self.SET_MTU.format(mtu))
# Bring port up
self.send_cmd(
self.IFC_ETH_CFG.format(port) + self.SEP + self.NO_SHUTDOWN)
    def show_mac_address_table(self, format=False):
        """Get switch mac address table.
        The returned text string can be raw or optionally formatted.
        Args:
            format (bool or str): set to 'dict' or 'std' to return a dictionary
        Returns:
            raw string if format=False
| |
"""This module implements the TwillBrowser."""
import pickle
import re
from urllib.parse import urljoin
import requests
import requests.auth
from lxml import html
from requests.exceptions import InvalidSchema, ConnectionError
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from . import log, __version__
from .utils import (
print_form, trunc, unique_match, ResultWrapper, _equiv_refresh_interval)
from .errors import TwillException
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
class TwillBrowser:
"""A simple, stateful browser"""
user_agent = f'TwillBrowser/{__version__}'
def __init__(self):
self.result = None
self.last_submit_button = None
self.first_error = None
# whether meta refresh will be displayed
self.show_refresh = False
# whether the SSL cert will be verified, or can be a ca bundle path
self.verify = False
# Session stores cookies
self._session = requests.Session()
# An lxml FormElement, none until a form is selected
# replaces self._browser.form from mechanize
self._form = None
self._form_files = {}
# A dict of HTTPBasicAuth from requests, keyed off URL
self._auth = {}
# callables to be called after each page load.
self._post_load_hooks = []
self._history = []
# set default headers
self.reset_headers()
def reset(self):
"""Reset the browser"""
self.__init__()
@property
def creds(self):
"""Get the credentials for basic authentication."""
return self._auth
@creds.setter
def creds(self, creds):
"""Set the credentials for basic authentication."""
self._auth[creds[0]] = requests.auth.HTTPBasicAuth(*creds[1])
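    # Credentials are registered per base URL as a (url, (user, password))
    # tuple; a hypothetical example:
    #   browser.creds = ('http://example.com', ('alice', 'secret'))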
def go(self, url):
"""Visit given URL."""
try_urls = []
if '://' in url:
try_urls.append(url)
else: # URL does not have a schema
# if this is a relative URL, then assume that we want to tack it
# onto the end of the current URL
current_url = self.url
if current_url:
try_urls.append(urljoin(current_url, url))
# if this is an absolute URL, it may be just missing the 'http://'
# at the beginning, try fixing that (mimic browser behavior)
if not url.startswith(('.', '/', '?')):
try_urls.append(f'http://{url}')
try_urls.append(f'https://{url}')
for try_url in try_urls:
try:
self._journey('open', try_url)
except (IOError,
ConnectionError, InvalidSchema, UnicodeError) as error:
log.info("cannot go to '%s': %s", try_url, error)
else:
break
else:
raise TwillException(f"cannot go to '{url}'")
log.info('==> at %s', self.url)
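    # For instance (hypothetical URL), go('example.com') with no current page
    # tries 'http://example.com' and then 'https://example.com' until one of
    # them can be opened.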
def reload(self):
"""Tell the browser to reload the current page."""
self._journey('reload')
log.info('==> reloaded')
def back(self):
"""Return to previous page, if possible."""
try:
self._journey('back')
log.info('==> back to %s', self.url)
except TwillException:
log.warning('==> back at empty page')
@property
def code(self):
"""Get the HTTP status code received for the current page."""
return self.result.http_code if self.result else None
@property
def encoding(self):
"""Get the encoding used by the server for the current page."""
return self.result.encoding if self.result else None
@property
def html(self):
"""Get the HTML for the current page."""
return self.result.text if self.result else None
@property
def dump(self):
"""Get the binary content of the current page."""
return self.result.content if self.result else None
@property
def title(self):
if self.result is None:
raise TwillException("Error: Getting title with no page")
return self.result.title
@property
def url(self):
"""Get the URL of the current page."""
return self.result.url if self.result else None
def find_link(self, pattern):
"""Find the first link matching the given pattern.
The pattern is searched in the URL, link text, or name.
"""
return self.result.find_link(pattern) if self.result else None
def follow_link(self, link):
"""Follow the given link."""
self._journey('follow_link', link)
log.info('==> at %s', self.url)
@property
def headers(self):
return self._session.headers
def reset_headers(self):
self.headers.clear()
self.headers.update({
'Accept': 'text/html; */*',
'User-Agent': self.user_agent})
@property
def agent_string(self):
"""Get the agent string."""
return self.headers.get('User-Agent')
@agent_string.setter
def agent_string(self, agent):
"""Set the agent string to the given value."""
self.headers['User-agent'] = agent
def showforms(self):
"""Pretty-print all of the forms.
Include the global form (form elements outside of <form> pairs)
as forms[0] if present.
"""
for n, form in enumerate(self.forms, 1):
print_form(form, n)
def showlinks(self):
"""Pretty-print all of the links."""
info = log.info
links = self.links
if links:
info('\nLinks (%d links total):\n', len(links))
for n, link in enumerate(links, 1):
info('\t%d. %s ==> %s', n, trunc(link.text, 40), link.url)
info('')
else:
info('\n** no links **\n')
def showhistory(self):
"""Pretty-print the history of links visited."""
info = log.info
history = self._history
if history:
info('\nHistory (%d pages total):\n', len(history))
for n, page in enumerate(history, 1):
info('\t%d. %s', n, page.url)
info('')
else:
info('\n** no history **\n')
@property
def links(self):
"""Return a list of all of the links on the page."""
return [] if self.result is None else self.result.links
@property
def forms(self):
"""Return a list of all of the forms.
Include the global form at index 0 if present.
"""
return [] if self.result is None else self.result.forms
def form(self, formname=1):
"""Return the first form that matches the given form name."""
return None if self.result is None else self.result.form(formname)
def form_field(self, form, fieldname=1):
"""Return the control that matches the given field name.
Must be a *unique* regex/exact string match.
"""
inputs = form.inputs
found_multiple = False
if isinstance(fieldname, str):
if fieldname in form.fields:
match_name = [c for c in inputs if c.name == fieldname]
if len(match_name) > 1:
if all(hasattr(c, 'type') and c.type == 'checkbox'
for c in match_name):
return html.CheckboxGroup(match_name)
if all(hasattr(c, 'type') and c.type == 'radio'
for c in match_name):
return html.RadioGroup(match_name)
else:
match_name = None
# test exact match to id
match_id = [c for c in inputs if c.get('id') == fieldname]
if match_id:
if unique_match(match_id):
return match_id[0]
found_multiple = True
# test exact match to name
if match_name:
if unique_match(match_name):
return match_name[0]
found_multiple = True
# test field index
try:
return list(inputs)[int(fieldname) - 1]
except (IndexError, ValueError):
pass
if isinstance(fieldname, str):
# test regex match
regex = re.compile(fieldname)
match_name = [c for c in inputs
if c.name and regex.search(c.name)]
if match_name:
if unique_match(match_name):
return match_name[0]
found_multiple = True
# test field values
match_value = [c for c in inputs if c.value == fieldname]
if match_value:
if len(match_value) == 1:
return match_value[0]
found_multiple = True
# error out
if found_multiple:
raise TwillException(f'multiple matches to "{fieldname}"')
raise TwillException(f'no field matches "{fieldname}"')
def add_form_file(self, fieldname, fp):
self._form_files[fieldname] = fp
def clicked(self, form, control):
"""Record a 'click' in a specific form."""
if self._form != form:
# construct a function to choose a particular form;
# select_form can use this to pick out a precise form.
self._form = form
self.last_submit_button = None
# record the last submit button clicked.
if hasattr(control, 'type') and control.type in ('submit', 'image'):
self.last_submit_button = control
def submit(self, fieldname=None):
"""Submit the currently clicked form using the given field."""
if fieldname is not None:
fieldname = str(fieldname)
forms = self.forms
if not forms:
raise TwillException("no forms on this page!")
ctl = None
form = self._form
if form is None:
if len(forms) == 1:
form = forms[0]
else:
raise TwillException(
"more than one form;"
" you must select one (use 'fv') before submitting")
action = form.action or ''
if '://' not in action:
form.action = urljoin(self.url, action)
# no fieldname? see if we can use the last submit button clicked...
if fieldname is None:
if self.last_submit_button is None:
# get first submit button in form.
submits = [c for c in form.inputs
if hasattr(c, 'type') and
c.type in ('submit', 'image')]
if submits:
ctl = submits[0]
else:
ctl = self.last_submit_button
else:
# fieldname given; find it
ctl = self.form_field(form, fieldname)
# now set up the submission by building the request object that
# will be sent in the form submission.
if ctl is None:
log.debug('Note: submit without using a submit button')
else:
log.info(
"Note: submit is using submit button:"
" name='%s', value='%s'", ctl.get('name'), ctl.value)
# Add referer information. This may require upgrading the
# request object to have an 'add_unredirected_header' function.
# @BRT: For now, the referrer is always the current page
# @CTB: this seems like an issue for further work.
# Note: We do not set Content-Type from form.attrib.get('enctype'),
# since Requests does a much better job at setting the proper one.
headers = {'Referer': self.url}
payload = form.form_values()
if ctl is not None and ctl.get('name') is not None:
payload.append((ctl.get('name'), ctl.value))
payload = self._encode_payload(payload)
# now actually GO
if form.method == 'POST':
if self._form_files:
r = self._session.post(
form.action, data=payload, headers=headers,
files=self._form_files)
else:
r = self._session.post(
form.action, data=payload, headers=headers)
else:
r = self._session.get(form.action, params=payload, headers=headers)
self._form = None
self._form_files.clear()
self.last_submit_button = None
self._history.append(self.result)
self.result = ResultWrapper(r)
def save_cookies(self, filename):
"""Save cookies into the given file."""
with open(filename, 'wb') as | |
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doJoinGameServerSession(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.GseClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.JoinGameServerSessionRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.JoinGameServerSession(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
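# A typical invocation of this action through the CLI would look roughly like
# the following (hypothetical IDs; exact flags depend on the generated
# parameter set):
#   tccli gse JoinGameServerSession --GameServerSessionId <session-id> \
#       --PlayerId <player-id>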
def doJoinGameServerSessionBatch(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.GseClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.JoinGameServerSessionBatchRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.JoinGameServerSessionBatch(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeFleetStatisticSummary(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.GseClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeFleetStatisticSummaryRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.DescribeFleetStatisticSummary(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteAlias(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.GseClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DeleteAliasRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.DeleteAlias(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteTimerScalingPolicy(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.GseClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DeleteTimerScalingPolicyRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.DeleteTimerScalingPolicy(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doUpdateAsset(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.GseClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.UpdateAssetRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.UpdateAsset(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doStartGameServerSessionPlacement(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.GseClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.StartGameServerSessionPlacementRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.StartGameServerSessionPlacement(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeInstanceLimit(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.GseClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeInstanceLimitRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.DescribeInstanceLimit(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doAttachCcnInstances(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.GseClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.AttachCcnInstancesRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.AttachCcnInstances(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == | |
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: remove useless nodes
from x2paddle.op_mapper.tf_op_mapper import TFOpMapper
from x2paddle.core.fluid_code import Layer
from x2paddle.core.util import *
import six
import numpy
import copy as cp
def exist_act(node):
for layer in node.fluid_code.layers:
if layer.param_attr is not None:
act = layer.param_attr.get("act", None)
if act is not None:
return True
return False
class TFOptimizer(object):
activation_ops = {
'Relu': 'relu',
'Sigmoid': 'sigmoid',
'Relu6': 'relu6',
'swish_f32': 'swish'
}
layers_with_act = [
'Conv2D', 'BiasAdd', 'DepthwiseConv2dNative', 'Conv2DBackpropInput',
'FusedBatchNorm', 'conv2d', 'elementwise_add', 'conv2d_transpose',
'batch_norm'
]
layers_with_bias = [
'Conv2D', 'DepthwiseConv2dNative', 'Conv2DBackpropInput', 'conv2d',
'conv2d_transpose'
]
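    # These three tables drive the fusion passes below: merge_activation()
    # folds a standalone activation op into the preceding layer's 'act'
    # attribute, and merge_bias() folds a BiasAdd into the preceding
    # convolution's 'bias_attr', so that the emitted Paddle code uses fused
    # layers rather than separate ops.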
def __init__(self, op_mapper):
self.op_mapper = op_mapper
self.graph = op_mapper.graph
def delete_redundance_code(self):
for node_name in self.graph.topo_sort:
if node_name in self.op_mapper.omit_nodes:
node = self.graph.get_node(node_name)
if node is None:
continue
omit_freq = self.op_mapper.omit_nodes.count(node_name)
if len(node.outputs) <= omit_freq:
node.fluid_code.clear()
# remove node from graph
input_names = node.inputs
output_names = node.outputs
for in_name in input_names:
in_node = self.graph.get_node(in_name)
index = in_node.outputs.index(node_name)
del in_node.outputs[index]
for out_name in output_names:
out_node = self.graph.get_node(out_name)
index = out_node.inputs.index(node_name)
del out_node.inputs[index]
del self.graph.node_map[node_name]
def strip_graph(self):
visited_nodes = set()
def visit(node_name):
if node_name in visited_nodes:
return
visited_nodes.add(node_name)
input_names = self.graph.get_node(node_name).inputs
for in_name in input_names:
visit(in_name)
for node_name in self.graph.output_nodes:
visit(node_name)
for i, node_name in enumerate(self.graph.topo_sort):
if node_name not in visited_nodes:
node = self.graph.get_node(node_name)
if node is None:
continue
input_names = node.inputs
output_names = node.outputs
for in_name in input_names:
in_node = self.graph.get_node(in_name)
index = in_node.outputs.index(node_name)
del in_node.outputs[index]
for out_name in output_names:
out_node = self.graph.get_node(out_name)
index = out_node.inputs.index(node_name)
del out_node.inputs[index]
del self.graph.node_map[node_name]
def optimize_elementwise_op(self):
elementwise_ops = [
'Sub', 'Add', 'RealDiv', 'Maximum', 'Mul', 'FloorDiv',
'GreaterEqual'
]
revertable_ops = ['Add', 'Mul']
for node_name in self.graph.topo_sort:
node = self.graph.get_node(node_name)
if node is None:
continue
if node.layer_type in elementwise_ops:
if len(node.fluid_code.layers) != 2:
continue
if node.fluid_code.layers[0].op != "expand":
continue
expand_out = node.fluid_code.layers[0].output
expand_in = node.fluid_code.layers[0].inputs
expand_times = node.fluid_code.layers[0].param_attr[
"expand_times"]
x = node.fluid_code.layers[1].inputs["x"]
y = node.fluid_code.layers[1].inputs["y"]
if isinstance(
x,
six.string_types) and node.layer_type in revertable_ops:
node.fluid_code.layers[1].inputs["y"] = x
node.fluid_code.layers[1].inputs["x"] = y
x = node.fluid_code.layers[1].inputs["x"]
y = expand_in
elif isinstance(y, six.string_types):
y = expand_in
else:
continue
x_shape = x.out_shapes[0]
y_shape = y.out_shapes[0]
if len(x_shape) != len(y_shape):
continue
if len(x_shape) == 4:
x_shape = [x_shape[i] for i in [0, 3, 1, 2]]
y_shape = [y_shape[i] for i in [0, 3, 1, 2]]
continue_flag = True
for i in range(len(x_shape)):
if y_shape[-1 * (i + 1)] == 1 and continue_flag:
expand_times[-1 * (i + 1)] = 1
else:
continue_flag = False
if expand_times.count(1) == len(expand_times):
node.fluid_code.layers[1].inputs["y"] = expand_in
del node.fluid_code.layers[0]
def merge_activation(self):
act_nodes = list()
for node_name in self.graph.topo_sort:
node = self.graph.get_node(node_name)
if node is None:
continue
if node.layer_type in self.activation_ops:
act_nodes.append(node_name)
for act_node_name in act_nodes:
node = self.graph.get_node(act_node_name)
input = self.graph.get_node(node.inputs[0])
if input.layer_type not in self.layers_with_act:
continue
if len(input.fluid_code.layers) == 0:
continue
if 'act' in input.fluid_code.layers[
-1].param_attr and input.fluid_code.layers[-1].param_attr[
'act'] is not None:
continue
if len(input.outputs) != 1:
continue
index = -1
for i in range(len(input.fluid_code.layers)):
if input.fluid_code.layers[i].op in self.layers_with_act:
index = i
break
input.fluid_code.layers[index].param_attr['act'] = string(
self.activation_ops[node.layer_type])
input.fluid_code.layers[-1].output = node.fluid_code.layers[
0].output
self.graph.remove_node(act_node_name)
def merge_bias(self):
for node_name in self.graph.topo_sort:
node = self.graph.get_node(node_name)
if node is None:
continue
if node.layer_type == "BiasAdd":
input = self.graph.get_node(node.inputs[0])
if input.layer_type not in self.layers_with_bias:
continue
if len(input.outputs) != 1:
continue
if len(input.fluid_code.layers) == 0:
continue
bias_with_act = False
if 'act' in node.fluid_code.layers[-1].param_attr:
bias_with_act = True
layer_with_act = False
index = -1
for i in range(len(input.fluid_code.layers)):
if input.fluid_code.layers[i].op in self.layers_with_bias:
index = i
break
if 'act' in input.fluid_code.layers[
index].param_attr and input.fluid_code.layers[
index].param_attr['act'] is not None:
layer_with_act = True
if bias_with_act and layer_with_act:
continue
if not input.fluid_code.layers[index].param_attr['bias_attr']:
bias_name = node.inputs[1]
input.fluid_code.layers[index].param_attr[
'bias_attr'] = string(bias_name)
input.fluid_code.layers[-1].output = node.fluid_code.layers[
0].output
if bias_with_act:
input.fluid_code.layers[index].param_attr[
'act'] = node.fluid_code.layers[-1].param_attr[
'act']
node.fluid_code.clear()
self.graph.remove_node(node.layer_name)
self.graph.identity_map[node.layer_name] = input.layer_name
def remove_transpose(self):
graph_copy = cp.deepcopy(self.graph)
nhwc_insensitive_ops = [
'Relu', 'Relu6', 'Abs', 'Sigmoid', 'Exp', 'Rsqrt', 'swish_f32',
'LeakyRelu', 'Cast', 'Tanh'
]
elementwise_ops = [
'Sub', 'Add', 'RealDiv', 'Maximum', 'Mul', 'FloorDiv',
'GreaterEqual'
]
optimize_ops = [
'Conv2D', 'MaxPool', 'FusedBatchNorm', 'DepthwiseConv2dNative',
'AvgPool', 'Pad', 'Conv2DBackpropInput', 'ResizeNearestNeighbor',
'ResizeBilinear', "Placeholder"
]
can_be_optimized_ops = [
'Conv2D', 'MaxPool', 'FusedBatchNorm', 'DepthwiseConv2dNative',
'AvgPool', 'Pad', 'Conv2DBackpropInput', 'ResizeNearestNeighbor',
'ResizeBilinear', "Placeholder", 'Relu', 'Relu6', 'Abs', 'Sigmoid',
'Exp', 'Rsqrt', 'swish_f32', 'LeakyRelu', 'Cast', 'Tanh'
]
for node_name in self.graph.topo_sort:
node = graph_copy.get_node(node_name)
if node is None:
continue
if node.layer_type in can_be_optimized_ops:
if node.fluid_code.layers[
-1].op != "transpose" or node.fluid_code.layers[
-1].param_attr["perm"] != [0, 2, 3, 1]:
continue
can_be_removed = True
output_names = node.outputs
for out_name in output_names:
out_node = graph_copy.get_node(out_name)
if hasattr(out_node, "can_be_removed"):
if not out_node.can_be_removed:
can_be_removed = False
break
elif out_node.fluid_code.layers[
0].op != "transpose" or out_node.fluid_code.layers[
0].param_attr["perm"] != [0, 3, 1, 2]:
can_be_removed = False
break
elif out_node.layer_type in elementwise_ops:
can_be_removed = False
break
if can_be_removed and len(node.fluid_code.layers) > 1:
true_node = self.graph.get_node(node_name)
if true_node.layer_type == "Placeholder":
index = self.graph.input_nodes.index(
true_node.fluid_code.layers[-2].output)
if isinstance(true_node.fluid_code.layers[-1].output,
str):
self.graph.input_nodes[
index] = true_node.fluid_code.layers[-1].output
else:
self.graph.input_nodes[
index] = true_node.fluid_code.layers[
-1].output.layer_name
true_node.fluid_code.layers[
-2].output = true_node.fluid_code.layers[-1].output
node.removed = True
del true_node.fluid_code.layers[-1]
for out_name in output_names:
out_node = self.graph.get_node(out_name)
out_node.fluid_code.layers[
1].inputs = out_node.fluid_code.layers[0].inputs
del out_node.fluid_code.layers[0]
for node_name in self.graph.topo_sort:
node = graph_copy.get_node(node_name)
if node is None:
continue
if node.layer_type in elementwise_ops:
can_be_removed = True
if node.fluid_code.layers[
-1].op != "transpose" or node.fluid_code.layers[
-1].param_attr["perm"] != [0, 2, 3, 1]:
continue
can_be_removed = True
output_names = node.outputs
for out_name in output_names:
out_node = graph_copy.get_node(out_name)
if len(out_node.fluid_code.layers) < 3:
can_be_removed = False
break
if hasattr(out_node, "can_be_removed"):
if not out_node.can_be_removed:
can_be_removed = False
break
if out_node.layer_type in can_be_optimized_ops:
if out_node.fluid_code.layers[
0].op != "transpose" or out_node.fluid_code.layers[
0].param_attr["perm"] != [0, 3, 1, 2]:
can_be_removed = False
break
elif out_node.layer_type in elementwise_ops:
if out_node.fluid_code.layers[
0].op != "transpose" and out_node.fluid_code.layers[
1].op != "transpose":
can_be_removed = False
break
if out_node.fluid_code.layers[0].op == "transpose":
if out_node.fluid_code.layers[0].param_attr[
"perm"] != [0, 3, 1, 2]:
can_be_removed = False
break
if out_node.fluid_code.layers[1].op == "transpose":
if out_node.fluid_code.layers[1].param_attr[
"perm"] != [0, 3, 1, 2]:
can_be_removed = False
break
if can_be_removed and len(node.fluid_code.layers) > 1:
true_node = self.graph.get_node(node_name)
true_node.fluid_code.layers[
-2].output = true_node.fluid_code.layers[-1].output
del true_node.fluid_code.layers[-1]
for out_name in output_names:
out_node = self.graph.get_node(out_name)
if out_node.layer_type in can_be_optimized_ops:
out_node.fluid_code.layers[
1].inputs = out_node.fluid_code.layers[0].inputs
del out_node.fluid_code.layers[0]
elif out_node.layer_type in elementwise_ops:
if out_node.inputs[0] in node.layer_name:
if out_node.fluid_code.layers[
1].op == 'transpose':
out_node.fluid_code.layers[2].inputs[
'x'] = out_node.fluid_code.layers[
0].inputs
del out_node.fluid_code.layers[0]
else:
out_node.fluid_code.layers[1].inputs[
'x'] = out_node.fluid_code.layers[
0].inputs
del out_node.fluid_code.layers[0]
elif out_node.inputs[1] in node.layer_name:
if out_node.fluid_code.layers[
1].op == 'transpose':
out_node.fluid_code.layers[2].inputs[
'y'] = out_node.fluid_code.layers[
1].inputs
del out_node.fluid_code.layers[1]
else:
out_node.fluid_code.layers[1].inputs[
'y'] = out_node.fluid_code.layers[
0].inputs
del out_node.fluid_code.layers[0]
graph_copy = cp.deepcopy(self.graph)
for node_name in self.graph.topo_sort:
node = graph_copy.get_node(node_name)
if node is None or len(node.fluid_code.layers) < 2:
continue
if node.layer_type in can_be_optimized_ops and node.layer_type != "Placeholder":
if node.fluid_code.layers[
-1].op != "transpose" or node.fluid_code.layers[
-1].param_attr["perm"] != [0, 2, 3, 1]:
continue
can_be_removed = True
output_names = node.outputs
for out_name in output_names:
out_node = graph_copy.get_node(out_name)
if hasattr(out_node, "can_be_removed"):
if not out_node.can_be_removed:
can_be_removed = False
break
if len(out_node.fluid_code.layers) < 2:
can_be_removed = False
break
if out_node.layer_type in can_be_optimized_ops:
if out_node.fluid_code.layers[
0].op != "transpose" or out_node.fluid_code.layers[
0].param_attr["perm"] != [0, 3, 1, 2]:
can_be_removed = False
break
elif out_node.layer_type in elementwise_ops:
if out_node.fluid_code.layers[
0].op != "transpose" and out_node.fluid_code.layers[
1].op != "transpose":
can_be_removed = False
break
if out_node.fluid_code.layers[
0].op == "expand" or out_node.fluid_code.layers[
1].op == "expand":
can_be_removed = False
break
if out_node.fluid_code.layers[0].op == "transpose":
if out_node.fluid_code.layers[0].param_attr[
"perm"] != [0, 3, 1, 2]:
can_be_removed = False
break
if out_node.fluid_code.layers[1].op == "transpose":
if out_node.fluid_code.layers[1].param_attr[
"perm"] != [0, 3, 1, 2]:
can_be_removed = False
break
elif out_node.layer_type not in elementwise_ops and out_node.layer_type not in can_be_optimized_ops:
can_be_removed = False
break
if can_be_removed:
true_node = self.graph.get_node(node_name)
if len(true_node.fluid_code.layers) < 2:
continue
true_node.fluid_code.layers[
-2].output = true_node.fluid_code.layers[-1].output
del true_node.fluid_code.layers[-1]
for out_name in output_names:
out_node = self.graph.get_node(out_name)
if out_node.layer_type in can_be_optimized_ops:
out_node.fluid_code.layers[
| |
doesn’t use the cache of uncompressed blocks.
:param pulumi.Input[int] merge_tree_max_rows_to_use_cache: If ClickHouse should read more than merge_tree_max_rows_to_use_cache rows in one query, it doesn’t use the cache of uncompressed blocks.
:param pulumi.Input[int] merge_tree_min_bytes_for_concurrent_read: If the number of bytes to read from one file of a MergeTree-engine table exceeds merge_tree_min_bytes_for_concurrent_read, then ClickHouse tries to concurrently read from this file in several threads.
:param pulumi.Input[int] merge_tree_min_rows_for_concurrent_read: If the number of rows to be read from a file of a MergeTree table exceeds merge_tree_min_rows_for_concurrent_read then ClickHouse tries to perform a concurrent reading from this file on several threads.
:param pulumi.Input[int] min_bytes_to_use_direct_io: The minimum data volume required for using direct I/O access to the storage disk.
:param pulumi.Input[int] min_count_to_compile: How many times to potentially use a compiled chunk of code before running compilation.
:param pulumi.Input[int] min_count_to_compile_expression: A query waits for expression compilation process to complete prior to continuing execution.
:param pulumi.Input[int] min_execution_speed: Minimal execution speed in rows per second.
:param pulumi.Input[int] min_execution_speed_bytes: Minimal execution speed in bytes per second.
:param pulumi.Input[int] min_insert_block_size_bytes: Sets the minimum number of bytes in the block which can be inserted into a table by an INSERT query.
:param pulumi.Input[int] min_insert_block_size_rows: Sets the minimum number of rows in the block which can be inserted into a table by an INSERT query.
:param pulumi.Input[bool] output_format_json_quote64bit_integers: If the value is true, integers appear in quotes when using JSON* Int64 and UInt64 formats (for compatibility with most JavaScript implementations); otherwise, integers are output without the quotes.
:param pulumi.Input[bool] output_format_json_quote_denormals: Enables +nan, -nan, +inf, -inf outputs in JSON output format.
:param pulumi.Input[int] priority: Query priority.
:param pulumi.Input[str] quota_mode: Quota accounting mode.
        :param pulumi.Input[str] read_overflow_mode: Sets behaviour on overflow while reading. Possible values:
        :param pulumi.Input[int] readonly: Restricts permissions for reading data, writing data, and changing settings.
:param pulumi.Input[int] receive_timeout: Receive timeout in milliseconds on the socket used for communicating with the client.
:param pulumi.Input[int] replication_alter_partitions_sync: For ALTER ... ATTACH|DETACH|DROP queries, you can use the replication_alter_partitions_sync setting to set up waiting.
:param pulumi.Input[str] result_overflow_mode: Sets behaviour on overflow in result. Possible values:
:param pulumi.Input[bool] select_sequential_consistency: Enables or disables sequential consistency for SELECT queries.
:param pulumi.Input[bool] send_progress_in_http_headers: Enables or disables X-ClickHouse-Progress HTTP response headers in clickhouse-server responses.
:param pulumi.Input[int] send_timeout: Send timeout in milliseconds on the socket used for communicating with the client.
        :param pulumi.Input[str] set_overflow_mode: Sets behaviour on overflow in the resulting set. Possible values:
:param pulumi.Input[bool] skip_unavailable_shards: Enables or disables silently skipping of unavailable shards.
        :param pulumi.Input[str] sort_overflow_mode: Sets behaviour on overflow while sorting. Possible values:
:param pulumi.Input[str] timeout_overflow_mode: Sets behaviour on overflow. Possible values:
:param pulumi.Input[str] transfer_overflow_mode: Sets behaviour on overflow. Possible values:
:param pulumi.Input[bool] transform_null_in: Enables equality of NULL values for IN operator.
:param pulumi.Input[bool] use_uncompressed_cache: Whether to use a cache of uncompressed blocks.
"""
if add_http_cors_header is not None:
pulumi.set(__self__, "add_http_cors_header", add_http_cors_header)
if allow_ddl is not None:
pulumi.set(__self__, "allow_ddl", allow_ddl)
if compile is not None:
pulumi.set(__self__, "compile", compile)
if compile_expressions is not None:
pulumi.set(__self__, "compile_expressions", compile_expressions)
if connect_timeout is not None:
pulumi.set(__self__, "connect_timeout", connect_timeout)
if count_distinct_implementation is not None:
pulumi.set(__self__, "count_distinct_implementation", count_distinct_implementation)
if distinct_overflow_mode is not None:
pulumi.set(__self__, "distinct_overflow_mode", distinct_overflow_mode)
if distributed_aggregation_memory_efficient is not None:
pulumi.set(__self__, "distributed_aggregation_memory_efficient", distributed_aggregation_memory_efficient)
if distributed_ddl_task_timeout is not None:
pulumi.set(__self__, "distributed_ddl_task_timeout", distributed_ddl_task_timeout)
if distributed_product_mode is not None:
pulumi.set(__self__, "distributed_product_mode", distributed_product_mode)
if empty_result_for_aggregation_by_empty_set is not None:
pulumi.set(__self__, "empty_result_for_aggregation_by_empty_set", empty_result_for_aggregation_by_empty_set)
if enable_http_compression is not None:
pulumi.set(__self__, "enable_http_compression", enable_http_compression)
if fallback_to_stale_replicas_for_distributed_queries is not None:
pulumi.set(__self__, "fallback_to_stale_replicas_for_distributed_queries", fallback_to_stale_replicas_for_distributed_queries)
if force_index_by_date is not None:
pulumi.set(__self__, "force_index_by_date", force_index_by_date)
if force_primary_key is not None:
pulumi.set(__self__, "force_primary_key", force_primary_key)
if group_by_overflow_mode is not None:
pulumi.set(__self__, "group_by_overflow_mode", group_by_overflow_mode)
if group_by_two_level_threshold is not None:
pulumi.set(__self__, "group_by_two_level_threshold", group_by_two_level_threshold)
if group_by_two_level_threshold_bytes is not None:
pulumi.set(__self__, "group_by_two_level_threshold_bytes", group_by_two_level_threshold_bytes)
if http_connection_timeout is not None:
pulumi.set(__self__, "http_connection_timeout", http_connection_timeout)
if http_headers_progress_interval is not None:
pulumi.set(__self__, "http_headers_progress_interval", http_headers_progress_interval)
if http_receive_timeout is not None:
pulumi.set(__self__, "http_receive_timeout", http_receive_timeout)
if http_send_timeout is not None:
pulumi.set(__self__, "http_send_timeout", http_send_timeout)
if input_format_defaults_for_omitted_fields is not None:
pulumi.set(__self__, "input_format_defaults_for_omitted_fields", input_format_defaults_for_omitted_fields)
if input_format_values_interpret_expressions is not None:
pulumi.set(__self__, "input_format_values_interpret_expressions", input_format_values_interpret_expressions)
if insert_quorum is not None:
pulumi.set(__self__, "insert_quorum", insert_quorum)
if insert_quorum_timeout is not None:
pulumi.set(__self__, "insert_quorum_timeout", insert_quorum_timeout)
if join_overflow_mode is not None:
pulumi.set(__self__, "join_overflow_mode", join_overflow_mode)
if join_use_nulls is not None:
pulumi.set(__self__, "join_use_nulls", join_use_nulls)
if joined_subquery_requires_alias is not None:
pulumi.set(__self__, "joined_subquery_requires_alias", joined_subquery_requires_alias)
if low_cardinality_allow_in_native_format is not None:
pulumi.set(__self__, "low_cardinality_allow_in_native_format", low_cardinality_allow_in_native_format)
if max_ast_depth is not None:
pulumi.set(__self__, "max_ast_depth", max_ast_depth)
if max_ast_elements is not None:
pulumi.set(__self__, "max_ast_elements", max_ast_elements)
if max_block_size is not None:
pulumi.set(__self__, "max_block_size", max_block_size)
if max_bytes_before_external_group_by is not None:
pulumi.set(__self__, "max_bytes_before_external_group_by", max_bytes_before_external_group_by)
if max_bytes_before_external_sort is not None:
pulumi.set(__self__, "max_bytes_before_external_sort", max_bytes_before_external_sort)
if max_bytes_in_distinct is not None:
pulumi.set(__self__, "max_bytes_in_distinct", max_bytes_in_distinct)
if max_bytes_in_join is not None:
pulumi.set(__self__, "max_bytes_in_join", max_bytes_in_join)
if max_bytes_in_set is not None:
pulumi.set(__self__, "max_bytes_in_set", max_bytes_in_set)
if max_bytes_to_read is not None:
pulumi.set(__self__, "max_bytes_to_read", max_bytes_to_read)
if max_bytes_to_sort is not None:
pulumi.set(__self__, "max_bytes_to_sort", max_bytes_to_sort)
if max_bytes_to_transfer is not None:
pulumi.set(__self__, "max_bytes_to_transfer", max_bytes_to_transfer)
if max_columns_to_read is not None:
pulumi.set(__self__, "max_columns_to_read", max_columns_to_read)
if max_execution_time is not None:
pulumi.set(__self__, "max_execution_time", max_execution_time)
if max_expanded_ast_elements is not None:
pulumi.set(__self__, "max_expanded_ast_elements", max_expanded_ast_elements)
if max_insert_block_size is not None:
pulumi.set(__self__, "max_insert_block_size", max_insert_block_size)
if max_memory_usage is not None:
pulumi.set(__self__, "max_memory_usage", max_memory_usage)
if max_memory_usage_for_user is not None:
pulumi.set(__self__, "max_memory_usage_for_user", max_memory_usage_for_user)
if max_network_bandwidth is not None:
pulumi.set(__self__, "max_network_bandwidth", max_network_bandwidth)
if max_network_bandwidth_for_user is not None:
pulumi.set(__self__, "max_network_bandwidth_for_user", max_network_bandwidth_for_user)
if max_query_size is not None:
pulumi.set(__self__, "max_query_size", max_query_size)
if max_replica_delay_for_distributed_queries is not None:
pulumi.set(__self__, "max_replica_delay_for_distributed_queries", max_replica_delay_for_distributed_queries)
if max_result_bytes is not None:
pulumi.set(__self__, "max_result_bytes", max_result_bytes)
if max_result_rows is not None:
pulumi.set(__self__, "max_result_rows", max_result_rows)
if max_rows_in_distinct is not None:
pulumi.set(__self__, "max_rows_in_distinct", max_rows_in_distinct)
if max_rows_in_join is not None:
pulumi.set(__self__, "max_rows_in_join", max_rows_in_join)
if max_rows_in_set is not None:
pulumi.set(__self__, "max_rows_in_set", max_rows_in_set)
if max_rows_to_group_by is not None:
pulumi.set(__self__, "max_rows_to_group_by", max_rows_to_group_by)
if max_rows_to_read is not None:
pulumi.set(__self__, "max_rows_to_read", max_rows_to_read)
if max_rows_to_sort is not None:
pulumi.set(__self__, "max_rows_to_sort", max_rows_to_sort)
if max_rows_to_transfer is not None:
pulumi.set(__self__, "max_rows_to_transfer", max_rows_to_transfer)
if max_temporary_columns is not None:
pulumi.set(__self__, "max_temporary_columns", max_temporary_columns)
if max_temporary_non_const_columns is not None:
pulumi.set(__self__, "max_temporary_non_const_columns", max_temporary_non_const_columns)
if max_threads is not None:
pulumi.set(__self__, "max_threads", max_threads)
if merge_tree_max_bytes_to_use_cache is not None:
pulumi.set(__self__, "merge_tree_max_bytes_to_use_cache", merge_tree_max_bytes_to_use_cache)
if merge_tree_max_rows_to_use_cache is not None:
pulumi.set(__self__, "merge_tree_max_rows_to_use_cache", merge_tree_max_rows_to_use_cache)
if merge_tree_min_bytes_for_concurrent_read is not None:
pulumi.set(__self__, "merge_tree_min_bytes_for_concurrent_read", merge_tree_min_bytes_for_concurrent_read)
if merge_tree_min_rows_for_concurrent_read is not None:
pulumi.set(__self__, "merge_tree_min_rows_for_concurrent_read", merge_tree_min_rows_for_concurrent_read)
if min_bytes_to_use_direct_io is not None:
pulumi.set(__self__, "min_bytes_to_use_direct_io", min_bytes_to_use_direct_io)
if min_count_to_compile is not None:
pulumi.set(__self__, "min_count_to_compile", min_count_to_compile)
if min_count_to_compile_expression is not None:
pulumi.set(__self__, "min_count_to_compile_expression", min_count_to_compile_expression)
if min_execution_speed is not None:
pulumi.set(__self__, "min_execution_speed", min_execution_speed)
if min_execution_speed_bytes is not None:
pulumi.set(__self__, "min_execution_speed_bytes", min_execution_speed_bytes)
if min_insert_block_size_bytes is not None:
pulumi.set(__self__, "min_insert_block_size_bytes", min_insert_block_size_bytes)
if min_insert_block_size_rows is not None:
pulumi.set(__self__, "min_insert_block_size_rows", min_insert_block_size_rows)
if output_format_json_quote64bit_integers is not None:
pulumi.set(__self__, "output_format_json_quote64bit_integers", output_format_json_quote64bit_integers)
if output_format_json_quote_denormals is not None:
pulumi.set(__self__, "output_format_json_quote_denormals", output_format_json_quote_denormals)
if priority is not None:
pulumi.set(__self__, "priority", priority)
if quota_mode is not None:
pulumi.set(__self__, "quota_mode", quota_mode)
if read_overflow_mode is not None:
pulumi.set(__self__, "read_overflow_mode", read_overflow_mode)
if readonly is not None:
pulumi.set(__self__, "readonly", readonly)
if receive_timeout is not None:
pulumi.set(__self__, "receive_timeout", receive_timeout)
if replication_alter_partitions_sync is not None:
pulumi.set(__self__, "replication_alter_partitions_sync", replication_alter_partitions_sync)
if result_overflow_mode is not None:
pulumi.set(__self__, "result_overflow_mode", result_overflow_mode)
if select_sequential_consistency is not None:
pulumi.set(__self__, "select_sequential_consistency", select_sequential_consistency)
if send_progress_in_http_headers is not None:
pulumi.set(__self__, "send_progress_in_http_headers", send_progress_in_http_headers)
if send_timeout is not None:
pulumi.set(__self__, "send_timeout", send_timeout)
if set_overflow_mode is not None:
pulumi.set(__self__, "set_overflow_mode", set_overflow_mode)
if skip_unavailable_shards is not None:
pulumi.set(__self__, "skip_unavailable_shards", skip_unavailable_shards)
if sort_overflow_mode is not None:
pulumi.set(__self__, "sort_overflow_mode", sort_overflow_mode)
if timeout_overflow_mode is not None:
pulumi.set(__self__, "timeout_overflow_mode", timeout_overflow_mode)
if transfer_overflow_mode is not None:
pulumi.set(__self__, "transfer_overflow_mode", transfer_overflow_mode)
if transform_null_in is not None:
pulumi.set(__self__, "transform_null_in", transform_null_in)
if use_uncompressed_cache is not None:
pulumi.set(__self__, "use_uncompressed_cache", use_uncompressed_cache)
@property
@pulumi.getter(name="addHttpCorsHeader")
def add_http_cors_header(self) -> Optional[pulumi.Input[bool]]:
"""
        Include CORS headers in HTTP responses.
"""
return pulumi.get(self, "add_http_cors_header")
@add_http_cors_header.setter
def add_http_cors_header(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "add_http_cors_header", value)
@property
@pulumi.getter(name="allowDdl")
def allow_ddl(self) -> Optional[pulumi.Input[bool]]:
"""
Allows or denies DDL queries.
"""
return pulumi.get(self, "allow_ddl")
@allow_ddl.setter
def allow_ddl(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "allow_ddl", value)
@property
@pulumi.getter
def compile(self) -> Optional[pulumi.Input[bool]]:
"""
Enable compilation of queries.
"""
return pulumi.get(self, "compile")
@compile.setter
def compile(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "compile", value)
@property
@pulumi.getter(name="compileExpressions")
def compile_expressions(self) -> Optional[pulumi.Input[bool]]:
"""
Turn on expression compilation.
"""
| |
# -*- coding: utf-8 -*-
import copy
from typing import Union
import numpy as np
def temperature(
t: np.array,
fire_load_density_MJm2: float,
fire_hrr_density_MWm2: float,
room_length_m: float,
room_width_m: float,
fire_spread_rate_ms: float,
beam_location_height_m: float,
beam_location_length_m: Union[float, list, np.ndarray],
fire_nft_limit_c: float,
*_,
**__,
):
"""
    This function calculates and returns a gas temperature array for a travelling fire. Note that this function does NOT use SI units.
:param t: in s, is the time array
:param fire_load_density_MJm2: in MJ/m2, is the fuel density on the floor
:param fire_hrr_density_MWm2: in MW/m2, is the heat release rate density
:param room_length_m: in m, is the room length
:param room_width_m: in m, is the room width
:param fire_spread_rate_ms: in m/s, is fire spread speed
    :param beam_location_height_m: in m, is the beam height above the floor
    :param beam_location_length_m: in m, is the beam lateral distance to the fire origin
:param fire_nft_limit_c: in deg.C, is the maximum near field temperature
:param opening_fraction: in -, is the ventilation opening proportion between 0 to 1
:param opening_width_m: in m, is ventilation opening width
:param opening_height_m: in m, is ventilation opening height
:return T_g: in deg.C, is calculated gas temperature
"""
# re-assign variable names for equation readability
q_fd = fire_load_density_MJm2
HRRPUA = fire_hrr_density_MWm2
s = fire_spread_rate_ms
h_s = beam_location_height_m
l_s = beam_location_length_m
l = room_length_m
w = room_width_m
if l < w:
l += w
w = l - w
l -= w
# work out ventilation conditions
# a_v = opening_height_m * opening_width_m * opening_fraction
# Qv = 1.75 * a_v * np.sqrt(opening_height_m)
    # work out burning time etc.
t_burn = max([q_fd / HRRPUA, 900.0])
t_decay = max([t_burn, l / s])
t_lim = min([t_burn, l / s])
# reduce resolution to fit time step for t_burn, t_decay, t_lim
time_interval_s = t[1] - t[0]
t_decay_ = round(t_decay / time_interval_s, 0) * time_interval_s
t_lim_ = round(t_lim / time_interval_s, 0) * time_interval_s
if t_decay_ == t_lim_:
t_lim_ -= time_interval_s
    # work out the heat release rate ARRAY (corrected with time)
Q_growth = (HRRPUA * w * s * t) * (t < t_lim_)
Q_peak = (
min([HRRPUA * w * s * t_burn, HRRPUA * w * l]) * (t >= t_lim_) * (t <= t_decay_)
)
Q_decay = (max(Q_peak) - (t - t_decay_) * w * s * HRRPUA) * (t > t_decay_)
Q_decay[Q_decay < 0] = 0
Q = (Q_growth + Q_peak + Q_decay) * 1000.0
    # work out the distance r between the fire median and the structural element
l_fire_front = s * t
l_fire_front[l_fire_front < 0] = 0
l_fire_front[l_fire_front > l] = l
l_fire_end = s * (t - t_lim)
l_fire_end[l_fire_end < 0] = 0.0
l_fire_end[l_fire_end > l] = l
l_fire_median = (l_fire_front + l_fire_end) / 2.0
    # work out the far-field gas temperature T_g
if isinstance(l_s, float) or isinstance(l_s, int):
r = np.absolute(l_s - l_fire_median)
T_g = np.where((r / h_s) > 0.8, (5.38 * np.power(Q / r, 2 / 3) / h_s) + 20.0, 0)
T_g = np.where(
(r / h_s) <= 0.8,
(16.9 * np.power(Q, 2 / 3) / np.power(h_s, 5 / 3)) + 20.0,
T_g,
)
T_g[T_g >= fire_nft_limit_c] = fire_nft_limit_c
return T_g
elif isinstance(l_s, np.ndarray) or isinstance(l_s, list):
l_s_list = copy.copy(l_s)
T_g_list = list()
for l_s in l_s_list:
r = np.absolute(l_s - l_fire_median)
T_g = np.where(
(r / h_s) > 0.8, (5.38 * np.power(Q / r, 2 / 3) / h_s) + 20.0, 0
)
T_g = np.where(
(r / h_s) <= 0.8,
(16.9 * np.power(Q, 2 / 3) / np.power(h_s, 5 / 3)) + 20.0,
T_g,
)
T_g[T_g >= fire_nft_limit_c] = fire_nft_limit_c
T_g_list.append(T_g)
return T_g_list
else:
raise TypeError('Unknown type of parameter "l_s": {}'.format(type(l_s)))
def temperature_si(
t: np.ndarray,
T_0: float,
q_fd: float,
hrrpua: float,
l: float,
w: float,
s: float,
e_h: float,
e_l: float,
T_max: float = 1323.15,
):
"""
This is an SI and improved version of the original `temperature` method.
    :param t: ndarray, [s], Time array corresponding to the returned temperature array.
    :param T_0: float, [K], Initial temperature.
:param q_fd: float, [J/m2], Fire load density.
:param hrrpua: float, [W/m2], Heat release rate density.
:param l: float, [m], Compartment length.
:param w: float, [m], Compartment width.
:param s: float, [m/s], Fire spread speed.
    :param e_h: float, [m], Vertical distance between the element and the fuel bed.
    :param e_l: float, [m], Horizontal distance between the element and the fire origin.
    :param T_max: float, [K], Near-field gas temperature limit, defaults to 1323.15 K.
    :return temperature: ndarray, [K], Gas temperature array corresponding to the input time array.
"""
# UNIT CONVERSION TO FIT EQUATIONS
T_0 -= 273.15
q_fd /= 1e6
hrrpua /= 1e6
T_max -= 273.15
    # work out the time step
time_step = t[1] - t[0]
    # work out burning time etc.
t_burn = max([q_fd / hrrpua, 900.0])
t_decay = max([t_burn, l / s])
t_lim = min([t_burn, l / s])
# reduce resolution to fit time step for t_burn, t_decay, t_lim
t_decay_ = round(t_decay / time_step, 0) * time_step
t_lim_ = round(t_lim / time_step, 0) * time_step
if t_decay_ == t_lim_:
t_lim_ -= time_step
    # work out the heat release rate ARRAY (corrected with time)
Q_growth = (hrrpua * w * s * t) * (t < t_lim_)
Q_peak = min([hrrpua * w * s * t_burn, hrrpua * w * l]) * (t >= t_lim_) * (t <= t_decay_)
Q_decay = (max(Q_peak) - (t - t_decay_) * w * s * hrrpua) * (t > t_decay_)
Q_decay[Q_decay < 0] = 0
Q = (Q_growth + Q_peak + Q_decay) * 1000.0
    # work out the distance r between the fire median and the structural element
l_fire_front = s * t
l_fire_front[l_fire_front < 0] = 0.0
l_fire_front[l_fire_front > l] = l
l_fire_end = s * (t - t_lim)
l_fire_end[l_fire_end < 0] = 0.0
l_fire_end[l_fire_end > l] = l
l_fire_median = (l_fire_front + l_fire_end) / 2.0
r = np.absolute(e_l - l_fire_median)
r[r == 0] = 0.001 # will cause crash if r = 0
    # work out the far-field gas temperature T_g
T_g1 = (5.38 * np.power(Q / r, 2 / 3) / e_h) * ((r / e_h) > 0.18)
T_g2 = (16.9 * np.power(Q, 2 / 3) / np.power(e_h, 5 / 3)) * ((r / e_h) <= 0.18)
T_g = T_g1 + T_g2 + T_0
T_g[T_g >= T_max] = T_max
# UNIT CONVERSION TO FIT OUTPUT (SI)
T_g = T_g + 273.15 # C -> K
Q *= 10e6 # MJ -> J
return T_g
def _test_fire():
time = np.arange(0, 210 * 60, 30)
list_l = [25, 50, 100, 150]
import matplotlib.pyplot as plt
plt.style.use("seaborn-paper")
fig, ax = plt.subplots(figsize=(3.94, 2.76))
ax.set_xlabel("Time [minute]")
ax.set_ylabel(u"Temperature [$℃$]")
for length in list_l:
temperature_ = temperature(
t=time,
fire_load_density_MJm2=600,
fire_hrr_density_MWm2=0.25,
room_length_m=length,
room_width_m=16,
fire_spread_rate_ms=0.012,
beam_location_height_m=3,
beam_location_length_m=length / 2,
fire_nft_limit_c=1050,
)
ax.plot(time / 60, temperature_, label="Room length {:4.0f} m".format(length))
ax.legend(loc=4).set_visible(True)
ax.set_xlim((-10, 190))
ax.grid(color="k", linestyle="--")
plt.tight_layout()
plt.show()
def _test_fire_backup():
import numpy as np
time = np.arange(0, 22080, 30)
list_l = [50, 100, 150]
import matplotlib.pyplot as plt
plt.style.use("seaborn-paper")
fig, ax = plt.subplots(figsize=(3.94, 2.76))
ax.set_xlabel("Time [minute]")
ax.set_ylabel("Temperature [$℃$]")
for l in list_l:
temperature_0 = temperature_si(
t=time,
T_0=293.15,
q_fd=900e6,
hrrpua=0.15e6,
l=l,
w=17.4,
s=0.012,
e_h=3.5,
e_l=l / 2,
)
temperature_1 = temperature(
t=time,
fire_load_density_MJm2=900,
fire_hrr_density_MWm2=0.15,
room_length_m=l,
room_width_m=17.4,
fire_spread_rate_ms=0.012,
beam_location_height_m=3.5,
beam_location_length_m=l / 2,
fire_nft_limit_c=1323.15 - 273.15
)
ax.plot(time / 60, temperature_0 - 273.15)
ax.plot(time / 60, temperature_1, ls=':', c='r')
assert np.allclose(temperature_0 - 273.15, temperature_1)
ax.legend().set_visible(True)
ax.set_xlim((0, 180))
ax.grid(color="grey", linestyle="--", linewidth=0.5)
plt.tight_layout()
plt.show()
def _test_fire_multiple_beam_location():
time = np.arange(0, 210 * 60, 30)
length = 100
import matplotlib.pyplot as plt
plt.style.use("seaborn-paper")
fig, ax = plt.subplots(figsize=(3.94, 2.76))
ax.set_xlabel("Time [minute]")
ax.set_ylabel("Temperature [$℃$]")
temperature_list = temperature(
t=time,
fire_load_density_MJm2=600,
fire_hrr_density_MWm2=0.25,
room_length_m=length,
room_width_m=16,
fire_spread_rate_ms=0.012,
beam_location_height_m=3,
beam_location_length_m=np.linspace(0, length, 12)[1:-1],
fire_nft_limit_c=1050,
)
for temperature_ in temperature_list:
ax.plot(time / 60, temperature_, label="Room length {:4.0f} m".format(length))
ax.legend(loc=4).set_visible(True)
ax.set_xlim((-10, 190))
ax.grid(color="k", linestyle="--")
plt.tight_layout()
# plt.show()
def example():
time = np.arange(0, 210 * 60, 30)
list_l = [25, 50, 100, 150]
import matplotlib.pyplot as plt
plt.style.use("seaborn-paper")
fig, ax = plt.subplots(figsize=(3.94, 2.76))
ax.set_xlabel("Time [minute]")
ax.set_ylabel("Temperature [$℃$]")
for length in list_l:
temperature_ = temperature(
t=time,
fire_load_density_MJm2=600,
fire_hrr_density_MWm2=0.25,
room_length_m=length,
room_width_m=16,
fire_spread_rate_ms=0.012,
beam_location_height_m=3,
beam_location_length_m=length / 2,
fire_nft_limit_c=1050,
)
ax.plot(time / 60, temperature_, label="Room length {:4.0f} | |
"""Database tools."""
import os
from cStringIO import StringIO
from skytools.quoting import quote_copy, quote_literal, quote_ident, quote_fqident
import skytools.installer_config
try:
import plpy
except ImportError:
pass
__all__ = [
"fq_name_parts", "fq_name", "get_table_oid", "get_table_pkeys",
"get_table_columns", "exists_schema", "exists_table", "exists_type",
"exists_sequence", "exists_temp_table",
"exists_function", "exists_language", "Snapshot", "magic_insert",
"CopyPipe", "full_copy", "DBObject", "DBSchema", "DBTable", "DBFunction",
"DBLanguage", "db_install", "installer_find_file", "installer_apply_file",
"dbdict", "mk_insert_sql", "mk_update_sql", "mk_delete_sql",
'QueryBuilder', 'PLPyQueryBuilder',
]
PARAM_INLINE = 0 # quote_literal()
PARAM_DBAPI = 1 # %()s
PARAM_PLPY = 2 # $n
class dbdict(dict):
"""Wrapper on actual dict that allows
accessing dict keys as attributes."""
# obj.foo access
def __getattr__(self, k):
"Return attribute."
return self[k]
def __setattr__(self, k, v):
"Set attribute."
self[k] = v
def __delattr__(self, k):
"Remove attribute"
del self[k]
#
# Fully qualified table name
#
def fq_name_parts(tbl):
"Return fully qualified name parts."
tmp = tbl.split('.', 1)
if len(tmp) == 1:
return ('public', tbl)
elif len(tmp) == 2:
return tmp
else:
raise Exception('Syntax error in table name:'+tbl)
def fq_name(tbl):
"Return fully qualified name."
return '.'.join(fq_name_parts(tbl))
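# A minimal, illustrative check of the helpers above; the table names used
# here ('mytable', 'pgq.queue') are made-up examples.
def _fq_name_example():
    """Illustrative only: unqualified names default to the 'public' schema."""
    assert fq_name_parts('mytable') == ('public', 'mytable')
    assert fq_name('pgq.queue') == 'pgq.queue'
    return fq_name('mytable')   # -> 'public.mytable'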
#
# info about table
#
def get_table_oid(curs, table_name):
"""Find Postgres OID for table."""
schema, name = fq_name_parts(table_name)
q = """select c.oid from pg_namespace n, pg_class c
where c.relnamespace = n.oid
and n.nspname = %s and c.relname = %s"""
curs.execute(q, [schema, name])
res = curs.fetchall()
if len(res) == 0:
raise Exception('Table not found: '+table_name)
return res[0][0]
def get_table_pkeys(curs, tbl):
"""Return list of pkey column names."""
oid = get_table_oid(curs, tbl)
q = "SELECT k.attname FROM pg_index i, pg_attribute k"\
" WHERE i.indrelid = %s AND k.attrelid = i.indexrelid"\
" AND i.indisprimary AND k.attnum > 0 AND NOT k.attisdropped"\
" ORDER BY k.attnum"
curs.execute(q, [oid])
return map(lambda x: x[0], curs.fetchall())
def get_table_columns(curs, tbl):
"""Return list of column names for table."""
oid = get_table_oid(curs, tbl)
q = "SELECT k.attname FROM pg_attribute k"\
" WHERE k.attrelid = %s"\
" AND k.attnum > 0 AND NOT k.attisdropped"\
" ORDER BY k.attnum"
curs.execute(q, [oid])
return map(lambda x: x[0], curs.fetchall())
#
# exist checks
#
def exists_schema(curs, schema):
"""Does schema exists?"""
q = "select count(1) from pg_namespace where nspname = %s"
curs.execute(q, [schema])
res = curs.fetchone()
return res[0]
def exists_table(curs, table_name):
"""Does table exists?"""
schema, name = fq_name_parts(table_name)
q = """select count(1) from pg_namespace n, pg_class c
where c.relnamespace = n.oid and c.relkind = 'r'
and n.nspname = %s and c.relname = %s"""
curs.execute(q, [schema, name])
res = curs.fetchone()
return res[0]
def exists_sequence(curs, seq_name):
"""Does sequence exists?"""
schema, name = fq_name_parts(seq_name)
q = """select count(1) from pg_namespace n, pg_class c
where c.relnamespace = n.oid and c.relkind = 'S'
and n.nspname = %s and c.relname = %s"""
curs.execute(q, [schema, name])
res = curs.fetchone()
return res[0]
def exists_type(curs, type_name):
"""Does type exists?"""
schema, name = fq_name_parts(type_name)
q = """select count(1) from pg_namespace n, pg_type t
where t.typnamespace = n.oid
and n.nspname = %s and t.typname = %s"""
curs.execute(q, [schema, name])
res = curs.fetchone()
return res[0]
def exists_function(curs, function_name, nargs):
"""Does function exists?"""
# this does not check arg types, so may match several functions
schema, name = fq_name_parts(function_name)
q = """select count(1) from pg_namespace n, pg_proc p
where p.pronamespace = n.oid and p.pronargs = %s
and n.nspname = %s and p.proname = %s"""
curs.execute(q, [nargs, schema, name])
res = curs.fetchone()
# if unqualified function, check builtin functions too
if not res[0] and function_name.find('.') < 0:
name = "pg_catalog." + function_name
return exists_function(curs, name, nargs)
return res[0]
def exists_language(curs, lang_name):
"""Does PL exists?"""
q = """select count(1) from pg_language
where lanname = %s"""
curs.execute(q, [lang_name])
res = curs.fetchone()
return res[0]
def exists_temp_table(curs, tbl):
"""Does temp table exists?"""
# correct way, works only on 8.2
q = "select 1 from pg_class where relname = %s and relnamespace = pg_my_temp_schema()"
curs.execute(q, [tbl])
tmp = curs.fetchall()
return len(tmp) > 0
#
# Support for PostgreSQL snapshot
#
class Snapshot(object):
"Represents a PostgreSQL snapshot."
def __init__(self, str):
"Create snapshot from string."
self.sn_str = str
tmp = str.split(':')
if len(tmp) != 3:
raise Exception('Unknown format for snapshot')
self.xmin = int(tmp[0])
self.xmax = int(tmp[1])
self.txid_list = []
if tmp[2] != "":
for s in tmp[2].split(','):
self.txid_list.append(int(s))
def contains(self, txid):
"Is txid visible in snapshot."
txid = int(txid)
if txid < self.xmin:
return True
if txid >= self.xmax:
return False
if txid in self.txid_list:
return False
return True
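# A minimal, illustrative sketch of Snapshot usage; the snapshot string below
# ('100:110:104,107') is made up and follows the xmin:xmax:xip_list format
# parsed in __init__ above.
def _snapshot_example():
    """Illustrative only: visibility checks against a made-up snapshot."""
    snap = Snapshot('100:110:104,107')
    assert snap.contains(99)        # committed before xmin -> visible
    assert not snap.contains(104)   # listed as in-progress -> not visible
    assert not snap.contains(115)   # started after xmax -> not visible
    return snap.xmin, snap.xmax, snap.txid_list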
#
# Copy helpers
#
def _gen_dict_copy(tbl, row, fields, qfields):
tmp = []
for f in fields:
v = row.get(f)
tmp.append(quote_copy(v))
return "\t".join(tmp)
def _gen_dict_insert(tbl, row, fields, qfields):
tmp = []
for f in fields:
v = row.get(f)
tmp.append(quote_literal(v))
fmt = "insert into %s (%s) values (%s);"
return fmt % (tbl, ",".join(qfields), ",".join(tmp))
def _gen_list_copy(tbl, row, fields, qfields):
tmp = []
for i in range(len(fields)):
v = row[i]
tmp.append(quote_copy(v))
return "\t".join(tmp)
def _gen_list_insert(tbl, row, fields, qfields):
tmp = []
for i in range(len(fields)):
v = row[i]
tmp.append(quote_literal(v))
fmt = "insert into %s (%s) values (%s);"
return fmt % (tbl, ",".join(qfields), ",".join(tmp))
def magic_insert(curs, tablename, data, fields = None, use_insert = 0):
"""Copy/insert a list of dict/list data to database.
    If curs is None, the COPY or INSERT statements are returned
    as a string. For a list of dicts the field list is optional, as it is
    possible to guess it from the dict keys.
"""
if len(data) == 0:
return
# decide how to process
if hasattr(data[0], 'keys'):
if fields == None:
fields = data[0].keys()
if use_insert:
row_func = _gen_dict_insert
else:
row_func = _gen_dict_copy
else:
if fields == None:
raise Exception("Non-dict data needs field list")
if use_insert:
row_func = _gen_list_insert
else:
row_func = _gen_list_copy
qfields = [quote_ident(f) for f in fields]
qtablename = quote_fqident(tablename)
# init processing
buf = StringIO()
if curs == None and use_insert == 0:
fmt = "COPY %s (%s) FROM STDIN;\n"
buf.write(fmt % (qtablename, ",".join(qfields)))
# process data
for row in data:
buf.write(row_func(qtablename, row, fields, qfields))
buf.write("\n")
# if user needs only string, return it
if curs == None:
if use_insert == 0:
buf.write("\\.\n")
return buf.getvalue()
# do the actual copy/inserts
if use_insert:
curs.execute(buf.getvalue())
else:
buf.seek(0)
hdr = "%s (%s)" % (qtablename, ",".join(qfields))
curs.copy_from(buf, hdr)
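# A minimal, illustrative sketch of magic_insert(); the table and column names
# are made up. With curs=None and use_insert=0 the COPY block is returned as a
# string instead of being executed, as described in the docstring above.
def _magic_insert_example():
    """Illustrative only: build COPY text for a list of dicts."""
    rows = [{'id': 1, 'name': "O'Reilly"}, {'id': 2, 'name': None}]
    return magic_insert(None, 'public.customer', rows, ['id', 'name'])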
#
# Full COPY of table from one db to another
#
class CopyPipe(object):
"Splits one big COPY to chunks."
def __init__(self, dstcurs, tablename = None, limit = 512*1024, cancel_func=None, sql_from = None):
self.tablename = tablename
self.sql_from = sql_from
self.dstcurs = dstcurs
self.buf = StringIO()
self.limit = limit
        self.cancel_func = cancel_func
self.total_rows = 0
self.total_bytes = 0
def write(self, data):
"New data from psycopg"
self.total_bytes += len(data)
self.total_rows += data.count("\n")
if self.buf.tell() >= self.limit:
pos = data.find('\n')
if pos >= 0:
# split at newline
p1 = data[:pos + 1]
p2 = data[pos + 1:]
self.buf.write(p1)
self.flush()
data = p2
self.buf.write(data)
def flush(self):
"Send data out."
if self.cancel_func:
self.cancel_func()
if self.buf.tell() <= 0:
return
self.buf.seek(0)
if self.sql_from:
self.dstcurs.copy_expert(self.sql_from, self.buf)
else:
self.dstcurs.copy_from(self.buf, self.tablename)
self.buf.seek(0)
self.buf.truncate()
def full_copy(tablename, src_curs, dst_curs, column_list = []):
"""COPY table from one db to another."""
qtable = quote_fqident(tablename)
if column_list:
qfields = [quote_ident(f) for f in column_list]
hdr = "%s (%s)" % (qtable, ",".join(qfields))
else:
hdr = qtable
if hasattr(src_curs, 'copy_expert'):
sql_to = "COPY %s TO stdout" % hdr
sql_from = "COPY %s FROM stdout" % hdr
buf = CopyPipe(dst_curs, sql_from = sql_from)
src_curs.copy_expert(sql_to, buf)
else:
buf = CopyPipe(dst_curs, hdr)
src_curs.copy_to(buf, hdr)
buf.flush()
return (buf.total_bytes, buf.total_rows)
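# A minimal, illustrative sketch of full_copy(); the cursors are assumed to come
# from two separate psycopg-style connections and the table/column names are
# made up.
def _full_copy_example(src_curs, dst_curs):
    """Illustrative only: mirror one table between two open cursors."""
    total_bytes, total_rows = full_copy('public.customer', src_curs, dst_curs,
                                        ['id', 'name'])
    return total_bytes, total_rows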
#
# SQL installer
#
class DBObject(object):
"""Base class for installable DB objects."""
name = None
sql = None
sql_file = None
def __init__(self, name, sql = None, sql_file = None):
"""Generic dbobject init."""
self.name = name
self.sql = sql
self.sql_file = sql_file
def create(self, curs, log = None):
"""Create a dbobject."""
if log:
log.info('Installing %s' % self.name)
if self.sql:
sql = self.sql
elif self.sql_file:
fn = self.find_file()
if log:
log.info(" Reading from %s" % fn)
sql = open(fn, "r").read()
else:
raise Exception('object not defined')
for stmt in skytools.parse_statements(sql):
#if log: log.debug(repr(stmt))
curs.execute(stmt)
def find_file(self):
"""Find install script file."""
full_fn = None
if self.sql_file[0] == "/":
full_fn = self.sql_file
else:
dir_list = skytools.installer_config.sql_locations
for fdir in dir_list:
fn = os.path.join(fdir, self.sql_file)
if os.path.isfile(fn):
full_fn = fn
break
if not full_fn:
raise Exception('File not found: '+self.sql_file)
return full_fn
class DBSchema(DBObject):
"""Handles db schema."""
def exists(self, curs):
"""Does schema exists."""
return exists_schema(curs, self.name)
class DBTable(DBObject):
"""Handles db table."""
def exists(self, curs):
"""Does table | |
# code/feature_encoding.py
import numpy as np
from Bio import SeqIO
import math
import pandas as pd
import csv
from keras.models import load_model
from collections import Counter
import re, os, sys
import itertools
def extract_DNAfeatures(data, c_m, nc_m, Tc_pos1, Tc_neg1, Tc_pos2, Tc_neg2, Tc_pos3, Tc_neg3, fea1_1):
sequences = data
#DNA
fea1 = np.array(ORF_length(sequences))
fea2 = np.array(Hexamer_score(sequences, c_m, nc_m, k=6))
fea3 = np.array(i_framed_3mer_1(sequences)[0])
fea4 = np.array(i_framed_3mer_2(sequences))
fea5 = np.array(i_framed_3mer_3(sequences))
fea6 = np.array(i_framed_CKSNAP_1(sequences))
fea7 = np.array(i_framed_CKSNAP_2(sequences))
fea8 = np.array(i_framed_CKSNAP_3(sequences))
fea9 = np.array(i_framed_TDE_1(sequences, Tc_pos1, Tc_neg1))
fea10 = np.array(i_framed_TDE_2(sequences, Tc_pos2, Tc_neg2))
fea11 = np.array(i_framed_TDE_3(sequences, Tc_pos3, Tc_neg3))
feature_vector_dna = np.concatenate((fea1, fea1_1[:, 1:], fea2[:, 1:], fea3[:, 1:], fea4[:, 1:], fea5[:, 1:], fea6[:, 1:], fea7[:, 1:], fea8[:, 1:], fea9[:, 1:], fea10[:, 1:], fea11[:, 1:]), axis=1)
return feature_vector_dna
def extract_Proteinfeatures(data):
sequences = data
#Protein
fea1 = np.array(AAC(sequences))
fea2 = np.array(DPC(sequences))
fea3 = np.array(CTDC(sequences))
fea4 = np.array(CTDD(sequences))
fea5 = np.array(CTDT(sequences))
feature_vector_protein = np.concatenate((fea1[:, 1:], fea2[:, 1:], fea3[:, 1:], fea4[:, 1:], fea5[:, 1:]), axis=1)
return feature_vector_protein
###__________________________ORF length_________________________
def ORF_length(fastas):
ORFlength_encoding = []
header = ['#'] + ['ORFlength']
ORFlength_encoding.append(header)
for i in fastas:
name, seq = i[0], re.sub('-', '', i[1])
code = [name]
l = math.log10(len(seq))
code.append(l)
ORFlength_encoding.append(code)
return ORFlength_encoding
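# A minimal, illustrative check of ORF_length(); the sequence below is made up.
# Input rows are (name, sequence) pairs and the result is a header row followed
# by log10(sequence length) for each record.
def _orf_length_example():
    fastas = [['seq1', 'ATGAAATTTGGGTAA']]
    return ORF_length(fastas)   # [['#', 'ORFlength'], ['seq1', log10(15)]]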
###__________________________hcds ratio ORF length_________________________
def ratio_ORFlength_hcds(fastas):
diff_ORFlength_encoding = []
header = ['#'] + ['diff_ORFlength']
diff_ORFlength_encoding.append(header)
for i in fastas:
name, seq = i[0], re.sub('-', '', i[1])
code = [name]
l = (len(seq)) / 162.6482 ##note
code.append(l)
diff_ORFlength_encoding.append(code)
return diff_ORFlength_encoding
###__________________________hnoncds ratio ORF length_________________________
def ratio_ORFlength_hnoncds(fastas):
diff_ORFlength_encoding = []
header = ['#'] + ['diff_ORFlength']
diff_ORFlength_encoding.append(header)
for i in fastas:
name, seq = i[0], re.sub('-', '', i[1])
code = [name]
l = (len(seq)) / 143.5887 ##note
code.append(l)
diff_ORFlength_encoding.append(code)
return diff_ORFlength_encoding
###__________________________mcds ratio ORF length_________________________
def ratio_ORFlength_mcds(fastas):
diff_ORFlength_encoding = []
header = ['#'] + ['diff_ORFlength']
diff_ORFlength_encoding.append(header)
for i in fastas:
name, seq = i[0], re.sub('-', '', i[1])
code = [name]
l = (len(seq)) / 161.6805 ##note
code.append(l)
diff_ORFlength_encoding.append(code)
return diff_ORFlength_encoding
###__________________________mnoncds ratio ORF length_________________________
def ratio_ORFlength_mnoncds(fastas):
diff_ORFlength_encoding = []
header = ['#'] + ['diff_ORFlength']
diff_ORFlength_encoding.append(header)
for i in fastas:
name, seq = i[0], re.sub('-', '', i[1])
code = [name]
l = (len(seq)) / 151.4276 ##note
code.append(l)
diff_ORFlength_encoding.append(code)
return diff_ORFlength_encoding
###__________________________fcds ratio ORF length_________________________
def ratio_ORFlength_fcds(fastas):
diff_ORFlength_encoding = []
header = ['#'] + ['diff_ORFlength']
diff_ORFlength_encoding.append(header)
for i in fastas:
name, seq = i[0], re.sub('-', '', i[1])
code = [name]
l = (len(seq)) / 165.3607 ##note
code.append(l)
diff_ORFlength_encoding.append(code)
return diff_ORFlength_encoding
###__________________________fnoncds ratio ORF length_________________________
def ratio_ORFlength_fnoncds(fastas):
diff_ORFlength_encoding = []
header = ['#'] + ['diff_ORFlength']
diff_ORFlength_encoding.append(header)
for i in fastas:
name, seq = i[0], re.sub('-', '', i[1])
code = [name]
l = (len(seq)) / 98.1887 ##note
code.append(l)
diff_ORFlength_encoding.append(code)
return diff_ORFlength_encoding
###_____________________Hexamer score_____________________________
def Hexamer_score(fastas, c_m, nc_m, k=6):
hexamer_score_encoding = []
header = ['#', 'hexamer_score']
hexamer_score_encoding.append(header)
ntarr = 'ACGT'
hexnuc = [nt1 + nt2 + nt3 + nt4 + nt5 + nt6 for nt1 in ntarr for nt2 in ntarr for nt3 in ntarr for nt4 in ntarr for nt5 in ntarr for nt6 in ntarr]
for i in fastas:
name, seq = i[0], re.sub('-', '', i[1])
print(seq)
print(len(seq))
code = [name]
if len(seq) > 5:
l = len(seq) - k + 1
log_r = np.zeros((l-6))
for j in range(3, l-3):
tempseq = seq[j: j + k]
idx = hexnuc.index(tempseq)
Fc = c_m[idx].values
Fnc = nc_m[idx].values
if Fc == 0 and Fnc == 0:
log_r[j-3] = 0
elif Fc == 0 and Fnc != 0:
log_r[j-3] = -1
elif Fc != 0 and Fnc == 0:
log_r[j-3] = 1
else:
log_r[j-3] = math.log(Fc / Fnc)
miu = sum(log_r) / (l-6)
code.append(miu)
else:
code.append(0)
hexamer_score_encoding.append(code)
return hexamer_score_encoding
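# A minimal, illustrative sketch of Hexamer_score(). The indexing above
# (c_m[idx].values) suggests c_m and nc_m are one-row tables of 4096 hexamer
# frequencies; the uniform tables and the sequence below are made up, so every
# log-ratio is zero and the returned score is 0.
def _hexamer_score_example():
    import pandas as pd
    uniform = pd.DataFrame([[1.0] * 4096])
    fastas = [['seq1', 'ATGAAATTTGGGCCCTAA']]
    return Hexamer_score(fastas, uniform, uniform, k=6)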
###___________________________i-framed-kmer_______________________________________
def i_framed_3mer_1(fastas):
NA = 'ACGT'
NApairs = [na1 + na2 + na3 for na1 in NA for na2 in NA for na3 in NA]
i_framed_3mer_1_encoding = []
header = ['#']
for na in NApairs:
header.append('1_framed_' + na)
i_framed_3mer_1_encoding.append(header)
for i in fastas:
name, seq = i[0], re.sub('-', '', i[1])
l = len(seq)
code = [name]
kmerArr = []
for j in range(3, l - 3 - 2, 3):
kmerArr.append(seq[j:j + 3])
count = Counter()
count.update(kmerArr)
for key in count:
count[key] = count[key] / len(kmerArr)
for g in range(len(NApairs)):
if NApairs[g] in count:
code.append(count[NApairs[g]])
else:
code.append(0)
i_framed_3mer_1_encoding.append(code)
    # delete stop codon columns (TAA, TAG, TGA)
index_to_delet = [49, 51, 57]
i_framed_3mer_1_fea = []
for i in i_framed_3mer_1_encoding:
fea = [i[j] for j in range(len(i)) if j not in index_to_delet]
i_framed_3mer_1_fea.append(fea)
return i_framed_3mer_1_fea, i_framed_3mer_1_encoding
def i_framed_3mer_2(fastas):
NA = 'ACGT'
NApairs = [na1 + na2 + na3 for na1 in NA for na2 in NA for na3 in NA]
i_framed_3mer_2_encoding = []
header = ['#']
for na in NApairs:
header.append('2_framed_' + na)
i_framed_3mer_2_encoding.append(header)
for i in fastas:
name, seq = i[0], re.sub('-', '', i[1])
l = len(seq)
code = [name]
kmerArr = []
for j in range(4, l - 3 - 4, 3):
kmerArr.append(seq[j:j + 3])
count = Counter()
count.update(kmerArr)
for key in count:
count[key] = count[key] / len(kmerArr)
for g in range(len(NApairs)):
if NApairs[g] in count:
code.append(count[NApairs[g]])
else:
code.append(0)
i_framed_3mer_2_encoding.append(code)
return i_framed_3mer_2_encoding
def i_framed_3mer_3(fastas):
NA = 'ACGT'
NApairs = [na1 + na2 + na3 for na1 in NA for na2 in NA for na3 in NA]
i_framed_3mer_3_encoding = []
header = ['#']
for na in NApairs:
header.append('3_framed_' + na)
i_framed_3mer_3_encoding.append(header)
for i in fastas:
name, seq = i[0], re.sub('-', '', i[1])
l = len(seq)
code = [name]
kmerArr = []
for j in range(5, l - 3 - 3, 3):
kmerArr.append(seq[j:j + 3])
count = Counter()
count.update(kmerArr)
for key in count:
count[key] = count[key] / len(kmerArr)
for g in range(len(NApairs)):
if NApairs[g] in count:
code.append(count[NApairs[g]])
else:
code.append(0)
i_framed_3mer_3_encoding.append(code)
return i_framed_3mer_3_encoding
###_________________________i_framed_CKSNAP_____________________________
def i_framed_CKSNAP_1(fastas, gap=1):
i_framed_cksnap_1 = []
AA = 'ACGT'
AApairs = [aa1 + aa2 for aa1 in AA for aa2 in AA]
header = ['#']
for g in range(gap, gap+1):
for aa in AApairs:
header.append('1_framed_' + aa + '.gap' + str(g))
i_framed_cksnap_1.append(header)
for i in fastas:
name, seq = i[0], i[1]
l = len(seq)
code = [name]
for g in range(gap, gap+1):
myDict = {}
for pair in AApairs:
myDict[pair] = 0
sum = 0
for index1 in range(3, l-3-g-1, 3):
index2 = index1 + g + 1
if index1 < l and index2 < l and seq[index1] in AA and seq[index2] in AA:
myDict[seq[index1] + seq[index2]] = myDict[seq[index1] + seq[index2]] + 1
sum = sum + 1
for pair in AApairs:
code.append(myDict[pair] / sum)
i_framed_cksnap_1.append(code)
return i_framed_cksnap_1
def i_framed_CKSNAP_2(fastas, gap=1):
i_framed_cksnap_2 = []
AA = 'ACGT'
AApairs = [aa1 + aa2 for aa1 in AA for aa2 in AA]
header = ['#']
for g in range(gap, gap+1):
for aa in AApairs:
header.append('2_framed_' + aa + '.gap' + str(g))
i_framed_cksnap_2.append(header)
for i in fastas:
name, seq = i[0], i[1]
l = len(seq)
code = [name]
for g in range(gap, gap+1):
myDict = {}
for pair in AApairs:
myDict[pair] = 0
sum = 0
for index1 in range(4, l-3-g-3, 3):
index2 = index1 + g + 1
if index1 < l and index2 < l and seq[index1] in AA and seq[index2] in AA:
myDict[seq[index1] + seq[index2]] = myDict[seq[index1] + seq[index2]] + 1
sum = sum + 1
for pair in AApairs:
code.append(myDict[pair] / sum)
i_framed_cksnap_2.append(code)
return i_framed_cksnap_2
def i_framed_CKSNAP_3(fastas, gap=1):
i_framed_cksnap_3 = []
AA = 'ACGT'
AApairs = [aa1 + aa2 for aa1 in AA for aa2 in AA]
header = ['#']
for g in range(gap, gap+1):
for aa in AApairs:
header.append('3_framed_' + aa + '.gap' + str(g))
i_framed_cksnap_3.append(header)
for i in fastas:
name, seq = i[0], i[1]
l = len(seq)
code = [name]
for g in range(gap, gap+1):
myDict = {}
for pair in AApairs:
myDict[pair] = 0
sum = 0
for index1 in range(5, l-3-g-2, 3):
index2 = index1 + g + 1
if index1 < l and index2 < l and seq[index1] in AA and seq[index2] in AA:
myDict[seq[index1] + seq[index2]] = myDict[seq[index1] + seq[index2]] + 1
sum = sum + 1
for pair in AApairs:
code.append(myDict[pair] / sum)
i_framed_cksnap_3.append(code)
return i_framed_cksnap_3
# ###____________________________1-framed-TDE_____________________________
def i_framed_TDE_1(fastas, Tc_pos1, Tc_neg1):
Tc = np.vstack((Tc_pos1, Tc_neg1))
Tm = sum(Tc) / len(Tc)
Tc_test = np.array(i_framed_3mer_1(fastas)[1])
Tc_test = Tc_test[:, 1:]
AA = 'ACGT'
i_framed_TDE_1_encoding = []
header = ['#']
AApairs | |
be calculated
accuracy Accuracy of the autocorrelation function, typically 16
split Number of seconds of each chunk to split the data into
E.g. split=10 will divide a 60 second stream in 6 ten-second
traces and calculate G for each individual trace
========== ===============================================================
Output Meaning
---------- ---------------------------------------------------------------
G Object with all autocorrelations
E.g. G.central contains the array with the central detector
element autocorrelation
data Last chunk of raw data
========== ===============================================================
"""
info = getFCSinfo(fname[:-4] + "_info.txt")
dwellTime = info.dwellTime
duration = info.duration
    N = int(np.floor(duration / split))  # number of chunks
G = correlations()
G.dwellTime = dwellTime
chunkSize = int(np.floor(split / dwellTime))
for chunk in range(N):
# --------------------- CALCULATE CORRELATIONS SINGLE CHUNK ---------------------
print("+-----------------------")
print("| Loading chunk " + str(chunk))
print("+-----------------------")
data = file_to_FCScount(fname, np.uint8, chunkSize, chunk*chunkSize)
for j in listOfG:
print(' --> ' + str(j) + ": ", end = '')
# ------------------ CHUNK ------------------
newList = [j]
Gsplit = FCS2Corr(data, 1e6*dwellTime, newList, accuracy)
GsplitList = list(Gsplit.__dict__.keys())
for k in GsplitList:
if k.find('dwellTime') == -1:
setattr(G, k + '_chunk' + str(chunk), getattr(Gsplit, k))
# ---------- CALCULATE AVERAGE CORRELATION OF ALL CHUNKS ----------
print("Calculating average correlations")
# Get list of "root" names, i.e. without "_chunk"
Gfields = list(G.__dict__.keys())
t = [Gfields[i].split("_chunk")[0] for i in range(len(Gfields))]
t = list(dict.fromkeys(t))
t.remove("dwellTime")
# average over chunks
for field in t:
avList = [i for i in Gfields if i.startswith(field + '_chunk')]
# check if all elements have same dimension
Ntau = [len(getattr(G, i)) for i in avList]
avList2 = [avList[i] for i in range(len(avList)) if Ntau[i] == Ntau[0]]
Gav = sum(getattr(G, i) for i in avList2) / len(avList2)
setattr(G, str(field) + '_average', Gav)
# average over same shifts in case of 'crossAll'
if 'crossAll' in listOfG:
print("Calculating spatially averaged correlations.")
spatialCorr = np.zeros([9, 9, len(G.det0x0_average)])
for shifty in np.arange(-4, 5):
for shiftx in np.arange(-4, 5):
avList = SPADshiftvectorCrossCorr([shifty, shiftx])
avList = [s + '_average' for s in avList]
Gav = sum(getattr(G, i) for i in avList) / len(avList)
spatialCorr[shifty+4, shiftx+4, :] = Gav[:,1]
G.spatialCorr = spatialCorr
return G, data
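# A minimal, illustrative sketch of loading and correlating one measurement; the
# file name is made up and a matching "*_info.txt" file is assumed to exist, as
# required by FCSLoadAndCorrSplit above. The detector combinations follow the
# names used elsewhere in this module ('central', 'sum3', 'sum5').
def _example_load_and_correlate(fname='sample_measurement.bin'):
    G, data = FCSLoadAndCorrSplit(fname, ['central', 'sum3', 'sum5'], 50, 10)
    plotFCScorrelations(G, ['central_average', 'sum3_average', 'sum5_average'])
    return G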
def FCSSpatialCorrAv(G, N=5):
spatialCorr = np.zeros([2*N-1, 2*N-1, len(G.det0x0_average)])
for shifty in np.arange(-(N-1), N):
for shiftx in np.arange(-(N-1), N):
avList = SPADshiftvectorCrossCorr([shifty, shiftx], N)
avList = [s + '_average' for s in avList]
Gav = sum(getattr(G, i) for i in avList) / len(avList)
spatialCorr[shifty+N-1, shiftx+N-1, :] = Gav[:,1]
G.spatialCorr = spatialCorr
return G
def FCSCrossCenterAv(G):
"""
Average pair-correlations between central pixel and other pixels that are
located at the same distance from the center
===========================================================================
Input Meaning
---------- ---------------------------------------------------------------
G Correlations object that (at least) contains all
cross-correlations between central pixel and all other pixels:
G.det12x12_average, G.det12x13_average, etc.
============================================================================
Output Meaning
---------- ---------------------------------------------------------------
G Same object as input but with the additional field
G.crossCenterAv, which contains array of 6 columns, containing
averaged cross-correlations between central pixel and pixels
located at a distance of
| 0 | 1 | sqrt(2) | 2 | sqrt(5) | sqrt(8) |
===========================================================================
"""
tau = G.det12x12_average[:,0]
G.crossCenterAv = np.zeros((len(tau), 6))
# autocorrelation central element
G.crossCenterAv[:,0] = G.det12x12_average[:,1]
# average pair-correlations 4 elements located at distance 1 from center
G.crossCenterAv[:,1] = np.mean(np.transpose(np.array([getattr(G, 'det12x' + str(det) + '_average')[:,1] for det in [7, 11, 13, 17]])), 1)
# average pair-correlations 4 elements located at distance sqrt(2) from center
G.crossCenterAv[:,2] = np.mean(np.transpose(np.array([getattr(G, 'det12x' + str(det) + '_average')[:,1] for det in [6, 8, 16, 18]])), 1)
# average pair-correlation 4 elements located at distance 2 from center
G.crossCenterAv[:,3] = np.mean(np.transpose(np.array([getattr(G, 'det12x' + str(det) + '_average')[:,1] for det in [2, 10, 14, 22]])), 1)
# average pair-correlation 8 elements located at distance sqrt(5) from center
G.crossCenterAv[:,4] = np.mean(np.transpose(np.array([getattr(G, 'det12x' + str(det) + '_average')[:,1] for det in [1, 3, 5, 9, 15, 19, 21, 23]])), 1)
# average pair-correlation 4 elements located at distance sqrt(8) from center
G.crossCenterAv[:,5] = np.mean(np.transpose(np.array([getattr(G, 'det12x' + str(det) + '_average')[:,1] for det in [0, 4, 20, 24]])), 1)
return G
def FCSBinToCSVAll(folderName=[], Glist=['central', 'sum3', 'sum5', 'chessboard', 'ullr'], split=10):
# PARSE INPUT
if folderName == []:
folderName = getcwd()
folderName = folderName.replace("\\", "/")
folderName = Path(folderName)
# CHECK BIN FILES
allFiles = listFiles(folderName, 'bin')
# GO THROUGH EACH FILE
for file in allFiles:
fileName = ntpath.basename(file)
print("File found: " + fileName)
[G, data] = FCSLoadAndCorrSplit(file, Glist, 50, split)
corr2csv(G, file[0:-4], [0, 0], 0)
def plotFCScorrelations(G, plotList='all', limits=[0, -1], vector=[], pColors='auto', yscale='lin'):
"""
Plot all correlation curves
========== ===============================================================
Input Meaning
---------- ---------------------------------------------------------------
G Object with all autocorrelations
Possible attributes:
det*,
central, sum3, sum5, allbuthot, chessboard, ullr, av
autoSpatial,
det12x*
dwellTime (is not plotted)
========== ===============================================================
Output Meaning
---------- ---------------------------------------------------------------
figure
========== ===============================================================
"""
spatialCorrList = ['autoSpatial']
start = limits[0]
stop = limits[1]
# plotList contains all attributes of G that have to be plotted
if plotList == 'all':
plotList = list(G.__dict__.keys())
# remove dwellTime from plotList
if 'dwellTime' in plotList:
plotList.remove('dwellTime')
if 'av' in plotList:
# remove all single detector element correlations
plotListRemove = fnmatch.filter(plotList, 'det?')
for elem in plotListRemove:
plotList.remove(elem)
plotListRemove = fnmatch.filter(plotList, 'det??')
for elem in plotListRemove:
plotList.remove(elem)
if np.size(fnmatch.filter(plotList, 'det12x??')) > 10:
# replace all individual cross-correlations by single crossCenter element
plotListRemove = fnmatch.filter(plotList, 'det12x?')
for elem in plotListRemove:
plotList.remove(elem)
plotListRemove = fnmatch.filter(plotList, 'det12x??')
for elem in plotListRemove:
plotList.remove(elem)
plotList.append('crossCenter')
if fnmatch.filter(plotList, '*_'):
plotListStart = plotList[0]
# plot chunks of data and average
plotList = list(G.__dict__.keys())
plotList.remove('dwellTime')
plotList = [i for i in plotList if i.startswith(plotListStart)]
# -------------------- Check for temporal correlations --------------------
plotTempCorr = False
for i in range(np.size(plotList)):
if plotList[i] not in spatialCorrList:
plotTempCorr = True
break
if plotTempCorr:
leg = [] # figure legend
h = plt.figure()
maxy = 0
miny = 0
minx = 25e-9
maxx = 10
pColIndex = 0
for i in plotList:
#if i not in list(G.__dict__.keys()):
# break
if i in ["central", "central_average", "sum3", "sum3_average", "sum5", "sum5_average", "allbuthot", "allbuthot_average", "chessboard", "chessboard_average", "chess3", "chess3_average", "ullr", "ullr_average", "av", "cross12_average", "cross21_average", "cross_average", "auto1_average", "auto2_average"]:
# plot autocorrelation
Gtemp = getattr(G, i)
plt.plot(Gtemp[start:stop, 0], Gtemp[start:stop, 1], color=plotColors(i), linewidth=1.3)
maxy = np.max([maxy, np.max(Gtemp[start+1:stop, 1])])
miny = np.min([miny, np.min(Gtemp[start+1:stop, 1])])
minx = Gtemp[start, 0]
maxx = Gtemp[stop, 0]
leg.append(i)
elif i == 'crossCenter':
for j in range(25):
Gsingle = getattr(G, 'det12x' + str(j))
# plotColor = colorFromMap(distance2detElements(12, j), 0, np.sqrt(8))
plt.plot(Gsingle[start:stop, 0], Gsingle[start:stop, 1], color=plotColors(i))
maxy = np.max([maxy, np.max(Gsingle[start+1:stop, 1])])
miny = np.min([miny, np.min(Gtemp[start+1:stop, 1])])
leg.append(i + str(j))
elif i == 'crossCenterAv':
tau = G.det12x12_average[:,0]
for j in range(6):
plt.plot(tau[start:stop], G.crossCenterAv[start:stop, j], color=plotColors(j))
miny = np.min(G.crossCenterAv[start+10:stop,:])
maxy = np.max(G.crossCenterAv[start+1:stop,:])
leg = ['$\Delta r = 0$', '$\Delta r = 1$', '$\Delta r = \sqrt{2}$', '$\Delta r = 2$', '$\Delta r = \sqrt{5}$', '$\Delta r = 2\sqrt{2}$']
elif i != 'autoSpatial' and i != 'stics' and i != 'crossAll' and i != 'crossVector':
# plot autocorr single detector element
if pColors == 'auto':
plt.plot(getattr(G, i)[start:stop, 0], getattr(G, i)[start:stop, 1])
else:
plt.plot(getattr(G, i)[start:stop, 0], getattr(G, i)[start:stop, 1], color=plotColors(pColors[pColIndex]))
pColIndex += 1
maxy = np.max([maxy, np.max(getattr(G, i)[start+1:stop, 1])])
miny = np.min([miny, np.min(getattr(G, i)[start+1:stop, 1])])
minx = getattr(G, i)[start, 0]
maxx = getattr(G, i)[stop, 0]
if '_average' in i:
iLeg = i[0:-8]
else:
iLeg = i
leg.append(iLeg)
# figure lay-out
plt.xscale('log')
plt.xlabel('Temporal shift [s]')
plt.ylabel('G')
if yscale == 'log':
plt.yscale('log')
else:
plt.yscale('linear')
axes = plt.gca()
axes.set_xlim([minx, maxx])
axes.set_ylim([miny, maxy])
if np.size(leg) > 0 and np.size(leg) < 10 and 'crossCenter' not in plotList:
axes.legend(leg)
plt.rcParams.update({'font.size': 15})
plt.tight_layout()
if 'crossCenter' in plotList:
plotCrossCenterScheme()
# -------------------- Check for spatial correlations --------------------
if 'autoSpatial' in plotList:
Gtemp = G.autoSpatial
Gmax = np.max(Gtemp)
xmax = (np.size(Gtemp, 0)) / 2
extent = [-xmax, xmax, -xmax, xmax]
for j in range(np.size(Gtemp, 2)):
h = plt.figure()
plt.imshow(Gtemp[:, :, j], extent=extent, vmin=0, vmax=Gmax)
plt.title('delta_t = ' + str(G.dwellTime * j) + ' µs')
if 'crossAll' in plotList:
Gtemp = G.spatialCorr
tau = G.det0x0_average[:,0]
for vector in [[4, 4], [3, 4], [3, | |
'86180254':{'en': 'Shenzhen, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6df1\u5733\u5e02')},
'86180255':{'en': 'Shantou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6c55\u5934\u5e02')},
'86180251':{'en': 'Dongguan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4e1c\u839e\u5e02')},
'86180252':{'en': 'Dongguan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4e1c\u839e\u5e02')},
'86180253':{'en': 'Shenzhen, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6df1\u5733\u5e02')},
'861811574':{'en': 'Xuzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861811575':{'en': 'Wuxi, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861811576':{'en': 'Wuxi, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861811577':{'en': 'Wuxi, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861811570':{'en': 'Huai\'an, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u6dee\u5b89\u5e02')},
'861811571':{'en': 'Xuzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861811572':{'en': 'Xuzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861811573':{'en': 'Xuzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861811578':{'en': 'Changzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')},
'861811579':{'en': 'Changzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')},
'861810735':{'en': 'Chenzhou, Hunan', 'zh': u('\u6e56\u5357\u7701\u90f4\u5dde\u5e02')},
'861810734':{'en': 'Hengyang, Hunan', 'zh': u('\u6e56\u5357\u7701\u8861\u9633\u5e02')},
'861810737':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u76ca\u9633\u5e02')},
'861810736':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u5e38\u5fb7\u5e02')},
'861810731':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u957f\u6c99\u5e02')},
'861810730':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u5cb3\u9633\u5e02')},
'861810733':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u957f\u6c99\u5e02')},
'861810732':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u957f\u6c99\u5e02')},
'861810739':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u90b5\u9633\u5e02')},
'861810738':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u5a04\u5e95\u5e02')},
'861807771':{'en': 'Qinzhou, Guangxi', 'zh': u('\u5e7f\u897f\u94a6\u5dde\u5e02')},
'861807770':{'en': 'Qinzhou, Guangxi', 'zh': u('\u5e7f\u897f\u94a6\u5dde\u5e02')},
'861807773':{'en': 'Qinzhou, Guangxi', 'zh': u('\u5e7f\u897f\u94a6\u5dde\u5e02')},
'861807772':{'en': 'Yulin, Guangxi', 'zh': u('\u5e7f\u897f\u7389\u6797\u5e02')},
'861768319':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u8fbe\u5dde\u5e02')},
'861768318':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u5357\u5145\u5e02')},
'861807777':{'en': 'Nanning, Guangxi', 'zh': u('\u5e7f\u897f\u5357\u5b81\u5e02')},
'861807776':{'en': 'Qinzhou, Guangxi', 'zh': u('\u5e7f\u897f\u94a6\u5dde\u5e02')},
'861768315':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u5df4\u4e2d\u5e02')},
'861768314':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u5fb7\u9633\u5e02')},
'861768317':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u5357\u5145\u5e02')},
'861768316':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u5357\u5145\u5e02')},
'861768311':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u5e7f\u5b89\u5e02')},
'861768310':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u5b9c\u5bbe\u5e02')},
'861768313':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u81ea\u8d21\u5e02')},
'861768312':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u6500\u679d\u82b1\u5e02')},
'861809997':{'en': '<NAME>', 'zh': u('\u65b0\u7586\u963f\u514b\u82cf\u5730\u533a')},
'861809996':{'en': 'Bayingolin, Xinjiang', 'zh': u('\u65b0\u7586\u5df4\u97f3\u90ed\u695e\u8499\u53e4\u81ea\u6cbb\u5dde')},
'861809197':{'en': 'Tongchuan, Shaanxi', 'zh': u('\u9655\u897f\u7701\u94dc\u5ddd\u5e02')},
'861809995':{'en': 'Turpan, Xinjiang', 'zh': u('\u65b0\u7586\u5410\u9c81\u756a\u5730\u533a')},
'861809994':{'en': 'Changji, Xinjiang', 'zh': u('\u65b0\u7586\u660c\u5409\u56de\u65cf\u81ea\u6cbb\u5dde')},
'861809993':{'en': 'Shihezi, Xinjiang', 'zh': u('\u65b0\u7586\u77f3\u6cb3\u5b50\u5e02')},
'861809992':{'en': 'Ili, Xinjiang', 'zh': u('\u65b0\u7586\u4f0a\u7281\u54c8\u8428\u514b\u81ea\u6cbb\u5dde')},
'861809991':{'en': 'Urumchi, Xinjiang', 'zh': u('\u65b0\u7586\u4e4c\u9c81\u6728\u9f50\u5e02')},
'861809196':{'en': 'Tongchuan, Shaanxi', 'zh': u('\u9655\u897f\u7701\u94dc\u5ddd\u5e02')},
'861809990':{'en': 'Karamay, Xinjiang', 'zh': u('\u65b0\u7586\u514b\u62c9\u739b\u4f9d\u5e02')},
'861808875':{'en': 'Suihua, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u7ee5\u5316\u5e02')},
'861808874':{'en': 'Harbin, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u54c8\u5c14\u6ee8\u5e02')},
'861808879':{'en': 'Daqing, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u5927\u5e86\u5e02')},
'861813428':{'en': 'Shijiazhuang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u77f3\u5bb6\u5e84\u5e02')},
'861811934':{'en': 'Lanzhou, Gansu', 'zh': u('\u7518\u8083\u7701\u5170\u5dde\u5e02')},
'861808878':{'en': '<NAME>', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u54c8\u5c14\u6ee8\u5e02')},
'861813420':{'en': '<NAME>', 'zh': u('\u6cb3\u5317\u7701\u90af\u90f8\u5e02')},
'861813421':{'en': 'Handan, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90af\u90f8\u5e02')},
'861813422':{'en': 'Baoding, Hebei', 'zh': u('\u6cb3\u5317\u7701\u4fdd\u5b9a\u5e02')},
'861813423':{'en': 'Zhangjiakou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5f20\u5bb6\u53e3\u5e02')},
'861813424':{'en': 'Tangshan, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5510\u5c71\u5e02')},
'861813425':{'en': 'Tangshan, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5510\u5c71\u5e02')},
'861813426':{'en': 'Langfang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5eca\u574a\u5e02')},
'861813427':{'en': 'Cangzhou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u6ca7\u5dde\u5e02')},
'861811468':{'en': 'Changzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')},
'861811469':{'en': 'Changzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')},
'861811935':{'en': 'Lanzhou, Gansu', 'zh': u('\u7518\u8083\u7701\u5170\u5dde\u5e02')},
'861770899':{'en': 'Lhasa, Tibet', 'zh': u('\u897f\u85cf\u62c9\u8428\u5e02')},
'861770898':{'en': 'Lhasa, Tibet', 'zh': u('\u897f\u85cf\u62c9\u8428\u5e02')},
'861770893':{'en': 'Shannan, Tibet', 'zh': u('\u897f\u85cf\u5c71\u5357\u5730\u533a')},
'861770892':{'en': 'Xigaze, Tibet', 'zh': u('\u897f\u85cf\u65e5\u5580\u5219\u5730\u533a')},
'861770891':{'en': 'Lhasa, Tibet', 'zh': u('\u897f\u85cf\u62c9\u8428\u5e02')},
'861770890':{'en': 'Xigaze, Tibet', 'zh': u('\u897f\u85cf\u65e5\u5580\u5219\u5730\u533a')},
'861770897':{'en': 'Ngari, Tibet', 'zh': u('\u897f\u85cf\u963f\u91cc\u5730\u533a')},
'861770896':{'en': 'Nagqu, Tibet', 'zh': u('\u897f\u85cf\u90a3\u66f2\u5730\u533a')},
'861770895':{'en': 'Qamdo, Tibet', 'zh': u('\u897f\u85cf\u660c\u90fd\u5730\u533a')},
'861770894':{'en': 'Nyingchi, Tibet', 'zh': u('\u897f\u85cf\u6797\u829d\u5730\u533a')},
'861808327':{'en': 'Qianxinan, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u9ed4\u897f\u5357\u5e03\u4f9d\u65cf\u82d7\u65cf\u81ea\u6cbb\u5dde')},
'861808326':{'en': 'Qianxinan, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u9ed4\u897f\u5357\u5e03\u4f9d\u65cf\u82d7\u65cf\u81ea\u6cbb\u5dde')},
'861808325':{'en': 'Qianxinan, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u9ed4\u897f\u5357\u5e03\u4f9d\u65cf\u82d7\u65cf\u81ea\u6cbb\u5dde')},
'861808324':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u94dc\u4ec1\u5730\u533a')},
'861808323':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u94dc\u4ec1\u5730\u533a')},
'861808322':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u94dc\u4ec1\u5730\u533a')},
'861808321':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u94dc\u4ec1\u5730\u533a')},
'861808320':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u94dc\u4ec1\u5730\u533a')},
'861808639':{'en': '<NAME>', 'zh': u('\u6e56\u5317\u7701\u5b5d\u611f\u5e02')},
'861808638':{'en': '<NAME>', 'zh': u('\u6e56\u5317\u7701\u5b9c\u660c\u5e02')},
'861808329':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u9ed4\u897f\u5357\u5e03\u4f9d\u65cf\u82d7\u65cf\u81ea\u6cbb\u5dde')},
'861808328':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u9ed4\u897f\u5357\u5e03\u4f9d\u65cf\u82d7\u65cf\u81ea\u6cbb\u5dde')},
'861812481':{'en': 'Foshan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4f5b\u5c71\u5e02')},
'861812480':{'en': 'Foshan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4f5b\u5c71\u5e02')},
'861812483':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u6c5f\u95e8\u5e02')},
'861812482':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u6c5f\u95e8\u5e02')},
'861812485':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u8302\u540d\u5e02')},
'861812484':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u8302\u540d\u5e02')},
'861812487':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u4e91\u6d6e\u5e02')},
'861812486':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u8302\u540d\u5e02')},
'861812489':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u97f6\u5173\u5e02')},
'861812488':{'en': 'Yangjiang, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u9633\u6c5f\u5e02')},
'861810337':{'en': 'Cangzhou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u6ca7\u5dde\u5e02')},
'861810336':{'en': 'Langfang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5eca\u574a\u5e02')},
'861810339':{'en': 'Xingtai, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90a2\u53f0\u5e02')},
'861810338':{'en': 'Hengshui, Hebei', 'zh': u('\u6cb3\u5317\u7701\u8861\u6c34\u5e02')},
'861810485':{'en': 'Hohhot, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u547c\u548c\u6d69\u7279\u5e02')},
'861809093':{'en': 'Dazhou, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u8fbe\u5dde\u5e02')},
'861810484':{'en': 'Hohhot, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u547c\u548c\u6d69\u7279\u5e02')},
'861810483':{'en': 'Alxa, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u963f\u62c9\u5584\u76df')},
'861810482':{'en': 'Hinggan, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u5174\u5b89\u76df')},
'861810481':{'en': 'Ordos, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u9102\u5c14\u591a\u65af\u5e02')},
'861810480':{'en': 'Hinggan, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u5174\u5b89\u76df')},
'861800356':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u664b\u57ce\u5e02')},
'861800357':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u4e34\u6c7e\u5e02')},
'861800354':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u664b\u4e2d\u5e02')},
'861800355':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u957f\u6cbb\u5e02')},
'861800352':{'en': 'Datong, Shanxi', 'zh': u('\u5c71\u897f\u7701\u5927\u540c\u5e02')},
'861800353':{'en': 'Yangquan, Shanxi', 'zh': u('\u5c71\u897f\u7701\u9633\u6cc9\u5e02')},
'861800350':{'en': 'Xinzhou, Shanxi', 'zh': u('\u5c71\u897f\u7701\u5ffb\u5dde\u5e02')},
'861800351':{'en': 'Taiyuan, Shanxi', 'zh': u('\u5c71\u897f\u7701\u592a\u539f\u5e02')},
'861800358':{'en': u('L\u00fcliang, Shanxi'), 'zh': u('\u5c71\u897f\u7701\u5415\u6881\u5e02')},
'861800359':{'en': 'Yuncheng, Shanxi', 'zh': u('\u5c71\u897f\u7701\u8fd0\u57ce\u5e02')},
'861811909':{'en': 'Ili, Xinjiang', 'zh': u('\u65b0\u7586\u4f0a\u7281\u54c8\u8428\u514b\u81ea\u6cbb\u5dde')},
'861811908':{'en': 'Ili, Xinjiang', 'zh': u('\u65b0\u7586\u4f0a\u7281\u54c8\u8428\u514b\u81ea\u6cbb\u5dde')},
'861811905':{'en': 'Alt<NAME>', 'zh': u('\u65b0\u7586\u963f\u52d2\u6cf0\u5730\u533a')},
'861811904':{'en': 'Altay, Xinjiang', 'zh': u('\u65b0\u7586\u963f\u52d2\u6cf0\u5730\u533a')},
'861811907':{'en': '<NAME>', 'zh': u('\u65b0\u7586\u963f\u514b\u82cf\u5730\u533a')},
'861811906':{'en': 'Altay, Xinjiang', 'zh': u('\u65b0\u7586\u963f\u52d2\u6cf0\u5730\u533a')},
'861811901':{'en': '<NAME>', 'zh': u('\u65b0\u7586\u54c8\u5bc6\u5730\u533a')},
'861811900':{'en': '<NAME>', 'zh': u('\u65b0\u7586\u54c8\u5bc6\u5730\u533a')},
'861811903':{'en': '<NAME>', 'zh': u('\u65b0\u7586\u54c8\u5bc6\u5730\u533a')},
'861811902':{'en': '<NAME>', 'zh': u('\u65b0\u7586\u54c8\u5bc6\u5730\u533a')},
'861773728':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u8bb8\u660c\u5e02')},
'861773729':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u4e09\u95e8\u5ce1\u5e02')},
'861773720':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u6d1b\u9633\u5e02')},
'861773721':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5468\u53e3\u5e02')},
'861773722':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u6f2f\u6cb3\u5e02')},
'861773723':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u6fee\u9633\u5e02')},
'861773724':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5b89\u9633\u5e02')},
'861773725':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5b89\u9633\u5e02')},
'861773726':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5b89\u9633\u5e02')},
'861773727':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u65b0\u4e61\u5e02')},
'861810508':{'en': 'Quanzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u6cc9\u5dde\u5e02')},
'861810509':{'en': 'Fuzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u798f\u5dde\u5e02')},
'861810506':{'en': 'Quanzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u6cc9\u5dde\u5e02')},
'861810507':{'en': 'Quanzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u6cc9\u5dde\u5e02')},
'861810504':{'en': 'Fuzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u798f\u5dde\u5e02')},
'861810505':{'en': 'Quanzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u6cc9\u5dde\u5e02')},
'861810502':{'en': 'Fuzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u798f\u5dde\u5e02')},
'861810503':{'en': 'Fuzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u798f\u5dde\u5e02')},
'861810500':{'en': 'Fuzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u798f\u5dde\u5e02')},
'861810501':{'en': 'Fuzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u798f\u5dde\u5e02')},
'86180743':{'en': 'Xiangxi, Hunan', 'zh': u('\u6e56\u5357\u7701\u6e58\u897f\u571f\u5bb6\u65cf\u82d7\u65cf\u81ea\u6cbb\u5dde')},
'86180741':{'en': 'Taizhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u53f0\u5dde\u5e02')},
'86180746':{'en': 'Yongzhou, Hunan', 'zh': u('\u6e56\u5357\u7701\u6c38\u5dde\u5e02')},
'86180744':{'en': 'Zhangjiajie, Hunan', 'zh': u('\u6e56\u5357\u7701\u5f20\u5bb6\u754c\u5e02')},
'861810953':{'en': 'Wuzhong, Ningxia', 'zh': u('\u5b81\u590f\u5434\u5fe0\u5e02')},
'86180745':{'en': 'Huaihua, Hunan', 'zh': u('\u6e56\u5357\u7701\u6000\u5316\u5e02')},
'861812003':{'en': 'Xuzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861813851':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u4f5b\u5c71\u5e02')},
'861813850':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u4f5b\u5c71\u5e02')},
'861813853':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u4f5b\u5c71\u5e02')},
'861813852':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u4f5b\u5c71\u5e02')},
'861813855':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u6f6e\u5dde\u5e02')},
'861813854':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u4f5b\u5c71\u5e02')},
'861813857':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u97f6\u5173\u5e02')},
'861813856':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u6c55\u5c3e\u5e02')},
'861813859':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u8087\u5e86\u5e02')},
'861813858':{'en': '<NAME>dong', 'zh': u('\u5e7f\u4e1c\u7701\u8087\u5e86\u5e02')},
'861811862':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861810155':{'en': 'Suzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861810154':{'en': 'Suzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861810157':{'en': 'Suzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861810156':{'en': 'Suzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861810151':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861811863':{'en': 'Nantong, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861810150':{'en': 'Changzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')},
'861807188':{'en': 'Jingzhou, Hubei', 'zh': u('\u6e56\u5317\u7701\u8346\u5dde\u5e02')},
'861807189':{'en': 'Jingzhou, Hubei', 'zh': u('\u6e56\u5317\u7701\u8346\u5dde\u5e02')},
'861810153':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861807182':{'en': 'Huanggang, Hubei', 'zh': u('\u6e56\u5317\u7701\u9ec4\u5188\u5e02')},
'861807183':{'en': 'Huanggang, Hubei', 'zh': u('\u6e56\u5317\u7701\u9ec4\u5188\u5e02')},
'861807180':{'en': 'Huanggang, Hubei', 'zh': u('\u6e56\u5317\u7701\u9ec4\u5188\u5e02')},
'861807181':{'en': 'Huanggang, Hubei', 'zh': u('\u6e56\u5317\u7701\u9ec4\u5188\u5e02')},
'861807186':{'en': 'Huangshi, Hubei', 'zh': u('\u6e56\u5317\u7701\u9ec4\u77f3\u5e02')},
'861807187':{'en': 'Xianning, Hubei', 'zh': u('\u6e56\u5317\u7701\u54b8\u5b81\u5e02')},
'861807184':{'en': 'Huanggang, Hubei', 'zh': u('\u6e56\u5317\u7701\u9ec4\u5188\u5e02')},
'861807185':{'en': 'Huangshi, Hubei', 'zh': u('\u6e56\u5317\u7701\u9ec4\u77f3\u5e02')},
'861768126':{'en': 'Anqing, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5b89\u5e86\u5e02')},
'861768127':{'en': 'Anqing, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5b89\u5e86\u5e02')},
'861768124':{'en': 'MaAnshan, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u9a6c\u978d\u5c71\u5e02')},
'861768125':{'en': 'MaAnshan, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u9a6c\u978d\u5c71\u5e02')},
'861768122':{'en': 'Bengbu, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u868c\u57e0\u5e02')},
'861768123':{'en': 'Bengbu, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u868c\u57e0\u5e02')},
'861768120':{'en': 'Chizhou, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u6c60\u5dde\u5e02')},
'861768121':{'en': 'Huaibei, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u6dee\u5317\u5e02')},
'861811860':{'en': 'Nantong, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861768128':{'en': 'Huainan, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u6dee\u5357\u5e02')},
'861768129':{'en': 'Huangshan, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u9ec4\u5c71\u5e02')},
'861812816':{'en': 'Qingyuan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6e05\u8fdc\u5e02')},
'861812817':{'en': 'Chaozhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6f6e\u5dde\u5e02')},
'861812814':{'en': 'Zhuhai, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u73e0\u6d77\u5e02')},
'861812815':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u6885\u5dde\u5e02')},
'861812812':{'en': 'Zhuhai, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u73e0\u6d77\u5e02')},
'861812813':{'en': 'Zhuhai, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u73e0\u6d77\u5e02')},
'861812810':{'en': 'Shantou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6c55\u5934\u5e02')},
'861812811':{'en': 'Zhuhai, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u73e0\u6d77\u5e02')},
'861812818':{'en': 'Shaoguan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u97f6\u5173\u5e02')},
'861812819':{'en': 'Yangjiang, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u9633\u6c5f\u5e02')},
'861811861':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861800510':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u76d0\u57ce\u5e02')},
'861800511':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u76d0\u57ce\u5e02')},
'861771298':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861771299':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861800514':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'861800515':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'861800516':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'861800517':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'861771292':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')},
'861800519':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'861771290':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'861771291':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'861771296':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861771297':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861771294':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861771295':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861811866':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861811867':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861800639':{'en': 'Bin<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6ee8\u5dde\u5e02')},
'861800638':{'en': 'Binzhou, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6ee8\u5dde\u5e02')},
'861800635':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u804a\u57ce\u5e02')},
'861800634':{'en': 'Laiwu, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u83b1\u829c\u5e02')},
'861800637':{'en': 'Binzhou, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6ee8\u5dde\u5e02')},
'861800636':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5357\u5e02')},
'861800631':{'en': 'Weihai, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u5a01\u6d77\u5e02')},
'861800630':{'en': 'Weihai, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u5a01\u6d77\u5e02')},
'861800633':{'en': 'Rizhao, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u65e5\u7167\u5e02')},
'861800632':{'en': 'Zaozhuang, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u67a3\u5e84\u5e02')},
'861813246':{'en': 'Tangshan, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5510\u5c71\u5e02')},
'861813674':{'en': 'Changzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')},
'861813677':{'en': 'Suzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861813676':{'en': 'Suzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861813671':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')},
'861813670':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')},
'861813673':{'en': 'Su<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861813672':{'en': 'Changzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')},
'861813679':{'en': 'Z<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u9547\u6c5f\u5e02')},
'861813678':{'en': 'Zhenjiang, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u9547\u6c5f\u5e02')},
'861811864':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861805999':{'en': '<NAME>', 'zh': u('\u798f\u5efa\u7701\u6cc9\u5dde\u5e02')},
'861811865':{'en': 'Nantong, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861805998':{'en': 'Quanzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u6cc9\u5dde\u5e02')},
'861802022':{'en': 'Suzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861802023':{'en': 'Suzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861770488':{'en': 'Hohhot, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u547c\u548c\u6d69\u7279\u5e02')},
'861805992':{'en': 'Putian, Fujian', 'zh': u('\u798f\u5efa\u7701\u8386\u7530\u5e02')},
'861805698':{'en': '<NAME>', 'zh': u('\u5b89\u5fbd\u7701\u516d\u5b89\u5e02')},
'861805699':{'en': 'Wuhu, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u829c\u6e56\u5e02')},
'861805694':{'en': 'Anqing, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5b89\u5e86\u5e02')},
'861805695':{'en': 'Xuancheng, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5ba3\u57ce\u5e02')},
'861805696':{'en': 'Chuzhou, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u6ec1\u5dde\u5e02')},
'861805697':{'en': 'Fuyang, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u961c\u9633\u5e02')},
'861805690':{'en': 'Anqing, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5b89\u5e86\u5e02')},
'861805691':{'en': 'Anqing, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5b89\u5e86\u5e02')},
'861805692':{'en': 'Anqing, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5b89\u5e86\u5e02')},
'861805693':{'en': 'Anqing, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5b89\u5e86\u5e02')},
'861805044':{'en': '<NAME>', 'zh': u('\u798f\u5efa\u7701\u798f\u5dde\u5e02')},
'861809751':{'en': '<NAME>', 'zh': u('\u65b0\u7586\u963f\u52d2\u6cf0\u5730\u533a')},
'861809750':{'en': 'Altay, Xinjiang', 'zh': u('\u65b0\u7586\u963f\u52d2\u6cf0\u5730\u533a')},
'861803728':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5b89\u9633\u5e02')},
'861803729':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5b89\u9633\u5e02')},
'861809755':{'en': '<NAME>', 'zh': u('\u65b0\u7586\u963f\u52d2\u6cf0\u5730\u533a')},
'861809754':{'en': 'Altay, Xinjiang', 'zh': u('\u65b0\u7586\u963f\u52d2\u6cf0\u5730\u533a')},
'861809757':{'en': 'Hot<NAME>', 'zh': u('\u65b0\u7586\u548c\u7530\u5730\u533a')},
'861809756':{'en': '<NAME>', 'zh': u('\u65b0\u7586\u54c8\u5bc6\u5730\u533a')},
'861803722':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u6d1b\u9633\u5e02')},
'861803723':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5468\u53e3\u5e02')},
'861803720':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5b89\u9633\u5e02')},
'861803721':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5b89\u9633\u5e02')},
'861803726':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5468\u53e3\u5e02')},
'861803727':{'en': '<NAME>', | |
and len(x) == 0:
pass
else:
if isinstance(a, np.ndarray):
aa = a[:, np.newaxis]
else:
aa = a
x[ind1] += indexed(dv2, ind2) * aa
assert np.allclose(c1.data, x)
c1.axpy(-a, c2, ind=ind1, x_ind=ind2)
assert len(c1) == len(v1)
assert np.all(almost_equal(c1, v1))
def test_axpy_self(vector_array):
v = vector_array
if hasattr(v, 'data'):
dv = v.data
for ind1, ind2 in valid_inds_of_same_length(v, v):
if v.len_ind(ind1) != v.len_ind_unique(ind1):
with pytest.raises(Exception):
c = v.copy()
c.axpy(0., c, ind=ind1, x_ind=ind2)
continue
ind1_complement = ind_complement(v, ind1)
c = v.copy()
c.axpy(0., c, ind=ind1, x_ind=ind2)
assert len(c) == len(v)
assert np.all(almost_equal(c, v))
np.random.seed(len(v) + 8)
for a in (1., 1.4, np.random.random(v.len_ind(ind1))):
c = v.copy()
c.axpy(a, c, ind=ind1, x_ind=ind2)
assert len(c) == len(v)
assert np.all(almost_equal(c, v, U_ind=ind1_complement, V_ind=ind1_complement))
assert np.all(c.sup_norm(ind1) <= v.sup_norm(ind1) + abs(a) * v.sup_norm(ind2) * (1. + 1e-10))
assert np.all(c.l1_norm(ind1) <= (v.l1_norm(ind1) + abs(a) * v.l1_norm(ind2)) * (1. + 1e-10))
if hasattr(v, 'data'):
x = dv.copy()
if isinstance(ind1, Number):
x[[ind1]] += indexed(dv, ind2) * a
else:
if NUMPY_INDEX_QUIRK and len(x) == 0:
pass
else:
if isinstance(a, np.ndarray):
aa = a[:, np.newaxis]
else:
aa = a
x[ind1] += indexed(dv, ind2) * aa
assert np.allclose(c.data, x)
c.axpy(-a, v, ind=ind1, x_ind=ind2)
assert len(c) == len(v)
assert np.all(almost_equal(c, v))
for ind in valid_inds(v):
if v.len_ind(ind) != v.len_ind_unique(ind):
continue
for x in (1., 23., -4):
c = v.copy()
cc = v.copy()
c.axpy(x, c, ind=ind, x_ind=ind)
cc.scal(1 + x, ind=ind)
assert np.all(almost_equal(c, cc))
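# A plain-NumPy sketch of the axpy contract exercised by the two tests above
# (illustrative only, not part of the test suite): c.axpy(a, x) updates c in
# place as c <- c + a * x on the selected indices, which is why axpy(x, c) on
# the same indices must agree with scal(1 + x).
#
#     import numpy as np
#     c = np.array([1.0, 2.0, 3.0])
#     x = np.array([0.5, 0.5, 0.5])
#     a = 2.0
#     c += a * x   # what VectorArray.axpy(a, x) is expected to do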
def test_pairwise_dot(compatible_vector_array_pair):
v1, v2 = compatible_vector_array_pair
if hasattr(v1, 'data'):
dv1, dv2 = v1.data, v2.data
for ind1, ind2 in valid_inds_of_same_length(v1, v2):
r = v1.pairwise_dot(v2, ind=ind1, o_ind=ind2)
assert isinstance(r, np.ndarray)
assert r.shape == (v1.len_ind(ind1),)
r2 = v2.pairwise_dot(v1, ind=ind2, o_ind=ind1)
assert np.all(r == r2)
assert np.all(r <= v1.l2_norm(ind1) * v2.l2_norm(ind2) * (1. + 1e-10))
if hasattr(v1, 'data'):
assert np.allclose(r, np.sum(indexed(dv1, ind1) * indexed(dv2, ind2), axis=1))
def test_pairwise_dot_self(vector_array):
v = vector_array
if hasattr(v, 'data'):
dv = v.data
for ind1, ind2 in valid_inds_of_same_length(v, v):
r = v.pairwise_dot(v, ind=ind1, o_ind=ind2)
assert isinstance(r, np.ndarray)
assert r.shape == (v.len_ind(ind1),)
r2 = v.pairwise_dot(v, ind=ind2, o_ind=ind1)
assert np.all(r == r2)
assert np.all(r <= v.l2_norm(ind1) * v.l2_norm(ind2) * (1. + 1e-10))
if hasattr(v, 'data'):
assert np.allclose(r, np.sum(indexed(dv, ind1) * indexed(dv, ind2), axis=1))
for ind in valid_inds(v):
r = v.pairwise_dot(v, ind=ind, o_ind=ind)
assert np.allclose(r, v.l2_norm(ind) ** 2)
def test_dot(compatible_vector_array_pair):
v1, v2 = compatible_vector_array_pair
if hasattr(v1, 'data'):
dv1, dv2 = v1.data, v2.data
for ind1, ind2 in chain(valid_inds_of_different_length(v1, v2), valid_inds_of_same_length(v1, v2)):
r = v1.dot(v2, ind=ind1, o_ind=ind2)
assert isinstance(r, np.ndarray)
assert r.shape == (v1.len_ind(ind1), v2.len_ind(ind2))
r2 = v2.dot(v1, ind=ind2, o_ind=ind1)
assert np.all(r == r2.T)
assert np.all(r <= v1.l2_norm(ind1)[:, np.newaxis] * v2.l2_norm(ind2)[np.newaxis, :] * (1. + 1e-10))
if hasattr(v1, 'data'):
assert np.allclose(r, indexed(dv1, ind1).dot(indexed(dv2, ind2).T))
def test_dot_self(vector_array):
v = vector_array
if hasattr(v, 'data'):
dv = v.data
for ind1, ind2 in chain(valid_inds_of_different_length(v, v), valid_inds_of_same_length(v, v)):
r = v.dot(v, ind=ind1, o_ind=ind2)
assert isinstance(r, np.ndarray)
assert r.shape == (v.len_ind(ind1), v.len_ind(ind2))
r2 = v.dot(v, ind=ind2, o_ind=ind1)
assert np.all(r == r2.T)
assert np.all(r <= v.l2_norm(ind1)[:, np.newaxis] * v.l2_norm(ind2)[np.newaxis, :] * (1. + 1e-10))
if hasattr(v, 'data'):
assert np.allclose(r, indexed(dv, ind1).dot(indexed(dv, ind2).T))
for ind in valid_inds(v):
r = v.dot(v, ind=ind, o_ind=ind)
assert np.all(r == r.T)
def test_lincomb_1d(vector_array):
v = vector_array
np.random.seed(len(v) + 42 + v.dim)
for ind in valid_inds(v):
coeffs = np.random.random(v.len_ind(ind))
lc = v.lincomb(coeffs, ind=ind)
assert lc.dim == v.dim
assert lc.subtype == v.subtype
assert len(lc) == 1
lc2 = v.zeros()
ind = list(range(len(v))) if ind is None else [ind] if isinstance(ind, _INDEXTYPES) else ind
for coeff, i in zip(coeffs, ind):
lc2.axpy(coeff, v, x_ind=i)
assert np.all(almost_equal(lc, lc2))
def test_lincomb_2d(vector_array):
v = vector_array
np.random.seed(len(v) + 42 + v.dim)
for ind in valid_inds(v):
for count in (0, 1, 5):
coeffs = np.random.random((count, v.len_ind(ind)))
lc = v.lincomb(coeffs, ind=ind)
assert lc.dim == v.dim
assert lc.subtype == v.subtype
assert len(lc) == count
lc2 = v.empty(reserve=count)
for coeffs_1d in coeffs:
lc2.append(v.lincomb(coeffs_1d, ind=ind))
assert np.all(almost_equal(lc, lc2))
def test_lincomb_wrong_coefficients(vector_array):
v = vector_array
np.random.seed(len(v) + 42 + v.dim)
for ind in valid_inds(v):
coeffs = np.random.random(v.len_ind(ind) + 1)
with pytest.raises(Exception):
v.lincomb(coeffs, ind=ind)
coeffs = np.random.random(v.len_ind(ind)).reshape((1, 1, -1))
with pytest.raises(Exception):
v.lincomb(coeffs, ind=ind)
if v.len_ind(ind) > 0:
coeffs = np.random.random(v.len_ind(ind) - 1)
with pytest.raises(Exception):
v.lincomb(coeffs, ind=ind)
coeffs = np.array([])
with pytest.raises(Exception):
v.lincomb(coeffs, ind=ind)
def test_l1_norm(vector_array):
v = vector_array
if hasattr(v, 'data'):
dv = v.data
for ind in valid_inds(v):
c = v.copy()
norm = c.l1_norm(ind)
assert isinstance(norm, np.ndarray)
assert norm.shape == (v.len_ind(ind),)
assert np.all(norm >= 0)
if v.dim == 0:
assert np.all(norm == 0)
if hasattr(v, 'data'):
assert np.allclose(norm, np.sum(np.abs(indexed(dv, ind)), axis=1))
c.scal(4.)
assert np.allclose(c.l1_norm(ind), norm * 4)
c.scal(-4.)
assert np.allclose(c.l1_norm(ind), norm * 16)
c.scal(0.)
assert np.allclose(c.l1_norm(ind), 0)
def test_l2_norm(vector_array):
v = vector_array
if hasattr(v, 'data'):
dv = v.data
for ind in valid_inds(v):
c = v.copy()
norm = c.l2_norm(ind)
assert isinstance(norm, np.ndarray)
assert norm.shape == (v.len_ind(ind),)
assert np.all(norm >= 0)
if v.dim == 0:
assert np.all(norm == 0)
if hasattr(v, 'data'):
assert np.allclose(norm, np.sqrt(np.sum(np.power(indexed(dv, ind), 2), axis=1)))
c.scal(4.)
assert np.allclose(c.l2_norm(ind), norm * 4)
c.scal(-4.)
assert np.allclose(c.l2_norm(ind), norm * 16)
c.scal(0.)
assert np.allclose(c.l2_norm(ind), 0)
def test_l2_norm2(vector_array):
v = vector_array
if hasattr(v, 'data'):
dv = v.data
for ind in valid_inds(v):
c = v.copy()
norm = c.l2_norm2(ind)
assert isinstance(norm, np.ndarray)
assert norm.shape == (v.len_ind(ind),)
assert np.all(norm >= 0)
if v.dim == 0:
assert np.all(norm == 0)
if hasattr(v, 'data'):
assert np.allclose(norm, np.sum(np.power(indexed(dv, ind), 2), axis=1))
c.scal(4.)
assert np.allclose(c.l2_norm2(ind), norm * 16)
c.scal(-4.)
assert np.allclose(c.l2_norm2(ind), norm * 256)
c.scal(0.)
assert np.allclose(c.l2_norm2(ind), 0)
def test_sup_norm(vector_array):
v = vector_array
if hasattr(v, 'data'):
dv = v.data
for ind in valid_inds(v):
c = v.copy()
norm = c.sup_norm(ind)
assert isinstance(norm, np.ndarray)
assert norm.shape == (v.len_ind(ind),)
assert np.all(norm >= 0)
if v.dim == 0:
assert np.all(norm == 0)
if hasattr(v, 'data') and v.dim > 0:
assert np.allclose(norm, np.max(np.abs(indexed(dv, ind)), axis=1))
c.scal(4.)
assert np.allclose(c.sup_norm(ind), norm * 4)
c.scal(-4.)
assert np.allclose(c.sup_norm(ind), norm * 16)
c.scal(0.)
assert np.allclose(c.sup_norm(ind), 0)
def test_components(vector_array):
v = vector_array
np.random.seed(len(v) + 24 + v.dim)
if hasattr(v, 'data'):
dv = v.data
for ind in valid_inds(v):
c = v.copy()
comp = c.components(np.array([], dtype=int), ind=ind)
assert isinstance(comp, np.ndarray)
assert comp.shape == (v.len_ind(ind), 0)
c = v.copy()
comp = c.components([], ind=ind)
assert isinstance(comp, np.ndarray)
assert comp.shape == (v.len_ind(ind), 0)
if v.dim > 0:
for count in (1, 5, 10):
c_ind = np.random.randint(0, v.dim, count)
c = v.copy()
comp = c.components(c_ind, ind=ind)
assert comp.shape == (v.len_ind(ind), count)
c = v.copy()
comp2 = c.components(list(c_ind), ind=ind)
assert np.all(comp == comp2)
c = v.copy()
c.scal(3.)
comp2 = c.components(c_ind, ind=ind)
assert np.allclose(comp * 3, comp2)
c = v.copy()
comp2 = c.components(np.hstack((c_ind, c_ind)), ind=ind)
assert np.all(comp2 == np.hstack((comp, comp)))
if hasattr(v, 'data'):
assert np.all(comp == indexed(dv, ind)[:, c_ind])
def test_components_wrong_component_indices(vector_array):
v = vector_array
np.random.seed(len(v) + 24 + v.dim)
for ind in valid_inds(v):
with pytest.raises(Exception):
v.components(None, ind=ind)
with pytest.raises(Exception):
v.components(1, ind=ind)
with pytest.raises(Exception):
v.components(np.array([-1]), ind=ind)
with pytest.raises(Exception):
v.components(np.array([v.dim]), ind=ind)
def test_amax(vector_array):
v = vector_array
if v.dim == 0:
return
for ind in valid_inds(v):
max_inds, max_vals = v.amax(ind)
assert np.allclose(np.abs(max_vals), v.sup_norm(ind))
if ind is None:
ind = range(len(v))
elif isinstance(ind, Number):
ind = [ind]
for i, max_ind, max_val in zip(ind, max_inds, max_vals):
assert np.allclose(max_val, v.components([max_ind], ind=[i]))
# def test_amax_zero_dim(zero_dimensional_vector_space):
# for count in (0, 10):
# v = zero_dimensional_vector_space.zeros(count=count)
# for ind in valid_inds(v):
# with pytest.raises(Exception):
# v.amax(ind)
def test_gramian(vector_array):
v = vector_array
for ind in valid_inds(v):
assert np.allclose(v.gramian(ind), v.dot(v, ind=ind, o_ind=ind))
def test_add(compatible_vector_array_pair):
v1, v2 = compatible_vector_array_pair
if len(v2) < len(v1):
v2.append(v2, o_ind=np.zeros(len(v1) - len(v2), dtype=int))
elif len(v2) > len(v1):
v2.remove(list(range(len(v2)-len(v1))))
c1 = v1.copy()
cc1 = v1.copy()
c1.axpy(1, v2)
assert np.all(almost_equal(v1 + v2, c1))
assert np.all(almost_equal(v1, cc1))
def test_iadd(compatible_vector_array_pair):
v1, v2 = compatible_vector_array_pair
if len(v2) < len(v1):
v2.append(v2, o_ind=np.zeros(len(v1) - len(v2), dtype=int))
elif len(v2) > len(v1):
v2.remove(list(range(len(v2)-len(v1))))
c1 = v1.copy()
c1.axpy(1, v2)
v1 += v2
assert np.all(almost_equal(v1, c1))
def test_sub(compatible_vector_array_pair):
v1, v2 = compatible_vector_array_pair
if len(v2) < len(v1):
v2.append(v2, o_ind=np.zeros(len(v1) - len(v2), dtype=int))
elif len(v2) > len(v1):
v2.remove(list(range(len(v2)-len(v1))))
c1 = v1.copy()
cc1 = v1.copy()
c1.axpy(-1, v2)
assert np.all(almost_equal((v1 - v2), c1))
assert np.all(almost_equal(v1, cc1))
def test_isub(compatible_vector_array_pair):
v1, v2 = compatible_vector_array_pair
if len(v2) < len(v1):
v2.append(v2, o_ind=np.zeros(len(v1) - len(v2), dtype=int))
elif len(v2) > len(v1):
v2.remove(list(range(len(v2)-len(v1))))
c1 = v1.copy()
c1.axpy(-1, v2)
v1 -= v2
assert np.all(almost_equal(v1, c1))
def test_neg(vector_array):
v = vector_array
c = v.copy()
cc = | |
sample
neural_mode :
defines the input for the neural net
file :
string which points to the file with the corpus of text
sg_file :
string which points to the location with the skipgrams created with the corpus of text
optimizer :
optimizer which shall be used for the keras neural net
properties:
Properties for the data creator that will be loaded.
This is only needed if neural_mode is set to physical2D
"""
super().__init__(vocab_size,window_size, vector_dim, negative_samples, neural_mode, file, properties)
self.optimizer = optimizer
self.file = file
self.sg_file = sg_file
self.SetupNeuralnet()
def SetupNeuralnet(self):
"""
Creates the neural network by using the keras module.
"""
#print("Setup neural net...")
# create some input variables
input_target = Input((1,))
input_context = Input((1,))
#initialization values are originated in the gensim code
embedding = Embedding(self.vocab_size, self.vector_dim, input_length=1, name='embedding_word') #Create embedding layer
embedding_context = Embedding(self.vocab_size, self.vector_dim, input_length=1, name='embedding_context', embeddings_initializer=keras.initializers.RandomUniform(minval=-0.5/self.vector_dim,maxval=0.5/self.vector_dim)) #extra embedding layer for context
target = embedding(input_target) #calculate the word vector of the target word
target = Reshape((self.vector_dim, 1))(target)
context = embedding_context(input_context) #calculate the word vector of the possible context word
context = Reshape((self.vector_dim, 1))(context)
# now perform the dot product operation to get a similarity measure
dot_product = dot([target, context], axes = 1, normalize = False)
dot_product = Reshape((1,))(dot_product)
output = Activation('sigmoid')(dot_product) #With that approach there is no additional parameter that can be learned
# create the primary training model
model = Model(inputs=[input_target, input_context], outputs=output)
model.compile(loss='binary_crossentropy', optimizer=self.optimizer) #optimizer='SGD' #optimizer='rmsprop'
#create a model which gives back the word_vector representation of the context words
word_vector_model = Model(inputs=[input_context],outputs=context)
self.model = model
self.word_vector_model = word_vector_model
def batchgenerator(self, batch_size):
"""
Generates batches from the skip-gram file given at initialization.
Parameters
----------
batch_size :
Number of skipgram pairs in one batch that is given to the neural net for training
"""
def set_to_zero():
return [np.zeros(batch_size, dtype='int32'), np.zeros(batch_size, dtype='int32'), np.zeros(batch_size, dtype='int32')]
stream = open(self.sg_file, mode='r')
while True:
word_target, word_context, labels = set_to_zero()
act_idx = 0
for idx, line in enumerate(stream):
k = idx - act_idx
word_target[k], word_context[k], labels[k] = line.replace("\n","").split(" ")
if k == batch_size - 1:
yield ([word_target, word_context], labels)
word_target, word_context, labels = set_to_zero()
act_idx = idx
stream.seek(0)
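# Each yielded batch has the form ([word_target, word_context], labels), where all
# three are int32 arrays of length batch_size filled line by line from the skip-gram file.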
def __number_of_skipgrams_in_file(self):
"""
Counts the number of lines in the skip gram file to determine the number of skipgrams.
"""
stream = open(self.sg_file, mode='r')
length = 0
while stream.readline() != "":
length += 1
stream.close()
return length
def Train(self, epochs, batch_size, initial_epoch=0):
"""
Trains the model by using the keras api.
Parameters
----------
epochs :
Number of final epoch that will be trained.
batch_size :
Number of skipgrams that will be given as one batch for training the neural net
initial_epoch :
Last learned epoch. So for starting learning the value is 0.
"""
number = self.__number_of_skipgrams_in_file()
self.model.fit_generator(self.batchgenerator(batch_size), epochs=epochs,steps_per_epoch = number//batch_size, verbose=1, initial_epoch=initial_epoch)
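# Illustrative training call (hypothetical instance name `t2v`; the numbers are
# only an example, not taken from the original project):
#     t2v.Generate_skipgrams()
#     t2v.Train(epochs=5, batch_size=1024)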
def get_word_vector (self, word, normalized = True):
"""
Returns the word vector that corresponds to the given word.
Parameters
----------
word :
The word whose vector is wanted
normalized :
if =True the word vector will be normalized
"""
in_word = np.zeros(1)
if type(word) == int:
in_word[0] = word
else:
in_word[0] = self.dictionary[word]
vector = np.ndarray.flatten(self.word_vector_model.predict_on_batch(in_word)[0])
if normalized:
vector /= np.linalg.norm(vector)
return vector
def get_word_vectors(self, normalized = False):
"""
Returns a list of all word vectors trained by the neural net.
Attention
---------
The list is not guaranteed to be in an order that corresponds to the internal dictionary
Parameters
----------
normalized :
if =True the word vectors will be normalized
"""
in_batch = np.array([k for k in range(self.vocab_size)])
vectors = np.squeeze(self.word_vector_model.predict_on_batch(in_batch), axis=2)
if normalized:
vectors = vectors / np.linalg.norm(vectors, axis=1, keepdims=True)
return vectors
def index2word(self, idx):
"""
Returns the word to the given index.
Parameters
----------
idx : index of a word in the internal gensim implementation
"""
return self.reverse_dictionary[idx]
#TODO: Add visualization for 2D systems as in the gensim class
def most_similar(self, positive=[], negative=[], number=10):
"""
Gives back the most similar words. Positive words contribute positively, negative words
negatively. For measuring similarity, cosine similarity is used as described in the original
paper.
Parameters
----------
positive :
list of positive words which will be given to the most_similar method
negative :
list of negative words which will be given to the most_similar method
number :
number of most similar words that is given back
"""
vectors = []
for i in positive:
vectors.append(self.get_word_vector(i,normalized=True))
for i in negative:
vectors.append((-1) * self.get_word_vector(i,normalized=True))
if vectors == []:
raise ValueError("cannot compute nearest words with no input")
final_vec = np.mean(np.array(vectors),axis=0)
norm_vec = final_vec / np.linalg.norm(final_vec)
in_batch = np.array([k for k in range (self.vocab_size)])
vectors = self.word_vector_model.predict_on_batch(in_batch)
for v in vectors:
v /= np.linalg.norm(v)
similarity = [[self.reverse_dictionary[k],(np.transpose(vectors[k])@norm_vec)[0]] for k in range(len(vectors)) if self.reverse_dictionary[k] not in positive+negative]
return sorted(similarity,reverse=True, key=tools.takeSecond)[:number]
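# Illustrative usage (hypothetical instance name `t2v`; the words are only an
# example and are not taken from the original corpus):
#     t2v.most_similar(positive=['king', 'woman'], negative=['man'], number=5)
# returns a list of [word, cosine_similarity] pairs sorted by similarity.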
def most_similar_cosmul(self, positive=[],negative=[],number=10):
"""
Gives back the most similar words. Positive words contribute positively, negative words
negatively. For measuring similarity, the multiplicative combination objective is used,
see <http://www.aclweb.org/anthology/W14-1618>.
Parameters
----------
positive :
list of positive words which will be given to the most_similar method
negative :
list of negative words which will be given to the most_similar method
number :
number of most similar words that is given back
"""
in_batch = np.array([k for k in range (self.vocab_size)])
vectors = self.word_vector_model.predict_on_batch(in_batch)
for v in vectors:
v /= np.linalg.norm(v)
pos_dist, neg_dist = [], []
for i in positive:
pos_dist.append((1+np.dot(np.squeeze(vectors,axis=2), self.get_word_vector(i,normalized=True)))/2)
for i in negative:
neg_dist.append((1+np.dot(np.squeeze(vectors,axis=2), self.get_word_vector(i,normalized=True)))/2)
dist = np.prod(pos_dist,axis=0) / (np.prod(neg_dist, axis=0) + 0.000001)
similarity = [[self.reverse_dictionary[k],dist[k]] for k in range(len(dist)) if self.reverse_dictionary[k] not in positive+negative]
return sorted(similarity,reverse=True, key=tools.takeSecond)[:number]
#TODO: Save the whole model, so training can be resumed without initialising again?
def save_model(self, path):
"""
Saves the model at the given path.
"""
self.model.save_weights(path)
def load_model(self, path):
"""
Loads the model with the data given at path.
"""
self.model.load_weights(path)
def make_cum_table(self, domain=2**31 - 1):
"""
Calculates the noise distribution that is used for sampling of negative samples.
The code is adapted from the gensim library. The distribution follows the one stated
in the original paper.
"""
cum_table = np.zeros(self.vocab_size-1, dtype=np.uint32) #in order to ignore UNK -1
# compute sum of all power (Z in paper)
train_words_pow = 0.0
for word_index in range(1,self.vocab_size): #To ignore the UNK start with 1
train_words_pow += self.count[word_index][1]**(0.75)
cumulative = 0.0
for word_index in range(1,self.vocab_size):
cumulative += self.count[word_index][1]**(0.75)
cum_table[word_index-1] = round(cumulative / train_words_pow * domain)
return cum_table
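# For reference, the noise distribution built above is (as in the word2vec paper)
#     P(w_i) = count(w_i)**0.75 / sum_j count(w_j)**0.75
# and cum_table stores its cumulative sums scaled to `domain`, so a negative
# sample can presumably be drawn gensim-style with something like
#     cum_table.searchsorted(np.random.randint(domain)) + 1
# where the "+ 1" re-adds the UNK offset (this sketch is an assumption, not
# code taken from the original module).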
def load_embedding_matrix_from_gensim(self, thing2vecgensim):
"""
Loads the embedding vectors from the gensim word2vec model into this
model.
"""
def get_matrix(wvmatrix):
wv_matrix = (np.random.rand(self.vocab_size, self.vector_dim) - 0.5) / 5.0
for i in self.reverse_dictionary:
if i >= self.vocab_size:
continue
try:
index = thing2vecgensim.model.wv.vocab[str(thing2vecgensim.dictionary[self.reverse_dictionary[i]])].index
embedding_vector = wvmatrix[index]
# words not found in embedding index will be all-zeros.
wv_matrix[i] = embedding_vector
except:
pass
return wv_matrix
syn1neg = get_matrix(thing2vecgensim.model.trainables.syn1neg)
syn0 = get_matrix(thing2vecgensim.model.wv.vectors)
self.model.set_weights([syn1neg,syn0])
def Generate_skipgrams(self, replace_context=False):
if os.path.isfile(self.sg_file):
print("Skipgram file already exists!")
return None
cum_table = self.make_cum_table()
sentences = self.Thing2String(self.file, self.dictionary, transformation=int)
sampling_table = sequence.make_sampling_table(self.vocab_size)
self.skipgrams_sampled(sentences, self.sg_file, sampling_table=sampling_table, replace_context=replace_context, cum_table=cum_table)
#TODO: This function has to be changed because it just takes a file "sequence" which is not data_idx, so
# the dictionary still has to be applied.
def skipgrams_sampled_old(self, sequence, vocabulary_size,
window_size=4, negative_samples=1., shuffle=True,
categorical=False, sampling_table=None, seed=None,
unigram_distribution = True, replace_context=False, count = []):
"""
Generates skipgram word pairs.
Function originally from the keras package, with added functionality
for sampling words at different distances from the original word.
With unigram_distribution the negative samples are drawn from
a unigram distribution.
replace_context defines whether the negative samples are created
by replacing the context word or the "goal" word.
"""
couples = []
labels = []
for i, wi in enumerate(sequence):
if not wi:
continue
if sampling_table is not None:
if sampling_table[wi] < random.random():
continue
reduced_window = random.randint(0,window_size) #Added code
window_start = max(0, i - window_size + reduced_window)
window_end = min(len(sequence), i + window_size + 1 - reduced_window)
for j in | |
function with respect to the distribution
location and scale only tested on a few examples
Parameters
----------
all parameters are keyword parameters
fn : function (default: identity mapping)
Function for which integral is calculated. Takes only one argument.
args : tuple
argument (parameters) of the distribution
lb, ub : numbers
lower and upper bound for integration, default is set using
quantiles of the distribution, see Notes
conditional : boolean (False)
If true then the integral is corrected by the conditional probability
of the integration interval. The return value is the expectation
of the function, conditional on being in the given interval.
Returns
-------
expected value : float
Notes
-----
This function has not been checked for its behavior when the integral is
not finite. The integration behavior is inherited from scipy.integrate.quad.
The default limits are lb = self.ppf(1e-9, *args), ub = self.ppf(1-1e-9, *args)
For some heavy tailed distributions, 'alpha', 'cauchy', 'halfcauchy',
'levy', 'levy_l', and for 'ncf', the default limits are not set correctly
even when the expectation of the function is finite. In this case, the
integration limits, lb and ub, should be chosen by the user. For example,
for the ncf distribution, ub=1000 works in the examples.
There are also problems with numerical integration in some other cases,
for example if the distribution is very concentrated and the default limits
are too large.
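Examples
--------
Illustrative sketch only, assuming the patched method has been attached to
``rv_continuous`` (as done at the bottom of this module) and that ``fn`` is
the first positional argument::

    from scipy import stats
    stats.norm.expect()                    # E[X] of the standard normal, approx. 0.0
    stats.norm.expect(lambda x: x**2)      # E[X**2] = Var(X) + E[X]**2, approx. 1.0
    stats.norm.expect(lambda x: x**2, lb=0., conditional=True)  # conditional on X >= 0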
'''
#changes: 20100809
#correction and refactoring how loc and scale are handled
#now uses _pdf
#needs more testing for distributions with bounded support, e.g. genpareto
if fn is None:
def fun(x, *args):
return (loc + x*scale)*self._pdf(x, *args)
else:
def fun(x, *args):
return fn(loc + x*scale)*self._pdf(x, *args)
if lb is None:
#lb = self.a
try:
lb = self.ppf(1e-9, *args) #1e-14 quad fails for pareto
except ValueError:
lb = self.a
else:
lb = max(self.a, (lb - loc)/(1.0*scale)) #transform to standardized
if ub is None:
#ub = self.b
try:
ub = self.ppf(1-1e-9, *args)
except ValueError:
ub = self.b
else:
ub = min(self.b, (ub - loc)/(1.0*scale))
if conditional:
invfac = self._sf(lb,*args) - self._sf(ub,*args)
else:
invfac = 1.0
return integrate.quad(fun, lb, ub,
args=args, limit=500)[0]/invfac
### for discrete distributions
#TODO: check that for a distribution with finite support the calculations are
# done with one array summation (np.dot)
#based on _drv2_moment(self, n, *args), but streamlined
def expect_discrete(self, fn=None, args=(), loc=0, lb=None, ub=None,
conditional=False):
'''calculate expected value of a function with respect to the distribution
for discrete distribution
Parameters
----------
(self : distribution instance as defined in scipy stats)
fn : function (default: identity mapping)
Function for which integral is calculated. Takes only one argument.
args : tuple
argument (parameters) of the distribution
optional keyword parameters
lb, ub : numbers
lower and upper bound for integration, default is set to the support
of the distribution, lb and ub are inclusive (ul<=k<=ub)
conditional : boolean (False)
If true then the expectation is corrected by the conditional
probability of the integration interval. The return value is the
expectation of the function, conditional on being in the given
interval (k such that ul<=k<=ub).
Returns
-------
expected value : float
Notes
-----
* function is not vectorized
* accuracy: uses self.moment_tol as stopping criterion;
for heavy-tailed distributions, e.g. zipf(4), the accuracy for
mean and variance in the example is only 1e-5;
increasing the precision (moment_tol) makes zipf very slow
* suppnmin=100 internal parameter for minimum number of points to evaluate
could be added as keyword parameter, to evaluate functions with
non-monotonic shapes, points include integers in (-suppnmin, suppnmin)
* uses maxcount=1000 to limit the number of points that are evaluated
to break loop for infinite sums
(a maximum of suppnmin+1000 positive plus suppnmin+1000 negative integers
are evaluated)
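Examples
--------
Illustrative sketch only, assuming the patched method has been attached to
``rv_discrete`` (as done at the bottom of this module)::

    from scipy import stats
    stats.poisson.expect(args=(2.0,))                  # E[X] for Poisson(2), approx. 2.0
    stats.poisson.expect(lambda k: k**2, args=(2.0,))  # E[X**2] = mu + mu**2, approx. 6.0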
'''
#moment_tol = 1e-12 # increase compared to self.moment_tol,
# too slow for only small gain in precision for zipf
#avoid endless loop with unbound integral, eg. var of zipf(2)
maxcount = 1000
suppnmin = 100 #minimum number of points to evaluate (+ and -)
if fn is None:
def fun(x):
#loc and args from outer scope
return (x+loc)*self._pmf(x, *args)
else:
def fun(x):
#loc and args from outer scope
return fn(x+loc)*self._pmf(x, *args)
# used pmf because _pmf does not check support in randint
# and there might be problems(?) with correct self.a, self.b at this stage
# maybe not anymore, seems to work now with _pmf
self._argcheck(*args) # (re)generate scalar self.a and self.b
if lb is None:
lb = (self.a)
else:
lb = lb - loc
if ub is None:
ub = (self.b)
else:
ub = ub - loc
if conditional:
invfac = self.sf(lb,*args) - self.sf(ub+1,*args)
else:
invfac = 1.0
tot = 0.0
low, upp = self._ppf(0.001, *args), self._ppf(0.999, *args)
low = max(min(-suppnmin, low), lb)
upp = min(max(suppnmin, upp), ub)
supp = np.arange(low, upp+1, self.inc) #check limits
#print('low, upp', low, upp)
tot = np.sum(fun(supp))
diff = 1e100
pos = upp + self.inc
count = 0
#handle cases with infinite support
while (pos <= ub) and (diff > self.moment_tol) and count <= maxcount:
diff = fun(pos)
tot += diff
pos += self.inc
count += 1
if self.a < 0: #handle case when self.a = -inf
diff = 1e100
pos = low - self.inc
while (pos >= lb) and (diff > self.moment_tol) and count <= maxcount:
diff = fun(pos)
tot += diff
pos -= self.inc
count += 1
if count > maxcount:
# replace with proper warning
print('sum did not converge')
return tot/invfac
stats.distributions.rv_continuous.fit_fr = fit_fr
stats.distributions.rv_continuous.nnlf_fr = nnlf_fr
stats.distributions.rv_continuous.expect = expect
stats.distributions.rv_discrete.expect = expect_discrete
stats.distributions.beta_gen._fitstart = _fitstart_beta #not tried out yet
stats.distributions.poisson_gen._fitstart = _fitstart_poisson #not tried out yet
########## end patching scipy
def distfitbootstrap(sample, distr, nrepl=100):
'''run bootstrap for estimation of distribution parameters
hard coded: only one shape parameter is allowed and estimated,
loc=0 and scale=1 are fixed in the estimation
Parameters
----------
sample : array
original sample data for bootstrap
distr : distribution instance with fit_fr method
nrepl : integer
number of bootstrap replications
Returns
-------
res : array (nrepl,)
parameter estimates for all bootstrap replications
'''
nobs = len(sample)
res = np.zeros(nrepl)
for ii in range(nrepl):
rvsind = np.random.randint(nobs, size=nobs)
x = sample[rvsind]
res[ii] = distr.fit_fr(x, frozen=[np.nan, 0.0, 1.0])
return res
def distfitmc(sample, distr, nrepl=100, distkwds={}):
'''run Monte Carlo for estimation of distribution parameters
hard coded: only one shape parameter is allowed and estimated,
loc=0 and scale=1 are fixed in the estimation
Parameters
----------
sample : array
original sample data, in Monte Carlo only used to get nobs,
distr : distribution instance with fit_fr method
nrepl : integer
number of Monte Carlo replications
Returns
-------
res : array (nrepl,)
parameter estimates for all Monte Carlo replications
'''
arg = distkwds.pop('arg')
nobs = len(sample)
res = np.zeros(nrepl)
for ii in range(nrepl):
x = distr.rvs(arg, size=nobs, **distkwds)
res[ii] = distr.fit_fr(x, frozen=[np.nan, 0.0, 1.0])
return res
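# Illustrative usage sketch for the two helpers above (the names and the gamma
# distribution are assumptions for the example, not taken from this module):
#
#     distr = stats.gamma                          # one shape parameter; loc=0, scale=1 fixed
#     true_shape = 2.5
#     sample = distr.rvs(true_shape, size=200)
#     bres = distfitbootstrap(sample, distr, nrepl=100)
#     mres = distfitmc(sample, distr, nrepl=100, distkwds={'arg': true_shape})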
def printresults(sample, arg, bres, kind='bootstrap'):
'''calculate and print Bootstrap or Monte Carlo results
Parameters
----------
sample : array
original sample data
arg : float (for general case will be array)
bres : array
parameter estimates from Bootstrap or Monte Carlo run
kind : {'bootstrap', 'montecarlo'}
output is printed for Bootstrap (default) or Monte Carlo
Returns
-------
None, currently only printing
Notes
-----
still a bit of a mess because it is used for both Bootstrap and Monte Carlo
made correction:
reference point for bootstrap is estimated parameter
not clear:
I'm not doing any ddof adjustment in estimation of variance, do we
need ddof>0 ?
todo: return results and string instead of printing
'''
print('true parameter value')
print(arg)
print('MLE estimate of parameters using sample (nobs=%d)'% (nobs))
argest = distr.fit_fr(sample, frozen=[np.nan, 0.0, 1.0])
print(argest)
if kind == 'bootstrap':
#bootstrap compares to estimate from sample
argorig = arg
arg = argest
print('%s distribution of parameter estimate (nrepl=%d)'% (kind, nrepl))
print('mean = %f, bias=%f' % (bres.mean(0), bres.mean(0)-arg))
print('median', np.median(bres, axis=0))
print('var and std', bres.var(0), np.sqrt(bres.var(0)))
bmse = ((bres - arg)**2).mean(0)
print('mse, rmse', bmse, np.sqrt(bmse))
bressorted = np.sort(bres)
print('%s confidence interval (90%% coverage)' % kind)
print(bressorted[int(np.floor(nrepl*0.05))], bressorted[int(np.floor(nrepl*0.95))])
print('%s confidence interval (90%% coverage) | |
<filename>MapGen.py
import VectorMaps, json, sys
from os import system, path
#Used instead of print() and input() for purposes of accessibility to screenreaders
def dual_print(string):
system("title "+string)
print(string + '\n')
def dual_input(string):
system("title "+string)
return input(string)
#Utility functions
def get_commands():
command = dual_input('Please enter a command: ')
return command
def validate_position(position, bound):
if position not in range(bound + 1):
return False
else:
return True
def export(data):
with open('data.json', 'w') as outfile:
json.dump(data, outfile)
outfile.close()
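# Example (illustrative only; the structure of the exported map data is defined
# elsewhere in this tool):
#     export({'tiles': []})   # writes data.json in the current working directory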
#Commands that can be used in tile edit mode
def list_fields(tile): #Lists fields within a tile
if tile.fields == []:
dual_print('There are no fields in this tile.')
else:
dual_print('There are ' + str(len(tile.fields)) + ' fields. In order of addition: ')
for f in tile.fields:
dual_print(f.name + ',')
return
def create_field(tile):
field_name = str(dual_input('Enter a name for the new field: '))
for f in tile.fields:
if f.name == field_name:
dual_print('Error: Field named ' + field_name + ' already exists in this tile. Exiting field creation.')
return
while True:
try:
anchor_x = dual_input('Enter the x-position of the field\'s anchor (upper left corner): ')
anchor_x = int(anchor_x)
except:
dual_print('Error: ' + str(anchor_x) + ' is not a number.')
continue
else:
if validate_position(anchor_x, tile.width):
break
else:
dual_print('Error: ' + str(anchor_x) + ' is out of bounds.')
while True:
try:
anchor_y = dual_input('Enter the y-position of the field\'s anchor (upper left corner): ')
anchor_y = int(anchor_y)
except:
dual_print('Error: ' + str(anchor_y) + ' is not a number.')
continue
else:
if validate_position(anchor_y, tile.height):
break
else:
dual_print('Error: ' + str(anchor_y) + ' is out of bounds.')
while True:
try:
field_width = dual_input('Enter the width of the field: ')
field_width = int(field_width)
except:
dual_print('Error: ' + str(field_width) + ' is not a number.')
continue
else:
if validate_position(field_width + anchor_x, tile.width):
break
else:
dual_print('Error: the edge of the field is out of bounds.')
while True:
try:
field_height = dual_input('Enter the height of the field: ')
field_height = int(field_height)
except:
dual_print('Error: ' + str(field_height) + ' is not a number.')
continue
else:
if validate_position(field_height + anchor_y, tile.height):
break
else:
dual_print('Error: the edge of the field is out of bounds. Try shortening it.')
while True:
field_clips = dual_input('Can entities pass through this field? Y/n: ')
if field_clips.lower() == 'y':
field_clips = True
break
elif field_clips.lower() == 'n':
field_clips = False
break
else:
dual_print('Invalid input.')
dual_print('Creating field...')
tile.fields.append(VectorMaps.Field([field_width, field_height],[anchor_x, anchor_y], name=field_name, clipping=field_clips))
dual_print('Done. Returning to tile edit mode.')
return tile
def edit_field(tile):
found = False
field_name = str(dual_input('Enter the name of the field you wish to edit. '))
for f in tile.fields:
if f.name == field_name:
dual_print('Now editing field "' + field_name + '" in tile ' + str(tile.pos) + '.')
found = True
active_field_index = tile.fields.index(f)
if not found:
dual_print('Field "' + field_name + '" was not found. Returning to tile edit mode.')
return
while 1:
option = str(dual_input('Enter the name of the variable you want to edit. Options are: \'anchorX\', \'anchorY\', \'width\', \'height\', \'name\', and \'clipping\'. To exit, use \'save\' to save changes and exit or \'cancel\' to revert changes and exit: ')).lower().strip()
if option == 'anchorx':
dual_print('The anchor\'s current coordinates are ' + str(tile.fields[active_field_index].anchor) + '.')
try:
new_x = int(dual_input('Enter the new value for the anchor\'s position on the X axis: '))
except:
dual_print('Error: Not a Number')
continue
else:
if validate_position(new_x + tile.fields[active_field_index].dimensions[0], tile.width):
tile.fields[active_field_index].anchor[0] = new_x
dual_print('Done. The anchor\'s coordinates are now ' + str(tile.fields[active_field_index].anchor) + '.')
else:
dual_print('Error: the edge of the field is now out of bounds. Cancelling action.')
elif option == 'anchory':
try:
new_y = int(dual_input('Enter the new value for the anchor\'s position on the Y axis: '))
except:
dual_print('Error: Not a Number')
continue
else:
if validate_position(new_y + tile.fields[active_field_index].dimensions[1], tile.height):
tile.fields[active_field_index].anchor[1] = new_y
dual_print('Done. The anchor\'s coordinates are now ' + str(tile.fields[active_field_index].anchor) + '.')
else:
dual_print('Error: the edge of the field is now out of bounds. Cancelling action.')
elif option == 'name':
try:
new_name = str(dual_input('Enter the new name for the field: '))
except:
dual_print('Something went wrong. Please report this error.')
continue
else:
tile.fields[active_field_index].name = new_name
dual_print('New name has been set.')
elif option == 'width':
try:
new_width = int(dual_input('Enter the new value for the field\'s width: '))
except:
dual_print('Error: Not a Number')
continue
else:
if validate_position(new_width + tile.fields[active_field_index].anchor[0], tile.width):
tile.fields[active_field_index].dimensions[0] = new_width
dual_print('Done. The field\'s dimensions are now' + str(tile.fields[active_field_index].dimensions) + '.')
else:
dual_print('Error: the edge of the field is now out of bounds. Cancelling action.')
elif option == 'height':
try:
new_height = int(dual_input('Enter the new value for the field\'s height: '))
except:
dual_print('Error: Not a Number')
continue
else:
if validate_position(new_height + tile.fields[active_field_index].anchor[1], tile.height):
tile.fields[active_field_index].dimensions[1] = new_height
dual_print('Done. The field\'s dimensions are now' + str(tile.fields[active_field_index].dimensions) + '.')
else:
dual_print('Error: the edge of the field is now out of bounds. Cancelling action.')
elif option == 'clipping':
field_clips = dual_input('Can entities pass through this field? Y/n: ')
if field_clips.lower() == 'y':
tile.fields[active_field_index].clipping = False
dual_print('Clipping set to False')
elif field_clips.lower() == 'n':
tile.fields[active_field_index].clipping = True
dual_print('Clipping set to True')
else:
dual_print('Invalid input. Cancelling action.')
elif option == 'cancel':
dual_print('Cancelling all changes. Exiting to tile edit mode.')
return
elif option == 'save':
dual_print('Saving all changes. Exiting to tile edit mode. Note that to save to a file you must export.')
return tile
def delete_field(tile):
found = False
field_name = str(dual_input('Enter the name of the field you wish to delete. '))
for f in tile.fields:
if f.name == field_name:
dual_print('Deleting field...')
found = True
tile.fields.remove(f)
dual_print('Field deleted. Returning to tile edit mode.')
return tile
if not found:
dual_print('Field "' + field_name + '" was not found. Returning to tile edit mode.')
return
def view_field(tile):
found = False
field_name = str(dual_input('Enter the name of the field you wish to view. '))
for f in tile.fields:
if f.name == field_name:
dual_print('Now viewing field "' + field_name + '" in tile ' + str(tile.pos) + '.')
dual_print('anchor: ' + str(f.anchor) + '\ndimensions: ' + str(f.dimensions) + '\nclips: ' + str(f.clipping) )
found = True
return
if not found:
dual_print('Field "' + field_name + '" was not found. Returning to tile edit mode.')
return
def parse_command(command, tile):
command_dict = {
'field' : {'list' : list_fields, 'new' : create_field, 'delete' : delete_field, 'edit' : edit_field, 'view' : view_field},
'goto' : [],
'help' : ['field', 'goto'],
'save' : [],
'exit' : []
}
commands = command.split(' ')
try:
options = command_dict[commands[0]]
except:
dual_print('Error, invalid command.')
else:
if commands[0] == 'field':
try:
if commands[1] in options:
tile = options[commands[1]](tile)
return tile
else:
dual_print('Error, invalid argument')
except:
dual_print('You need to provide an argument. Try \'list\', \'new\', \'delete\', \'edit\', or \'view\'.')
#raise
elif commands[0] == 'help':
try:
if commands[1] not in options:
raise ValueError
else:
dual_print('Extended help menu is coming soon')
dual_print('Valid arguments are \'field\', \'help\', \'goto\', \'save\', \'exit\'.')
except:
dual_print('Valid commands and arguments are \'field\', \'help\', \'goto\', \'save\', \'exit\'.')
#raise
elif commands[0] == 'save':
return 'save'
elif commands[0] == 'exit':
dual_print('Exiting program.')
sys.exit()
elif commands[0] == 'goto':
while 1:
try:
new_x = int(dual_input('Enter the X-position of the tile you wish to view: '))
except:
dual_print('Error: Not a Number.')
continue
else:
if validate_position(new_x, map.width):
break
else:
dual_print('Error: That position is out of bounds.')
continue
while 1:
try:
new_y = int(dual_input('Enter the Y-position of the tile you wish to view: '))
except:
dual_print('Error: Not a Number.')
continue
else:
if validate_position(new_y, map.height):
break
else:
dual_print('Error: That position is out of bounds.')
continue
return((new_x, new_y))
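# Hedged sketch (not part of the original file) of how parse_command() is meant
# to be driven; the tile object and loop structure here are assumptions based
# on the functions above:
#     command = get_commands()
#     result = parse_command(command, current_tile)
#     # result is either an updated tile, the string 'save' (export the map),
#     # or an (x, y) tuple returned by 'goto' that selects another tile.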
def setup():
while 1:
try:
MAP_NAME = str(dual_input('Please name the map: '))
except:
print("Invalid name.")
continue
else:
break
while 1:
try:
TILE_WIDTH = int(dual_input('What should the width of each tile be?: '))
except:
print("Error: Not a Number")
continue
else:
break
= False
status = udoc.do_import(mock_cmdp)
self.assertFalse(status)
mock_cmdp.reset_mock()
udoc = udocker.Udocker(mock_local)
mock_cmdp.get.side_effect = cmd_options
mock_chkimg.return_value = ("IMAGE", "")
mock_cmdp.missing_options.return_value = True
status = udoc.do_import(mock_cmdp)
self.assertFalse(status)
mock_cmdp.reset_mock()
udoc = udocker.Udocker(mock_local)
mock_cmdp.get.side_effect = cmd_options
mock_chkimg.return_value = ("IMAGE", "TAG")
mock_cmdp.missing_options.return_value = False
mock_dlocapi.return_value.import_toimage.return_value = False
status = udoc.do_import(mock_cmdp)
self.assertFalse(status)
mock_cmdp.reset_mock()
udoc = udocker.Udocker(mock_local)
mock_cmdp.get.side_effect = cmd_options
mock_chkimg.return_value = ("IMAGE", "TAG")
mock_cmdp.missing_options.return_value = False
mock_dlocapi.return_value.import_toimage.return_value = True
status = udoc.do_import(mock_cmdp)
# self.assertTrue(status)
# def test_08_do_export(self):
# """Test08 Udocker().do_export()."""
# self._init()
# mock_msg.level = 0
# def test_09_do_clone(self):
# """Test09 Udocker().do_clone()."""
# self._init()
# mock_msg.level = 0
@mock.patch('udocker.getpass')
@mock.patch('udocker.raw_input')
@mock.patch('udocker.CmdParser')
@mock.patch('udocker.KeyStore')
@mock.patch('udocker.DockerLocalFileAPI')
@mock.patch('udocker.DockerIoAPI')
@mock.patch('udocker.Msg')
@mock.patch('udocker.LocalRepository')
def test_10_do_login(self, mock_local, mock_msg, mock_dioapi,
mock_dlocapi, mock_ks, mock_cmdp,
mock_rinput, mock_gpass):
"""Test10 Udocker().do_login()."""
self._init()
mock_msg.level = 0
udoc = udocker.Udocker(mock_local)
mock_cmdp.get.side_effect = ["user", "pass", "" "", "", ]
mock_rinput.return_value = "user"
mock_gpass.return_value = "pass"
mock_ks.return_value.put.return_value = False
status = udoc.do_login(mock_cmdp)
self.assertFalse(status)
self.assertFalse(mock_rinput.called)
self.assertFalse(mock_gpass.called)
mock_rinput.reset_mock()
mock_gpass.reset_mock()
udoc = udocker.Udocker(mock_local)
mock_cmdp.get.side_effect = ["", "", "" "", "", ]
mock_rinput.return_value = "user"
mock_gpass.return_value = "pass"
mock_ks.return_value.put.return_value = False
status = udoc.do_login(mock_cmdp)
self.assertFalse(status)
# self.assertTrue(mock_rinput.called)
# self.assertTrue(mock_gpass.called)
udoc = udocker.Udocker(mock_local)
mock_cmdp.get.side_effect = ["", "", "" "", "", ]
mock_rinput.return_value = "user"
mock_gpass.return_value = "pass"
mock_ks.return_value.put.return_value = True
status = udoc.do_login(mock_cmdp)
# self.assertTrue(status)
@mock.patch('udocker.CmdParser')
@mock.patch('udocker.KeyStore')
@mock.patch('udocker.DockerLocalFileAPI')
@mock.patch('udocker.DockerIoAPI')
@mock.patch('udocker.Msg')
@mock.patch('udocker.LocalRepository')
def test_11_do_logout(self, mock_local, mock_msg, mock_dioapi,
mock_dlocapi, mock_ks, mock_cmdp):
"""Test11 Udocker().do_logout()."""
self._init()
mock_msg.level = 0
udoc = udocker.Udocker(mock_local)
mock_cmdp.get.side_effect = ["", "", "" "", "", ]
mock_ks.return_value.delete.return_value = False
status = udoc.do_logout(mock_cmdp)
self.assertFalse(status)
udoc = udocker.Udocker(mock_local)
mock_cmdp.get.side_effect = ["ALL", "", "" "", "", ]
mock_ks.return_value.delete.return_value = False
mock_ks.return_value.erase.return_value = True
status = udoc.do_logout(mock_cmdp)
# self.assertTrue(status)
# self.assertTrue(mock_ks.return_value.erase.called)
udoc = udocker.Udocker(mock_local)
mock_cmdp.get.side_effect = ["", "", "" "", "", ]
mock_ks.return_value.delete.return_value = True
status = udoc.do_logout(mock_cmdp)
# self.assertTrue(status)
# self.assertTrue(mock_ks.return_value.delete.called)
@mock.patch('udocker.Udocker._check_imagespec')
@mock.patch('udocker.CmdParser')
@mock.patch('udocker.KeyStore')
@mock.patch('udocker.DockerLocalFileAPI')
@mock.patch('udocker.DockerIoAPI')
@mock.patch('udocker.Msg')
@mock.patch('udocker.LocalRepository')
def test_12_do_pull(self, mock_local, mock_msg, mock_dioapi,
mock_dlocapi, mock_ks, mock_cmdp, mock_chkimg):
"""Test12 Udocker().do_pull()."""
self._init()
mock_msg.level = 0
udoc = udocker.Udocker(mock_local)
mock_cmdp.get.side_effect = ["", "", "" "", "", ]
mock_chkimg.return_value = ("", "TAG")
status = udoc.do_pull(mock_cmdp)
self.assertFalse(status)
udoc = udocker.Udocker(mock_local)
mock_cmdp.get.side_effect = ["", "", "" "", "", ]
mock_chkimg.return_value = ("IMAGE", "TAG")
mock_cmdp.missing_options.return_value = True
status = udoc.do_pull(mock_cmdp)
self.assertFalse(status)
udoc = udocker.Udocker(mock_local)
mock_cmdp.get.side_effect = ["", "", "" "", "", ]
mock_chkimg.return_value = ("IMAGE", "TAG")
mock_cmdp.missing_options.return_value = False
mock_dioapi.return_value.get.return_value = []
status = udoc.do_pull(mock_cmdp)
self.assertFalse(status)
udoc = udocker.Udocker(mock_local)
mock_cmdp.get.side_effect = ["", "", "" "", "", ]
mock_chkimg.return_value = ("IMAGE", "TAG")
mock_cmdp.missing_options.return_value = False
mock_dioapi.return_value.get.return_value = ["F1", "F2", ]
status = udoc.do_pull(mock_cmdp)
self.assertTrue(status)
@mock.patch('udocker.Udocker._create')
@mock.patch('udocker.CmdParser')
@mock.patch('udocker.KeyStore')
@mock.patch('udocker.DockerLocalFileAPI')
@mock.patch('udocker.DockerIoAPI')
@mock.patch('udocker.Msg')
@mock.patch('udocker.LocalRepository')
def test_13_do_create(self, mock_local, mock_msg, mock_dioapi,
mock_dlocapi, mock_ks, mock_cmdp, mock_create):
"""Test13 Udocker().do_create()."""
self._init()
mock_msg.level = 0
udoc = udocker.Udocker(mock_local)
mock_cmdp.missing_options.return_value = True
mock_cmdp.get.side_effect = ["", "", "" "", "", ]
mock_create.return_value = ""
status = udoc.do_create(mock_cmdp)
self.assertFalse(status)
udoc = udocker.Udocker(mock_local)
mock_cmdp.missing_options.return_value = False
mock_cmdp.get.side_effect = ["", "", "" "", "", ]
mock_create.return_value = ""
status = udoc.do_create(mock_cmdp)
self.assertFalse(status)
udoc = udocker.Udocker(mock_local)
mock_cmdp.missing_options.return_value = False
mock_cmdp.get.side_effect = ["", "", "" "", "", ]
mock_create.return_value = "CONTAINER_ID"
status = udoc.do_create(mock_cmdp)
self.assertTrue(status)
@mock.patch('udocker.ContainerStructure')
@mock.patch('udocker.Udocker._check_imagespec')
@mock.patch('udocker.DockerIoAPI')
@mock.patch('udocker.Msg')
@mock.patch('udocker.LocalRepository')
def test_14__create(self, mock_local, mock_msg, mock_dioapi, mock_chkimg,
mock_cstruct):
"""Test14 Udocker()._create()."""
self._init()
mock_msg.level = 0
udoc = udocker.Udocker(mock_local)
mock_dioapi.return_value.is_repo_name.return_value = False
status = udoc._create("IMAGE:TAG")
self.assertFalse(status)
udoc = udocker.Udocker(mock_local)
mock_dioapi.return_value.is_repo_name.return_value = True
mock_chkimg.return_value = ("", "TAG")
mock_cstruct.return_value.create.return_value = True
status = udoc._create("IMAGE:TAG")
self.assertFalse(status)
udoc = udocker.Udocker(mock_local)
mock_dioapi.return_value.is_repo_name.return_value = True
mock_chkimg.return_value = ("IMAGE", "TAG")
mock_cstruct.return_value.create.return_value = True
status = udoc._create("IMAGE:TAG")
self.assertTrue(status)
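# Note on the mock wiring used throughout these tests: stacked @mock.patch
# decorators are applied bottom-up, so the decorator closest to the test method
# ('udocker.LocalRepository') is passed as the first mock argument (mock_local)
# and the top-most decorator as the last. A hedged mini-sketch of the pattern:
#     @mock.patch('udocker.Msg')              # -> mock_msg (second argument)
#     @mock.patch('udocker.LocalRepository')  # -> mock_local (first argument)
#     def test_xx(self, mock_local, mock_msg):
#         ...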
# @mock.patch('udocker.CmdParser')
# @mock.patch('udocker.LocalRepository')
# def test_15__get_run_options(self, mock_local, mock_cmdp):
# """Test15 Udocker()._get_run_options()"""
# self._init()
# udocker.PRootEngine = mock.MagicMock()
# udocker.PRootEngine.opt = dict()
# udocker.PRootEngine.opt["vol"] = []
# udocker.PRootEngine.opt["env"] = []
# mock_cmdp.get.return_value = "VALUE"
# udoc = udocker.Udocker(mock_local)
# udoc._get_run_options(mock_cmdp, udocker.PRootEngine)
# self.assertEqual(udocker.PRootEngine.opt["dns"], "VALUE")
@mock.patch('udocker.Udocker._get_run_options')
@mock.patch('udocker.PRootEngine')
@mock.patch('udocker.CmdParser')
@mock.patch('udocker.KeyStore')
@mock.patch('udocker.DockerLocalFileAPI')
@mock.patch('udocker.DockerIoAPI')
@mock.patch('udocker.Msg')
@mock.patch('udocker.LocalRepository')
@mock.patch('udocker.LocalRepository.del_container')
@mock.patch('udocker.os.path.realpath')
def test_16_do_run(self, mock_realpath, mock_del, mock_local,
mock_msg, mock_dioapi, mock_dlocapi,
mock_ks, mock_cmdp, mock_eng, mock_getopt):
"""Test16 Udocker().do_run()."""
self._init()
mock_msg.level = 0
mock_realpath.return_value = "/tmp"
mock_cmdp.return_value.missing_options.return_value = True
mock_cmdp.return_value.get.side_effect = ["", "", "" "", "", ]
udoc = udocker.Udocker(mock_local)
status = udoc.do_run(mock_cmdp)
self.assertFalse(status)
mock_local.reset_mock()
mock_cmdp.reset_mock()
mock_eng.reset_mock()
mock_cmdp.return_value.missing_options.return_value = False
mock_cmdp.return_value.get.side_effect = ["", "", "" "", "", ]
udocker.Config.location = "/"
mock_eng.return_value.run.return_value = True
udoc = udocker.Udocker(mock_local)
status = udoc.do_run(mock_cmdp)
self.assertFalse(status)
mock_local.reset_mock()
mock_cmdp.reset_mock()
mock_eng.reset_mock()
mock_cmdp.return_value.missing_options.return_value = False
mock_cmdp.return_value.get.side_effect = ["", "", "" "", "", ]
udocker.Config.location = "/"
mock_eng.return_value.run.return_value = False
udoc = udocker.Udocker(mock_local)
status = udoc.do_run(mock_cmdp)
self.assertFalse(status)
mock_local.reset_mock()
mock_cmdp.reset_mock()
mock_eng.reset_mock()
mock_del.reset_mock()
mock_cmdp.return_value.missing_options.return_value = False
mock_cmdp.return_value.get.side_effect = ["", "", "--rm" "", "", ]
udocker.Config.location = "/"
mock_eng.return_value.run.return_value = False
mock_local.return_value.isprotected_container.return_value = True
mock_del.return_value = True
udoc = udocker.Udocker(mock_local)
status = udoc.do_run(mock_cmdp)
self.assertFalse(mock_del.called)
self.assertFalse(status)
mock_local.reset_mock()
mock_cmdp.reset_mock()
mock_eng.reset_mock()
mock_del.reset_mock()
mock_cmdp.return_value.missing_options.return_value = False
mock_cmdp.return_value.get.side_effect = ["", "", "--rm" "", "", ]
udocker.Config.location = "/"
mock_eng.return_value.run.return_value = False
mock_local.return_value.isprotected_container.return_value = False
mock_del.return_value = True
udoc = udocker.Udocker(mock_local)
status = udoc.do_run(mock_cmdp)
# self.assertTrue(mock_del.called)
self.assertFalse(status)
mock_local.reset_mock()
mock_cmdp.reset_mock()
mock_eng.reset_mock()
mock_cmdp.return_value.missing_options.return_value = False
mock_cmdp.return_value.get.side_effect = ["", "", "" "", "", ]
udocker.Config.location = ""
mock_local.return_value.get_container_id.return_value = ""
mock_eng.return_value.run.return_value = True
udoc = udocker.Udocker(mock_local)
status = udoc.do_run(mock_cmdp)
self.assertFalse(status)
mock_local.reset_mock()
mock_cmdp.reset_mock()
mock_eng.reset_mock()
mock_cmdp.return_value.missing_options.return_value = False
mock_cmdp.return_value.get.side_effect = ["", "", "" "", "", ]
udocker.Config.location = ""
mock_local.return_value.get_container_id.return_value = "CONTAINER_ID"
mock_eng.return_value.run.return_value = True
udoc = udocker.Udocker(mock_local)
status = udoc.do_run(mock_cmdp)
# self.assertTrue(status)
mock_local.reset_mock()
mock_cmdp.reset_mock()
mock_eng.reset_mock()
mock_cmdp.return_value.missing_options.return_value = False
mock_cmdp.return_value.get.side_effect = ["", "", "" "", "NAME", ]
udocker.Config.location = ""
mock_local.return_value.get_container_id.return_value = "CONTAINER_ID"
mock_eng.return_value.run.return_value = True
mock_local.return_value.set_container_name.return_value = True
udoc = udocker.Udocker(mock_local)
status = udoc.do_run(mock_cmdp)
# self.assertTrue(status)
mock_local.reset_mock()
mock_cmdp.reset_mock()
mock_eng.reset_mock()
mock_cmdp.return_value.missing_options.return_value = False
mock_cmdp.return_value.get.side_effect = ["", "", "" "", "NAME", ]
udocker.Config.location = ""
mock_local.return_value.get_container_id.return_value = "CONTAINER_ID"
mock_eng.return_value.run.return_value = True
mock_local.return_value.set_container_name.return_value = False
udoc = udocker.Udocker(mock_local)
status = udoc.do_run(mock_cmdp)
self.assertFalse(status)
@mock.patch('udocker.CmdParser')
@mock.patch('udocker.KeyStore')
@mock.patch('udocker.DockerLocalFileAPI')
@mock.patch('udocker.DockerIoAPI')
@mock.patch('udocker.Msg')
@mock.patch('udocker.LocalRepository')
def test_17_do_images(self, mock_local, mock_msg, mock_dioapi,
mock_dlocapi, mock_ks, mock_cmdp):
"""Test17 Udocker().do_images()."""
self._init()
mock_msg.level = 0
udoc = udocker.Udocker(mock_local)
mock_cmdp.missing_options.return_value = True
mock_cmdp.get.side_effect = ["", "", "" "", "", ]
status = udoc.do_images(mock_cmdp)
self.assertFalse(status)
udoc = udocker.Udocker(mock_local)
mock_cmdp.missing_options.return_value = False
mock_cmdp.get.side_effect = ["", "", "" "", "", ]
mock_local.get_imagerepos.return_values = []
status = udoc.do_images(mock_cmdp)
self.assertTrue(status)
udoc = udocker.Udocker(mock_local)
mock_cmdp.missing_options.return_value = False
mock_cmdp.get.side_effect = ["", "", "" "", "", ]
mock_local.get_imagerepos.return_value = [("I1", "T1"), ("I2", "T2"), ]
mock_local.isprotected_imagerepo.return_value = True
status = udoc.do_images(mock_cmdp)
self.assertTrue(status)
self.assertTrue(mock_local.isprotected_imagerepo.called)
udoc = udocker.Udocker(mock_local)
mock_cmdp.missing_options.return_value = False
mock_cmdp.get.side_effect = ["LONG", "", "" "", "", ]
mock_local.get_imagerepos.return_value = [("I1", "T1"), ("I2", "T2"), ]
mock_local.isprotected_imagerepo.return_value = True
mock_local.get_layers.return_value = []
status = udoc.do_images(mock_cmdp)
self.assertTrue(status)
self.assertTrue(mock_local.get_layers.called)
@mock.patch('udocker.CmdParser')
@mock.patch('udocker.KeyStore')
@mock.patch('udocker.DockerLocalFileAPI')
@mock.patch('udocker.DockerIoAPI')
@mock.patch('udocker.Msg')
@mock.patch('udocker.LocalRepository')
def test_18_do_ps(self, mock_local, mock_msg, mock_dioapi,
mock_dlocapi, mock_ks, mock_cmdp):
"""Test18 Udocker().do_ps()."""
self._init()
mock_msg.level = 0
udoc = udocker.Udocker(mock_local)
mock_cmdp.missing_options.return_value = True
mock_cmdp.get.side_effect = ["", "", "" "", "", ]
status = udoc.do_ps(mock_cmdp)
self.assertFalse(status)
udoc = udocker.Udocker(mock_local)
mock_cmdp.missing_options.return_value = False
mock_cmdp.get.side_effect = ["", "", "" "", "", ]
mock_local.get_containers_list.return_value = []
udoc.do_ps(mock_cmdp)
self.assertTrue(mock_local.get_containers_list.called)
udoc = udocker.Udocker(mock_local)
mock_cmdp.missing_options.return_value = False
mock_cmdp.get.side_effect = ["", "", "" "", "", ]
mock_local.get_containers_list.return_value = [("ID", "NAME", ""), ]
mock_local.isprotected_container.return_value = True
mock_local.iswriteable_container.return_value = True
udoc.do_ps(mock_cmdp)
self.assertTrue(mock_local.isprotected_container.called)
@mock.patch('udocker.CmdParser')
@mock.patch('udocker.KeyStore')
@mock.patch('udocker.DockerLocalFileAPI')
@mock.patch('udocker.DockerIoAPI')
@mock.patch('udocker.Msg')
@mock.patch('udocker.LocalRepository')
def test_19_do_rm(self, mock_local, mock_msg, mock_dioapi,
mock_dlocapi, mock_ks, mock_cmdp):
"""Test19 Udocker().do_rm()."""
self._init()
mock_msg.level = 0
udoc = udocker.Udocker(mock_local)
mock_cmdp.missing_options.return_value = True
mock_cmdp.get.side_effect = ["", "", "" "", "", ]
status = udoc.do_rm(mock_cmdp)
self.assertFalse(status)
udoc = udocker.Udocker(mock_local)
mock_cmdp.missing_options.return_value = False
mock_cmdp.get.side_effect = ["", "", "" "", "", ]
status = udoc.do_rm(mock_cmdp)
self.assertFalse(status)
udoc = udocker.Udocker(mock_local)
mock_cmdp.missing_options.return_value = False
mock_cmdp.get.side_effect = ["X", "12", "" "", "", ]
mock_local.get_container_id.return_value = ""
status = udoc.do_rm(mock_cmdp)
self.assertTrue(status)
udoc = udocker.Udocker(mock_local)
mock_cmdp.missing_options.return_value = False
mock_cmdp.get.side_effect = ["X", "1", "" "", "", ]
mock_local.get_container_id.return_value = "1"
mock_local.isprotected_container.return_value = True
status = udoc.do_rm(mock_cmdp)
self.assertTrue(status)
udoc = udocker.Udocker(mock_local)
mock_cmdp.missing_options.return_value = False
mock_cmdp.get.side_effect = ["X", "1", "" "", "", ]
mock_local.get_container_id.return_value = "1"
mock_local.isprotected_container.return_value = False
status = udoc.do_rm(mock_cmdp)
self.assertTrue(status)
@mock.patch('udocker.Udocker._check_imagespec')
@mock.patch('udocker.CmdParser')
@mock.patch('udocker.KeyStore')
@mock.patch('udocker.DockerLocalFileAPI')
@mock.patch('udocker.DockerIoAPI')
@mock.patch('udocker.Msg')
@mock.patch('udocker.LocalRepository')
def test_20_do_rmi(self, mock_local, mock_msg, mock_dioapi,
mock_dlocapi, mock_ks, mock_cmdp, mock_chkimg):
"""Test20 Udocker().do_rmi()."""
self._init()
mock_msg.level = 0
udoc = udocker.Udocker(mock_local)
mock_cmdp.missing_options.return_value = True
mock_cmdp.get.side_effect = ["", "", "" "", "", ]
mock_chkimg.return_value = ("IMAGE", "TAG")
status = udoc.do_rmi(mock_cmdp)
self.assertFalse(status)
udoc = udocker.Udocker(mock_local)
mock_cmdp.missing_options.return_value = False
mock_cmdp.get.side_effect = ["", "", "" "", "", ]
mock_chkimg.return_value = ("", "TAG")
status = udoc.do_rmi(mock_cmdp)
crossLP.addGlobalParameter('lim', 1.0)
crossLP.setCutoffDistance(3.0)
lambdas_full = np.loadtxt(TypesTable, delimiter=',')
lambdas = np.triu(lambdas_full) + np.triu(lambdas_full, k=1).T
diff_types = len(lambdas)
print(len(lambdas))
lambdas = list(np.ravel(lambdas))
fTypes = self.mm.Discrete2DFunction(diff_types,diff_types,lambdas)
crossLP.addTabulatedFunction('mapType', fTypes)
AB_types = self.changeType_list()
crossLP.addPerParticleParameter("t")
for i in range(self.N):
value = [float(AB_types[i])]
crossLP.addParticle(value)
self.forceDict["CustomTypes"] = crossLP
def changeType_list(self):
R"""
Internal function for indexing unique chromatin types.
"""
n = set(self.type_list)
lista = np.array(self.type_list)
k=0
for t in n:
lista[lista==t] = k
k += 1
return(list(lista))
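# Hedged example (not in the original source): for self.type_list equal to
# ['A1', 'A1', 'B1'] the method above returns something like ['0', '0', '1'],
# i.e. each distinct chromatin type is replaced by a consecutive index. Note
# that the index assigned to a given type depends on Python set iteration
# order, so it is not guaranteed to follow the order used in the TypesTable.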
def addLoops(self, mu=3.22, rc = 1.78, X=-1.612990, looplists=None):
R"""
Adds the Loops interactions according to the MiChroM energy function parameters reported in "<NAME>., <NAME>., <NAME>., <NAME>. and <NAME>., 2016. Transferable model for chromosome architecture. Proceedings of the National Academy of Sciences, 113(43), pp.12168-12173".
The parameters :math:`\mu` (mu) and rc are part of the probability of crosslink function :math:`f(r_{i,j}) = \frac{1}{2}\left( 1 + \tanh\left[\mu(r_c - r_{i,j})\right] \right)`, where :math:`r_{i,j}` is the spatial distance between loci (beads) *i* and *j*.
.. note:: For Multi-chain simulations, the ordering of the loop list files is important! The order of the files should be the same as used in the other functions.
Args:
mu (float, required):
Parameter in the probability of crosslink function. (Default value = 3.22).
rc (float, required):
Parameter in the probability of crosslink function, :math:`f(rc) = 0.5`. (Default value = 1.78).
X (float, required):
Loop interaction parameter. (Default value = -1.612990).
looplists (file, optional):
A two-column text file containing the index *i* and *j* of a loci pair that form loop interactions. (Default value: :code:`None`).
"""
ELoop = "qsi*0.5*(1. + tanh(mu*(rc - r)))"
Loop = self.mm.CustomBondForce(ELoop)
Loop.addGlobalParameter('mu', mu)
Loop.addGlobalParameter('rc', rc)
Loop.addGlobalParameter('qsi', X)
self.getLoops(looplists)
for p in self.loopPosition:
Loop.addBond(p[0]-1,p[1]-1)
self.forceDict["Loops"] = Loop
def addCustomIC(self, mu=3.22, rc = 1.78, dinit=3, dend=200, IClist=None):
R"""
Adds the Ideal Chromosome potential using custom values for interactions between beads separated by a genomic distance :math:`d`. The parameters :math:`\mu` (mu) and rc are part of the probability of crosslink function :math:`f(r_{i,j}) = \frac{1}{2}\left( 1 + \tanh\left[\mu(r_c - r_{i,j})\right] \right)`, where :math:`r_{i,j}` is the spatial distance between loci (beads) *i* and *j*.
Args:
mu (float, required):
Parameter in the probability of crosslink function. (Default value = 3.22).
rc (float, required):
Parameter in the probability of crosslink function, :math:`f(rc) = 0.5`. (Default value = 1.78).
dinit (int, required):
The first neighbor in sequence separation (Genomic Distance) to be considered in the Ideal Chromosome potential. (Default value = 3).
dend (int, required):
The last neighbor in sequence separation (Genomic Distance) to be considered in the Ideal Chromosome potential. (Default value = 200).
IClist (file, optional):
A one-column text file containing the energy interaction values for loci *i* and *j* separated by a genomic distance :math:`d`. (Default value: :code:`None`).
"""
energyIC = ("step(d-dinit)*IClist(d)*step(dend -d)*f*step(r-lim);"
"f=0.5*(1. + tanh(mu*(rc - r)));"
"d=abs(idx2-idx1)")
IC = self.mm.CustomNonbondedForce(energyIC)
IClist = np.append(np.zeros(dend),IClist)[:-dend]
tabIClist = self.mm.Discrete1DFunction(IClist)
IC.addTabulatedFunction('IClist', tabIClist)
IC.addGlobalParameter('dinit', dinit)
IC.addGlobalParameter('dend', dend)
IC.addGlobalParameter('mu', mu)
IC.addGlobalParameter('rc', rc)
IC.addGlobalParameter('lim', 1.0)
IC.setCutoffDistance(3.0)
IC.addPerParticleParameter("idx")
for i in range(self.N):
IC.addParticle([i])
self.forceDict["CustomIC"] = IC
def addIdealChromosome(self, mu=3.22, rc = 1.78, Gamma1=-0.030,Gamma2=-0.351,
Gamma3=-3.727, dinit=3, dend=500):
R"""
Adds the Ideal Chromosome potential for interactions between beads separated by a genomic distance :math:`d` according to the MiChroM energy function parameters reported in "<NAME>., <NAME>., <NAME>., <NAME>. and <NAME>., 2016. Transferable model for chromosome architecture. Proceedings of the National Academy of Sciences, 113(43), pp.12168-12173".
The set of parameters :math:`\{\gamma_d\}` of the Ideal Chromosome potential is fitted in a function: :math:`\gamma(d) = \frac{\gamma_1}{\log{(d)}} +\frac{\gamma_2}{d} +\frac{\gamma_3}{d^2}`.
The parameters :math:`\mu` (mu) and rc are part of the probability of crosslink function :math:`f(r_{i,j}) = \frac{1}{2}\left( 1 + \tanh\left[\mu(r_c - r_{i,j})\right] \right)`, where :math:`r_{i,j}` is the spatial distance between loci (beads) *i* and *j*.
Args:
mu (float, required):
Parameter in the probability of crosslink function. (Default value = 3.22).
rc (float, required):
Parameter in the probability of crosslink function, :math:`f(rc) = 0.5`. (Default value = 1.78).
Gamma1 (float, required):
Ideal Chromosome parameter. (Default value = -0.030).
Gamma2 (float, required):
Ideal Chromosome parameter. (Default value = -0.351).
Gamma3 (float, required):
Ideal Chromosome parameter. (Default value = -3.727).
dinit (int, required):
The first neighbor in sequence separation (Genomic Distance) to be considered in the Ideal Chromosome potential. (Default value = 3).
dend (int, required):
The last neighbor in sequence separation (Genomic Distance) to be considered in the Ideal Chromosome potential. (Default value = 500).
"""
energyIC = ("step(d-dinit)*(gamma1/log(d) + gamma2/d + gamma3/d^2)*step(dend -d)*f;"
"f=0.5*(1. + tanh(mu*(rc - r)));"
"d=abs(idx1-idx2)")
IC = self.mm.CustomNonbondedForce(energyIC)
IC.addGlobalParameter('gamma1', Gamma1)
IC.addGlobalParameter('gamma2', Gamma2)
IC.addGlobalParameter('gamma3', Gamma3)
IC.addGlobalParameter('dinit', dinit)
IC.addGlobalParameter('dend', dend)
IC.addGlobalParameter('mu', mu)
IC.addGlobalParameter('rc', rc)
IC.setCutoffDistance(3.0)
IC.addPerParticleParameter("idx")
for i in range(self.N):
IC.addParticle([i])
self.forceDict["IdealChromosome"] = IC
def addMultiChainIC(self, mu=3.22, rc = 1.78, Gamma1=-0.030,Gamma2=-0.351,
Gamma3=-3.727, dinit=3, dend=500, chains=None):
R"""
Adds the Ideal Chromosome potential for multiple chromosome simulations. The interactions between beads separated by a genomic distance :math:`d` is applied according to the MiChroM energy function parameters reported in "<NAME>., <NAME>., <NAME>., <NAME>. and <NAME>., 2016. Transferable model for chromosome architecture. Proceedings of the National Academy of Sciences, 113(43), pp.12168-12173".
The set of parameters :math:`\{\gamma_d\}` of the Ideal Chromosome potential is fitted in a function: :math:`\gamma(d) = \frac{\gamma_1}{\log{(d)}} +\frac{\gamma_2}{d} +\frac{\gamma_3}{d^2}`.
The parameters :math:`\mu` (mu) and rc are part of the probability of crosslink function :math:`f(r_{i,j}) = \frac{1}{2}\left( 1 + \tanh\left[\mu(r_c - r_{i,j})\right] \right)`, where :math:`r_{i,j}` is the spatial distance between loci (beads) *i* and *j*.
Args:
mu (float, required):
Parameter in the probability of crosslink function. (Default value = 3.22).
rc (float, required):
Parameter in the probability of crosslink function, :math:`f(rc) = 0.5`. (Default value = 1.78).
Gamma1 (float, required):
Ideal Chromosome parameter. (Default value = -0.030).
Gamma2 (float, required):
Ideal Chromosome parameter. (Default value = -0.351).
Gamma3 (float, required):
Ideal Chromosome parameter. (Default value = -3.727).
dinit (int, required):
The first neighbor in sequence separation (Genomic Distance) to be considered in the Ideal Chromosome potential. (Default value = 3).
dend (int, required):
The last neighbor in sequence separation (Genomic Distance) to be considered in the Ideal Chromosome potential. (Default value = 500).
chains (list of tuples, optional):
The list of chains in the format [(start, end, isRing)]. isRing is a boolean whether the chromosome chain is circular or not (Used to simulate bacteria genome, for example). The particle range should be semi-open, i.e., a chain :math:`(0,3,0)` links the particles :math:`0`, :math:`1`, and :math:`2`. If :code:`bool(isRing)` is :code:`True` , the first and last particles of the chain are linked, forming a ring. The default value links all particles of the system into one chain. (Default value: :code:`[(0, None, 0)]`).
"""
energyIC = ("step(d-dinit)*(gamma1/log(d) + gamma2/d + gamma3/d^2)*step(dend-d)*f;"
"f=0.5*(1. + tanh(mu*(rc - r)));"
"d=abs(idx1-idx2)")
IC = self.mm.CustomNonbondedForce(energyIC)
IC.addGlobalParameter('gamma1', Gamma1)
IC.addGlobalParameter('gamma2', Gamma2)
IC.addGlobalParameter('gamma3', Gamma3)
IC.addGlobalParameter('dinit', dinit)
IC.addGlobalParameter('dend', dend)
IC.addGlobalParameter('mu', mu)
IC.addGlobalParameter('rc', rc)
IC.setCutoffDistance(3)
groupList = list(range(chains[0],chains[1]+1))
IC.addInteractionGroup(groupList,groupList)
IC.addPerParticleParameter("idx")
for i in range(self.N):
IC.addParticle([i])
self.forceDict["IdealChromosome_chain_"+str(chains[0])] = IC
def _loadParticles(self):
R"""
Internal function that loads the chromosome beads into the simulations system.
"""
if not hasattr(self, "system"):
return
if not self.loaded:
for mass in self.masses:
self.system.addParticle(self.mass * mass)
if self.verbose == True:
print("%d particles loaded" % self.N)
self.loaded = True
def _applyForces(self):
R"""Internal function that adds all loci to the system and applies all the forces present in the forcedict."""
if self.forcesApplied == True:
return
self._loadParticles()
exc = self.bondsForException
print("Number of exceptions:", len(exc))
if len(exc) > 0:
exc = np.array(exc)
exc = np.sort(exc, axis=1)
exc = [tuple(i) for i in exc]
exc = list(set(exc))
for i in list(self.forceDict.keys()):
force = self.forceDict[i]
if hasattr(force,
#! /usr/env/python
r"""
Landlab's Continuous-Time Stochastic (CTS) cellular automata modeling package.
Overview
--------
A CellLab CTS model implements a particular type of cellular
automaton (CA): a continuous-time stochastic CA. The approach is based on that
of Narteau et al. (2002, 2009) and Rozier and Narteau (2014). Like a normal
CA, the domain consists of a lattice of cells, each of which has a discrete
state. Unlike a conventional CA, the updating process is stochastic, and takes
place in continuous rather than discrete time. Any given pair (or "doublet")
of adjacent cell states has a certain specified probability of transition to a
different pair of states. The transition probability is given in the form of an
average *transition rate*, :math:`\lambda` (with dimensions of 1/T); the actual
time of transition is a random variable drawn from an exponential probability
distribution with mean :math:`1/\lambda`.
Subclasses
----------
Landlab provides for several different lattice and connection types:
- RasterCTS: regular raster grid with transitions between horizontal and
vertical cell pairs
- OrientedRasterCTS: like a RasterLCA, but different transition rates can
be assigned to vertical and horizontal pairs. This property of
orientation can be used, for example, to implement rules representing
gravitational attraction, or flow of a fluid with a particular
direction.
- RasterD8CTS: like a RasterLCA, but includes diagonal as well as vertical
and horizontal cell pairs.
- OrientedRasterD8CTS: as above but orientation also matters.
- HexCTS: hexagonal grid
- OrientedHexCTS: hexagonal grid, with transition rates allowed to vary
according to orientation.
Encoding of "states"
--------------------
As in any traditional cellular automaton model, a LandlabCellularAutomaton
contains a grid of cells ("nodes" in Landlab parlance), each of which has a
discrete state. States are represented by integers (0, 1, ... N).
In addition, every active link has an *orientation code* and a *link state
code*. The orientation code represents the orientation of the link in space: is
it "vertical" (aligned with the y axis), "horizontal" (aligned with x), or in
some other orientation? The number of possible orientations depends on the
subclass. The base class has only one orientation code (0) (meaning
"orientation doesn't matter), but this is overridden in some of the subclasses.
For example, the OrientedRasterLCA has two orientation codes (0 and 1, for
vertical and horizontal), while the OrientedHexLCA has three (representing the
three axes in a hex-cell / triagonal grid).
Each active link also has a *link state code*. The *state* of a link refers to
its particular combination of nodes and its orientation. For example, link
state 1 refers to a link in which the tail-node has state 0, the head-node has
state 1, and the orientation code is 0. The number of possible link states is
equal to R N^2, where R is the number of orientations (1 to 3, depending on the
subclass) and N is the number of possible node states. The simplest possible
Landlab CA model would have just one orientation code and two possible cell
states, so that there are four unique link states. These would be represented
by the tuples of (tail-node state, head-node state, orientation) as follows::
link state 0 = (0, 0, 0)
link state 1 = (0, 1, 0)
link state 2 = (1, 0, 0)
link state 3 = (1, 1, 0)
Main data structures
--------------------
node_state : 1d array of int (x number of nodes in grid)
Node-based grid of node-state codes. This is the grid of cell (sic) states.
link_state_dict : dictionary
Keys are 3-element tuples that represent the cell-state pairs and
orientation code for each possible link type; values are the corresponding
link-state codes. Allows you to look up the link-state code corresponding
to a particular pair of adjacent nodes with a particular orientation.
node_pair : list (x number of possible link states)
List of 3-element tuples representing all the various link states. Allows
you to look up the node states and orientation corresponding to a
particular link-state ID.
priority_queue : PriorityQueue object containing event records
Queue containing all future transition events, sorted by time of occurrence
(from soonest to latest).
next_update : 1d array (x number of links)
Time (in the future) at which the link will undergo its next transition.
You might notice that the update time for every scheduled transition is
also stored with each event in the event queue. Why store it twice?
Because a scheduled event might be invalidated after the event has been
scheduled (because another transition has changed one of a link's two
nodes, for example). The way to tell whether a scheduled event is still
valid is to compare its time with the corresponding transition time in the
*next_update* array. If they are different, the event is discarded.
link_orientation : 1d array of int8 (x number of links)
Orientation code for each link.
link_state : 1d array of int (x number of links)
State code for each link.
n_trn : 1d array of int (x number of possible link states)
Number of transitions ("trn" stands for "transition") from a given link
state.
trn_to : 1d array of ints (x # transitions)
Stores the link-state code(s) to which a particular transition ID can
transition.
trn_rate : 1d array of floats (# transitions)
Rate associated with each link-state transition.
Created GT Sep 2014, starting from link_cap.py.
"""
import numpy as np
import pylab as plt
import landlab
from landlab.ca.cfuncs import (
PriorityQueue,
get_next_event_new,
push_transitions_to_event_queue,
run_cts_new,
)
from landlab.grid.nodestatus import NodeStatus
_NEVER = 1e50
_DEBUG = False
_CORE = NodeStatus.CORE
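# Hedged illustration (not part of the original module): enumerates link-state
# codes as (tail_state, head_state, orientation) tuples. For two node states
# and one orientation this reproduces link states 0-3 listed in the module
# docstring; the ordering used for multiple orientations is an assumption here.
def _example_link_state_codes(num_node_states, num_orientations=1):
    """Return a dict mapping link-state code -> (tail, head, orientation)."""
    codes = {}
    code = 0
    for orientation in range(num_orientations):
        for tail in range(num_node_states):
            for head in range(num_node_states):
                codes[code] = (tail, head, orientation)
                code += 1
    return codes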
class Transition(object):
"""A transition from one state to another.
Represents a transition from one state ("from_state") to another
("to_state") at a link. The transition probability is represented by a rate
parameter "rate", with dimensions of 1/T. The probability distribution of
time until the transition event occurs is exponential with mean 1/rate.
The optional name parameter allows the caller to assign a name to any given
transition.
Note that from_state and to_state can now be either integer IDs for the
standardised ordering of the link states (as before), or tuples explicitly
describing the node state at each end, and the orientation.
Orientation is 0: horizontal, L-R; 1: vertical, bottom-top.
For such a tuple, order is (left/bottom, right/top, orientation).
Transition() constructor sets 3 required properties and 2 optional
properties for a transition from one cell pair to another.
Parameters
----------
from_state : int
Code for the starting state of the cell pair (link)
to_state : int
Code for the new state of the cell pair (link)
rate : float
Average rate at which this transition occurs (dimension of 1/time)
name : string (optional)
Name for this transition
swap_properties : bool (optional)
Flag: should properties be exchanged between the two cells?
"""
def __init__(
self,
from_state,
to_state,
rate,
name=None,
swap_properties=False,
prop_update_fn=None,
):
"""Transition() constructor sets 3 required properties and 2 optional
properties for a transition from one cell pair to another.
Parameters
----------
from_state : int
Code for the starting state of the cell pair (link)
to_state : int
Code for the new state of the cell pair (link)
rate : float
Average rate at which this transition occurs (dimension of 1/time)
name : string (optional)
Name for this transition
swap_properties : bool (optional)
Flag: should properties be exchanged between the two cells?
"""
self.from_state = from_state
self.to_state = to_state
self.rate = rate
self.name = name
self.swap_properties = swap_properties
self.prop_update_fn = prop_update_fn
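# Hedged usage example, mirroring the doctest in CAPlotter below: a transition
# from link state (0, 1, 0) to (1, 1, 0) occurring at an average rate of 1.0
# (the name string is illustrative):
#     trn = Transition((0, 1, 0), (1, 1, 0), 1.0, name='pair transition')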
class CAPlotter(object):
"""Handle display of a CellLab-CTS grid.
CAPlotter() constructor keeps a reference to the CA model, and
optionally a colormap to be used with plots.
Parameters
----------
ca : LandlabCellularAutomaton object
Reference to a CA model
cmap : Matplotlib colormap, optional
Colormap to be used in plotting
Examples
--------
>>> from landlab import RasterModelGrid, HexModelGrid
>>> from landlab.ca.celllab_cts import Transition
>>> from landlab.ca.raster_cts import RasterCTS
>>> import numpy as np
>>> grid = RasterModelGrid((3, 5))
>>> nsd = {0 : 'zero', 1 : 'one'}
>>> trn_list = []
>>> trn_list.append(Transition((0, 1, 0), (1, 1, 0), 1.0))
>>> ins = np.arange(15) % 2
>>> ca = RasterCTS(grid, nsd, trn_list, ins)
>>> cap = CAPlotter(ca)
>>> cap.gridtype
'rast'
>>> cap._cmap.name
'jet'
>>> from landlab.ca.hex_cts import HexCTS
>>> import matplotlib
>>> grid = HexModelGrid((3, 3))
>>> ins
np.array(coords_move[i]) + np.array(delta)
else:
new_coord = np.array(coords_move[i]) - np.array(delta)
slab_move.append(species_move[i], new_coord, coords_are_cartesian=True)
return slab_move
def Find_Broken_Molecules(slab, sg, species_intact, coords_intact, unique_bulk_subgraphs):
"""
Use molecular identification method to find those molecules in the surface
that are different from that in the bulk.
Parameters
----------
slab: Atoms structure
The surface that is generated by ase library and might have broken molecules.
sg: list of Molecules
Unique Molecules in bulk Structure.
species_intact: list, ['specie_1', 'specie_2', ...]
A list of atomic species of intact molecules.
coords_intact: list, [[coord_1_1, coord_1_2, coord_1_3], ...]
A list of atomic cart_coords of intact molecules.
unique_bulk_subgraphs: list of graphs
A list of intact molecules' graphs. Note that every graph is this list
is unique
"""
slab_sg = StructureGraph.with_local_env_strategy(slab, JmolNN())
# enlarge the cell to a (3 * 3 * 1) super_cell
slab_supercell_sg = slab_sg * (3, 3, 1)
different_subgraphs_in_slab, slab_molecules = \
get_slab_different_subgraphs(slab_supercell_sg, unique_bulk_subgraphs)
slab_molecules = double_screen(slab_molecules, sg)
# the molecules in slab_original would be the template
#print("The number of molecules that need to be fixed : " +
# str(len(slab_molecules)))
# slab_molecules are the molecules that are broken and need to be fixed
delete_sites = reduced_sites(slab_molecules, slab)
# delete_list is the list of broken atoms
delete_list = []
for delete_site in delete_sites:
for i, atom in enumerate(slab):
if atom.is_periodic_image(delete_site):
delete_list.append(i)
break
species_all = slab.species
coords_all = slab.cart_coords
for i, atom in enumerate(slab):
temp = [i == delete for delete in delete_list]
if not any(temp):
species_intact.append(species_all[i])
coords_intact.append(coords_all[i])
delete_list = []
# remove intact molecules in the slab for convenience
#print("Delete all atoms!")
for i, atom in enumerate(slab):
delete_list.append(i)
slab.remove_sites(delete_list)
sites = []
for slab_molecule in slab_molecules:
for curr_site in slab_molecule:
curr_site = mg.PeriodicSite(curr_site.specie,
curr_site.coords,
slab.lattice,
coords_are_cartesian=True)
tmp = [curr_site.is_periodic_image(site) for site in sites]
if not any(tmp):
sites.append(curr_site)
for site in sites:
# add the broken molecules into the system
slab.append(species=site.specie, coords=site.coords,
coords_are_cartesian=True)
return slab
def get_broken_molecules(self, bulk_subgraphs, use_weights=False):
# compare each molecule in slab to each molecule in the bulk,
# get rid of isomorohic, molecules store the brokens
"""
Retrieve broken_subgraphs as molecules
Will return nonunique molecules, duplicates
present in the crystal (a duplicate defined as an
isomorphic subgraph).
Returns:
-------
: list of nonunique broken Molecules in Structure
"""
# creating a supercell is an easy way to extract
# molecules (and not, e.g., layers of a 2D crystal)
# without adding extra logic
supercell_sg = self*(3, 3, 1)
# make undirected to find connected subgraphs
supercell_sg.graph = nx.Graph(supercell_sg.graph)
# find subgraphs
all_subgraphs = list(nx.connected_component_subgraphs(supercell_sg.graph))
# discount subgraphs that lie across *supercell* boundaries
# these will subgraphs representing crystals
molecule_subgraphs = []
for subgraph in all_subgraphs:
intersects_boundary = any([d['to_jimage'] != (0, 0, 0)
for u, v, d in subgraph.edges(data=True)])
if not intersects_boundary:
molecule_subgraphs.append(subgraph)
# add specie names to graph to be able to test for isomorphism
for subgraph in molecule_subgraphs:
for n in subgraph:
subgraph.add_node(n, specie=str(supercell_sg.structure[n].specie))
# now define how we test for isomorphism
def node_match(n1, n2):
return n1['specie'] == n2['specie']
def edge_match(e1, e2):
if use_weights:
return e1['weight'] == e2['weight']
else:
return True
nm = iso.categorical_node_match("specie", "ERROR")
# remove complete molecules in subgraphs
different_subgraphs = []
start = time.time()
for subgraph in molecule_subgraphs:
already_present = [nx.is_isomorphic(subgraph, g,
node_match=nm)
for g in bulk_subgraphs]
if not any(already_present):
different_subgraphs.append(subgraph)
# get Molecule objects for each subgraph
molecules = []
for subgraph in different_subgraphs:
coords = [supercell_sg.structure[n].coords for n
in subgraph.nodes()]
species = [supercell_sg.structure[n].specie for n
in subgraph.nodes()]
molecule = Molecule(species, coords)
# shift so origin is at center of mass
#molecule = molecule.get_centered_molecule()
molecules.append(molecule)
return molecules
# now define how we test for isomorphism
def node_match(n1, n2):
"""the strategy for node matching in is_isomorphic.
Parameters
------
n1, n2 : node
Returns:
-------
True of false : bool
based on whether the species of two nodes are the same.
"""
return n1['specie'] == n2['specie']
def get_bulk_molecules(self, use_weights=False):
# get rid of the repetitve molecule in bulk, only left with unique molecule######
"""
Retrieve subgraphs as molecules, useful for extracting
molecules from periodic crystals.
Will only return unique molecules, not any duplicates
present in the crystal (a duplicate defined as an
isomorphic subgraph).
Parameters:
------
use_weights: (bool) If True, only treat subgraphs
as isomorphic if edges have the same weights. Typically,
this means molecules will need to have the same bond
lengths to be defined as duplicates, otherwise bond
lengths can differ. This is a fairly robust approach,
but will treat e.g. enantiomers as being duplicates.
Returns:
-------
list of unique Molecules in Structure
"""
# creating a supercell is an easy way to extract
# molecules (and not, e.g., layers of a 2D crystal)
# without adding extra logic
# enlarge the structureGraph object to a supercell
supercell_sg = self*(3, 3, 1)
# make undirected to find connected subgraphs
# create networkx undirected graph object to
supercell_sg.graph = nx.Graph(supercell_sg.graph)
# store the input graph
# find subgraphs
all_subgraphs = list(nx.connected_component_subgraphs(
supercell_sg.graph))
# add specie names to graph to be able to test for isomorphism
for subgraph in all_subgraphs:
for n in subgraph:
subgraph.add_node(n, specie=str(supercell_sg.structure[n].specie))
# now define how we test for isomorphism
def node_match(n1, n2):
return n1['specie'] == n2['specie']
def edge_match(e1, e2):
if use_weights:
return e1['weight'] == e2['weight']
else:
return True
nm = iso.categorical_node_match("specie", "ERROR")
# prune duplicate subgraphs
unique_subgraphs = []
for subgraph in all_subgraphs:
already_present = [nx.is_isomorphic(subgraph, g,
node_match=node_match,
edge_match=edge_match)
for g in unique_subgraphs]
if not any(already_present):
unique_subgraphs.append(subgraph)
# get Molecule objects for each subgraph
molecules = []
for subgraph in unique_subgraphs:
coords = [supercell_sg.structure[n].coords for n
in subgraph.nodes()]
species = [supercell_sg.structure[n].specie for n
in subgraph.nodes()]
molecule = Molecule(species, coords)
molecules.append(molecule)
return molecules, unique_subgraphs
#################convert to undirected mx.graph and then determine if isomorphic###############
def isomorphic_to(self, other):
"""
Checks if the graphs of two MoleculeGraphs are isomorphic to one
another. In order to prevent problems with misdirected edges, both
graphs are converted into undirected nx.Graph objects.
Parameters:
----------
other: MoleculeGraph object to be compared.
Returns:
-------
bool
"""
if self.molecule.composition != other.molecule.composition:
return False
else:
self_undir = self.graph.to_undirected()
other_undir = other.graph.to_undirected()
nm = iso.categorical_node_match("specie", "ERROR")
isomorphic = nx.is_isomorphic(self_undir, other_undir, node_match=nm)
return isomorphic
def reduced_sites(molecules, slab):
"""
Find atoms that appear again due to the periodicity.
Parameters:
-----------
molecules: List[molecule].
All molecules that might be within or out of the slab boundary.
slab: ASE structure.
Slab structure.
Returns:
--------
sites: List[atom].
"""
sites = []
for molecule in molecules:
for curr_site in molecule:
curr_site = PeriodicSite(
curr_site.specie, curr_site.coords, slab.lattice, coords_are_cartesian=True)
tmp = [curr_site.is_periodic_image(site) for site in sites]
if not any(tmp):
sites.append(curr_site)
return sites
def is_isomorphic(molecule1, molecule2):
"""
Determine whether two molecules are the same.
Parameters:
-----------
molecule1 and molecule2.
Returns:
--------
bool.
"""
return isomorphic_to(MoleculeGraph.with_local_env_strategy(molecule1, JmolNN()), MoleculeGraph.with_local_env_strategy(molecule2, JmolNN()))
def double_screen(slab_molecules, bulk_molecules):
"""
Double check with bulk if there is any molecule already present in bulk
"""
delete_list = []
for bulk_molecule in bulk_molecules:
for i, slab_molecule in enumerate(slab_molecules):
if is_isomorphic(bulk_molecule, slab_molecule):
delete_list.append(i)
tmp = [x for i, x in enumerate(slab_molecules) if i not in delete_list]
return tmp
def print_run_time(func):
"""
A wrapper that outputs the run_time of a function.
"""
@wraps(func)
def wrapper(*args, **kw):
local_time = time.time()
result = func(*args, **kw)
print('Current Function [%s] run time is %.2fs' %
(func.__name__, time.time() - local_time))
return result
return wrapper
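# Hedged usage sketch for the decorator above; _demo_noop is an illustrative
# name, not part of the original module.
@print_run_time
def _demo_noop():
    """Do nothing; exists only to demonstrate the timing wrapper."""
    pass
# Calling _demo_noop() prints a line such as:
#     Current Function [_demo_noop] run time is 0.00s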
def updatePOSCAR(output_file):
"""This function is used to correct the output file (POSCAR) of ase.
Parameters:
----------
output_file : str
The file of surface written by the write function of ase.
Returns:
-------
file : str
The file that is corrected.
"""
with open(output_file, 'r') as original_file:
lines = original_file.readlines()
line1 = lines[0]
lines.insert(5, " " + line1)
with open(output_file, 'w') as final_file_1:
for i in range(len(lines)):
final_file_1.writelines(lines[i])
structure = mg.Structure.from_file(output_file)
lattice = Lattice(structure.lattice.matrix)
frac_coords = lattice.get_fractional_coords(structure.cart_coords)
for i in range(frac_coords.shape[0]):
for j in range(frac_coords.shape[1]):
if abs(frac_coords[i][j] - 1) < 1e-5:
frac_coords[i][j] = 1
if abs(frac_coords[i][j] - 0) < 1e-5:
frac_coords[i][j] = 0
with open(output_file, 'r') as final_file_2:
lines = final_file_2.readlines()
lines[7] = 'Direct' + '\n'
for i in
== 0:
more_pages_to_query = False
# case where there are more dates to cover
if query_end_date < end_date:
# slide the date window forward and reset the pagination values
query_start_date = query_start_date + timedelta(days=time_delta)
query_end_date = query_start_date + timedelta(days=time_delta)
query_begin = 1
query_end = per_page
else:
more_days_to_query = False
# only append to results in RAM if necessary
if return_results: user_results += query_result['results']
if yield_results: yield query_result['results']
# update the total number of matches to fetch (=inf on error & start)
end = float(query_result['total_matches'])
# validate whether the request succeeded or errored
if query_result['status_code'] == 200:
# continue paginating over responses for the current date range
if query_end < end:
query_begin += per_page
query_end += per_page
# pagination is done, check whether to slide the date window forward
else:
more_pages_to_query = False
# case where there are more dates to cover
if query_end_date < end_date:
# slide the date window forward and reset the pagination values
query_start_date = query_start_date + timedelta(days=time_delta)
query_end_date = query_start_date + timedelta(days=time_delta)
query_begin = 1
query_end = per_page
# also potentially increment the time delta for longer strides
if query_result['total_matches'] < (per_page/2): time_delta += 1
# we're done!
else: more_days_to_query = False
# the request failed, so decrement time_delta or flail
else:
if time_delta > 1:
time_delta -= 1
else: print(' * Abort!')
if return_results:
yield user_results
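# Hedged summary of the adaptive windowing above (for readers of this method):
# the search walks a sliding date window of time_delta days across the full
# date range, paginating per_page results at a time within each window. When a
# finished window returned fewer than per_page/2 matches the stride grows by
# one day; when a request fails the stride shrinks by one day until it reaches
# a single day.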
def run_search(self,
query,
source_id,
begin=1,
end=10,
start_date='2017-12-01',
end_date='2017-12-02',
save_results=True,
get_text=True):
'''
Method that actually submits search requests. Called from self.search(),
which controls the logic that constructs the individual searches
@param: {str} query: the user's document query phrase
@param: {int} source_id: the source id to which queries will be addressed
@param: {int} begin: the starting result number to return
@param: {int} end: the ending result number to return
@param: {str} start_date: the starting query date in string format
@param: {str} end_date: the ending query date in string format
@param: {bool} save_results: save matches to mongo
@param: {bool} get_text: fetch full text content for each match
@returns: {obj} an object with metadata describing search results data
'''
print(' * querying for', query, source_id, begin, end, start_date, end_date)
request = '''
<SOAP-ENV:Envelope
xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/"
SOAP-ENV:encodingStyle= "http://schemas.xmlsoap.org/soap/encoding/">
<soap:Body xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/">
<Search xmlns="http://search.search.services.v1.wsapi.lexisnexis.com">
<binarySecurityToken>{0}</binarySecurityToken>
<sourceInformation>
<sourceIdList xmlns="http://common.search.services.v1.wsapi.lexisnexis.com">
<sourceId xmlns="http://common.services.v1.wsapi.lexisnexis.<EMAIL>">{1}</sourceId>
</sourceIdList>
</sourceInformation>
<query>{2}</query>
<projectId>{3}</projectId>
<searchOptions>
<sortOrder xmlns="http://common.search.services.v1.wsapi.lexisnexis.com">Date</sortOrder>
<dateRestriction xmlns="http://common.search.services.v1.wsapi.lexisnexis.com">
<startDate>{4}</startDate>
<endDate>{5}</endDate>
</dateRestriction>
</searchOptions>
<retrievalOptions>
<documentView xmlns="http://result.common.services.v1.wsapi.lexisnexis.com">Cite</documentView>
<documentMarkup xmlns="http://result.common.services.v1.wsapi.lexisnexis.com">Display</documentMarkup>
<documentRange xmlns="http://result.common.services.v1.wsapi.lexisnexis.com">
<begin>{6}</begin>
<end>{7}</end>
</documentRange>
</retrievalOptions>
</Search>
</soap:Body>
</SOAP-ENV:Envelope>
'''.format(self.auth_token, source_id, query, self.project_id,
start_date, end_date, begin, end)
url = self.get_url('Search')
response = requests.post(url=url, headers=self.get_headers(request), data=request)
soup = BeautifulSoup(response.text, 'lxml')
result_packet = {}
result_packet['status_code'] = response.status_code
result_packet['total_matches'] = 0
result_packet['results'] = []
try:
result_count_tag = find_tag_by_name(soup, 'documentsfound')
result_packet['total_matches'] = int(result_count_tag.get_text())
except AttributeError:
result_packet['total_matches'] = 0
if (result_packet['total_matches'] == 0) or (result_packet['status_code'] != 200):
return result_packet
else:
result_packet['results'] = self.get_documents(soup, get_text)
if save_results: self.save_results(result_packet['results'])
return result_packet
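# A minimal usage sketch for run_search (hedged: the source id, query, and dates below are
# illustrative placeholders, not values from this project; the client instance is assumed to
# already have auth_token, project_id, and a database configured):
#
#   packet = client.run_search(query='solar energy', source_id=8399, begin=1, end=10,
#                              start_date='2017-12-01', end_date='2017-12-02',
#                              save_results=False, get_text=False)
#   print(packet['status_code'], packet['total_matches'], len(packet['results']))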
def save_results(self, results):
'''
Save all search results to the database
@param: {arr} results: a list of search result objects
'''
if not self.db:
raise Exception('Please call set_db() before saving records')
if not results: return
composed_results = []
copied = copy.deepcopy(results)
for i in copied:
i['session_id'] = self.session_id
i['project_id'] = self.project_id
composed_results.append(i)
self.db.results.insert_many(composed_results)
def get_search_dates(self, start_date, end_date):
'''
@param {str} start_date: the starting date for the query: '2017-12-01'
@param {str} end_date: the ending date for the query: '2017-12-02'
@returns datetime, datetime: the start and end dates as datetime objects
'''
return self.string_to_date(start_date), self.string_to_date(end_date)
def string_to_date(self, string_date):
'''
@param: {str} string_date: a date in string format: '2017-12-01'
@returns: {datetime}: the input date in datetime format
'''
year, month, day = [int(i) for i in string_date.split('-')]
return datetime(year, month, day)
def date_to_string(self, datetime_date):
'''
@param: {datetime}: a datetime object
@returns: {str}: the input datetime in string format: 'YYYY-MM-DD'
'''
return datetime_date.strftime('%Y-%m-%d')
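# Round-trip example for the two instance helpers above:
#   self.string_to_date('2017-12-01')                  -> datetime(2017, 12, 1, 0, 0)
#   self.date_to_string(datetime(2017, 12, 1))         -> '2017-12-01'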
def get_documents(self, soup, get_text=True):
'''
@param: {BeautifulSoup}: the result of a search() query
@returns: {arr}: a list of objects, each describing a match's metadata
'''
# create a store of processed documents
docs = []
# find list of document containers
doc_containers = []
for i in soup.findChildren():
if 'documentcontainer' in i.name and 'documentcontainerlist' not in i.name:
doc_containers.append(i)
for idx, i in enumerate(doc_containers):
try:
doc = Document(i).metadata
if get_text:
doc['full_text'] = self.get_full_text(doc['doc_id'])
docs.append(doc)
except Exception as exc:
print(' ! could not process doc', idx, exc)
return docs
##
# Get Full Text Content
##
def get_full_text(self, document_id):
'''
@param: {int} document_id: a document's id number
@returns: {str}: the base64-decoded full text of the document
'''
request = '''
<SOAP-ENV:Envelope
xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/"
SOAP-ENV:encodingStyle= "http://schemas.xmlsoap.org/soap/encoding/">
<soap:Body xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/">
<GetDocumentsByDocumentId xmlns="http://getdocumentsbydocumentid.retrieve.services.v1.wsapi.lexisnexis.com">
<binarySecurityToken>{0}</binarySecurityToken>
<documentIdList>
<documentId>{1}</documentId>
</documentIdList>
<retrievalOptions>
<documentView>FullText</documentView>
<documentMarkup>Display</documentMarkup>
</retrievalOptions>
</GetDocumentsByDocumentId>
</soap:Body>
</SOAP-ENV:Envelope>
'''.format(self.auth_token, document_id)
url = self.get_url('Retrieval')
response = requests.post(url=url, headers=self.get_headers(request), data=request)
soup = BeautifulSoup(response.text, 'xml')
return base64.b64decode(soup.document.text).decode('utf8')
class Document(dict):
def __init__(self, document_soup):
self.verbose = False
self.include_meta = False
self.metadata = self.format_doc(document_soup)
def format_doc(self, soup):
'''
@param {BeautifulSoup} soup: contains a document from a search() query:
<ns1:documentcontainer>
<ns1:documentid>02A6A252C52</ns1:documentid>
<ns1:document>PD94bWwgdmVyc2lvbj0i</ns1:document>
</ns1:documentcontainer>
Here the <documentid> contains the doc's id and <document> contains a
base64 encoded representation of the doc's metadata
@returns: {obj}: an object with metadata attributes from the decoded doc
'''
formatted = {}
decoded = base64.b64decode(soup.find('ns1:document').get_text())
doc_soup = BeautifulSoup(decoded, 'lxml')
if self.include_meta:
for i in doc_soup.find_all('meta'):
try:
formatted[ i['name'] ] = i['content']
except Exception as exc:
if self.verbose: print(' ! error formatting doc', i['name'], exc)
formatted['doc_id'] = soup.find('ns1:documentid').get_text()
formatted['headline'] = self.get_doc_headline(doc_soup)
formatted['attachment_id'] = self.get_doc_attachment_id(doc_soup)
formatted['pub'] = self.get_doc_pub(doc_soup)
formatted['pub_date'] = self.get_doc_pub_date(doc_soup)
formatted['length'] = self.get_doc_length(doc_soup)
formatted['section'] = self.get_doc_section(doc_soup)
formatted['author'] = self.get_doc_author(doc_soup)
return formatted
##
# Document attribute accessors
##
def get_doc_headline(self, soup):
'''
@param {BeautifulSoup} soup: the soup from a documentcontainer tag
@returns {str} the headline from a document
'''
try:
headline = soup.find('div', {'class': 'HEADLINE'}).string
if headline:
return headline
except Exception as exc:
headline = soup.find('h1').string
if headline:
return headline
else:
if self.verbose: print(' ! error parsing headline', exc)
return ''
def get_doc_attachment_id(self, soup):
'''
@param {BeautifulSoup} soup: a documentcontainer tag
@returns {str}: the attachmentId attribute of a document
'''
try:
attachment_node = soup.find('span', {'class': 'attachmentId'})['id']
return attachment_node if attachment_node else ''
except Exception as exc:
if self.verbose: print(' ! error parsing doc_attachment', exc)
return ''
def get_doc_pub(self, soup, default_name='No pub name'):
'''
@param {BeautifulSoup} soup: a documentcontainer tag
@returns {str}: the publication attribute of a document
'''
try:
pub = soup.find('div', {'class': 'PUB'}).string
if pub:
return pub
except Exception as exc:
pub = soup.find('meta', {'name': 'sourceName'})['content']
if pub:
return pub
else:
if self.verbose: print(' ! error parsing doc_pub', exc)
return default_name
def get_doc_pub_date(self, soup):
'''
Parses different human-readable date formats dynamically,
e.g.:
January 3, 2017 Tuesday 5:00 PM GMT
and returns a date in UTC+Z format using the format,
e.g.:
2017-01-03T17:00:00Z
@param {BeautifulSoup} soup: a documentcontainer tag
@returns {str}: the pub date attribute from a document
'''
bad_date = '1900-01-01T00:00:00Z'
try:
soup_date = soup.find('div', {'class': 'PUB-DATE'})
if not soup_date:
soup_date = soup.find('div', {'class': 'DATE'})
if not soup_date:
soup_date = soup.find('div', {'class': 'DISPLAY-DATE'})
date_str = soup_date.get_text()
print("date_str: ", date_str)
date = ''
while not date:
try:
date = dateparser.parse(date_str)
print(' parsed: ', date)
except Exception as exc:
print(' ! error parsing doc_pub_date', exc)
date_str = ' '.join(date_str.split(' ')[:-1])
date_out = date.strftime('%Y-%m-%dT%H:%M:%SZ')
if not date_out:
date_out = bad_date
print(date_out)
return date_out
except Exception as exc:
if self.verbose: print(' ! error parsing doc_pub_date', exc)
return ''
def get_doc_length(self, soup):
'''
@param {BeautifulSoup} soup: a documentcontainer tag
@returns {str}: the length attribute of a document
'''
try:
length = soup.find('div', {'class': 'LENGTH'}).string
length = length.replace(' words', '')
return length if length else ''
except Exception as exc:
if self.verbose: print(' ! error parsing doc_length', exc)
return ''
def get_doc_section(self, soup):
'''
@param {BeautifulSoup} soup: a documentcontainer tag
@returns {str}: the newspaper section attribute of a document
'''
try:
section = soup.find('div', {'class': 'SECTION'}).string
if section:
return section
except Exception as exc:
# for some reason this only works if this option is brought down to the except portion of try except.
# compare to get_doc_pub, for
not found
:type fcnName: string
:param fcnName: function name for error reporting
:type pos: string or ``None``
:param pos: position from locator marks for error reporting
:rtype: an object
:return: the extracted object
"""
while len(path) > 0:
head, tail = path[0], path[1:]
try:
obj = obj[head]
except (KeyError, IndexError):
if isinstance(obj, (list, tuple)):
raise PFARuntimeException("array index not found", arrayErrCode, fcnName, pos)
else:
raise PFARuntimeException("map key not found", mapErrCode, fcnName, pos)
path = tail
return obj
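# Worked example for the extraction loop above (hypothetical data, not from a real PFA document):
#   obj = {"points": [{"x": 1.5}, {"x": 2.5}]}, path = ["points", 1, "x"]
#   step 1: obj = obj["points"]  -> [{"x": 1.5}, {"x": 2.5}]
#   step 2: obj = obj[1]         -> {"x": 2.5}
#   step 3: obj = obj["x"]       -> 2.5
# A bad index such as path = ["points", 7] raises PFARuntimeException("array index not found").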
def update(state, scope, obj, path, to, arrayErrCode, mapErrCode, fcnName, pos):
"""Return the updated state of a cell or pool at runtime (not in-place).
:type state: titus.genpy.ExecutionState
:param state: runtime state object
:type scope: titus.util.DynamicScope
:param scope: dynamic scope object
:type obj: an object
:param obj: cell or pool data that should be replaced
:type path: list of integers and strings
:param path: extraction path
:type to: an object, possibly callable
:param to: replacement object; if callable, the function is called to perform the update
:type arrayErrCode: integer
:param arrayErrCode: error code to raise if an array index is not found
:type mapErrCode: integer
:param mapErrCode: error code to raise if a map key is not found
:type fcnName: string
:param fcnName: function name for error reporting
:type pos: string or ``None``
:param pos: position from locator marks for error reporting
:rtype: an object
:return: an updated version of the object, for the sake of replacement
"""
if len(path) > 0:
head, tail = path[0], path[1:]
if isinstance(obj, dict):
if len(tail) > 0 and head not in obj:
raise PFARuntimeException("map key not found", mapErrCode, fcnName, pos)
out = {}
for k, v in obj.items():
if k == head:
out[k] = update(state, scope, v, tail, to, arrayErrCode, mapErrCode, fcnName, pos)
else:
out[k] = v
return out
elif isinstance(obj, (list, tuple)):
if (len(tail) > 0 and head >= len(obj)) or head < 0:
raise PFARuntimeException("array index not found", arrayErrCode, fcnName, pos)
out = []
for i, x in enumerate(obj):
if i == head:
out.append(update(state, scope, x, tail, to, arrayErrCode, mapErrCode, fcnName, pos))
else:
out.append(x)
return out
else:
raise Exception
elif callable(to):
callScope = DynamicScope(scope)
callScope.let({to.paramNames[0]: obj})
return to(state, callScope)
else:
return to
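# Worked example for update() (hypothetical data): the original object is never mutated;
# a rebuilt copy with the replacement spliced in is returned instead.
#   update(state, scope, {"a": [1, 2], "b": 3}, ["a", 0], 99, arrayErr, mapErr, "fcn", None)
#     -> {"a": [99, 2], "b": 3}
# If ``to`` is callable it receives the old value at that path and its return value is used
# as the replacement.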
def do(*exprs):
"""Helper function for chaining expressions.
The expressions have already been evaluated when this function is called, so this function just returns the last one.
If the list of expressions is empty, it returns ``None``.
"""
# You've already done them; just return the right value.
if len(exprs) > 0:
return exprs[-1]
else:
return None
def ifThen(state, scope, predicate, thenClause):
"""Helper function for constructing an if-then branch as an expression.
:type state: titus.genpy.ExecutionState
:param state: execution state
:type scope: titus.util.DynamicScope
:param scope: dynamic scope object
:type predicate: callable
:param predicate: function that returns ``True`` or ``False``
:type thenClause: callable
:param thenClause: function that is called if ``predicate`` returns ``True``
:rtype: ``None``
:return: nothing
"""
if predicate(state, DynamicScope(scope)):
thenClause(state, DynamicScope(scope))
return None
def ifThenElse(state, scope, predicate, thenClause, elseClause):
"""Helper function for constructing an if-then-else branch as an expression.
:type state: titus.genpy.ExecutionState
:param state: execution state
:type scope: titus.util.DynamicScope
:param scope: dynamic scope object
:type predicate: callable
:param predicate: function that returns ``True`` or ``False``
:type thenClause: callable
:param thenClause: function that is called if ``predicate`` returns ``True``
:type elseClause: callable
:param elseClause: function that is called if ``predicate`` returns ``False``
:rtype: return type of ``thenClause`` or ``elseClause``
:return: if ``predicate`` returns ``True``, the result of ``thenClause``, else the result of ``elseClause``
"""
if predicate(state, DynamicScope(scope)):
return thenClause(state, DynamicScope(scope))
else:
return elseClause(state, DynamicScope(scope))
def cond(state, scope, ifThens):
"""Helper function for constructing if-elif-elif-...-elif as an expression.
:type state: titus.genpy.ExecutionState
:param state: execution state
:type scope: titus.util.DynamicScope
:param scope: dynamic scope object
:type ifThens: list of (callable, callable) pairs
:param ifThens: list of ``(predicate, thenClause)`` pairs
:rtype: ``None``
:return: nothing
"""
for predicate, thenClause in ifThens:
if predicate(state, DynamicScope(scope)):
thenClause(state, DynamicScope(scope))
break
return None
def condElse(state, scope, ifThens, elseClause):
"""Helper function for constructing if-elif-elif-...-elif as an expression.
:type state: titus.genpy.ExecutionState
:param state: execution state
:type scope: titus.util.DynamicScope
:param scope: dynamic scope object
:type ifThens: list of (callable, callable) pairs
:param ifThens: list of ``(predicate, thenClause)`` pairs
:type elseClause: callable
:param elseClause: function that is called if ``predicate`` returns ``False``
:rtype: return type of any ``thenClause`` or the ``elseClause``
:return: if any ``predicate`` returns ``True``, the result of the corresponding ``thenClause``, else the result of ``elseClause``
"""
for predicate, thenClause in ifThens:
if predicate(state, DynamicScope(scope)):
return thenClause(state, DynamicScope(scope))
return elseClause(state, DynamicScope(scope))
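# Worked example for cond/condElse (hypothetical predicates): given
#   ifThens = [(is_negative, clause_a), (is_zero, clause_b)]
# cond() runs clause_a for -5, clause_b for 0, and nothing for 7 (always returning None),
# while condElse() falls through to its elseClause for 7 and returns that clause's result.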
def doWhile(state, scope, predicate, loopBody):
"""Helper function for constructing pretest loops as an expression.
Calls ``state.checkTime()`` on every iteration.
:type state: titus.genpy.ExecutionState
:param state: execution state
:type scope: titus.util.DynamicScope
:param scope: dynamic scope object
:type predicate: callable
:param predicate: function that returns ``True`` or ``False``
:type loopBody: callable
:param loopBody: function that is called while ``predicate`` returns ``True``
:rtype: ``None``
:return: nothing
"""
bodyScope = DynamicScope(scope)
predScope = DynamicScope(bodyScope)
while predicate(state, predScope):
state.checkTime()
loopBody(state, bodyScope)
return None
def doUntil(state, scope, predicate, loopBody):
"""Helper function for constructing posttest loops as an expression.
Calls ``state.checkTime()`` on every iteration.
:type state: titus.genpy.ExecutionState
:param state: execution state
:type scope: titus.util.DynamicScope
:param scope: dynamic scope object
:type predicate: callable
:param predicate: function that returns ``True`` or ``False``
:type loopBody: callable
:param loopBody: function that is called until ``predicate`` returns ``True``
:rtype: ``None``
:return: nothing
"""
bodyScope = DynamicScope(scope)
predScope = DynamicScope(bodyScope)
while True:
state.checkTime()
loopBody(state, bodyScope)
if predicate(state, predScope):
break
return None
def doFor(state, scope, initLet, predicate, stepSet, loopBody):
"""Helper function for constructing for loops as an expression.
Calls ``state.checkTime()`` on every iteration.
:type state: titus.genpy.ExecutionState
:param state: execution state
:type scope: titus.util.DynamicScope
:param scope: dynamic scope object
:type initLet: callable
:param initLet: initialization of for loop variables
:type predicate: callable
:param predicate: function that returns ``True`` or ``False``
:type stepSet: callable
:param stepSet: updating of for loop variables
:type loopBody: callable
:param loopBody: function that is called while ``predicate`` returns ``True``
:rtype: ``None``
:return: nothing
"""
loopScope = DynamicScope(scope)
predScope = DynamicScope(loopScope)
bodyScope = DynamicScope(loopScope)
initLet(state, loopScope)
while predicate(state, predScope):
state.checkTime()
loopBody(state, bodyScope)
stepSet(state, loopScope)
return None
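# The helper above mirrors a C-style for loop. As a rough sketch, a call equivalent to
# "let i = 0; while i < 3; i = i + 1" behaves like the plain Python below, except that the
# loop variable lives in loopScope rather than a Python local:
#   i = 0
#   while i < 3:
#       state.checkTime()
#       loopBody(i)
#       i += 1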
def doForeach(state, scope, name, array, loopBody):
"""Helper function for constructing foreach loops as an expression.
Calls ``state.checkTime()`` on every iteration.
:type state: titus.genpy.ExecutionState
:param state: execution state
:type scope: titus.util.DynamicScope
:param scope: dynamic scope object
:type name: string
:param name: new variable for each array item
:type array: Python iterable
:param array: array to loop over
:type loopBody: callable
:param loopBody: function that is called once for each item of ``array``
:rtype: ``None``
:return: nothing
"""
loopScope = DynamicScope(scope)
bodyScope = DynamicScope(loopScope)
for item in array:
state.checkTime()
loopScope.let({name: item})
loopBody(state, bodyScope)
return None
def doForkeyval(state, scope, forkey, forval, mapping, loopBody):
"""Helper function for constructing for key,value loops as an expression.
Calls ``state.checkTime()`` on every iteration.
:type state: titus.genpy.ExecutionState
:param state: execution state
:type scope: titus.util.DynamicScope
:param scope: dynamic scope object
:type forkey: string
:param forkey: new variable for each item key
:type forval: string
:param forval: new variable for each item value
:type mapping: Python dict
:param mapping: map of key-value pairs to loop over
:type loopBody: callable
:param loopBody: function that is called once for each key-value pair of ``mapping``
:rtype: ``None``
:return: nothing
"""
loopScope = DynamicScope(scope)
bodyScope = DynamicScope(loopScope)
for key, val in mapping.items():
state.checkTime()
loopScope.let({forkey: key, forval: val})
loopBody(state, bodyScope)
return None
def cast(state, scope, expr, fromType, cases, partial, parser):
"""Helper function for type-safe casting as an expression.
:type state: titus.genpy.ExecutionState
:param state: execution state
:type scope: titus.util.DynamicScope
:param scope: dynamic scope object
:type expr: evaluated expression
:param expr: object to cast
:type fromType: string
:param fromType: JSON-serialized Avro type
:type cases: list of (string, string, callable) triples
:param cases: list of (new variable for one case, JSON-serialized subtype, function to call if type matches) triples
:type partial: boolean
:param partial: if ``True``, allow the set of cases to incompletely cover the ``fromType``
:type
# coding: utf-8
import typing
import json
import asyncio
from collections import deque
from starlette.websockets import WebSocket
from .JsPyTextSocket import JsPyTextSocket, JsPyError
from . import JsPyBackground
__all__ = ['push_nowait', 'push', 'pop', 'shift', 'add_callback',
'clear_callback', 'is_empty', 'get_keys',
'has', 'clear', 'clear_all', 'remove', 'remove_all']
# Queue data pool
# key: name of queue
# value: queue data (collections.deque object)
_queue_stack = {}
# Id number assigned to protocol 'queue' and 'queue_call'
_queue_id = 0
_QUEUE_ID_MAX = 0XFFFFFFFF
# Future instance waiting for queue_return from clients.
# key: queue_id
# value: list of future instance
_queue_memory = {}
# Callback function called every time Queue data arrives from client.
# Normal function or async function.
# Function has one argument. It is the key name of the queue that arrived.
_queue_callbacks = set()
def push_nowait(key: str,
target: typing.Union[int,
typing.List[int],
typing.Tuple[int],
typing.Set[int],
None]=None
) -> typing.Callable:
"""Send data to the queue of the connected clients.
Send data to all connected clients or specified clients,
but does not guarantee arrival.
The sent data is stored in the client queue corresponding to the key name.
Examples
----------
from JsMeetsStarlette import JsMeetsPy, JsPyQueue
app = JsMeetsPy() # Create an application instance.
@app.route('/', ['get'])
def page_root():
...
# Send data to all connected clients.
JsPyQueue.push_nowait('stack1')({'a': [1,2,[2,3]], 'b': True})
JsPyQueue.push_nowait('stack1')(10)
JsPyQueue.push_nowait('stack2')(None)
# Send data to socket ID 2 & 5 only.
# If socket ID does not exist, it is ignored.
JsPyQueue.push_nowait('stack2', [2, 5])([True, False])
...
Examples in client side
----------
// You need to load 'JsPyTextSocket.js' and 'JsPyQueue.js' in advance.
// Operations on queue key name 'stack1'
if(!JsPyQueue.is_empty('stack1')){
// undefined is returned if there is no data on the stack1.
pop_data1 = JsPyQueue.pop('stack1');
}
// Operations on queue key name 'stack2'
pop_data2 = JsPyQueue.shift('stack2');
Parameters
----------
key: str
The name that identifies the client side queue.
It has an independent queue for each key name.
target: None or int or list or tuple or set, default None
Specify the socket id of the target client.
If it does not exist, it is ignored without exception.
None -> Call all clients currently connected.
int, list, tuple -> Call to the client with the specified socket id.
Returns
----------
Callable
The argument is the data stored in the queue of the client.
Control returns soon.
Callable Parameters
----------
data:
Data stored in the client side queue.
The data can be converted to a JSON format as follows.
Specifiable types:
int, float, str, True, False, None(convert to null)
list, dict, tuple(convert to list)
"""
def inner_nowait(data) -> None:
global _queue_id, _QUEUE_ID_MAX
nonlocal key, target
_queue_id += 1
if _queue_id > _QUEUE_ID_MAX:
_queue_id = 1
this_id = _queue_id
send_dic = {'protocol': 'queue', 'key': key,
'id': this_id, 'data': data, 'exception': None}
# Send a queue call to clients
JsPyTextSocket.reservecast(send_dic, target)
return inner_nowait
def push(key: str,
timeout: typing.Union[int, float, None]=0,
target: typing.Union[int,
typing.List[int],
typing.Tuple[int],
typing.Set[int],
None]=None
) -> typing.Callable:
"""Send data to the queue of the connected clients.
Send data to all connected clients or specified clients
and confirm that the data has been delivered.
The sent data is stored in the client queue corresponding
to the key name.
Examples in python side
----------
from JsMeetsStarlette import JsMeetsPy, JsPyQueue
app = JsMeetsPy() # Create an application instance.
@app.route('/', ['get'])
async def page_root():
...
# Wait for a data acknowledgment from the clients.
receive_clients = await JsPyQueue.push(
'stack1', timeout=2)({'a': [1,2,[2,3]], 'b': True})
# ex) receive_clients = [2, 3, 5]
# Socket ID that could be sent reliably are 2, 3, 5.
Parameters
----------
key: str
The name that identifies the client side queue.
It has an independent queue for each key name and stacks data.
timeout: int, float or None, default 0
Maximum time to wait for acknowledgment from clients.
If 0 or negative or None, wait indefinitely.
target: None or int or list or tuple or set, default None
Specify the socket id of the target client.
If it does not exist, it is ignored without exception.
None -> Call all clients currently connected.
int, list, tuple -> Call to the client with the specified socket id.
Returns
----------
Async Callable
The argument is the data stored in the queue of the client.
The await keyword is required to have a return value
because of an async function.
Callable Parameters
----------
data:
Data stored in the client side queue.
The data can be converted to a JSON format as follows.
Specifiable types:
int, float, str, True, False, None(convert to null)
list, dict, tuple(convert to list)
Callable Returns
----------
list
List of the socket ID received a response within the time limit.
The socket ID is a unique number that is assigned to the client
by the server when websocket communication is established.
Callable Raises
----------
TypeError
Parameter is not JSON serializable.
"""
async def inner(data) -> list:
global _queue_id, _QUEUE_ID_MAX, _queue_memory
nonlocal key, timeout, target
_queue_id += 1
if _queue_id > _QUEUE_ID_MAX:
_queue_id = 1
this_id = _queue_id
target_sockets = []
if target is None:
target_sockets = JsPyTextSocket.get_socket_id()
elif isinstance(target, int):
all_sockets = JsPyTextSocket.get_socket_id()
if target in all_sockets:
target_sockets = [target]
elif isinstance(target, (tuple, list, set)):
all_sockets = JsPyTextSocket.get_socket_id()
target_sockets = [i for i in target if (i in all_sockets)]
# There is no specified client.
if len(target_sockets) == 0:
return []
send_dic = {'protocol': 'queue_call', 'key': key,
'id': this_id, 'data': data, 'exception': None}
# Make future in the number of clients
this_loop = asyncio.get_running_loop()
this_futures = [this_loop.create_future() for i in target_sockets]
_queue_memory[this_id] = this_futures
# Send a queue call to clients
JsPyTextSocket.multicast(send_dic, target_sockets)
# Waiting for a response from clients with timeout
if (timeout is not None) and (timeout <= 0):
timeout = None
done, pending = await asyncio.wait(this_futures, timeout=timeout)
return_value = []
for ft in done:
try:
return_value.append(ft.result())
except:
pass
del _queue_memory[this_id]
return return_value
return inner
def pop(key: str, default_value=None):
"""Remove and return an element from the right side of the queue.(LIFO)
Data is sent from the client in pairs with the key name.
The received data is stored in the server queue corresponding to key name.
Remove and return an element from the right side of the queue.
If the queue corresponding to the key name is empty or not present,
the default value specified in the parameter is returned.
Parameters
----------
key: str
The name that identifies the server side queue.
It has an independent queue for each key name.
default_value: Any, Default None
Return value when there is no valid queue data.
(Empty or No queue key name)
See Also
----------
shift()
"""
global _queue_stack
if key in _queue_stack:
return(_queue_stack[key].pop()
if len(_queue_stack[key])
else default_value)
else:
return default_value
def shift(key: str, default_value=None):
"""Remove and return an element from the left side of the queue.(FIFO)
Data is sent from the client in pairs with the key name.
The received data is stored in the server queue corresponding to key name.
Remove and return an element from the left side of the queue.
If the queue corresponding to the key name is empty or not present,
the default value specified in the parameter is returned.
Parameters
----------
key: str
The name that identifies the server side queue.
It has an independent queue for each key name.
default_value: Any, Default None
Return value when there is no valid queue data.
(Empty or No queue key name)
See Also
----------
pop()
"""
global _queue_stack
if key in _queue_stack:
return(_queue_stack[key].popleft()
if len(_queue_stack[key])
else default_value)
else:
return default_value
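# Example contrasting the two accessors above: if the server-side queue 'q' currently holds
# [1, 2, 3] (oldest first), then
#   shift('q')                        -> 1   (FIFO: removes from the left)
#   pop('q')                          -> 3   (LIFO: removes from the right)
#   pop('missing', default_value=0)   -> 0   (no such queue, default returned)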
def add_callback(func: typing.Callable) -> None:
"""Register the callback function when data arrives in the server queue.
When the queue data is sent from the client to the server,
the registration functions are called.
Examples
----------
from JsMeetsStarlette import JsMeetsPy, JsPyQueue
app = JsMeetsPy() # Create an application instance.
def qprint(come_key):
print("Queue key {}, value: {} has come.".format(
come_key, JsPyQueue.pop(come_key)))
async def qbroadcast(come_key):
if come_key == 'cast':
x = JsPyQueue.pop(come_key)
await JsPyQueue.push_nowait('message')(x)
JsPyQueue.add_callback(qprint)
JsPyQueue.add_callback(qbroadcast)
Examples in client side
----------
// You need
<filename>examples/applications/fileupload.py
import argparse
import base64
import datetime
import enum
import json
import hashlib
import logging
import os
import sys
import time
from typing import Tuple, Optional, List
import requests
from requests.auth import HTTPBasicAuth
def sha256(data_bytes) -> bytes:
return hashlib.sha256(data_bytes).digest()
def sha256d(data_bytes) -> bytes:
return sha256(sha256(data_bytes))
# Window 1:
# set PYTHONPATH=examples\applications
# py -3 electrum-sv -dapp fileupload
# Window 2:
# py -3 electrum-sv setconfig rpcport 8888
# py -3 electrum-sv setconfig rpcuser leo-sayer
# py -3 electrum-sv setconfig rpcpassword <PASSWORD>
# py -3 examples\applications\fileupload.py -f picture.jpg -u leo-sayer -p i-feel-like-dancing
# -wn my_wallet_name -wp my_wallet_password
class WalletClient:
_next_request_id = 0
def __init__(self, electrum_host, electrum_port, rpc_username, rpc_password, wallet_name,
wallet_password=None):
self._logger = logging.getLogger("wallet-client")
self._electrum_host = electrum_host
self._electrum_port = electrum_port
self._rpc_username = rpc_username
self._rpc_password = rpc_password
self._wallet_name = wallet_name
self._wallet_password = wallet_password
self._session = None
def __enter__(self) -> 'BroadcastSession':
self._session = BroadcastSession(self)
return self._session
def __exit__(self, exc_type, exc_value, exc_traceback):
try:
self._session._save_state()
except Exception:
self._logger.exception("Wallet session encountered an error saving state")
def load_wallet(self) -> None:
params = {
'wallet_name': self._wallet_name,
}
if self._wallet_password is not None:
params['password'] = self._wallet_password
return self._send_request("load_wallet", **params)
def get_balance(self) -> Tuple[int, int]:
params = {
'wallet_name': self._wallet_name,
}
if self._wallet_password is not None:
params['password'] = self._wallet_password
result = self._send_request("get_balance", **params)
confirmed, unconfirmed, _unmatured = result
return unconfirmed, confirmed
def make_signed_opreturn_transaction(self, pushdatas: List[bytes]) -> dict:
pushdatas_b64 = []
for pushdata in pushdatas:
pushdata_b64 = base64.b64encode(pushdata).decode('utf-8')
pushdatas_b64.append(pushdata_b64)
params = {
'pushdatas_b64': pushdatas_b64,
'wallet_name': self._wallet_name,
'password': self._wallet_password,
}
result = self._send_request('make_signed_opreturn_transaction', **params)
if 'error' in result:
return result['error']
return result
def broadcast_transaction(self, tx_hex: str) -> str:
params = {
'tx_hex': tx_hex,
}
result = self._send_request('broadcast_transaction', **params)
if 'error' in result:
return result['error']
return result
def get_transaction_state(self, tx_id: str) -> bool:
params = {
'tx_id': tx_id,
'wallet_name': self._wallet_name,
}
return self._send_request('get_transaction_state', **params)
def split_coins(self):
params = {
'wallet_name': self._wallet_name,
'password': self._wallet_password,
}
return self._send_request('split_coins', **params)
def _send_request(self, method, *args, **kwargs):
# JSON-RPC 2.0 allows either a list of arguments or a dictionary of named arguments,
# but not both.
assert not (len(args) and len(kwargs))
params = args
if not len(params) and len(kwargs):
params = kwargs
url = f"http://{self._electrum_host}:{self._electrum_port}/"
headers = {'content-type': 'application/json'}
request_id = self._next_request_id
self._next_request_id += 1
payload = {
"method": method,
"params": params,
"jsonrpc": "2.0",
"id": request_id,
}
response = requests.post(url, data=json.dumps(payload), headers=headers,
auth=HTTPBasicAuth(self._rpc_username, self._rpc_password)).json()
self._logger.debug("_send_request(%s, *%s, **%s) -> %s", method, args, kwargs, response)
if 'error' in response:
error_message = response['error'].get('message', 'Server did not give reason')
if error_message == "too-long-mempool-chain":
raise BitcoinNoValidUTXOsError()
elif error_message == "confirmed-coins-exist":
raise BitcoinSplitNotNecessary()
raise SessionError(error_message)
return response['result']
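# For reference, the JSON-RPC 2.0 payload built above looks like the following for a
# hypothetical load_wallet call (the wallet name is an illustrative placeholder; "id" is a
# running counter starting at 0):
#   {"method": "load_wallet", "params": {"wallet_name": "my_wallet"}, "jsonrpc": "2.0", "id": 0}
# A successful response carries the value under "result", while failures carry
# {"error": {"message": ...}}, which _send_request converts into SessionError subclasses.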
class SessionError(Exception):
pass
class BitcoinNoValidUTXOsError(SessionError):
pass
class BitcoinSplitNotNecessary(SessionError):
pass
class FileProtocol(enum.IntEnum):
B = 1
Bcat = 2
STATE_DIR_NAME = ".fileupload_state"
class BroadcastSession:
def __init__(self, wallet):
self._state = None
self._state_path = None
self._wallet = wallet
self._logger = logging.getLogger("wallet-session")
def broadcast_file(self, file_path: str, media_type: str, protocol: FileProtocol):
with open(file_path, "rb") as f:
message_bytes = f.read()
file_name = os.path.basename(file_path)
self._wallet.load_wallet()
# These should be deterministically generated from the given file.
initial_push_groups = self._create_initial_push_groups(
message_bytes=message_bytes,
media_type=media_type,
protocol=protocol)
self._load_state(file_name, message_bytes, initial_push_groups)
self._wait_for_utxo_split()
# Broadcast and confirm in mempool for each initial transaction.
self._process_push_groups(initial_push_groups, self._state['initial_group_state'])
# Now that the initial transactions are confirmed to be 0-conf on-chain, create any
# final transactions which likely need to refer to them.
final_push_groups = self._create_final_push_groups(media_type, protocol)
if self._state['final_group_state'] is None:
self._state['final_group_state'] = [ {} for i in range(len(final_push_groups))]
self._process_push_groups(final_push_groups, self._state['final_group_state'])
return True
def get_summary(self) -> dict:
initial_push_groups = self._state['initial_group_state']
final_push_groups = self._state['final_group_state']
result = {
'first_timestamp': initial_push_groups[0]['when_broadcast'],
'last_timestamp': initial_push_groups[-1]['when_broadcast'],
'fees': sum(v['tx_fee'] for v in initial_push_groups),
'count': len(initial_push_groups),
'size': sum(v['tx_size'] for v in initial_push_groups),
'initial_tx_ids': list(v['tx_id'] for v in initial_push_groups),
}
if final_push_groups is not None and len(final_push_groups):
result['last_timestamp'] = final_push_groups[-1]['when_broadcast']
result['fees'] += sum(v['tx_fee'] for v in final_push_groups)
result['size'] += sum(v['tx_size'] for v in final_push_groups)
result['count'] += len(final_push_groups)
result['final_tx_ids'] = list(v['tx_id'] for v in final_push_groups)
return result
def _process_push_groups(self, push_groups, push_groups_state):
for i, push_group in enumerate(push_groups):
state = push_groups_state[i]
if 'tx_id' not in state:
self._logger.debug(f"Signing tx {i}")
sign_result = self._wallet.make_signed_opreturn_transaction(push_group)
# Record metadata that we created a signed transaction for this group.
state['when_signed'] = datetime.datetime.now().astimezone().isoformat()
state['tx_id'] = sign_result['tx_id']
state['tx_fee'] = sign_result['fee']
state['tx_size'] = len(sign_result['tx_hex']) // 2
print(f"Broadcasting transaction {i+1}/{len(push_groups)}")
try:
tx_id = self._wallet.broadcast_transaction(sign_result['tx_hex'])
except BitcoinNoValidUTXOsError:
# Ensure the next attempt will rebuild and rebroadcast the transaction.
del state['tx_id']
# Block until we are ready for that.
self._wait_for_utxo_split()
continue
if tx_id != state['tx_id']:
raise SessionError(
f"Inconsistent tx_id, got '{tx_id}' expected '{state['tx_id']}'")
state['when_broadcast'] = datetime.datetime.now().astimezone().isoformat()
if 'in_mempool' not in state:
print(f"Looking for transaction {i+1}/{len(push_groups)} in mempool")
attempts = 0
tx_id = state['tx_id']
while attempts < 10:
if self._wallet.get_transaction_state(tx_id) is not None:
break
time.sleep(2.0)
attempts += 1
if attempts == 10:
print("Cleared broadcast state for transaction {i+1}/{len(push_groups)}")
del state['tx_id']
raise SessionError(f"Failed to find transaction in mempool '{tx_id}'")
state['in_mempool'] = True
def _wait_for_utxo_split(self):
split_tx_id = self._state.get("split_tx_id")
if split_tx_id is None:
try:
split_tx_result = self._wallet.split_coins()
print("Creating a UTXO split transaction (may take time)")
while split_tx_result is None:
time.sleep(20.0)
split_tx_result = self._wallet.split_coins()
except BitcoinSplitNotNecessary:
return
print("Broadcasting the created UTXO split transaction")
self._wallet.broadcast_transaction(split_tx_result['tx_hex'])
split_tx_id = split_tx_result['tx_id']
self._state["split_tx_id"] = split_tx_id
print("Waiting for UTXO split transaction to confirm (may take time)")
split_state = self._wallet.get_transaction_state(split_tx_id)
while split_state is None or split_state[1] <= 0:
time.sleep(20.0)
split_state = self._wallet.get_transaction_state(split_tx_id)
del self._state['split_tx_id']
def _save_state(self):
if self._state_path is not None and self._state is not None:
with open(self._state_path, "w") as f:
json.dump(self._state, f)
def _load_state(self, file_name:str, message_bytes: bytes,
initial_push_groups: List[List[bytes]]) -> None:
message_hash = sha256(message_bytes)
message_hash_hex = message_hash.hex()
if not os.path.exists(STATE_DIR_NAME):
os.mkdir(STATE_DIR_NAME)
self._state_path = os.path.join(STATE_DIR_NAME, message_hash_hex+ ".json")
if os.path.exists(self._state_path):
with open(self._state_path, "r") as f:
self._state = json.load(f)
else:
self._state = {}
self._state['file_name'] = file_name
self._state['initial_group_state'] = [ {} for i in range(len(initial_push_groups)) ]
self._state['final_group_state'] = None
def _create_initial_push_groups(self,
message_bytes: bytes, media_type: str, protocol: int=FileProtocol.B,
encoding: Optional[str]=None, file_name: Optional[str]=None) -> List[List[bytes]]:
FileProtocol(protocol)
assert media_type is not None
if len(message_bytes) > 99000:
if protocol == FileProtocol.B:
raise SessionError("message too large for B protocol")
else:
if protocol == FileProtocol.Bcat:
raise SessionError("message too small for Bcat protocol")
all_push_groups = []
if protocol == FileProtocol.B:
push_values = [
b"19HxigV4QyBv3tHpQVcUEQyq1pzZVdoAut",
message_bytes,
bytes(media_type, "utf-8"),
]
if encoding:
push_values.append(bytes(encoding, "utf-8"))
if file_name:
if not encoding:
raise SessionError("must provide encoding with filename")
push_values.append(bytes(file_name, "utf-8"))
all_push_groups.append(push_values)
elif protocol == FileProtocol.Bcat:
# Split the message up into OP_RETURN sized segments.
index = 0
message_view = memoryview(message_bytes)
while index < len(message_view):
segment_bytes = bytes(message_view[index:index+99000])
push_values = [
b"1ChDHzdd1H4wSjgGMHyndZm6qxEDGjqpJL",
segment_bytes
]
all_push_groups.append(push_values)
index += 99000
return all_push_groups
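# Worked example of the Bcat segmentation above (hypothetical file size): a 250,000 byte file
# is split at the 99,000 byte OP_RETURN budget into three data pushes of
# 99,000 + 99,000 + 52,000 bytes, i.e. three initial transactions, each prefixed with the
# Bcat part address b"1ChDHzdd1H4wSjgGMHyndZm6qxEDGjqpJL".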
def _create_final_push_groups(self, media_type: str,
protocol: int=FileProtocol.B, encoding: Optional[str]=None,
file_name: Optional[str]=None) -> List[List[bytes]]:
FileProtocol(protocol)
all_push_groups = []
if protocol == FileProtocol.Bcat:
push_values = [
b"15DHFxWZJT58f9nhyGnsRBqrgwK4W6h4Up",
b"ElectrumSV",
bytes(media_type, "utf-8"),
# Contrary to what the Bcat spec says, it will error ambiguously on empty strings
# and seems to expect a string with a space instead.
bytes(encoding, "utf-8") if encoding is not None else b"\x20",
bytes(file_name, "utf-8") if file_name is not None else b"\x20",
b"\x20",
]
for group_state in self._state['initial_group_state']:
tx_id_hex = group_state['tx_id']
tx_id_bytes = bytes.fromhex(tx_id_hex)[::-1]
push_values.append(tx_id_bytes)
all_push_groups.append(push_values)
return all_push_groups
def check_file_for_protocol(filepath: str, protocol: FileProtocol) -> bool:
if os.path.getsize(filepath) > 99000:
return protocol != FileProtocol.B
else:
return protocol != FileProtocol.Bcat
def main() -> None:
logging.basicConfig()
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--file", required=True)
parser.add_argument("-mt", "--media-type", required=False)
parser.add_argument("-enc", "--encoding", required=False)
parser.add_argument("-fn", "--filename", required=False)
parser.add_argument("-eh", "--electrum-host", required=False)
parser.add_argument("-ep", "--electrum-port", required=False, default=8888, type=int)
parser.add_argument("-u", "--rpc-username", required=True)
parser.add_argument("-p", "--rpc-password", required=True)
parser.add_argument("-wn", "--wallet-name", required=True)
parser.add_argument("-wp", "--wallet-password", required=True)
parser.add_argument("-pr", "--protocol", action="store", default='B',
choices = ('B', 'Bcat'), help="Specify file protocol")
parser.add_argument("-v", "--verbose", action="store_true", default=False)
result = parser.parse_args(sys.argv[1:])
if result.verbose:
print(result)
logging.getLogger().setLevel(logging.DEBUG)
filepath = result.file
if not os.path.exists(filepath):
print(f"{filepath}: file not found")
sys.exit(1)
suffix = os.path.splitext(filepath)[1].lower()
media_type = result.media_type
if media_type is None and suffix:
if suffix == ".png":
media_type = "image/png"
elif suffix == ".jpeg" or suffix == ".jpg":
media_type = "image/jpeg"
elif suffix == ".mp4":
media_type = "video/mp4"
if media_type is None:
print(f"{filepath}: unable to guess media type")
sys.exit(1)
# The arg parser guards against the user choosing a non-existent protocol.
protocol = FileProtocol[result.protocol]
if not check_file_for_protocol(filepath, protocol):
print(f"{filepath}: incompatible with protocol (too large? too small?)")
sys.exit(1)
electrum_host = result.electrum_host
if result.electrum_host is None:
electrum_host = "127.0.0.1"
wallet = WalletClient(electrum_host, result.electrum_port, result.rpc_username,
result.rpc_password, result.wallet_name, result.wallet_password)
with wallet as session:
session.broadcast_file(filepath, media_type, protocol)
result
'''
## MESSAGING CLIENT 1.0
##
## This is a simple client side app that connects to the messaging server and provides End to End Encryption
## All messages transferred between clients are indecipherable to all except the communicating parties
## Users can register for accounts with the chosen server.
## You can also transfer files which will also be encrypted during transit.
## To transfer files, in the "Enter message: " prompt type "file: (filename)" without quotes,
## ...File must be within the same folder as this file (m_client.py) in order for it to work.
##
## Messages use a json based api
##
## IMPORTANT: If you wish to have multiple accounts, you must create a separate folder for
## each user, each containing the m_client.py and encryption_engine.py files.
## ...DO NOT login to another account from the wrong account folder.
## ...This is because of how the encryption keys are stored
##
## Author: <NAME>
'''
import socket
import threading
import json
import sqlite3
import time
import sys
from encryption_engine import EncryptionEngine
import getpass
from io import BytesIO
class Client(object):
def __init__(self,host,port):
self._logged_user = None
self._remote_user_key = None
self._logged_user_api_key = None
self._server_tuple = (host,port)
self.client_socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
self._encryption_engine = EncryptionEngine()
self._prepare_app()
self._main_option_menu()
self._initialize_waiting_thread()
self._user_option_menu()
def _poll_server_connection(self,server_tuple):
#Keep trying to connect every 5 seconds until server is found and online
while True:
try:
self.client_socket.connect(server_tuple)
break
except:
time.sleep(5)
def _prepare_app(self):
#This generates the public and private keys and creates the local database
#in SQLite
conn = sqlite3.connect("local.db")
cursor = conn.cursor()
#Create Tables if none exist
user_key_table_sql = "CREATE TABLE IF NOT EXISTS userkeys (id INTEGER PRIMARY KEY NOT NULL,username varchar(200),prikey varchar(2000),pubkey varchar(2000))"
shared_key_table_sql = "CREATE TABLE IF NOT EXISTS sharedkeys (id INTEGER PRIMARY KEY NOT NULL,username varchar(200),symkey varchar(2000))"
message_table_sql = "CREATE TABLE IF NOT EXISTS messages (id INTEGER PRIMARY KEY NOT NULL, message varchar(200), sender varchar(200), receipient varchar(200), date varchar(200))"
cursor.execute(shared_key_table_sql)
cursor.execute(message_table_sql)
cursor.execute(user_key_table_sql)
#Check if you have generated your private keys
check_keys_sql = 'SELECT pubkey,prikey FROM userkeys WHERE username=?'
record = conn.execute(check_keys_sql,("device",))
keys = record.fetchone()
if keys is not None:
pass
else:
self._encryption_engine.generate_private_public_key()
print "Done preparing app"
conn.commit()
conn.close()
def _main_option_menu_header(self):
print ""
print "********* MESSAGING SERVICE *********"
print "1. Register a User "
print "2. Login a User "
print ""
def _main_option_menu(self):
self._main_option_menu_header()
while True:
print "> ",
menu_choice = raw_input("")
if menu_choice == "1":
print""
username = raw_input("Choose a Username: ")
password = getpass.getpass("Choose a Password: ")
print "Connecting to Server"
self._poll_server_connection(self._server_tuple)
public_key = self._encryption_engine.fetch_public_key()
request_json = '{"username":"%s","password":"%s","public_key":"%s","type":"registration"}'%(username,password,public_key)
self.client_socket.sendall(request_json)
elif menu_choice == "2":
print""
username = raw_input("Enter username: ")
password = getpass.getpass("Enter password: ")
print "Connecting to Server"
self._poll_server_connection(self._server_tuple)
request_json = '{"username":"%s","password":"%s","type":"login"}'%(username,password)
self.client_socket.sendall(request_json)
response = self.client_socket.recv(1024)
response_json = json.loads(response)
if response_json["success"] == True:
self._logged_user = username
self._logged_user_api_key = response_json["api_key"]
break
else:
self.client_socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
print""
print response_json["reason"]
print""
def _initialize_waiting_thread(self):
#This thread waits to receive messages from the server and other users
t = threading.Thread(target = self._wait_for_messages)
t.daemon = True
t.start()
def _wait_for_messages(self):
try:
while True:
chunk = self.client_socket.recv(1024)
response = ''
while chunk:
if chunk[-2:] == "/0":
response+=chunk[:-2]
break
response+= chunk
chunk = self.client_socket.recv(1024)
print"Response is: %s"%response
self._handle_message_response(response)
except:
print"Shutting down.."
def _handle_message_response(self,response):
try:
json_data = json.loads(response)
if json_data["type"] == "message":
#Handles when user receives a message
sender = json_data["sender"]
date = json_data["date"]
message = json_data["message"]
receipient = json_data["receipient"]
shared_key = self._encryption_engine.fetch_local_shared_key(sender)
decrypted_text = self._encryption_engine.decrypt_text(message,shared_key)
print"\nMessage Received"
print""
print"From: %s"%sender
print"Date: %s"%date
print"Message: %s"%decrypted_text
print""
conn = sqlite3.connect('local.db')
cursor = conn.cursor()
cursor.execute('INSERT INTO messages (message,sender,receipient,date) VALUES (?,?,?,?)',(decrypted_text,sender,receipient,date))
conn.commit()
conn.close()
elif json_data["type"] == "file":
#Handles receiving of a file, after receiving a filename
sender = json_data["sender"]
shared_key = self._encryption_engine.fetch_local_shared_key(sender)
filename = self._encryption_engine.decrypt_text(json_data["message"],shared_key)
print "Receiving %s from %s, please wait...."%(filename,sender)
#Prevent recv from taking too long
self.client_socket.settimeout(5)
try:
with open(filename,"wb") as f:
chunk = self.client_socket.recv(1024)
data = ''
if chunk == "/0end":
pass
else:
while chunk:
if chunk[-5:] == '/0end':
data+=chunk[:-5]
break
data+=chunk
chunk = self.client_socket.recv(1024)
decrypted_data = self._encryption_engine.decrypt_file(data,shared_key)
f.write(decrypted_data)
except:
pass
self.client_socket.settimeout(None)
print "File Received"
elif json_data["type"] == "unread":
unread_messages = json_data["objects"]
for message in unread_messages:
sender = message["sender"]
date = message["date"]
message_text = message["message"]
receipient = message["receipient"]
shared_key = self._encryption_engine.fetch_local_shared_key(sender)
decrypted_text = self._encryption_engine.decrypt_text(message_text,shared_key)
print""
print"From: %s"%sender
print"Date: %s"%date
print"Message: %s"%decrypted_text
print""
conn = sqlite3.connect('local.db')
cursor = conn.cursor()
cursor.execute('INSERT INTO messages (message,sender,receipient,date) VALUES (?,?,?,?)',(decrypted_text,sender,receipient,date))
conn.commit()
conn.close()
elif json_data["type"] == "alert":
#Handles alerts like success and fails
message = json_data["message"]
#This helps throw an exception if encryption is tried
#on a non existent key
if json_data["success"] == False:
self._remote_user_key = 0
print""
print"Alert: %s"%message
print""
elif json_data["type"] == "publickey":
#Handles response when you fetch a public key remotely
username = json_data["username"]
public_key = json_data["public_key"]
print""
print"Public Key for %s: %s"%(username,public_key)
print""
self._remote_user_key = public_key
elif json_data["type"] == "sharedkey":
#Handle when a user sends you a shared key
#Receives key and saves it to the database
message = json_data["message"]
sender = json_data["sender"]
private_key = self._encryption_engine.fetch_private_key()
decrypted_shared_key = self._encryption_engine.decrypt_key(message,private_key)
self._encryption_engine.save_shared_key(sender,decrypted_shared_key)
print""
self._user_option_menu_header()
except:
if response == 'sent':
print""
print"Success"
print""
else:
print""
print"Failed"
print""
raise
self._user_option_menu_header()
def _user_option_menu_header(self):
print ""
print "1. Send Message "
print "2. View Conversation "
print "3. View Inbox "
print "4. View Outbox "
print "5. View Unread "
print "6. Exit "
print ""
print "> ",
def _user_option_menu(self):
self._user_option_menu_header()
while True:
menu_option = raw_input("")
if menu_option == "1":
self._send_message_method()
elif menu_option == "2":
self._view_conversation_method()
elif menu_option == "3":
self._view_inbox_method()
elif menu_option == "4":
self._view_outbox_method()
elif menu_option == "5":
self._view_unread_method()
elif menu_option == "6":
sys.exit(0)
def _fetch_remote_public_key(self,user):
json_request = '{"username":"%s","logged_user":"%s","api_key":"%s","type":"publickey"}'%(user,self._logged_user,self._logged_user_api_key)
self.client_socket.sendall(json_request)
self.client_socket.send("/0")
timeout = 5
count = 0
while self._remote_user_key is None:
#Check every second if the remote key was fetched
time.sleep(1)
#If server responds with code 0 (from the receiving thread) set key to None
#The try catch will throw an exception an fail gracefully
if self._remote_user_key == 0:
self._remote_user_key = None
break
remote_key = self._remote_user_key
self._remote_user_key = None
return remote_key
def _send_message_method(self):
IS_FILE = False
print ""
message = raw_input("Enter message: ")
receipient = raw_input("Enter recipient: ")
print ""
if message[:6] == "file: ":
IS_FILE = True
message_list = message.split("file: ")
message = message_list[1]
filename = message
sender = self._logged_user
shared_key = self._encryption_engine.fetch_local_shared_key(receipient)
try:
if shared_key is not None:
#The user has a shared key stored for recipient, so head straight to encryption
encrypted_text = self._encryption_engine.encrypt_text(message,shared_key)
else:
#The user has no shared key stored for the recipient,
#so generate and send them a shared key
#fetch remote public key
public_key = self._fetch_remote_public_key(receipient)
#print "Public key fetched"
#generate shared key
shared_key = self._encryption_engine.generate_shared_key()
#print "Shared key generated"
#encrypt shared key with public key
encrypted_shared_key = self._encryption_engine.encrypt_key(shared_key,public_key)
#print"Shared key encrypted"
#save shared key and username to database
self._encryption_engine.save_shared_key(receipient,shared_key)
#print "Shared key saved"
#send to receipient
request_json = '{"sender":"%s","receipient":"%s","logged_user":"%s","message":"%s","api_key":"%s","type":"sharedkey"}'%(sender,receipient,sender,encrypted_shared_key,self._logged_user_api_key)
self.client_socket.sendall(request_json)
self.client_socket.send("/0")
#This wait is just so the recipient of the message can do all necessary calculations and store the key
time.sleep(5)
encrypted_text = self._encryption_engine.encrypt_text(message,shared_key)
#Finally send the (encrypted) message
if IS_FILE == False:
message_json = '{"message":"%s", "receipient":"%s", "sender":"%s", "logged_user":"%s", "api_key":"%s","type":"message"}'%(encrypted_text,receipient,sender,self._logged_user,self._logged_user_api_key)
self.client_socket.sendall(message_json)
self.client_socket.send("/0")
current_time_epoch = time.time()
time_format = '%Y/%m/%d %H:%M:%S'
date = time.strftime(time_format,time.localtime(current_time_epoch))
conn = sqlite3.connect('local.db')
cursor = conn.cursor()
cursor.execute('INSERT INTO messages (message,sender,receipient,date) VALUES (?,?,?,?)',(message,sender,receipient,date))
conn.commit()
conn.close()
else:
try:
with open(filename,"rb") as f:
print "Sending file to %s...."%receipient
message_json = '{"message":"%s", "receipient":"%s", "sender":"%s", "logged_user":"%s", "api_key":"%s","type":"file"}'%(encrypted_text,receipient,sender,self._logged_user,self._logged_user_api_key)
self.client_socket.sendall(message_json)
self.client_socket.send("/0")
data = f.read()
encrypted_data = self._encryption_engine.encrypt_file(data,shared_key)
self.client_socket.sendall(encrypted_data+"/0end")
print "Done!"
except:
print "There was an error... Check that file exists"
self._user_option_menu_header()
pass
except:
#"There was an error!"
pass
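# Summary of the key exchange performed above (only for the first message to a new recipient):
#   1. fetch the recipient's public key from the server
#   2. generate a fresh symmetric shared key and store it locally under the recipient's name
#   3. encrypt the shared key with the recipient's public key and send it (a "sharedkey" message)
#   4. encrypt the actual message or file with the shared key and send it ("message"/"file")
# Later messages to the same recipient reuse the stored shared key and skip steps 1-3.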
def _view_conversation_method(self):
print ""
user1 = raw_input("View conversation with: ")
print""
user2 = self._logged_user
conn
= singleFlyDf['HeadX(pix)']
singleFlyEntranceData_TheWindSide_P01 = singleFlyDf['FromTheWindPortEnd_P01_EnterIdx_ExitIdx_EnterHeadX_ExitHeadX']
singleFlyEntranceIndexList_TheWindSide_P01 = [item[0] for item in singleFlyEntranceData_TheWindSide_P01 if item]
for index in singleFlyEntranceIndexList_TheWindSide_P01:
axs[counter+0].plot(range(durationAfterEntrance_frames), singleFlyHeadX[index:index+durationAfterEntrance_frames], linewidth = .6, color='black')
singleFlyEntranceData_TheClosedSide_P01 = singleFlyDf['FromTheClosedEnd_P01_EnterIdx_ExitIdx_EnterHeadX_ExitHeadX']
singleFlyEntranceIndexList_TheClosedSide_P01 = [item[0] for item in singleFlyEntranceData_TheClosedSide_P01 if item]
for index in singleFlyEntranceIndexList_TheClosedSide_P01:
axs[counter+numOfGroups].plot(range(durationAfterEntrance_frames), singleFlyHeadX[index:index+durationAfterEntrance_frames], linewidth = .6, color='black')
singleFlyEntranceData_TheWindSide_P10 = singleFlyDf['FromTheWindPortEnd_P10_EnterIdx_ExitIdx_EnterHeadX_ExitHeadX']
singleFlyEntranceIndexList_TheWindSide_P10 = [item[0] for item in singleFlyEntranceData_TheWindSide_P10 if item]
for index in singleFlyEntranceIndexList_TheWindSide_P10:
axs[counter+2*numOfGroups].plot(range(durationAfterEntrance_frames), singleFlyHeadX[index:index+durationAfterEntrance_frames], linewidth = .6, color='black')
singleFlyEntranceData_TheClosedSide_P10 = singleFlyDf['FromTheClosedEnd_P10_EnterIdx_ExitIdx_EnterHeadX_ExitHeadX']
singleFlyEntranceIndexList_TheClosedSide_P10 = [item[0] for item in singleFlyEntranceData_TheClosedSide_P10 if item]
for index in singleFlyEntranceIndexList_TheClosedSide_P10:
axs[counter+3*numOfGroups].plot(range(durationAfterEntrance_frames), singleFlyHeadX[index:index+durationAfterEntrance_frames], linewidth = .6, color='black')
fontdict = {'fontsize':12}
axs[counter+0].set_title('P01_from Wind End| %s' %(group),fontdict=fontdict)
axs[counter+0].axhline(meanChoiceZoneBorders_P01[0],color='grey')
axs[counter+0].axhline(meanChoiceZoneBorders_P01[1],color='grey')
axs[counter+0].axhspan(meanBorder_P01,145,color='red',alpha = 0.3)
axs[counter+0].set_ylim(ylim[0],ylim[1])
axs[counter+numOfGroups].set_title('P01_from Closed End| %s' %(group),fontdict=fontdict)
axs[counter+numOfGroups].axhline(meanChoiceZoneBorders_P01[0],color='grey')
axs[counter+numOfGroups].axhline(meanChoiceZoneBorders_P01[1],color='grey')
axs[counter+numOfGroups].axhspan(meanBorder_P01,145,color='red',alpha = 0.3)
axs[counter+numOfGroups].set_ylim(ylim[0],ylim[1])
axs[counter+2*numOfGroups].set_title('P10_from Wind End| %s' %(group),fontdict=fontdict)
axs[counter+2*numOfGroups].axhline(meanChoiceZoneBorders_P10[0],color='grey')
axs[counter+2*numOfGroups].axhline(meanChoiceZoneBorders_P10[1],color='grey')
axs[counter+2*numOfGroups].axhspan(0,meanBorder_P10,color='red',alpha = 0.3)
axs[counter+2*numOfGroups].set_ylim(ylim[0],ylim[1])
axs[counter+3*numOfGroups].set_title('P10_from Closed End| %s' %(group),fontdict=fontdict)
axs[counter+3*numOfGroups].axhline(meanChoiceZoneBorders_P10[0],color='grey')
axs[counter+3*numOfGroups].axhline(meanChoiceZoneBorders_P10[1],color='grey')
axs[counter+3*numOfGroups].axhspan(0,meanBorder_P10,color='red',alpha = 0.3)
axs[counter+3*numOfGroups].set_ylim(ylim[0],ylim[1])
#elif mean == True:
elif individualFlies != None:
counter = 0
numOfflies = individualFlies[1] - individualFlies[0]
for fly in range(individualFlies[0],individualFlies[1]):
singleFlyDf = data.iloc[fly]
singleFlyHeadX = singleFlyDf['HeadX(pix)']
genotype = singleFlyDf['Genotype']
flyID = singleFlyDf['Fly ID']
Border_P01 = singleFlyDf['Border|P01']
Border_P10 = singleFlyDf['Border|P10']
ChoiceZoneBorders_P01 = singleFlyDf['ChoiceZoneBordersperFly_P01']
ChoiceZoneBorders_P10 = singleFlyDf['ChoiceZoneBordersperFly_P10']
singleFlyEntranceData_TheWindSide_P01 = singleFlyDf['FromTheWindPortEnd_P01_EnterIdx_ExitIdx_EnterHeadX_ExitHeadX']
singleFlyEntranceIndexList_TheWindSide_P01 = [item[0] for item in singleFlyEntranceData_TheWindSide_P01 if item]
linewidth = 1 + 0.8*(numOfflies-1)
for index in singleFlyEntranceIndexList_TheWindSide_P01:
axs[counter*4+0].plot(range(durationAfterEntrance_frames), singleFlyHeadX[index:index+durationAfterEntrance_frames], linewidth = linewidth, color='black')
singleFlyEntranceData_TheClosedSide_P01 = singleFlyDf['FromTheClosedEnd_P01_EnterIdx_ExitIdx_EnterHeadX_ExitHeadX']
singleFlyEntranceIndexList_TheClosedSide_P01 = [item[0] for item in singleFlyEntranceData_TheClosedSide_P01 if item]
for index in singleFlyEntranceIndexList_TheClosedSide_P01:
axs[counter*4+1].plot(range(durationAfterEntrance_frames), singleFlyHeadX[index:index+durationAfterEntrance_frames], linewidth = linewidth, color='black')
singleFlyEntranceData_TheWindSide_P10 = singleFlyDf['FromTheWindPortEnd_P10_EnterIdx_ExitIdx_EnterHeadX_ExitHeadX']
singleFlyEntranceIndexList_TheWindSide_P10 = [item[0] for item in singleFlyEntranceData_TheWindSide_P10 if item]
for index in singleFlyEntranceIndexList_TheWindSide_P10:
axs[counter*4+2].plot(range(durationAfterEntrance_frames), singleFlyHeadX[index:index+durationAfterEntrance_frames], linewidth = linewidth, color='black')
singleFlyEntranceData_TheClosedSide_P10 = singleFlyDf['FromTheClosedEnd_P10_EnterIdx_ExitIdx_EnterHeadX_ExitHeadX']
singleFlyEntranceIndexList_TheClosedSide_P10 = [item[0] for item in singleFlyEntranceData_TheClosedSide_P10 if item]
for index in singleFlyEntranceIndexList_TheClosedSide_P10:
axs[counter*4+3].plot(range(durationAfterEntrance_frames), singleFlyHeadX[index:index+durationAfterEntrance_frames], linewidth = linewidth, color='black')
fontdict = {'fontsize':12*(numOfGroups/1.2)}
axs[counter*4+0].set_title('%s, ID: %s|P01_from Wind End' %(genotype,flyID),fontdict=fontdict)
axs[counter*4+0].axhline(ChoiceZoneBorders_P01[0],color='grey')
axs[counter*4+0].axhline(ChoiceZoneBorders_P01[1],color='grey')
axs[counter*4+0].axhspan(Border_P01,145,color='red',alpha = 0.3)
axs[counter*4+0].set_ylim(ylim[0],ylim[1])
axs[counter*4+1].set_title('P01_from Closed End',fontdict=fontdict)
axs[counter*4+1].axhline(ChoiceZoneBorders_P01[0],color='grey')
axs[counter*4+1].axhline(ChoiceZoneBorders_P01[1],color='grey')
axs[counter*4+1].axhspan(Border_P01,145,color='red',alpha = 0.3)
axs[counter*4+1].set_ylim(ylim[0],ylim[1])
axs[counter*4+2].set_title('P10_from Wind End',fontdict=fontdict)
axs[counter*4+2].axhline(ChoiceZoneBorders_P10[0],color='grey')
axs[counter*4+2].axhline(ChoiceZoneBorders_P10[1],color='grey')
axs[counter*4+2].axhspan(0,Border_P10,color='red',alpha = 0.3)
axs[counter*4+2].set_ylim(ylim[0],ylim[1])
axs[counter*4+3].set_title('P10_from Closed End',fontdict=fontdict)
axs[counter*4+3].axhline(ChoiceZoneBorders_P10[0],color='grey')
axs[counter*4+3].axhline(ChoiceZoneBorders_P10[1],color='grey')
axs[counter*4+3].axhspan(0,Border_P10,color='red',alpha = 0.3)
axs[counter*4+3].set_ylim(ylim[0],ylim[1])
counter += 1
return axs
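# --- Editor's sketch (not part of the original notebook): the axs indexing above is
# easy to misread. In the grouped branch the figure is a 4 x numOfGroups grid flattened
# by axs.ravel(), so the k-th row for group `counter` lives at axs[counter + k*numOfGroups];
# in the per-fly branch the grid is numOfGroups x 4 and the panels for fly `counter`
# start at axs[counter*4].
def _axs_flat_index(counter, row, numOfGroups, per_fly=False):
    """Return the flat index into the ravelled axs array used above (illustrative only)."""
    return counter * 4 + row if per_fly else counter + row * numOfGroups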
def VisualizeTheChoiceZoneTrajectories(df, individualFlies = None, groupBy = None, groupsToPlot = None, durationAfterEntrance_frames=50,
mean = False, CI = 95, hspace = .3, wspace = .3, ylim = [30,110]):
if individualFlies == None:
#if mean == False:
if groupsToPlot == None:
df_grouped = df.groupby(groupBy)
numOfGroups = len(df_grouped)
figSize = (5*numOfGroups,20)
fig, axs = plt.subplots(4,numOfGroups, figsize=figSize, facecolor='w', edgecolor='k')
fig.subplots_adjust(hspace = hspace, wspace = wspace)
axs = axs.ravel()
counter = 0
## for each group of flies (i.e., parent vs offspring), I'm going to plot 4 types of decision zone trajectories:
## P01: entrance from wind AND closed end, P10: entrance from wind AND closed end
for group,data in df_grouped:
axs = VisualizeGroupsOfData(group,data,counter,numOfGroups,axs,individualFlies,durationAfterEntrance_frames,ylim)
counter += 1
else:
df_grouped = df.groupby(groupBy)
numOfGroups = len(groupsToPlot)
figSize = (5*numOfGroups,20)
fig, axs = plt.subplots(4,numOfGroups, figsize=figSize, facecolor='w', edgecolor='k')
fig.subplots_adjust(hspace = hspace, wspace = wspace)
axs = axs.ravel()
counter = 0
for group in groupsToPlot:
data = df_grouped.get_group(group)
axs = VisualizeGroupsOfData(group,data,counter,numOfGroups,axs,individualFlies,durationAfterEntrance_frames,ylim)
counter += 1
#elif mean == True:
elif individualFlies != None:
group = None
counter = None
numOfGroups = individualFlies[1] - individualFlies[0]
figSize = (12*numOfGroups,4*numOfGroups**2)
fig, axs = plt.subplots(numOfGroups,4, figsize=figSize, facecolor='w', edgecolor='k')
fig.subplots_adjust(hspace = hspace, wspace = wspace)
axs = axs.ravel()
axs = VisualizeGroupsOfData(group,df,counter,numOfGroups,axs,individualFlies,durationAfterEntrance_frames,ylim)
sns.set(style="ticks", palette="bright", color_codes=True)
sns.despine()
plt.show()
return None
# In[ ]:
VisualizeTheChoiceZoneTrajectories(d, individualFlies = [5,6], groupBy = 'Genotype', groupsToPlot = None,
durationAfterEntrance_frames = 30, mean=False, CI = 95, hspace = .3,
wspace = .3, ylim = [80,90])
# In[ ]:
d['FromTheClosedEnd_P01_EnterIdx_ExitIdx_EnterHeadX_ExitHeadX'][5]
# In[ ]:
d['ChoiceZoneBordersperFly_P01'][5]
ChoiceZoneBorders_P01 = d['ChoiceZoneBordersperFly_P01'][5]
# In[ ]:
x = d['HeadX(pix)'][5][1045:1075]
fig = plt.figure(figsize=(10,10))
ax1 = plt.subplot(111)
ax1.axhline(ChoiceZoneBorders_P01[0],color='grey')
ax1.axhline(ChoiceZoneBorders_P01[1],color='grey')
ax1.plot(range(len(x)), x, color='black')
# ## Plot any given metric
# In[52]:
def plotTheMetric(df,metric,rootDir,mergeIntensities, combineControls,compareLightType, dropNans=False):
df['Sex'] = df['Sex'].apply(lambda x: x.split('-')[0])
df['Satiety'] = df['Satiety'].apply(lambda x: x.split('-')[0])
## Combine fed2, starved2 into one dataset
df.loc[df['Satiety'] == 'fed2', 'Satiety'] = 'fed'
df.loc[df['Satiety'] == 'starved2', 'Satiety'] = 'starved'
## open new folders to save the results
newFolderName = rootDir + '/' + metric
if not os.path.exists(newFolderName):
os.makedirs(newFolderName)
newFolderName = rootDir + '/' + metric + '/P01'
if not os.path.exists(newFolderName):
os.makedirs(newFolderName)
newFolderName = rootDir + '/' + metric + '/P10'
if not os.path.exists(newFolderName):
os.makedirs(newFolderName)
newFolderName = rootDir + '/' + metric + '/Mean'
if not os.path.exists(newFolderName):
os.makedirs(newFolderName)
## Save the df which contains the quantitative values of the given metrics, in case I need to plot them again
savePath = rootDir + '/' + metric + '/'
df.to_pickle(savePath + metric + '_values.pkl')
## define the color palette
if len(df['Genotype'].unique()) == 3:
myPal = {df['Genotype'].unique()[0] : 'lightgreen',
df['Genotype'].unique()[1] : 'cyan',
df['Genotype'].unique()[2]: 'red'}
elif len(df['Genotype'].unique()) == 5:
myPal = {df['Genotype'].unique()[0] : 'lightgreen',
df['Genotype'].unique()[1] : 'coral',
df['Genotype'].unique()[2]: 'orchid',
df['Genotype'].unique()[3] : 'red',
df['Genotype'].unique()[4]: 'royalblue'}
## get the list of variables
listofSex = df['Sex'].unique()
listofSatiety = df['Satiety'].unique()
listofWindStat = df['Wind status'].unique()
listofGenotypes = df['Genotype'].unique()
listofLightType = ['Pulse']
df.loc[df['Light Intensity(uW/mm2)'] == ")70u","Light Intensity(uW/mm2)"] = "70uW"
df.loc[df['Light Intensity(uW/mm2)'] == ")28mV","Light Intensity(uW/mm2)"] = "28mV"
df.loc[df['Light Intensity(uW/mm2)'] == ")14mV","Light Intensity(uW/mm2)"] = "14mV"
df.loc[df['Genotype'] == "w1118-UAS-Cschrimson","Genotype"] = "w1118-UAS-CsChrimson"
# listofIntensities = df['Light Intensity(uW/mm2)'].unique()
# listofIntensities = ['4-65mV','9-3mV','14mV'] # constant
# listofIntensities = ['9-3mV','18-6mV','28mV'] # pulsed
listofIntensities = ['14mV','21mV','28mV']
if compareLightType == False:
if mergeIntensities == False:
## if combineControls is true, then status-based df, else genotype-based.
if combineControls == True:
## make the columns to classify data points (status-based in this case)
df = df.assign(Status_Sex_Satiety_Intensity_Wind = pd.Series(df['Status'] + '_' + df['Sex'] + '_' +
df['Satiety'] + '_' + df['Light Intensity(uW/mm2)'] + '_' +
df['Wind status'], index = df.index))
## going to generate plots for each of the combination of these three condition, i.e, male_fed__NoAir
for sex in listofSex:
for satietyStat in listofSatiety:
for windStat in listofWindStat:
## I wanted to keep the original metric name to access the columns in the df.
## Generating the new variable, metricForFileName, helps me to specify whether the nans were dropped
## in the file name.
if dropNans == False:
metricForFileName = metric + '_CombinedControls'
elif dropNans == True:
metricForFileName = metric + '_CombinedControls_NansDropped'
try:
## P01 of the metric
fig,b = dabest.plot(df, x = 'Status_Sex_Satiety_Intensity_Wind', y = metric+'_P01' ,
color_col= 'Genotype', custom_palette = myPal, float_contrast=False,
idx = (
('Parent_' + str(sex) + '_' + str(satietyStat) + '_' + str(listofIntensities[0]) + '_' + str(windStat), 'Offspring_' + str(sex) + '_' + str(satietyStat) + '_' + str(listofIntensities[0]) + '_' + str(windStat)),
('Parent_' + str(sex) + '_' + str(satietyStat) + '_' + str(listofIntensities[1]) + '_' + str(windStat), 'Offspring_' + str(sex) + '_' + str(satietyStat) + '_' + str(listofIntensities[1]) + '_' + str(windStat)),
('Parent_' + str(sex) + '_' + str(satietyStat) + '_' + str(listofIntensities[2]) + '_' + str(windStat), 'Offspring_' + str(sex) + '_' + str(satietyStat) + '_' + str(listofIntensities[2]) + '_' + str(windStat)),
('Parent_' + str(sex) + '_' + str(satietyStat) + '_' + str(listofIntensities[3]) + '_' + str(windStat), 'Offspring_' + str(sex) + '_' + str(satietyStat) + '_' + str(listofIntensities[3]) + '_' + str(windStat)),
('Parent_' + str(sex) + '_' + str(satietyStat) + '_' + str(listofIntensities[4]) + '_' + str(windStat), 'Offspring_' + str(sex) + '_' + str(satietyStat) + '_' + str(listofIntensities[4]) + '_' + str(windStat))
))
savePath = rootDir + '/' + metric + '/P01/'
saveFileName = metricForFileName + '_P01_' + str(sex) + '_' + str(satietyStat) + '_' + str(windStat)
plt.savefig(savePath + saveFileName + '.svg',dpi=1000,bbox_inches='tight')
## Get the sample size
list_of_Status_Sex_Satiety_Intensity_Wind = df['Status_Sex_Satiety_Intensity_Wind'].unique()
## The unique list of Status includes both wind and no wind, while the plots include either wind or
## no wind. So, in each iteration, I select wind or no wind data based on the variable WindStat,
## just like the plotting function.
select_list_of_Status_Sex_Satiety_Intensity_Wind = [i for i in list_of_Status_Sex_Satiety_Intensity_Wind if str('_' + str(windStat)) in i]
temp_parent_N = []
temp_offspring_N = []
temp_parent_STD = []
temp_offspring_STD = []
for c in select_list_of_Status_Sex_Satiety_Intensity_Wind:
N
isinstance(inputs, s3_input):
input_dict['training'] = inputs
elif isinstance(inputs, dict):
for k, v in inputs.items():
input_dict[k] = _TrainingJob._format_s3_uri_input(v)
else:
raise ValueError('Cannot format input {}. Expecting one of str, dict or s3_input'.format(inputs))
channels = []
for channel_name, channel_s3_input in input_dict.items():
channel_config = channel_s3_input.config.copy()
channel_config['ChannelName'] = channel_name
channels.append(channel_config)
return channels
@staticmethod
def _format_s3_uri_input(input):
if isinstance(input, str):
if not input.startswith('s3://'):
raise ValueError('Training input data must be a valid S3 URI and must start with "s3://"')
return s3_input(input)
if isinstance(input, s3_input):
return input
else:
raise ValueError('Cannot format input {}. Expecting one of str or s3_input'.format(input))
@staticmethod
def _prepare_output_config(s3_path, kms_key_id):
config = {'S3OutputPath': s3_path}
if kms_key_id is not None:
config['KmsKeyId'] = kms_key_id
return config
@staticmethod
def _prepare_resource_config(instance_count, instance_type, volume_size):
resource_config = {'InstanceCount': instance_count,
'InstanceType': instance_type,
'VolumeSizeInGB': volume_size}
return resource_config
@staticmethod
def _prepare_stopping_condition(max_run):
stop_condition = {'MaxRuntimeInSeconds': max_run}
return stop_condition
@property
def name(self):
return self.job_name
def wait(self, logs=True):
if logs:
self.sagemaker_session.logs_for_job(self.job_name, wait=True)
else:
self.sagemaker_session.wait_for_job(self.job_name)
class Estimator(EstimatorBase):
"""
A generic Estimator to train using any supplied algorithm. This class is designed for use with
algorithms that don't have their own, custom class.
"""
def __init__(self, image_name, role, train_instance_count, train_instance_type,
train_volume_size=30, train_max_run=24 * 60 * 60, input_mode='File',
output_path=None, output_kms_key=None, base_job_name=None, sagemaker_session=None):
"""Initialize an ``Estimator`` instance.
Args:
image_name (str): The container image to use for training.
role (str): An AWS IAM role (either name or full ARN). The Amazon SageMaker training jobs and APIs
that create Amazon SageMaker endpoints use this role to access training data and model artifacts.
After the endpoint is created, the inference code might use the IAM role,
if it needs to access an AWS resource.
train_instance_count (int): Number of Amazon EC2 instances to use for training.
train_instance_type (str): Type of EC2 instance to use for training, for example, 'ml.c4.xlarge'.
train_volume_size (int): Size in GB of the EBS volume to use for storing input data
during training (default: 30). Must be large enough to store training data if File Mode is used
(which is the default).
train_max_run (int): Timeout in seconds for training (default: 24 * 60 * 60).
After this amount of time Amazon SageMaker terminates the job regardless of its current status.
input_mode (str): The input mode that the algorithm supports (default: 'File'). Valid modes:
* 'File' - Amazon SageMaker copies the training dataset from the S3 location to a local directory.
* 'Pipe' - Amazon SageMaker streams data directly from S3 to the container via a Unix-named pipe.
output_path (str): S3 location for saving the training result (model artifacts and output files).
If not specified, results are stored to a default bucket. If the bucket with the specific name
does not exist, the estimator creates the bucket during the
:meth:`~sagemaker.estimator.EstimatorBase.fit` method execution.
output_kms_key (str): Optional. KMS key ID for encrypting the training output (default: None).
base_job_name (str): Prefix for training job name when the :meth:`~sagemaker.estimator.EstimatorBase.fit`
method launches. If not specified, the estimator generates a default job name, based on
the training image name and current timestamp.
sagemaker_session (sagemaker.session.Session): Session object which manages interactions with
Amazon SageMaker APIs and any other AWS services needed. If not specified, the estimator creates one
using the default AWS configuration chain.
"""
self.image_name = image_name
self.hyperparam_dict = {}
super(Estimator, self).__init__(role, train_instance_count, train_instance_type,
train_volume_size, train_max_run, input_mode,
output_path, output_kms_key, base_job_name, sagemaker_session)
def train_image(self):
"""
Returns the docker image to use for training.
The fit() method, that does the model training, calls this method to find the image to use
for model training.
"""
return self.image_name
def set_hyperparameters(self, **kwargs):
for k, v in kwargs.items():
self.hyperparam_dict[k] = v
def hyperparameters(self):
"""Returns the hyperparameters as a dictionary to use for training.
The fit() method, that does the model training, calls this method to find the hyperparameters you specified.
"""
return self.hyperparam_dict
def create_model(self, image=None, predictor_cls=None, serializer=None, deserializer=None,
content_type=None, accept=None, **kwargs):
"""
Create a model to deploy.
Args:
image (str): A container image to use for deploying the model. Defaults to the image used for training.
predictor_cls (RealTimePredictor): The predictor class to use when deploying the model.
serializer (callable): Should accept a single argument, the input data, and return a sequence
of bytes. May provide a content_type attribute that defines the endpoint request content type
deserializer (callable): Should accept two arguments, the result data and the response content type,
and return a sequence of bytes. May provide a content_type attribute that defines the endpoint
response Accept content type.
content_type (str): The invocation ContentType, overriding any content_type from the serializer
accept (str): The invocation Accept, overriding any accept from the deserializer.
The serializer, deserializer, content_type, and accept arguments are only used to define a default
RealTimePredictor. They are ignored if an explicit predictor class is passed in. Other arguments
are passed through to the Model class.
Returns: a Model ready for deployment.
"""
if predictor_cls is None:
def predict_wrapper(endpoint, session):
return RealTimePredictor(endpoint, session, serializer, deserializer, content_type, accept)
predictor_cls = predict_wrapper
return Model(self.model_data, image or self.train_image(), self.role, sagemaker_session=self.sagemaker_session,
predictor_cls=predictor_cls, **kwargs)
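# --- Editor's usage sketch (not part of the SDK source): the docstrings above describe a
# generic train-then-deploy flow; this shows how the pieces are typically wired together.
# The image URI, role, bucket names and instance types are placeholders, and deploy() is
# assumed to be provided by EstimatorBase (it is referenced in the docstrings but not shown
# in this excerpt).
def _example_generic_estimator_flow(sagemaker_session=None):
    """Minimal sketch: train with a custom container image, then host the model."""
    estimator = Estimator(
        image_name="123456789012.dkr.ecr.us-east-1.amazonaws.com/my-algo:latest",  # placeholder
        role="SageMakerRole",                     # placeholder IAM role name or ARN
        train_instance_count=1,
        train_instance_type="ml.c4.xlarge",
        output_path="s3://my-bucket/output",      # placeholder bucket
        sagemaker_session=sagemaker_session,
    )
    # Hyperparameters are stored verbatim and returned by hyperparameters().
    estimator.set_hyperparameters(epochs=10, learning_rate=0.01)
    # `inputs` may be an S3 URI string, an s3_input, or a dict of channel name -> either.
    estimator.fit({"training": "s3://my-bucket/train"})
    # Hosting: deploy() signature assumed from the wider SDK, not shown in this excerpt.
    return estimator.deploy(initial_instance_count=1, instance_type="ml.m4.xlarge")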
class Framework(EstimatorBase):
"""Base class that cannot be instantiated directly.
Subclasses define functionality pertaining to specific ML frameworks,
such as training/deployment images and predictor instances.
"""
def __init__(self, entry_point, source_dir=None, hyperparameters=None, enable_cloudwatch_metrics=False,
container_log_level=logging.INFO, code_location=None, **kwargs):
"""Base class initializer. Subclasses which override ``__init__`` should invoke ``super()``
Args:
entry_point (str): Path (absolute or relative) to the Python source file which should be executed
as the entry point to training. This should be compatible with either Python 2.7 or Python 3.5.
source_dir (str): Path (absolute or relative) to a directory with any other training
source code dependencies aside from the entry point file (default: None). Structure within this
directory is preserved when training on Amazon SageMaker.
hyperparameters (dict): Hyperparameters that will be used for training (default: None).
The hyperparameters are made accessible as a dict[str, str] to the training code on SageMaker.
For convenience, this accepts other types for keys and values, but ``str()`` will be called
to convert them before training.
enable_cloudwatch_metrics (bool): Whether training and hosting containers will
generate CloudWatch metrics under the AWS/SageMakerContainer namespace (default: False).
container_log_level (int): Log level to use within the container (default: logging.INFO).
Valid values are defined in the Python logging module.
code_location (str): Name of the S3 bucket where custom code is uploaded (default: None).
If not specified, default bucket created by ``sagemaker.session.Session`` is used.
**kwargs: Additional kwargs passed to the ``EstimatorBase`` constructor.
"""
super(Framework, self).__init__(**kwargs)
self.source_dir = source_dir
self.entry_point = entry_point
self.enable_cloudwatch_metrics = enable_cloudwatch_metrics
self.container_log_level = container_log_level
self._hyperparameters = hyperparameters or {}
self.code_location = code_location
def fit(self, inputs, wait=True, logs=True, job_name=None):
"""Train a model using the input training dataset.
The API calls the Amazon SageMaker CreateTrainingJob API to start model training.
The API uses the configuration you provided to create the estimator and the
specified input training data to send the CreateTrainingJob request to Amazon SageMaker.
This is a synchronous operation. After the model training successfully completes,
you can call the ``deploy()`` method to host the model using the Amazon SageMaker hosting services.
Args:
inputs (str or dict or sagemaker.session.s3_input): Information about the training data.
This can be one of three types:
(str) - the S3 location where training data is saved.
(dict[str, str] or dict[str, sagemaker.session.s3_input]) - If using multiple channels for
training data, you can specify a dict mapping channel names
to strings or :func:`~sagemaker.session.s3_input` objects.
(sagemaker.session.s3_input) - channel configuration for S3 data sources that can provide
additional information about the training dataset. See :func:`sagemaker.session.s3_input`
for full details.
wait (bool): Whether the call should wait until the job completes (default: True).
logs (bool): Whether to show the logs produced by the job.
Only meaningful when wait is True (default: True).
job_name (str): Training job name. If not specified, the estimator generates a default job name,
based on the training image name and current timestamp.
"""
# always determine new job name _here_ because it is used before base is called
if job_name is not
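# --- Editor's sketch of the three accepted ``inputs`` forms described in the fit()
# docstring above (bucket and channel names are placeholders):
def _example_fit_inputs(estimator):
    # 1) a single S3 URI string -> used as the 'training' channel
    estimator.fit("s3://my-bucket/train")
    # 2) a dict mapping channel names to S3 URIs (or s3_input objects)
    estimator.fit({"train": "s3://my-bucket/train", "test": "s3://my-bucket/test"})
    # 3) an s3_input object, which can carry extra channel configuration
    estimator.fit(s3_input("s3://my-bucket/train"))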
#!/usr/bin/env python
import json
import os
import sys
import numpy as np
import pylab as pl
from scipy.optimize import curve_fit
import tqdm
import quantities_for_comparison as qc
SHOW_IN_BROWSER=True
DEFAULT_PREFACTOR = 100
DEFAULT_wb0 = 1.0/8.0
DEFAULT_wb1 = 1.0/64.0
EXPECTED_SCRIPT_VERSION = ["0.0.3","0.0.4"]
from bokeh.models import (
ColumnDataSource,
LinearColorMapper,
LogColorMapper,
ColorBar,
BasicTicker,
)
from bokeh.plotting import figure, output_file
from bokeh.io import show as show_, export_png
from bokeh.sampledata.periodic_table import elements
from bokeh.transform import dodge
from matplotlib.colors import Normalize, LogNorm, to_hex
from matplotlib.cm import (
plasma,
inferno,
magma,
viridis,
cividis,
turbo,
ScalarMappable,
)
from pandas import options
from typing import List
import warnings
from bokeh.io import export_svg
def abs_V0_rel_diff(*args, **kwargs):
return abs(qc.V0_rel_diff(*args, **kwargs))
def abs_B0_rel_diff(*args, **kwargs):
return abs(qc.B0_rel_diff(*args, **kwargs))
def abs_B1_rel_diff(*args, **kwargs):
return abs(qc.B1_rel_diff(*args, **kwargs))
quantity_for_comparison_map = {
"delta_per_formula_unit": qc.delta,
"B0_rel_diff": qc.B0_rel_diff,
"V0_rel_diff": qc.V0_rel_diff,
"B1_rel_diff": qc.B1_rel_diff,
"abs_V0_rel_diff": abs_V0_rel_diff,
"abs_B0_rel_diff": abs_B0_rel_diff,
"abs_B1_rel_diff": abs_B1_rel_diff,
f"nu({DEFAULT_wb0},{DEFAULT_wb1})": qc.rel_errors_vec_length,
"epsilon": qc.epsilon
}
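# --- Editor's sketch (not part of the original script): every function in
# quantity_for_comparison_map is called with the same positional signature used in the
# main loop below -- reference (V0, B0, B1), compared (V0, B0, B1), then the prefactor
# and the two nu weights. The numeric values here are made-up placeholders.
def _example_quantity_lookup(quantity_name="epsilon"):
    """Dispatch one comparison quantity through the map (illustrative only)."""
    compare = quantity_for_comparison_map[quantity_name]
    return compare(
        20.1, 0.95, 4.5,      # reference V0, B0, B1 (placeholder numbers)
        20.3, 0.97, 4.4,      # compared  V0, B0, B1 (placeholder numbers)
        DEFAULT_PREFACTOR, DEFAULT_wb0, DEFAULT_wb1,
    )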
if __name__ == "__main__":
try:
SET_NAME = sys.argv[1]
except IndexError:
print(f"The first argument must be the set name.")
sys.exit(1)
try:
QUANTITY = sys.argv[2]
except IndexError:
print(f"The second argument must be the quantity to use for comparison. Choose among {quantity_for_comparison_map.keys()}")
sys.exit(1)
if QUANTITY not in quantity_for_comparison_map.keys():
print(f"The second argument must be the quantity to use for comparison. Choose among {quantity_for_comparison_map.keys()}")
sys.exit(1)
try:
with open(f'results-{SET_NAME}-fleur.json') as fhandle:
compare_plugin_data = json.load(fhandle)
if not compare_plugin_data['script_version'] in EXPECTED_SCRIPT_VERSION:
raise ValueError(
f"This script only works with data generated at version {EXPECTED_SCRIPT_VERSION}. "
f"Please re-run ./get_results.py to update the data format for results-{SET_NAME}-fleur.json!"
)
sys.exit(1)
except OSError:
print(f"No data found for fleur (set '{SET_NAME}'), fleur is the reference and must be present")
sys.exit(1)
file_prefix = f'results-{SET_NAME}-'
file_suffix = '.json'
results_folder = os.curdir
code_results = {}
for fname in os.listdir(results_folder):
if fname.startswith(file_prefix) and fname.endswith(file_suffix):
label = fname[len(file_prefix):-len(file_suffix)]
if label in ["fleur","wien2k"]:
continue
with open(os.path.join(results_folder, fname)) as fhandle:
code_results[label] = json.load(fhandle)
if not code_results[label]['script_version'] in EXPECTED_SCRIPT_VERSION:
print(
f"This script only works with data generated at version {EXPECTED_SCRIPT_VERSION}. "
f"Please re-run ./get_results.py to update the data format for {fname}! Skipping {label}"
)
code_results.pop(label)
for plugin, plugin_data in code_results.items():
print(f"Using data for plugin '{plugin}' (set '{SET_NAME}') compared with fleur.")
all_systems = set(plugin_data['eos_data'].keys())
all_systems = set(plugin_data['BM_fit_data'].keys())
#all_systems.update(compare_plugin_data['BM_fit_data'].keys())
collect = {
"X/Diamond" : {"elements": [], "values": []},
"X/FCC" : {"elements": [], "values": []},
"X/BCC" : {"elements": [], "values": []},
"X/SC" : {"elements": [], "values": []},
"X2O3" : {"elements": [], "values": []},
"X2O5" : {"elements": [], "values": []},
"XO2" : {"elements": [], "values": []},
"XO3" : {"elements": [], "values": []},
"XO" : {"elements": [], "values": []},
"X2O" : {"elements": [], "values": []}
}
progress_bar = tqdm.tqdm(sorted(all_systems))
for element_and_configuration in progress_bar:
progress_bar.set_description(f"{element_and_configuration:12s}")
progress_bar.refresh()
element, configuration = element_and_configuration.split('-')
# Get the data for the reference plugin
ref_BM_fit_data = plugin_data['BM_fit_data'][f'{element}-{configuration}']
if ref_BM_fit_data is None:
continue
scaling_factor_ref = qc.get_volume_scaling_to_formula_unit(
plugin_data['num_atoms_in_sim_cell'][f'{element}-{configuration}'],
element, configuration
)
V0=ref_BM_fit_data['min_volume']/scaling_factor_ref
B0=ref_BM_fit_data['bulk_modulus_ev_ang3']
B01=ref_BM_fit_data['bulk_deriv']
# Get the data for the compare_with plugin, if specified (and if the EOS worked for the
# reference plugin, otherwise we don't know which E0 to use)
try:
compare_BM_fit_data = compare_plugin_data['BM_fit_data'][f'{element}-{configuration}']
if compare_BM_fit_data is None:
# No fitting data in the plugin to compare with.
# Raise KeyError so it is caught just below and this
# system is skipped.
raise KeyError
except KeyError:
# Fit data is missing (if we are here, the EOS points are there,
# so it means that the fit itself failed). Skip this system.
continue
scaling_factor_comp = qc.get_volume_scaling_to_formula_unit(
compare_plugin_data['num_atoms_in_sim_cell'][f'{element}-{configuration}'],
element, configuration
)
CV0=compare_BM_fit_data['min_volume']/scaling_factor_comp
CB0=compare_BM_fit_data['bulk_modulus_ev_ang3']
CB01=compare_BM_fit_data['bulk_deriv']
quant = quantity_for_comparison_map[QUANTITY](V0,B0,B01,CV0,CB0,CB01,DEFAULT_PREFACTOR,DEFAULT_wb0,DEFAULT_wb1)
collect[configuration]["values"].append(quant)
collect[configuration]["elements"].append(element)
# The way we decide whether this is a unaries set or an oxides set is a bit fragile.
if collect["X/Diamond"]["values"]:
unaries = True
list_confs = ["X/Diamond","X/FCC","X/BCC","X/SC"]
else:
unaries = False
list_confs = ["X2O3","X2O5","X2O","XO2","XO3","XO"]
width = 1050
cmap = "plasma"
alpha = 0.65
extended = True
log_scale = False
cbar_height = None
cbar_standoff = 12
cbar_fontsize = 14
blank_color = "#c4c4c4"
under_value = None
under_color = "#140F0E"
over_value = None
over_color = "#140F0E"
special_elements = None
special_color = "#6F3023"
options.mode.chained_assignment = None
# Assign color palette based on input argument
if cmap == "plasma":
cmap = plasma
bokeh_palette = "Plasma256"
elif cmap == "magma":
cmap = magma
bokeh_palette = "Magma256"
else:
raise ValueError("Unknown color map")
# Define the period labels and group numbers
period_label = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
group_range = [x for x in range(1, 19)]
#We "fake" that La-Yb has period 9 and group from 5 to 18, also that
#Th-Lr has period 10 and group from 5 to 18. It is just to place them in
#the correct point in the chart.
count=0
for i in range(56, 70):
elements.period[i] = 9
elements.group[i] = count + 4
count += 1
count = 0
for i in range(88, 102):
elements.period[i] = 10
elements.group[i] = count + 4
count += 1
per = [int(i) for i in elements["period"]]
grou = [int(i) for i in elements["group"]]
min_data = min([min(collect[i]["values"]) for i in list_confs])
max_data = max([max(collect[i]["values"]) for i in list_confs])
color_list={}
for conf in list_confs:
data_elements = collect[conf]["elements"]
data = collect[conf]["values"]
if len(data) != len(data_elements):
raise ValueError("Unequal number of atomic elements and data points")
# Define matplotlib and bokeh color map
if log_scale:
for datum in data:
if datum < 0:
raise ValueError(
f"Entry for element {datum} is negative but log-scale is selected"
)
color_mapper = LogColorMapper(
palette=bokeh_palette, low=min_data, high=max_data
)
norm = LogNorm(vmin=min_data, vmax=max_data)
else:
color_mapper = LinearColorMapper(
palette=bokeh_palette, low=min_data, high=max_data
)
norm = Normalize(vmin=min_data, vmax=max_data)
color_scale = ScalarMappable(norm=norm, cmap=cmap).to_rgba(data, alpha=None)
# Set blank color
color_list[conf] = [blank_color] * len(elements)
# Compare elements in dataset with elements in periodic table
for i, data_element in enumerate(data_elements):
element_entry = elements.symbol[
elements.symbol.str.lower() == data_element.lower()
]
if not element_entry.empty:
element_index = element_entry.index[0]
else:
warnings.warn("Invalid chemical symbol: " + data_element)
if color_list[conf][element_index] != blank_color:
warnings.warn("Multiple entries for element " + data_element)
elif under_value is not None and data[i] <= under_value:
color_list[conf][element_index] = under_color
elif over_value is not None and data[i] >= over_value:
color_list[conf][element_index] = over_color
else:
color_list[conf][element_index] = to_hex(color_scale[i])
if unaries:
# Define figure properties for visualizing data
source = ColumnDataSource(
data=dict(
group=grou,
period=per,
top=[i-0.45 for i in per],
bottom=[i+0.45 for i in per],
left=[i-0.45 for i in grou],
right=[i+0.45 for i in grou],
sym=elements["symbol"],
atomic_number=elements["atomic number"],
type_color_dia=color_list["X/Diamond"],
type_color_sc=color_list["X/SC"],
type_color_bcc=color_list["X/BCC"],
type_color_fcc=color_list["X/FCC"],
)
)
# Plot the periodic table
p = figure(x_range=[0,19], y_range=[11,0], tools="save")
p.plot_width = width
p.outline_line_color = None
p.background_fill_color = None
p.border_fill_color = None
p.toolbar_location = "above"
p.quad(left="left", right="group", top="period", bottom="bottom", source=source, alpha=alpha, color="type_color_dia")
p.quad(left="left", right="group", top="top", bottom="period", source=source, alpha=alpha, color="type_color_sc")
p.quad(left="group", right="right", top="period", bottom="bottom", source=source, alpha=alpha, color="type_color_bcc")
p.quad(left="group", right="right", top="top", bottom="period", source=source, alpha=alpha, color="type_color_fcc")
p.axis.visible = False
#The reference block
p.quad(left=5,right=6.5,bottom=0,top=1.5,fill_color="white")
p.quad(left=5,right=6.5,bottom=1.5,top=3,fill_color="white")
p.quad(left=6.5,right=8,bottom=0,top=1.5,fill_color="white")
p.quad(left=6.5,right=8,bottom=1.5,top=3,fill_color="white")
xx=[5.75,5.75,7.25,7.25]
yy=[0.75,2.25,0.75,2.25]
text=["SC","Diamond","FCC","BCC"]
sou = ColumnDataSource(dict(x=xx, y=yy, text=text))
p.text(
x="x",
y="y",
text="text",
source=sou,
text_font_style="bold",
text_font_size="17pt",
text_align= "center",
text_baseline= "middle",
angle=-45,
angle_units="grad"
)
else:
# Define figure properties for visualizing data
source = ColumnDataSource(
data=dict(
group=grou,
period=per,
top=[i-0.45 for i in per],
bottom=[i+0.45 for i in per],
left=[i-0.45 for i in grou],
right=[i+0.45 for i in grou],
midup=[i-0.15 for i in per],
middown=[i+0.15 for i in per],
sym=elements["symbol"],
atomic_number=elements["atomic number"],
type_color_X2O3=color_list["X2O3"],
type_color_X2O5=color_list["X2O5"],
type_color_X2O=color_list["X2O"],
type_color_XO2=color_list["XO2"],
type_color_XO3=color_list["XO3"],
type_color_XO=color_list["XO"],
)
)
# Plot the periodic table
p = figure(x_range=[0,19], y_range=[11,0], tools="save")
p.plot_width = width
p.outline_line_color = None
p.background_fill_color = None
p.border_fill_color = None
p.toolbar_location = "above"
p.quad(left="left", right="group", top="top", bottom="midup", source=source, alpha=alpha, color="type_color_X2O3")
p.quad(left="left", right="group", top="midup", bottom="middown", source=source, alpha=alpha, color="type_color_X2O")
p.quad(left="left", right="group", top="middown", bottom="bottom", source=source, alpha=alpha, color="type_color_XO3")
p.quad(left="group", right="right", top="top", bottom="midup", source=source, alpha=alpha, color="type_color_X2O5")
p.quad(left="group", right="right", top="midup", bottom="middown", source=source, alpha=alpha, color="type_color_XO2")
p.quad(left="group", right="right", top="middown", bottom="bottom", source=source, alpha=alpha, color="type_color_XO")
p.axis.visible = False
#The reference block
p.quad(left=5,right=6.5,bottom=0,top=1,fill_color="white")
p.quad(left=5,right=6.5,bottom=1,top=2,fill_color="white")
p.quad(left=5,right=6.5,bottom=2,top=3,fill_color="white")
p.quad(left=6.5,right=8,bottom=0,top=1,fill_color="white")
p.quad(left=6.5,right=8,bottom=1,top=2,fill_color="white")
p.quad(left=6.5,right=8,bottom=2,top=3,fill_color="white")
xx=[5.75,5.75,5.75,7.25,7.25,7.25]
yy=[0.5,1.5,2.5,0.5,1.5,2.5]
text=["X2O3","X2O","XO3","X2O5","XO2","XO"]
sou = ColumnDataSource(dict(x=xx, y=yy, text=text))
p.text(
x="x",
y="y",
text="text",
source=sou,
text_font_style="bold",
text_font_size="17pt",
text_align= "center",
text_baseline= "middle",
)
#Add element name
text_props = {
"source": source,
"angle": 0,
"color": "black",
"text_align": "center",
"text_baseline": "middle",
}
#x = dodge("group", -0.4, range=p.x_range)
#y = dodge("period", 0.3, range=p.y_range)
p.text(
x="group",
y="period",
text="sym",
text_font_style="bold",
text_font_size="16pt",
**text_props,
)
#p.text(x=x, y=y, text="atomic_number", text_font_size="11pt", **text_props)
p.title = f"{plugin} {QUANTITY}"
color_bar = ColorBar(
color_mapper=color_mapper,
ticker=BasicTicker(desired_num_ticks=10),
border_line_color=None,
| |
or "--l" in content:
pass
elif "{" in content and "}" in content:
try:
to_find_list = []
for i in range(len(content)):
if content[i] == "{":
back_brace = content.find("}", i)
if not back_brace == -1:
to_find_list.append([i, back_brace])
if len(to_find_list) >= 3:
break
except IndexError:
return
if to_find_list:
for i in to_find_list:
start = i[0] + 1
end = i[1]
if not start == end:
to_find = content[start:end]
else:
return
if "{" not in to_find or "}" not in to_find:
data = await find_anime_by_name(to_find)
return_data = await build_small_embed(data)
if not return_data:
self.send_log("Anime {Qry", "{} - Queried \"{}\" to Anilist.co (USING BRACKETS), "
"not found!".format(message.author, to_find))
embed = self.basic_embed(False, "Title __{}__ **not found**!".format(content))
else:
embed = return_data[0]
self.send_log("Anime {Qry", "{} - Queried \"{}\" to Anilist.co (USING BRACKETS), "
"returned {}".format(message.author, to_find, return_data[1]))
await message.channel.send(embed=embed)
elif "[" in content and "]" in content:
try:
to_find_list = []
for i in range(len(content)):
if content[i] == "[":
back_brace = content.find("]", i)
if not back_brace == -1:
to_find_list.append([i, back_brace])
if len(to_find_list) >= 3:
break
except IndexError:
return
if to_find_list:
for i in to_find_list:
start = i[0] + 1
end = i[1]
if not start == end:
to_find = content[start:end]
else:
return
if "[" not in to_find or "]" not in to_find:
data = await find_manga_by_name(to_find)
return_data = await build_small_manga_embed(data)
if not return_data:
print("not found!")
self.send_log("Manga [Qry", "{} - Queried \"{}\" to Anilist.co (USING BRACKETS), "
"not found!".format(message.author, to_find))
embed = self.basic_embed(False, "Title __{}__ **not found**!".format(content))
else:
embed = return_data[0]
self.send_log("Manga [Qry", "{} - Queried \"{}\" to Anilist.co (USING BRACKETS), "
"returned {}".format(message.author, to_find, return_data[1]))
await message.channel.send(embed=embed)
elif content.split(" ")[0] == "!8ball":
question = ""
for i in content.split(" "):
if not i == "!8ball":
question += i
question += " "
question = question.strip()
await self.magic8(message, question)
elif ">>" in content:
to_translate = None
if content == "^ >>":
messageable = message.channel
async for msg in messageable.history(limit=2):
if not message == msg:
to_translate = msg.content
else:
notation_place = content.find(" >>")
if content[notation_place - 1] == content[-4]:
to_translate = content[:notation_place]
if to_translate:
async with message.channel.typing():
embed = await build_translate_embed(to_translate)
embed.set_footer(text=f"JP to EN Translation | Requested by {message.author}",
icon_url=self.user.avatar_url)
await message.channel.send(embed=embed)
else:
try:
await self.process_commands(message)
except discord.ext.commands.errors.CommandNotFound:
embed = self.basic_embed(False, "Command **{}** not found!".format(
message.content.split(" ")[0].replace("!", "")))
embed.set_footer(text='!help to see a list of all available commands.')
cmd_not_found_msg = await message.channel.send(embed=embed)
await cmd_not_found_msg.delete(delay=5)
async def on_member_join(self, member):
# Adding Member Role
self.send_log("User Join", f"{member}({member.id}) joined.")
await member.add_roles(self.roles.member, self.roles.spacer_pings, self.roles.spacer_special)
# server-logs message
embed = discord.Embed(
title=":door: **{}#{}** joined the server.".format(member.name, member.discriminator),
description="** **",
color=0x99cc99,
timestamp=member.joined_at
)
embed.set_thumbnail(url=member.avatar_url)
embed.add_field(name="Account created at:", value=str(member.created_at) + " ({} ago)".format(
await time_diff(datetime.utcnow(), member.created_at)), inline=False)
embed.add_field(name="User ID:", value=f"{member.mention} {member.id}", inline=False)
await self.channels.logs.send(embed=embed)
async def on_member_remove(self, member):
# server-logs message
self.send_log("User Leave", f"{member}({member.id}) left.")
embed = discord.Embed(
title=":no_entry_sign: **{}#{}** left the server after **{}**.".format(
member.name,
member.discriminator,
strfdelta(datetime.utcnow() - member.joined_at)
),
description="** **",
color=0xcc9999,
timestamp=datetime.utcnow()
)
embed.set_thumbnail(url=member.avatar_url)
embed.add_field(name="User ID:", value=f"{member.mention} {member.id}", inline=False)
await self.channels.logs.send(embed=embed)
async def on_message_edit(self, before, after):
member = before.author
if after.content and before.content and not after.content == before.content:
self.send_log(
"Msg Edit",
f"{member}({member.id}) edited message in #{before.channel.name}:\n"
f" {before.content}\n"
f" -> {after.content}"
)
embed = discord.Embed(
title=":information_source: **{}#{}** Edited message:".format(member.name, member.discriminator),
description="** **",
color=0x999999,
timestamp=after.created_at
)
embed.set_thumbnail(url=member.avatar_url)
embed.add_field(name="Before:", value=before.content, inline=False)
embed.add_field(name="After:", value=after.content, inline=False)
embed.add_field(name="In:", value=f'<#{before.channel.id}>', inline=False)
embed.add_field(name="User ID:", value=f"{after.author.mention} {after.author.id}", inline=False)
await self.channels.logs.send(embed=embed)
async def on_message_delete(self, message):
member = message.author
self.send_log(
"Msg Delete",
f"[Msg Del ] {member}({member.id}) deleted message in #{message.channel.name}:\n"
f" - {message.content}")
embed = discord.Embed(
title=":information_source: **{}#{}** Deleted message:".format(member.name, member.discriminator),
description="** **",
color=0x999999,
timestamp=message.created_at
)
embed.set_thumbnail(url=member.avatar_url)
embed.add_field(name="Content:", value=message.content, inline=False)
embed.add_field(name="In:", value=f'<#{message.channel.id}>', inline=False)
embed.add_field(name="User ID:", value=f"{member.mention} {message.author.id}", inline=False)
await self.channels.logs.send(embed=embed)
async def on_reaction_add(self, reaction, user):
if not user.bot:
if reaction.emoji == "📌":
self.send_log("Pinboard", "Pin reaction found")
has_pin = False
message = reaction.message
for reaction in message.reactions:
if reaction.emoji == "📌":
if reaction.count > 1:
has_pin = True
break
if not has_pin:
await self.pin_loop(message)
elif reaction.emoji == "📰":
if reaction.message.channel == self.channels.roles:
await user.add_roles(self.roles.news_server)
self.send_log("Role Rxn +", "Added Server News to {} ({})".format(user, user.id))
elif reaction.emoji == "🇯🇵":
if reaction.message.channel == self.channels.roles:
await user.add_roles(self.roles.news_anime)
self.send_log("Role Rxn +", "Added Anime News to {} ({})".format(user, user.id))
elif reaction.emoji == "🇦":
if reaction.message.channel == self.channels.roles:
await user.add_roles(self.roles.disc_anime)
self.send_log("Role Rxn +", "Added Anime Disc to {} ({})".format(user, user.id))
elif reaction.emoji == "🇲":
if reaction.message.channel == self.channels.roles:
await user.add_roles(self.roles.disc_manga)
self.send_log("Role Rxn +", "Added Manga Disc to {} ({})".format(user, user.id))
async def on_reaction_remove(self, reaction, user):
if not user.bot:
if reaction.emoji == "📰":
if reaction.message.channel == self.channels.roles:
await user.remove_roles(self.roles.news_server)
self.send_log("Role Rxn -", "Removed Server News from {} ({})".format(user, user.id))
elif reaction.emoji == "🇯🇵":
if reaction.message.channel == self.channels.roles:
await user.remove_roles(self.roles.news_anime)
self.send_log("Role Rxn -", "Removed Anime News from {} ({})".format(user, user.id))
elif reaction.emoji == "🇦":
if reaction.message.channel == self.channels.roles:
await user.remove_roles(self.roles.disc_anime)
self.send_log("Role Rxn -", "Removed Anime Disc from {} ({})".format(user, user.id))
elif reaction.emoji == "🇲":
if reaction.message.channel == self.channels.roles:
await user.remove_roles(self.roles.disc_manga)
self.send_log("Role Rxn -", "Removed Manga Disc from {} ({})".format(user, user.id))
# ======================== #
# #
# ######### UTIL ######### #
# #
# ======================== #
async def pin_loop(self, message):
msg_channel = message.channel
pin_count = 0
pinned = False
self.send_log("Pinboard", "Pin tracking on message \"{}\"".format(message.content))
embed = discord.Embed(
title=":pushpin: Pinboard vote **Initiated**.",
description="**{}**/**{}**".format(1, self.pin_threshold),
timestamp=message.created_at,
color=0xbd3d45
)
embed_msg = await msg_channel.send(embed=embed)
if not message.content:
content = "*`<No content>`*"
else:
content = message.content
for i in range(0, 600):
await asyncio.sleep(0.05)
message = await msg_channel.fetch_message(id=message.id)
message_reactions = message.reactions
for reaction in message_reactions:
if reaction.emoji == "📌":
pc_now = pin_count
pin_count = reaction.count
if pc_now != pin_count:
self.send_log("Pinboard", "Another pin added on message \"{}\", now {}".format(
content, pin_count))
if pin_count >= self.pin_threshold:
embed = discord.Embed(
title=":pushpin: Pinboard vote **succeeded**!",
description="**{}**/**{}**".format(pin_count, self.pin_threshold),
timestamp=message.created_at,
color=self.color_good
)
await embed_msg.edit(embed=embed)
pinned = True
break
else:
embed = discord.Embed(
title=":pushpin: Pinboard vote **Pending**:",
description="**{}**/**{}**".format(pin_count, self.pin_threshold),
timestamp=message.created_at,
color=0xffa749
)
await embed_msg.edit(embed=embed)
else:
try:
await embed_msg.delete()
break
except Exception as error:
self.send_log("Pinboard e", str(error))
continue
if pinned:
break
else:
pinned = False
if not pinned:
embed = discord.Embed(
title=":pushpin: Pinboard vote **Failed**!",
description="**{}**/**{}**".format(pin_count, self.pin_threshold),
timestamp=message.created_at,
color=0xbd3d45
)
await embed_msg.edit(embed=embed)
else:
self.send_log("Pinboard +", "({}) {}#{}: {} [{}]".format(
pin_count, message.author.name, message.author.discriminator, content, message.created_at))
has_embed = False
embed_url = None
if message.embeds:
has_embed = True
for i in message.embeds:
i = i.to_dict()
if i["type"] != "image":
has_embed = False
else:
embed_url = i["url"]
has_embed = True
break
elif message.attachments:
has_embed = True
for i in message.attachments:
embed_url = i.url
break
embed_title = "**Pinned message:**".format(message.channel.name)
embed_desc = ":pushpin: **in** <#{}>:\n─────────────────\n<@{}>:".format(
message.channel.id, message.author.id)
embed = discord.Embed(
title=embed_title,
description=embed_desc,
timestamp=message.created_at,
color=0xbd3d45
)
embed.set_thumbnail(url=message.author.avatar_url)
if has_embed:
embed.set_image(url=embed_url)
content = content.replace(embed_url, "*`<Embedded URL>`*")
embed.add_field(name=content, value="** **")
pin_msg = await self.channels.pins.send(content=f"**Jump to:** <{message.jump_url}>", embed=embed)
if pin_msg:
pass
#
# for i in range(0, 1800):
# await asyncio.sleep(1)
# msg_channel = message.channel
# message = await msg_channel.fetch_message(id=message.id)
# message_reactions = message.reactions
# for reaction in message_reactions:
# if reaction.emoji == "📌":
# pc_now = pin_count
# pin_count = reaction.count
# if pc_now != pin_count:
# self.send_log("Pinboard", "Another pin added on message \"{}\", now {}".format(
# content, pin_count))
#
# embed_desc = ":pushpin: **x {} ** in <#{}>:\n─────────────────\n<@{}>:".format(
# pin_count,
# message.channel.id,
# message.author.id
# )
# embed = discord.Embed(
# title=embed_title,
# description=embed_desc,
# timestamp=message.created_at,
# color=0xbd3d45
# )
# embed.add_field(name=content, value="** **")
# embed.set_thumbnail(url=message.author.avatar_url)
# if has_embed:
# embed.set_image(url=embed_url)
# await pin_msg.edit(embed=embed)
async def magic8(self, msg, question):
replies = ["Definitely yes.",
"Perhaps.",
"Maybe yes?",
"Probably not.",
"That's a no.",
"Fuck no!",
"Ask again?",
"Not now.",
"In a week's time.",
"No doubt",
"I cannot tell you now.",
"Reply hazy... Try again later.",
"It's better you don't ask.",
"You even need to ask that?",
"If that's what you want.",
"I have no idea.",
"YES! YES! YES!",
"Please stop asking me."]
random_choice = random.choice(replies)
title = ":8ball: **{}#{} summoned the 8-ball:**".format(msg.author.name, msg.author.discriminator)
question = question.replace("?", "")
description = "─────────────────\n\nQ: {}?\nA: **{}**".format(question, random_choice)
embed = discord.Embed(
title=title,
description=description,
color=0x6800d1
)
self.send_log("8 Ball", "{} Asked the 8 ball: {}".format(msg.author, | |
# *****************************************************************************
#
# Copyright (c) 2020, the pyEX authors.
#
# This file is part of the pyEX library, distributed under the terms of
# the Apache License 2.0. The full license can be found in the LICENSE file.
#
from enum import Enum
from functools import lru_cache
from ..common import _timeseriesWrapper, _expire, _UTC
from ..points import points
from ..timeseries import timeSeries, timeSeriesDF
class RatesPoints(Enum):
"""Rates data points
https://iexcloud.io/docs/api/#treasuries
Attributes:
THIRTY; 30 Year constant maturity rate
TWENTY; 20 Year constant maturity rate
TEN; 10 Year constant maturity rate
SEVEN; 7 Year constant maturity rate
FIVE; 5 Year constant maturity rate
THREE; 3 Year constant maturity rate
TWO; 2 Year constant maturity rate
ONE; 1 Year constant maturity rate
SIXMONTH; 6 Month constant maturity rate
THREEMONTH; 3 Month constant maturity rate
ONEMONTH; 1 Month constant maturity rate
"""
THIRTY = "DGS30"
TWENTY = "DGS20"
TEN = "DGS10"
SEVEN = "DGS7"
FIVE = "DGS5"
THREE = "DGS3"
TWO = "DGS2"
ONE = "DGS1"
SIXMONTH = "DGS6MO"
THREEMONTH = "DGS3MO"
ONEMONTH = "DGS1MO"
@staticmethod
@lru_cache(1)
def options():
"""Return a list of the available rates points options"""
return list(map(lambda c: c.value, RatesPoints))
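# --- Editor's sketch (not part of pyEX): fetching the latest value for every treasury
# point through the enum above. ``points(key, token=..., version=...)`` follows the call
# pattern used by the wrapper functions below (e.g. thirtyYear); the empty token is a
# placeholder for a real IEX Cloud token.
def _example_all_rates(token="", version="stable"):
    """Return {FRED key: latest value} for every RatesPoints member (illustrative only)."""
    return {key: points(key, token=token, version=version) for key in RatesPoints.options()}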
@_expire(hour=8, tz=_UTC)
def thirtyYear(token="", version="stable"):
"""Rates data points
https://iexcloud.io/docs/api/#treasuries
THIRTY; 30 Year constant maturity rate
"""
return points("DGS30", token=token, version=version)
@_expire(hour=8, tz=_UTC)
def thirtyYearHistory(
token="", version="stable", filter="", format="json", **timeseries_kwargs
):
"""Rates data
https://iexcloud.io/docs/api/#treasuries
Args:
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_timeseriesWrapper(timeseries_kwargs)
return timeSeries(
id="TREASURY",
key="DGS30",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
@_expire(hour=8, tz=_UTC)
def thirtyYearHistoryDF(
token="", version="stable", filter="", format="json", **timeseries_kwargs
):
"""Rates data
https://iexcloud.io/docs/api/#treasuries
Args:
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_timeseriesWrapper(timeseries_kwargs)
return timeSeriesDF(
id="TREASURY",
key="DGS30",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
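# --- Editor's note (sketch, not part of pyEX): every maturity below follows the same
# pattern as the pair above -- a point-in-time helper (e.g. thirtyYear) plus
# history/historyDF wrappers that forward **timeseries_kwargs to the timeSeries helpers.
# The ``last=5`` kwarg is assumed to be one of the supported time-series filters, and
# the token is a placeholder.
def _example_thirty_year(token="pk_test_placeholder"):
    latest = thirtyYear(token=token)                   # single latest data point
    recent = thirtyYearHistoryDF(token=token, last=5)  # recent observations as a DataFrame
    return latest, recent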
@_expire(hour=8, tz=_UTC)
def twentyYear(token="", version="stable"):
"""Rates data points
https://iexcloud.io/docs/api/#treasuries
TWENTY; 20 Year constant maturity rate
"""
return points("DGS20", token=token, version=version)
@_expire(hour=8, tz=_UTC)
def twentyYearHistory(
token="", version="stable", filter="", format="json", **timeseries_kwargs
):
"""Rates data
https://iexcloud.io/docs/api/#treasuries
Args:
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_timeseriesWrapper(timeseries_kwargs)
return timeSeries(
id="TREASURY",
key="DGS20",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
@_expire(hour=8, tz=_UTC)
def twentyYearHistoryDF(
token="", version="stable", filter="", format="json", **timeseries_kwargs
):
"""Rates data
https://iexcloud.io/docs/api/#treasuries
Args:
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_timeseriesWrapper(timeseries_kwargs)
return timeSeriesDF(
id="TREASURY",
key="DGS20",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
@_expire(hour=8, tz=_UTC)
def tenYear(token="", version="stable"):
"""Rates data points
https://iexcloud.io/docs/api/#treasuries
TEN; 10 Year constant maturity rate
"""
return points("DGS10", token=token, version=version)
@_expire(hour=8, tz=_UTC)
def tenYearHistory(
token="", version="stable", filter="", format="json", **timeseries_kwargs
):
"""Rates data
https://iexcloud.io/docs/api/#treasuries
Args:
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_timeseriesWrapper(timeseries_kwargs)
return timeSeries(
id="TREASURY",
key="DGS10",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
@_expire(hour=8, tz=_UTC)
def tenYearHistoryDF(
token="", version="stable", filter="", format="json", **timeseries_kwargs
):
"""Rates data
https://iexcloud.io/docs/api/#treasuries
Args:
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_timeseriesWrapper(timeseries_kwargs)
return timeSeriesDF(
id="TREASURY",
key="DGS10",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
@_expire(hour=8, tz=_UTC)
def sevenYear(token="", version="stable"):
"""Rates data points
https://iexcloud.io/docs/api/#treasuries
SEVEN; 7 Year constant maturity rate
"""
return points("DGS7", token=token, version=version)
@_expire(hour=8, tz=_UTC)
def sevenYearHistory(
token="", version="stable", filter="", format="json", **timeseries_kwargs
):
"""Rates data
https://iexcloud.io/docs/api/#treasuries
Args:
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_timeseriesWrapper(timeseries_kwargs)
return timeSeries(
id="TREASURY",
key="DGS7",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
@_expire(hour=8, tz=_UTC)
def sevenYearHistoryDF(
token="", version="stable", filter="", format="json", **timeseries_kwargs
):
"""Rates data
https://iexcloud.io/docs/api/#treasuries
Args:
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_timeseriesWrapper(timeseries_kwargs)
return timeSeriesDF(
id="TREASURY",
key="DGS7",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
@_expire(hour=8, tz=_UTC)
def fiveYear(token="", version="stable"):
"""Rates data points
https://iexcloud.io/docs/api/#treasuries
FIVE; 5 Year constant maturity rate
"""
return points("DGS5", token=token, version=version)
@_expire(hour=8, tz=_UTC)
def fiveYearHistory(
token="", version="stable", filter="", format="json", **timeseries_kwargs
):
"""Rates data
https://iexcloud.io/docs/api/#treasuries
Args:
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_timeseriesWrapper(timeseries_kwargs)
return timeSeries(
id="TREASURY",
key="DGS5",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
@_expire(hour=8, tz=_UTC)
def fiveYearHistoryDF(
token="", version="stable", filter="", format="json", **timeseries_kwargs
):
"""Rates data
https://iexcloud.io/docs/api/#treasuries
Args:
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_timeseriesWrapper(timeseries_kwargs)
return timeSeriesDF(
id="TREASURY",
key="DGS5",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
@_expire(hour=8, tz=_UTC)
def threeYear(token="", version="stable"):
"""Rates data points
https://iexcloud.io/docs/api/#treasuries
THREE; 3 Year constant maturity rate
"""
return points("DGS3", token=token, version=version)
@_expire(hour=8, tz=_UTC)
def threeYearHistory(
token="", version="stable", filter="", format="json", **timeseries_kwargs
):
"""Rates data
https://iexcloud.io/docs/api/#treasuries
Args:
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_timeseriesWrapper(timeseries_kwargs)
return timeSeries(
id="TREASURY",
key="DGS3",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
@_expire(hour=8, tz=_UTC)
def threeYearHistoryDF(
token="", version="stable", filter="", format="json", **timeseries_kwargs
):
"""Rates data
https://iexcloud.io/docs/api/#treasuries
Args:
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_timeseriesWrapper(timeseries_kwargs)
return timeSeriesDF(
id="TREASURY",
key="DGS3",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
@_expire(hour=8, tz=_UTC)
def twoYear(token="", version="stable"):
"""Rates data points
https://iexcloud.io/docs/api/#treasuries
TWO; 2 Year constant maturity rate
"""
return points("DGS2", token=token, version=version)
@_expire(hour=8, tz=_UTC)
def twoYearHistory(
token="", version="stable", filter="", format="json", **timeseries_kwargs
):
"""Rates data
https://iexcloud.io/docs/api/#treasuries
Args:
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_timeseriesWrapper(timeseries_kwargs)
return timeSeries(
id="TREASURY",
key="DGS2",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
@_expire(hour=8, tz=_UTC)
def twoYearHistoryDF(
token="", version="stable", filter="", format="json", **timeseries_kwargs
):
"""Rates data
https://iexcloud.io/docs/api/#treasuries
Args:
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_timeseriesWrapper(timeseries_kwargs)
return timeSeriesDF(
id="TREASURY",
key="DGS2",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
@_expire(hour=8, tz=_UTC)
def oneYear(token="", version="stable"):
"""Rates data points
https://iexcloud.io/docs/api/#treasuries
ONE; 1 Year constant maturity rate
"""
return points("DGS1", token=token, version=version)
@_expire(hour=8, tz=_UTC)
def oneYearHistory(
token="", version="stable", filter="", format="json", **timeseries_kwargs
):
"""Rates data
https://iexcloud.io/docs/api/#treasuries
Args:
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_timeseriesWrapper(timeseries_kwargs)
return timeSeries(
id="TREASURY",
key="DGS1",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
@_expire(hour=8, tz=_UTC)
def oneYearHistoryDF(
token="", version="stable", filter="", format="json", **timeseries_kwargs
):
"""Rates data
https://iexcloud.io/docs/api/#treasuries
Args:
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_timeseriesWrapper(timeseries_kwargs)
return timeSeriesDF(
id="TREASURY",
key="DGS1",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
@_expire(hour=8, tz=_UTC)
def sixMonth(token="", version="stable"):
"""Rates data points
https://iexcloud.io/docs/api/#treasuries
SIXMONTH; 6 Month constant maturity rate
"""
return points("DGS6MO", token=token, version=version)
@_expire(hour=8, tz=_UTC)
def sixMonthHistory(
token="", version="stable", filter="", format="json", **timeseries_kwargs
):
"""Rates data
https://iexcloud.io/docs/api/#treasuries
Args:
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_timeseriesWrapper(timeseries_kwargs)
return timeSeries(
id="TREASURY",
key="DGS6MO",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
@_expire(hour=8, tz=_UTC)
def sixMonthHistoryDF(
token="", version="stable", filter="", format="json", **timeseries_kwargs
):
"""Rates data
https://iexcloud.io/docs/api/#treasuries
Args:
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_timeseriesWrapper(timeseries_kwargs)
return timeSeriesDF(
id="TREASURY",
key="DGS6MO",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
@_expire(hour=8, tz=_UTC)
def threeMonth(token="", version="stable"):
"""Rates data points
https://iexcloud.io/docs/api/#treasuries
THREEMONTH; 3 Month constant maturity rate
"""
return points("DGS3MO", token=token, | |
Mod5;2 Mk-46 Mod5;10 DICASS (85) Sonobuoy;10 LOFAR (85) Sonobuoy;30 DIFAR (85) Sonobuoy;')
unit = SM.GetDefaultUnit()
unit.className = 'Oliver Hazard Perry FFGHM'
unit.unitName = "USS Rentz"
UI = SM.GetUnitInterface('USS Carl Vinson')
leader_track = UI.GetTrackById(UI.GetPlatformId())
lon_deg = 57.296*leader_track.Lon + 0.3405
lat_deg = 57.296*leader_track.Lat + -0.0534
unit.SetPosition(lon_deg, lat_deg, 0.0)
unit.heading = -11.19
unit.speed = 3.0
SM.AddUnitToAlliance(unit, 1)
SM.SetUnitLauncherItem(unit.unitName, 0, '20mm mark 244-0 ELC', 97)
SM.SetUnitLauncherItem(unit.unitName, 1, '76mm HE-MOM', 80)
SM.SetUnitLauncherItem(unit.unitName, 2, 'RIM-66L', 1)
SM.SetUnitLauncherItem(unit.unitName, 3, 'Mk-46 Mod5', 3)
SM.SetUnitLauncherItem(unit.unitName, 4, 'Mk-46 Mod5', 3)
UI = SM.GetUnitInterface(unit.unitName)
SM.AddToUnitMagazine("USS Rentz", 'Fuel', 28080)
SM.AddToUnitMagazine("USS Rentz", 'Mk-46 Mod5', 25)
SM.AddToUnitMagazine("USS Rentz", 'AGM-114 Hellfire', 8)
SM.AddToUnitMagazine("USS Rentz", '120 gallon tank', 2)
SM.AddToUnitMagazine("USS Rentz", 'Chaff-1', 50)
SM.AddToUnitMagazine("USS Rentz", 'Flare-1', 50)
SM.AddToUnitMagazine("USS Rentz", 'DICASS (85) Sonobuoy', 105)
SM.AddToUnitMagazine("USS Rentz", 'LOFAR (85) Sonobuoy', 105)
SM.AddToUnitMagazine("USS Rentz", 'DIFAR (85) Sonobuoy', 315)
SM.AddToUnitMagazine("USS Rentz", 'RIM-66L', 35)
SM.AddToUnitMagazine("USS Rentz", 'RGM-84D Harpoon', 4)
SM.AddToUnitMagazine("USS Rentz", '20mm mark 244-0 ELC', 523)
SM.AddToUnitMagazine("USS Rentz", '76mm HE-MOM', 240)
UI.AddTask('MissileWarning', 3.000000, 3)
UI.AddTask('RefuelAllAircraft', 3.000000, 3)
BB = UI.GetBlackboardInterface()
leader_id = UI.LookupFriendlyId('USS <NAME>')
UI.SetFormationLeader(leader_id)
UI.SetFormationMode(2)
UI.SetFormationPosition(20.002, 4.934, 2.100, 0.322)
UI.SetFormationAltitudeOffset(0.0)
UI.SetFormationUseNorthBearing(0)
SM.AddUnitToFlightDeck('USS Rentz', 'SH-60B', 'Perry FFG Seahawk 101', 1)
SM.SetFlightDeckUnitLoadout('USS Rentz', 'Perry FFG Seahawk 101', '1 Mk-46 Mod5;1 120 gallon tank;1 Mk-46 Mod5;25 Flare-1;25 Chaff-1;5 DICASS (85) Sonobuoy;5 LOFAR (85) Sonobuoy;15 DIFAR (85) Sonobuoy;')
unit = SM.GetDefaultUnit()
unit.className = 'Ticonderoga CG Baseline 3'
unit.unitName = "USS Princeton"
UI = SM.GetUnitInterface('USS Carl Vinson')
leader_track = UI.GetTrackById(UI.GetPlatformId())
lon_deg = 57.296*leader_track.Lon + 0.1650
lat_deg = 57.296*leader_track.Lat + 0.0177
unit.SetPosition(lon_deg, lat_deg, 0.0)
unit.heading = -11.19
unit.speed = 3.0
SM.AddUnitToAlliance(unit, 1)
SM.SetUnitLauncherItem(unit.unitName, 0, 'RIM-66M', 1)
SM.SetUnitLauncherItem(unit.unitName, 1, 'RIM-66M', 1)
SM.SetUnitLauncherItem(unit.unitName, 2, 'RIM-66M', 1)
SM.SetUnitLauncherItem(unit.unitName, 3, '20mm mark 244-0 ELC', 97)
SM.SetUnitLauncherItem(unit.unitName, 4, '20mm mark 244-0 ELC', 97)
SM.SetUnitLauncherItem(unit.unitName, 5, 'RGM-84D Harpoon', 4)
SM.SetUnitLauncherItem(unit.unitName, 6, 'RGM-84D Harpoon', 4)
SM.SetUnitLauncherItem(unit.unitName, 7, '127mm mk 80 HE-PD mk 67', 20)
SM.SetUnitLauncherItem(unit.unitName, 8, '127mm mk 80 HE-PD mk 67', 20)
SM.SetUnitLauncherItem(unit.unitName, 9, 'Mk-46 Mod5', 3)
SM.SetUnitLauncherItem(unit.unitName, 10, 'Mk-46 Mod5', 3)
UI = SM.GetUnitInterface(unit.unitName)
SM.AddToUnitMagazine("USS Princeton", 'Fuel', 56161)
SM.AddToUnitMagazine("USS Princeton", 'Mk-46 Mod5', 46)
SM.AddToUnitMagazine("USS Princeton", 'AGM-114 Hellfire', 16)
SM.AddToUnitMagazine("USS Princeton", '120 gallon tank', 4)
SM.AddToUnitMagazine("USS Princeton", 'Chaff-1', 75)
SM.AddToUnitMagazine("USS Princeton", 'Flare-1', 75)
SM.AddToUnitMagazine("USS Princeton", 'LOFAR (85) Sonobuoy', 203)
SM.AddToUnitMagazine("USS Princeton", 'DICASS (85) Sonobuoy', 203)
SM.AddToUnitMagazine("USS Princeton", 'DIFAR (85) Sonobuoy', 608)
SM.AddToUnitMagazine("USS Princeton", 'RIM-66M', 119)
SM.AddToUnitMagazine("USS Princeton", '20mm mark 244-0 ELC', 1046)
SM.AddToUnitMagazine("USS Princeton", '127mm mk 80 HE-PD mk 67', 1200)
UI.AddTask('MissileWarning', 3.000000, 3)
UI.AddTask('RefuelAllAircraft', 3.000000, 3)
BB = UI.GetBlackboardInterface()
leader_id = UI.LookupFriendlyId('USS <NAME>')
UI.SetFormationLeader(leader_id)
UI.SetFormationMode(2)
UI.SetFormationPosition(9.511, 1.481, 1.563, 0.290)
UI.SetFormationAltitudeOffset(0.0)
UI.SetFormationUseNorthBearing(0)
SM.AddUnitToFlightDeck('USS Princeton', 'SH-60B', 'Tico Seahawk 1', 1)
SM.SetFlightDeckUnitLoadout('USS Princeton', 'Tico Seahawk 1', '1 Mk-46 Mod5;1 120 gallon tank;1 Mk-46 Mod5;25 Flare-1;25 Chaff-1;5 DICASS (85) Sonobuoy;5 LOFAR (85) Sonobuoy;15 DIFAR (85) Sonobuoy;')
SM.AddUnitToFlightDeck('USS Princeton', 'SH-60B', 'Tico Seahawk 2', 1)
SM.SetFlightDeckUnitLoadout('USS Princeton', 'Tico Seahawk 2', '1 Mk-46 Mod5;1 120 gallon tank;1 Mk-46 Mod5;25 Flare-1;25 Chaff-1;5 DICASS (85) Sonobuoy;5 LOFAR (85) Sonobuoy;15 DIFAR (85) Sonobuoy;')
unit = SM.GetDefaultUnit()
unit.className = 'Oliver Hazard Perry FFGHM'
unit.unitName = "USS McInerney"
UI = SM.GetUnitInterface('USS <NAME>')
leader_track = UI.GetTrackById(UI.GetPlatformId())
lon_deg = 57.296*leader_track.Lon + -0.0756
lat_deg = 57.296*leader_track.Lat + 0.1754
unit.SetPosition(lon_deg, lat_deg, 0.0)
unit.heading = -11.19
unit.speed = 3.0
SM.AddUnitToAlliance(unit, 1)
SM.SetUnitLauncherItem(unit.unitName, 0, '20mm mark 244-0 ELC', 97)
SM.SetUnitLauncherItem(unit.unitName, 1, '76mm HE-MOM', 80)
SM.SetUnitLauncherItem(unit.unitName, 2, 'RIM-66L', 1)
SM.SetUnitLauncherItem(unit.unitName, 3, 'Mk-46 Mod5', 3)
SM.SetUnitLauncherItem(unit.unitName, 4, 'Mk-46 Mod5', 3)
UI = SM.GetUnitInterface(unit.unitName)
SM.AddToUnitMagazine("USS McInerney", 'Fuel', 28080)
SM.AddToUnitMagazine("USS McInerney", 'Mk-46 Mod5', 25)
SM.AddToUnitMagazine("USS McInerney", 'AGM-114 Hellfire', 8)
SM.AddToUnitMagazine("USS McInerney", '120 gallon tank', 2)
SM.AddToUnitMagazine("USS McInerney", 'Chaff-1', 50)
SM.AddToUnitMagazine("USS McInerney", 'Flare-1', 50)
SM.AddToUnitMagazine("USS McInerney", 'DICASS (85) Sonobuoy', 105)
SM.AddToUnitMagazine("USS McInerney", 'LOFAR (85) Sonobuoy', 105)
SM.AddToUnitMagazine("USS McInerney", 'DIFAR (85) Sonobuoy', 315)
SM.AddToUnitMagazine("USS McInerney", 'RIM-66L', 35)
SM.AddToUnitMagazine("USS McInerney", 'RGM-84D Harpoon', 4)
SM.AddToUnitMagazine("USS McInerney", '20mm mark 244-0 ELC', 523)
SM.AddToUnitMagazine("USS McInerney", '76mm HE-MOM', 240)
UI.AddTask('MissileWarning', 3.000000, 3)
UI.AddTask('RefuelAllAircraft', 3.000000, 3)
BB = UI.GetBlackboardInterface()
leader_id = UI.LookupFriendlyId('USS <NAME>')
UI.SetFormationLeader(leader_id)
UI.SetFormationMode(2)
UI.SetFormationPosition(19.986, 4.455, -0.002, 0.356)
UI.SetFormationAltitudeOffset(0.0)
UI.SetFormationUseNorthBearing(0)
SM.AddUnitToFlightDeck('USS McInerney', 'SH-60B', 'Perry FFG Seahawk 201', 1)
SM.SetFlightDeckUnitLoadout('USS McInerney', 'Perry FFG Seahawk 201', '1 Mk-46 Mod5;1 120 gallon tank;1 Mk-46 Mod5;25 Flare-1;25 Chaff-1;5 DICASS (85) Sonobuoy;5 LOFAR (85) Sonobuoy;15 DIFAR (85) Sonobuoy;')
unit = SM.GetDefaultUnit()
unit.className = 'Ticonderoga CG Baseline 2'
unit.unitName = "USS Antietam"
UI = SM.GetUnitInterface('USS <NAME>')
leader_track = UI.GetTrackById(UI.GetPlatformId())
lon_deg = 57.296*leader_track.Lon + -0.1639
lat_deg = 57.296*leader_track.Lat + -0.0172
unit.SetPosition(lon_deg, lat_deg, 0.0)
unit.heading = -11.19
unit.speed = 3.0
SM.AddUnitToAlliance(unit, 1)
SM.SetUnitLauncherItem(unit.unitName, 0, 'RIM-66M', 1)
SM.SetUnitLauncherItem(unit.unitName, 1, 'RIM-66M', 1)
SM.SetUnitLauncherItem(unit.unitName, 2, 'RIM-66M', 1)
SM.SetUnitLauncherItem(unit.unitName, 3, '20mm Mark 149-4', 90)
SM.SetUnitLauncherItem(unit.unitName, 4, '20mm Mark 149-4', 90)
SM.SetUnitLauncherItem(unit.unitName, 5, 'RGM-84D Harpoon', 4)
SM.SetUnitLauncherItem(unit.unitName, 6, 'RGM-84D Harpoon', 4)
SM.SetUnitLauncherItem(unit.unitName, 7, '127mm mk 80 HE-PD mk 67', 20)
SM.SetUnitLauncherItem(unit.unitName, 8, '127mm mk 80 HE-PD mk 67', 20)
SM.SetUnitLauncherItem(unit.unitName, 9, 'Mk-46 Mod5', 3)
SM.SetUnitLauncherItem(unit.unitName, 10, 'Mk-46 Mod5', 3)
UI = SM.GetUnitInterface(unit.unitName)
SM.AddToUnitMagazine("USS Antietam", 'Fuel', 56161)
SM.AddToUnitMagazine("USS Antietam", 'Mk-46 Mod5', 46)
SM.AddToUnitMagazine("USS Antietam", 'AGM-114 Hellfire', 16)
SM.AddToUnitMagazine("USS Antietam", '120 gallon tank', 4)
SM.AddToUnitMagazine("USS Antietam", 'Chaff-1', 75)
SM.AddToUnitMagazine("USS Antietam", 'Flare-1', 75)
SM.AddToUnitMagazine("USS Antietam", 'LOFAR (85) Sonobuoy', 203)
SM.AddToUnitMagazine("USS Antietam", 'DICASS (85) Sonobuoy', 203)
SM.AddToUnitMagazine("USS Antietam", 'DIFAR (85) Sonobuoy', 608)
SM.AddToUnitMagazine("USS Antietam", 'RIM-66M', 119)
SM.AddToUnitMagazine("USS Antietam", '20mm Mark 149-4', 1046)
SM.AddToUnitMagazine("USS Antietam", '127mm mk 80 HE-PD mk 67', 1200)
UI.AddTask('MissileWarning', 3.000000, 3)
UI.AddTask('RefuelAllAircraft', 3.000000, 3)
BB = UI.GetBlackboardInterface()
leader_id = UI.LookupFriendlyId('USS <NAME>')
UI.SetFormationLeader(leader_id)
UI.SetFormationMode(2)
UI.SetFormationPosition(9.460, 1.713, -1.564, 0.285)
UI.SetFormationAltitudeOffset(0.0)
UI.SetFormationUseNorthBearing(0)
SM.AddUnitToFlightDeck('USS Antietam', 'SH-60B', 'Tico Seahawk 101', 1)
SM.SetFlightDeckUnitLoadout('USS Antietam', 'Tico Seahawk 101', '1 Mk-46 Mod5;1 120 gallon tank;1 Mk-46 Mod5;25 Flare-1;25 Chaff-1;5 DICASS (85) Sonobuoy;5 LOFAR (85) Sonobuoy;15 DIFAR (85) Sonobuoy;')
SM.AddUnitToFlightDeck('USS Antietam', 'SH-60B', 'Tico Seahawk 102', 1)
SM.SetFlightDeckUnitLoadout('USS Antietam', 'Tico Seahawk 102', '1 Mk-46 Mod5;1 120 gallon tank;1 Mk-46 Mod5;25 Flare-1;25 Chaff-1;5 DICASS (85) Sonobuoy;5 LOFAR (85) Sonobuoy;15 DIFAR (85) Sonobuoy;')
unit = SM.GetDefaultUnit()
unit.className = 'Sacramento AOEHM'
unit.unitName = "USS Sacramento"
UI = SM.GetUnitInterface('USS Carl Vinson')
leader_track = UI.GetTrackById(UI.GetPlatformId())
lon_deg = 57.296*leader_track.Lon + 0.0432
lat_deg = 57.296*leader_track.Lat + 0.0072
unit.SetPosition(lon_deg, lat_deg, 0.0)
unit.heading = 348.81
unit.speed = 3.0
SM.AddUnitToAlliance(unit, 1)
SM.SetUnitLauncherItem(unit.unitName, 0, '20mm mark 244-0 ELC', 97)
SM.SetUnitLauncherItem(unit.unitName, 1, '20mm mark 244-0 ELC', 97)
SM.SetUnitLauncherItem(unit.unitName, 2, 'RIM-7P(v1)', 8)
UI = SM.GetUnitInterface(unit.unitName)
SM.AddToUnitMagazine("USS Sacramento", '20mm mark 244-0 ELC', 1046)
UI.AddTask('MissileWarning', 3.000000, 3)
UI.AddTask('RefuelAllAircraft', 3.000000, 3)
BB = UI.GetBlackboardInterface()
leader_id = UI.LookupFriendlyId('USS <NAME>')
UI.SetFormationLeader(leader_id)
UI.SetFormationMode(2)
UI.SetFormationPosition(2.900, 3.200, 1.433, 0.385)
UI.SetFormationAltitudeOffset(0.0)
UI.SetFormationUseNorthBearing(0)
unit = SM.GetDefaultUnit()
unit.className = 'Oliver Hazard Perry FFGHM'
unit.unitName = "USS Duncan"
UI = SM.GetUnitInterface('USS Carl Vinson')
leader_track = UI.GetTrackById(UI.GetPlatformId())
lon_deg = 57.296*leader_track.Lon + -0.2653
lat_deg = 57.296*leader_track.Lat + -0.1191
unit.SetPosition(lon_deg, lat_deg, 0.0)
unit.heading = -11.19
unit.speed = 3.0
SM.AddUnitToAlliance(unit, 1)
SM.SetUnitLauncherItem(unit.unitName, 0, '20mm mark 244-0 ELC', 97)
SM.SetUnitLauncherItem(unit.unitName, 1, '76mm HE-MOM', 80)
SM.SetUnitLauncherItem(unit.unitName, 2, 'RIM-66L', 1)
SM.SetUnitLauncherItem(unit.unitName, 3, 'Mk-46 Mod5', 3)
SM.SetUnitLauncherItem(unit.unitName, 4, 'Mk-46 Mod5', 3)
UI = SM.GetUnitInterface(unit.unitName)
SM.AddToUnitMagazine("USS Duncan", 'Fuel', 28080)
SM.AddToUnitMagazine("USS Duncan", 'Mk-46 Mod5', 25)
SM.AddToUnitMagazine("USS Duncan", 'AGM-114 Hellfire', 8)
SM.AddToUnitMagazine("USS Duncan", '120 gallon tank', 2)
SM.AddToUnitMagazine("USS Duncan", 'Chaff-1', 50)
SM.AddToUnitMagazine("USS Duncan", 'Flare-1', 50)
SM.AddToUnitMagazine("USS Duncan", 'DICASS (85) Sonobuoy', 105)
SM.AddToUnitMagazine("USS Duncan", 'LOFAR (85) Sonobuoy', 105)
SM.AddToUnitMagazine("USS Duncan", 'DIFAR (85) Sonobuoy', 315)
SM.AddToUnitMagazine("USS Duncan", 'RIM-66L', 35)
SM.AddToUnitMagazine("USS Duncan", 'RGM-84D Harpoon', 4)
SM.AddToUnitMagazine("USS Duncan", '20mm mark 244-0 ELC', 523)
SM.AddToUnitMagazine("USS Duncan", '76mm HE-MOM', 240)
UI.AddTask('MissileWarning', 3.000000, 3)
UI.AddTask('RefuelAllAircraft', 3.000000, 3)
BB = UI.GetBlackboardInterface()
leader_id = UI.LookupFriendlyId('USS <NAME>')
UI.SetFormationLeader(leader_id)
UI.SetFormationMode(2)
UI.SetFormationPosition(20.026, 3.861, -2.094, 0.341)
UI.SetFormationAltitudeOffset(0.0)
UI.SetFormationUseNorthBearing(0)
SM.AddUnitToFlightDeck('USS Duncan', 'SH-60B', 'Perry FFG Seahawk 1', 1)
SM.SetFlightDeckUnitLoadout('USS Duncan', 'Perry FFG Seahawk 1', '1 Mk-46 Mod5;1 120 gallon tank;1 Mk-46 Mod5;25 Flare-1;25 Chaff-1;5 DICASS (85) Sonobuoy;5 LOFAR (85) Sonobuoy;15 DIFAR (85) Sonobuoy;')
unit = SM.GetDefaultUnit()
unit.className = 'Spruance DDG ABL'
unit.unitName = "USS Merrill"
UI = SM.GetUnitInterface('USS Carl Vinson')
leader_track = UI.GetTrackById(UI.GetPlatformId())
lon_deg = 57.296*leader_track.Lon + 0.0244
lat_deg = 57.296*leader_track.Lat + -0.0488
unit.SetPosition(lon_deg, lat_deg, 0.0)
unit.heading = -11.19
unit.speed = 3.0
SM.AddUnitToAlliance(unit, 1)
SM.SetUnitLauncherItem(unit.unitName, 0, 'RIM-7P(v1)', 8)
SM.SetUnitLauncherItem(unit.unitName, 1, '127mm mk 80 HE-PD mk 67', 20)
SM.SetUnitLauncherItem(unit.unitName, 2, '127mm mk 80 HE-PD mk 67', 20)
SM.SetUnitLauncherItem(unit.unitName, 3, '20mm mark 244-0 ELC', 97)
SM.SetUnitLauncherItem(unit.unitName, 4, '20mm mark 244-0 ELC', 97)
SM.SetUnitLauncherItem(unit.unitName, | |
argument,
# 2. Specified by a subclass' _default_type attribute, or
# 3. Hinted for a subclass' components attribute.
cls = cls or self._Component
component = cls(id=id, **kwargs)
if "order" not in kwargs:
# For automatically created dimensions, give a serial value to the
# order property
try:
component.order = self.auto_order
self.auto_order += 1
except ValueError:
pass
self.components.append(component)
return component
# Properties of components
def __getitem__(self, key) -> CT:
"""Convenience access to components."""
return self.components[key]
def __len__(self):
return len(self.components)
def __iter__(self):
return iter(self.components)
def __repr__(self):
return "<{}: {}>".format(
self.__class__.__name__, "; ".join(map(repr, self.components))
)
def __eq__(self, other):
"""ID equal and same components occur in same order."""
return super().__eq__(other) and all(
s == o for s, o in zip(self.components, other.components)
)
# Must be reset because __eq__ is defined
def __hash__(self):
return super().__hash__()
def compare(self, other, strict=True):
"""Return :obj:`True` if `self` is the same as `other`.
Two ComponentLists are the same if:
- :meth:`.IdentifiableArtefact.compare` is :obj:`True`, and
- corresponding :attr:`components` compare equal.
Parameters
----------
strict : bool, optional
Passed to :func:`.compare` and :meth:`.IdentifiableArtefact.compare`.
"""
return super().compare(other, strict) and all(
c.compare(other.get(c.id), strict) for c in self.components
)
# §4.3: Codelist
class Code(Item):
"""SDMX-IM Code."""
class Codelist(ItemScheme[Code]):
_Item = Code
# §4.5: Category Scheme
class Category(Item):
"""SDMX-IM Category."""
class CategoryScheme(ItemScheme[Category]):
_Item = Category
class Categorisation(MaintainableArtefact):
#:
category: Optional[Category] = None
#:
artefact: Optional[IdentifiableArtefact] = None
# §4.6: Organisations
class Contact(BaseModel):
"""Organization contact information.
IMF is the only data provider that returns messages with :class:`Contact`
information. These differ from the IM in several ways. This class reflects
these differences:
- 'name' and 'org_unit' are InternationalString, instead of strings.
- 'email' may be a list of e-mail addresses, rather than a single address.
- 'uri' may be a list of URIs, rather than a single URI.
"""
#:
name: InternationalString = InternationalString()
#:
org_unit: InternationalString = InternationalString()
#:
telephone: Optional[str] = None
#:
responsibility: InternationalString = InternationalString()
#:
email: List[str]
#:
uri: List[str]
class Organisation(Item):
#:
contact: List[Contact] = []
class Agency(Organisation):
pass
# DataProvider delayed until after ConstrainableArtefact, below
# Update forward references to 'Agency'
for cls in list(locals().values()):
if isclass(cls) and issubclass(cls, MaintainableArtefact):
cls.update_forward_refs()
class OrganisationScheme:
"""SDMX-IM abstract OrganisationScheme."""
class AgencyScheme(ItemScheme[Agency], OrganisationScheme):
_Item = Agency
# DataProviderScheme delayed until after DataProvider, below
# §10.2: Constraint inheritance
class ConstrainableArtefact(BaseModel):
"""SDMX-IM ConstrainableArtefact."""
class DataProvider(Organisation, ConstrainableArtefact):
"""SDMX-IM DataProvider."""
class DataProviderScheme(ItemScheme[DataProvider], OrganisationScheme):
_Item = DataProvider
# §10.3: Constraints
class ConstraintRole(BaseModel):
#:
role: ConstraintRoleType
class ComponentValue(BaseModel):
#:
value_for: Component
#:
value: str
class DataKey(BaseModel):
#: :obj:`True` if the :attr:`keys` are included in the
#: :class:`.Constraint`; :obj:`False` if they are excluded.
included: bool
#: Mapping from :class:`.Component` to :class:`.ComponentValue` comprising
#: the key.
key_value: Dict[Component, ComponentValue]
class DataKeySet(BaseModel):
#: :obj:`True` if the :attr:`keys` are included in the
#: :class:`.Constraint`; :obj:`False` if they are excluded.
included: bool
#: :class:`DataKeys <.DataKey>` appearing in the set.
keys: List[DataKey]
class Constraint(MaintainableArtefact):
#: :class:`.DataKeySet` included in the Constraint.
data_content_keys: Optional[DataKeySet] = None
# metadata_content_keys: MetadataKeySet = None
# NB the spec gives 1..* for this attribute, but this implementation allows
# only 1
role: ConstraintRole
# NB this is required to prevent “unhashable type: 'dict'” in pydantic
class Config:
validate_assignment = False
class SelectionValue(BaseModel):
"""SDMX-IM SelectionValue."""
class MemberValue(SelectionValue):
#:
value: str
#:
cascade_values: Optional[bool] = None
def __hash__(self):
return hash(self.value)
def __eq__(self, other):
if isinstance(other, KeyValue):
return self.value == other.value
else:
return self.value == other
class MemberSelection(BaseModel):
#:
included: bool = True
#:
values_for: Component
#: NB the spec does not say what this feature should be named
values: Set[MemberValue] = set()
def __contains__(self, value):
"""Compare KeyValue to MemberValue."""
return any(mv == value for mv in self.values)
class CubeRegion(BaseModel):
#:
included: bool = True
#:
member: Dict["Dimension", MemberSelection] = {}
def __contains__(self, key):
for ms in self.member.values():
if key[ms.values_for.id] not in ms:
return False
return True
def to_query_string(self, structure):
all_values = []
for dim in structure.dimensions:
if isinstance(dim, TimeDimension):
# TimeDimensions handled by query parameters
continue
ms = self.member.get(dim, None)
values = sorted(mv.value for mv in ms.values) if ms else []
all_values.append("+".join(values))
return ".".join(all_values)
class ContentConstraint(Constraint):
#: :class:`CubeRegions <.CubeRegion>` included in the ContentConstraint.
data_content_region: List[CubeRegion] = []
#:
content: Set[ConstrainableArtefact] = set()
# metadata_content_region: MetadataTargetRegion = None
# NB this is required to prevent RecursionError in pydantic;
# see https://github.com/samuelcolvin/pydantic/issues/524
class Config:
validate_assignment_exclude = "data_content_region"
def __contains__(self, value):
if self.data_content_region:
return any(value in cr for cr in self.data_content_region)
else:
raise NotImplementedError(
"ContentConstraint does not contain a CubeRegion."
)
def to_query_string(self, structure):
cr_count = len(self.data_content_region)
try:
if cr_count > 1:
warn(f"to_query_string() using first of {cr_count} " "CubeRegions.")
return self.data_content_region[0].to_query_string(structure)
except IndexError:
raise RuntimeError("ContentConstraint does not contain a CubeRegion.")
class AttachmentConstraint(Constraint):
#:
attachment: Set[ConstrainableArtefact] = set()
# §5.2: Data Structure Definition
class DimensionComponent(Component):
#:
order: Optional[int] = None
class Dimension(DimensionComponent):
"""SDMX-IM Dimension."""
CubeRegion.update_forward_refs()
class TimeDimension(DimensionComponent):
"""SDMX-IM TimeDimension."""
class MeasureDimension(DimensionComponent):
"""SDMX-IM MeasureDimension."""
class PrimaryMeasure(Component):
"""SDMX-IM PrimaryMeasure."""
class MeasureDescriptor(ComponentList[PrimaryMeasure]):
_Component = PrimaryMeasure
class AttributeRelationship(BaseModel):
pass
class NoSpecifiedRelationship(AttributeRelationship):
pass
class PrimaryMeasureRelationship(AttributeRelationship):
pass
class DimensionRelationship(AttributeRelationship):
#:
dimensions: List[DimensionComponent] = []
#: NB the IM says "0..*" here in a diagram, but the text does not match.
group_key: Optional["GroupDimensionDescriptor"] = None
class GroupRelationship(AttributeRelationship):
# 'Retained for compatibility reasons' in SDMX 2.1; not used by pandaSDMX.
#:
group_key: Optional["GroupDimensionDescriptor"] = None
class DataAttribute(Component):
#:
related_to: Optional[AttributeRelationship] = None
#:
usage_status: Optional[UsageStatus] = None
class ReportingYearStartDay(DataAttribute):
pass
class AttributeDescriptor(ComponentList[DataAttribute]):
_Component = DataAttribute
class Structure(MaintainableArtefact):
#:
grouping: Optional[ComponentList] = None
class StructureUsage(MaintainableArtefact):
#:
structure: Optional[Structure] = None
class DimensionDescriptor(ComponentList[DimensionComponent]):
"""Describes a set of dimensions.
IM: “An ordered set of metadata concepts that, combined, classify a
statistical series, and whose values, when combined (the key) in an
instance such as a data set, uniquely identify a specific observation.”
:attr:`.components` is a :class:`list` (ordered) of :class:`Dimension`,
:class:`MeasureDimension`, and/or :class:`TimeDimension`.
"""
_Component = Dimension
def assign_order(self):
"""Assign the :attr:`.DimensionComponent.order` attribute.
The Dimensions in :attr:`components` are numbered, starting from 1.
"""
for i, component in enumerate(self.components):
component.order = i + 1
def order_key(self, key):
"""Return a key ordered according to the DSD."""
result = key.__class__()
for dim in sorted(self.components, key=attrgetter("order")):
try:
result[dim.id] = key[dim.id]
except KeyError:
continue
return result
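# Illustrative note (ids are assumptions): if the descriptor holds Dimensions
# FREQ (order=1) and GEO (order=2), order_key(Key(GEO="DE", FREQ="A")) returns
# a key reordered to (FREQ, GEO); KeyValues whose ids are not in the
# descriptor are dropped.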
@classmethod
def from_key(cls, key):
"""Create a new DimensionDescriptor from a *key*.
For each :class:`KeyValue` in the *key*:
- A new :class:`Dimension` is created.
- A new :class:`Codelist` is created, containing the
:attr:`KeyValue.value`.
Parameters
----------
key : :class:`Key` or :class:`GroupKey` or :class:`SeriesKey`
"""
dd = cls()
for order, (id, kv) in enumerate(key.values.items()):
cl = Codelist(id=id)
cl.append(Code(id=kv.value))
dd.components.append(
Dimension(
id=id,
local_representation=Representation(enumerated=cl),
order=order,
)
)
return dd
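# Usage sketch (hedged; ``Key`` is defined elsewhere in this module):
#
#   dd = DimensionDescriptor.from_key(Key(FREQ="A", GEO="DE"))
#   [dim.id for dim in dd.components]               # -> ['FREQ', 'GEO']
#   dd.get("FREQ").local_representation.enumerated  # Codelist containing Code 'A'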
class GroupDimensionDescriptor(DimensionDescriptor):
#:
attachment_constraint: Optional[bool] = None
#:
constraint: Optional[AttachmentConstraint] = None
def assign_order(self):
""":meth:`assign_order` has no effect for GroupDimensionDescriptor."""
pass
DimensionRelationship.update_forward_refs()
GroupRelationship.update_forward_refs()
@validate_dictlike("group_dimensions")
class DataStructureDefinition(Structure, ConstrainableArtefact):
"""SDMX-IM DataStructureDefinition (‘DSD’)."""
#: A :class:`AttributeDescriptor` that describes the attributes of the
#: data structure.
attributes: AttributeDescriptor = AttributeDescriptor()
#: A :class:`DimensionDescriptor` that describes the dimensions of the
#: data structure.
dimensions: DimensionDescriptor = DimensionDescriptor()
#: A :class:`.MeasureDescriptor`.
measures: MeasureDescriptor = MeasureDescriptor()
#: Mapping from :attr:`.GroupDimensionDescriptor.id` to
#: :class:`.GroupDimensionDescriptor`.
group_dimensions: DictLike[str, GroupDimensionDescriptor] = DictLike()
# Convenience methods
def make_constraint(self, key):
"""Return a constraint for *key*.
*key* is a :class:`dict` wherein:
- keys are :class:`str` ids of Dimensions appearing in this
DSD's :attr:`dimensions`, and
- values are '+'-delimited :class:`str` containing allowable values,
_or_ iterables of :class:`str`, each an allowable value.
For example::
cc2 = dsd.make_constraint({'foo': 'bar+baz', 'qux': 'q1+q2+q3'})
``cc2`` includes any key where the 'foo' dimension is 'bar' *or* 'baz',
*and* the 'qux' dimension is one of 'q1', 'q2', or 'q3'.
Returns
-------
ContentConstraint
A constraint with one :class:`CubeRegion` in its
:attr:`data_content_region <ContentConstraint.data_content_region>`
, including only the values appearing in *keys*.
Raises
------
ValueError
if *key* contains a dimension ID not appearing in
:attr:`dimensions`.
"""
# Make a copy to avoid pop()'ing off the object in the calling scope
key = key.copy()
cr = CubeRegion()
for dim in self.dimensions:
mvs = set()
try:
values = key.pop(dim.id)
except KeyError:
continue
values = values.split("+") if isinstance(values, str) else values
for value in values:
# TODO validate values
mvs.add(MemberValue(value=value))
cr.member[dim] = MemberSelection(included=True, values_for=dim, values=mvs)
if len(key):
raise ValueError(
"Dimensions {!r} not in {!r}".format(list(key.keys()), self.dimensions)
)
return ContentConstraint(
data_content_region=[cr],
role=ConstraintRole(role=ConstraintRoleType.allowable),
)
@classmethod
def from_keys(cls, keys):
"""Return a new DSD given some *keys*.
The DSD's :attr:`dimensions` refers to a set of new :class:`Concepts
| |
_Pg2ac = np.minimum(0, _Pg)
# Energy sums in MWh
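# Power arrays are in W and sampled every _dt seconds, so sum(|P|) * _dt is
# energy in Ws (J); dividing by 3.6e9 converts Ws to MWh (1 MWh = 3.6e9 Ws).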
# Electrical demand including the energy consumption of the other system components
_E['El'] = np.sum(np.abs(_Plt)) * _dt / 3.6e9
# DC output of the PV generator including curtailment
_E['Epv'] = np.sum(np.abs(_Ppv)) * _dt / 3.6e9
# DC input of the battery (charged)
_E['Ebatin'] = np.sum(np.abs(_Pbatin)) * _dt / 3.6e9
# DC output of the battery (discharged)
_E['Ebatout'] = np.sum(np.abs(_Pbatout)) * _dt / 3.6e9
# Grid feed-in
_E['Eac2g'] = np.sum(np.abs(_Pac2g)) * _dt / 3.6e9
# Grid demand
_E['Eg2ac'] = np.sum(np.abs(_Pg2ac)) * _dt / 3.6e9
# Load supply by the grid
_E['Eg2l'] = np.sum(np.abs(_Pg2l)) * _dt / 3.6e9
# Demand of the other system components
_E['Eperi'] = np.sum(np.abs(_Pperi)) * _dt / 3.6e9
# Curtailed PV energy
_E['Ect'] = np.sum(np.abs(_Pct)) * _dt / 3.6e9
if _parameter['Top'] == 'AC': # AC-coupled systems
# AC output of the PV system including curtailment
_E['Epvs'] = np.sum(np.abs(_Ppvs)) * _dt / 3.6e9
# AC input of the battery system
_E['Eac2bs'] = np.sum(np.abs(_Pac2bs)) * _dt / 3.6e9
# AC output of the battery system
_E['Ebs2ac'] = np.sum(np.abs(_Pbs2ac)) * _dt / 3.6e9
# Direct use of PV energy
_E['Epvs2l'] = np.sum(np.abs(_Ppvs2l)) * _dt / 3.6e9
# PV charging
_E['Epvs2bs'] = np.sum(np.abs(_Ppvs2bs)) * _dt / 3.6e9
# Grid charging
_E['Eg2bs'] = np.sum(np.abs(_Pg2bs)) * _dt / 3.6e9
# PV feed-in
_E['Epvs2g'] = np.sum(np.abs(_Ppvs2g)) * _dt / 3.6e9
# Load supply by the battery system
_E['Ebs2l'] = np.sum(np.abs(_Pbs2l)) * _dt / 3.6e9
# Battery feed-in
_E['Ebs2g'] = np.sum(np.abs(_Pbs2g)) * _dt / 3.6e9
elif _parameter['Top'] == 'DC' or _parameter['Top'] == 'PV': # DC- and PV-coupled systems
# Grid demand of the PV-battery system
_E['Eg2pvbs'] = np.sum(np.abs(_Pg2pvbs)) * _dt / 3.6e9
# AC input of the PV-battery system
_E['Eac2pvbs'] = np.sum(np.abs(_Pac2pvbs)) * _dt / 3.6e9
# AC output of the PV-battery system
_E['Epvbs2ac'] = np.sum(np.abs(_Ppvbs2ac)) * _dt / 3.6e9
# Load supply by the PV-battery system
_E['Epvbs2l'] = np.sum(np.abs(_Ppvbs2l)) * _dt / 3.6e9
return _E
def bat_res_mod_ideal(_parameter, _Pl, _Ppv, _Pbat, _dt, *args):
E = dict() # Dictionary to store energy sums
if _parameter['Top'] == 'AC':
Ppvs = args[0] # AC output power of the PV system
Pbs = args[1] # AC power of the battery system
Pperi = args[2] # Additional power consumption of the other system components
elif _parameter['Top'] == 'DC':
Ppv2ac = args[0]
Ppv2bat_in = args[1]
Ppvbs = args[2]
Pperi = args[3]
Ppv2ac_in = _Ppv - Ppv2bat_in
# Additional power consumption of the other system components
Pperi = np.zeros_like(_Ppv)
# Total load including the power consumption of the other system components
Plt = _Pl
# DC input power of the battery (charged)
Pbatin = np.maximum(0, _Pbat)
# DC output power of the battery (discharged)
Pbatout = np.minimum(0, _Pbat)
if _parameter['Top'] == 'AC':
# Grid power
Pg = Ppvs - _Pl - Pbs
# Residual power
Pr = Ppvs - Plt
# AC input power of the battery system
Pac2bs = np.maximum(0, Pbs)
# AC output power of the battery system
Pbs2ac = np.minimum(0, Pbs)
# Negative residual power (residual load demand)
Prn = np.minimum(0, Pr)
# Positive residual power (surplus PV power)
Prp = np.maximum(0, Pr)
# Direct use of PV power by the load
Ppvs2l = np.minimum(Ppvs, Plt)
# PV charging power
Ppvs2bs = np.minimum(Prp, Pac2bs)
# Grid charging power
Pg2bs = np.maximum(Pac2bs - Prp, 0)
# Grid supply power of the load
Pg2l = np.minimum(Prn - Pbs2ac, 0)
# Battery supply power of the load
Pbs2l = np.maximum(Prn, Pbs2ac)
# Battery feed-in power
Pbs2g = np.minimum(Pbs2ac - Prn, 0)
# PV feed-in power
Ppvs2g = np.maximum(Prp - Pac2bs, 0)
elif _parameter['Top'] == 'DC':
# Grid power
Pg = Ppvbs - _Pl
# Grid power demand of the PV-battery system
Pg2pvbs = np.minimum(0, Ppvbs)
# AC input power of the PV-battery system
Pac2pvbs = Pg2pvbs
# AC output power of the PV-battery system
Ppvbs2ac = np.maximum(0, Ppvbs)
# Load supply power by the PV-battery system
Ppvbs2l = np.minimum(_Pl, Ppvbs2ac)
# Load supply power by the grid
Pg2l = (Plt - Ppvbs2l)
# Curtailed PV power (AC output power)
Pct = np.zeros_like(_Ppv)
# Power demand from the grid
Pg2ac = np.minimum(0, Pg)
# Feed-in power to the grid
Pac2g = np.maximum(0, Pg)
# Energy sums
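# NB: unlike bat_res_mod above, these sums carry no _dt factor, which
# implicitly assumes a 1 s time increment (W summed over 1 s steps gives Ws);
# dividing by 3.6e9 then yields MWh.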
# Electrical demand including the energy consumption of the other system components
E['El'] = np.sum(np.abs(Plt)) / 3.6e9
# DC output of the PV generator including curtailment
E['Epv'] = np.sum(np.abs(_Ppv)) / 3.6e9
# DC input of the battery (charged)
E['Ebatin'] = np.sum(np.abs(Pbatin)) / 3.6e9
# DC output of the battery (discharged)
E['Ebatout'] = np.sum(np.abs(Pbatout)) / 3.6e9
# Grid feed-in
E['Eac2g'] = np.sum(np.abs(Pac2g)) / 3.6e9
# Grid demand
E['Eg2ac'] = np.sum(np.abs(Pg2ac)) / 3.6e9
# Load supply by the grid
E['Eg2l'] = np.sum(np.abs(Pg2l)) / 3.6e9
# Demand of the other system components
E['Eperi'] = np.sum(np.abs(Pperi)) / 3.6e9
# Curtailed PV energy
E['Ect'] = np.sum(np.abs(Pct)) / 3.6e9
if _parameter['Top'] == 'AC':
# AC output of the PV system including curtailment
E['Epvs'] = np.sum(np.abs(Ppvs)) / 3.6e9
# AC input of the battery system
E['Eac2bs'] = np.sum(np.abs(Pac2bs)) / 3.6e9
# AC output of the battery system
E['Ebs2ac'] = np.sum(np.abs(Pbs2ac)) / 3.6e9
# Direct use of PV energy
E['Epvs2l'] = np.sum(np.abs(Ppvs2l)) / 3.6e9
# PV charging
E['Epvs2bs'] = np.sum(np.abs(Ppvs2bs)) / 3.6e9
# Grid charging
E['Eg2bs'] = np.sum(np.abs(Pg2bs)) / 3.6e9
# PV feed-in
E['Epvs2g'] = np.sum(np.abs(Ppvs2g)) / 3.6e9
# Load supply by the battery system
E['Ebs2l'] = np.sum(np.abs(Pbs2l)) / 3.6e9
# Battery feed-in
E['Ebs2g'] = np.sum(np.abs(Pbs2g)) / 3.6e9
elif _parameter['Top'] == 'DC':
# Grid demand of the PV-battery system
E['Eg2pvbs'] = np.sum(np.abs(Pg2pvbs)) / 3.6e9
# AC input of the PV-battery system
E['Eac2pvbs'] = np.sum(np.abs(Pac2pvbs)) / 3.6e9
# AC output of the PV-battery system
E['Epvbs2ac'] = np.sum(np.abs(Ppvbs2ac)) / 3.6e9
# Load supply by the PV-battery system
E['Epvbs2l'] = np.sum(np.abs(Ppvbs2l)) / 3.6e9
return E
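# Usage sketch (hedged; synthetic values, AC topology, extra args per the
# signature above: Ppvs, Pbs, Pperi):
#
#   import numpy as np
#   n = 3600                                   # one hour at 1 s resolution
#   Pl = np.full(n, 500.0)                     # constant 500 W load
#   Ppv = np.full(n, 1500.0)                   # constant 1.5 kW PV output
#   Pbat = np.zeros(n)                         # idle battery
#   E = bat_res_mod_ideal({'Top': 'AC'}, Pl, Ppv, Pbat, 1,
#                         Ppv, np.zeros(n), np.zeros(n))
#   E['El'], E['Epv']                          # energies in MWh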
def load_parameter(fname, col_name):
"""Loads system parameter from excel file
:param fname: Path to the excel file
:type fname: string
:param col_name: Column to read data from
:type col_name: string
:return: Dictionary holding parameters from the Excel sheet
:rtype: dict
"""
wb = load_workbook(fname, data_only=True)
ws = wb['Data'] # Load Data sheet of excel file
# read keys and values from Excel sheet
keys = (c.value for c in ws['E'][1:])
values = (c.value if c.value != 'ns' else None for c in ws[col_name][1:])
parameter = dict(zip(keys, values))
# deletes entries where key is None
del parameter[None]
# Assign specific parameters
parameter['P_PV2AC_out_PVINV'] = ws[col_name][15].value
parameter['P_PV2AC_out'] = ws[col_name][24].value
parameter['P_AC2BAT_in_DCC'] = ws[col_name][25].value
parameter['P_AC2BAT_in'] = ws[col_name][26].value
parameter['P_BAT2AC_out'] = ws[col_name][27].value
parameter['P_BAT2AC_out_DCC'] = ws[col_name][28].value
# Set reference case values to boolean
if parameter['ref_1'] == 'yes':
parameter['ref_1'] = True
elif parameter['ref_1'] == 'no':
parameter['ref_1'] = False
if parameter['ref_2'] == 'yes':
parameter['ref_2'] = True
elif parameter['ref_2'] == 'no':
parameter['ref_2'] = False
# Specific parameters of DC-coupled systems
if parameter['Top'] == 'DC':
parameter['P_AC2BAT_in'] = parameter['P_AC2BAT_in_DCC'] # Nominal charging power (AC) in kW
parameter['P_BAT2AC_out'] = parameter['P_BAT2AC_out_DCC']
# Specific parameters of PV inverters and AC-coupled systems
if parameter['Top'] == 'PVINV' or parameter['Top'] == 'AC' and parameter['P_PV2AC_out_PVINV'] is not None:
parameter['P_PV2AC_out'] = parameter['P_PV2AC_out_PVINV']
# Specific parameters of PV-coupled systems
if parameter['Top'] == 'PV':
parameter['P_BAT2PV_in'] = parameter['P_BAT2AC_in']
parameter['P_BAT2AC_out'] = parameter['P_BAT2AC_out_DCC']
# replace 'ns', 'o' and 'c' entries to None
for key, value in parameter.items():
if value == 'ns' or value == 'o' or value == 'c' or value == ' ':
parameter[key] = None
# Convert to kW
convert_to_kw = ['P_PV2AC_in', 'P_PV2AC_out_PVINV','P_PV2AC_out','P_AC2BAT_in_DCC','P_AC2BAT_in','P_BAT2AC_out',
'P_BAT2AC_out_DCC','P_PV2BAT_in','P_BAT2PV_out','P_PV2BAT_out','P_BAT2AC_in']
for par in convert_to_kw:
if parameter[par]:
parameter[par] /= 1000
return parameter
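# Usage sketch (file name and column letter are hypothetical; the workbook
# must provide a 'Data' sheet with parameter keys in column E, as read above):
#
#   parameter = load_parameter("PerModPAR.xlsx", "F")
#   parameter["P_PV2AC_out"]   # nominal PV2AC output power in kW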
def eta2abc(parameter):
"""Function to calculate the parameters of the power loss functions (quadratic equations) from the path efficiencies
:param parameter: Holds parameters of the system
:type parameter: dict
:return: Dictionary holding parameters from the Excel sheet
:rtype: dict
"""
# PV2AC conversion pathway TODO
if parameter['Top'] == 'DC' or parameter['Top'] == 'PVINV' or parameter['Top'] == 'PV' and parameter['P_PV2AC_out'] is not None or parameter['Top'] == 'AC' and parameter['P_PV2AC_out'] is not None:
# Create variables for the sampling points and corresponding efficiencies TODO
p_pv2ac = np.fromiter((value for key, value in parameter.items() if 'p_PV2AC_' in key | |
# nailgun/nailgun/task/task.py (from dnikishov/fuel-web)
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from copy import deepcopy
import os
import socket
import netaddr
import six
import yaml
from sqlalchemy import func
from sqlalchemy import not_
from sqlalchemy.orm import ColumnProperty
from sqlalchemy.orm import object_mapper
from nailgun import consts
from nailgun.db import db
from nailgun.db.sqlalchemy.models import CapacityLog
from nailgun.db.sqlalchemy.models import Cluster
from nailgun.db.sqlalchemy.models import Node
from nailgun.db.sqlalchemy.models import Task
from nailgun.errors import errors
from nailgun.logger import logger
from nailgun.network.checker import NetworkCheck
from nailgun.network.manager import NetworkManager
from nailgun import objects
from nailgun.orchestrator import deployment_graph
from nailgun.orchestrator import deployment_serializers
from nailgun.orchestrator import provisioning_serializers
from nailgun.orchestrator import stages
from nailgun.orchestrator import task_based_deployment
from nailgun.orchestrator import tasks_serializer
from nailgun.orchestrator import tasks_templates
import nailgun.rpc as rpc
from nailgun.settings import settings
from nailgun.task.fake import FAKE_THREADS
from nailgun.task.helpers import TaskHelper
from nailgun.utils import logs as logs_utils
from nailgun.utils.restrictions import VmwareAttributesRestriction
from nailgun.utils.role_resolver import RoleResolver
from nailgun.utils.zabbix import ZabbixManager
def make_astute_message(task, method, respond_to, args):
message = {
'api_version': settings.VERSION['api'],
'method': method,
'respond_to': respond_to,
'args': args
}
message['args']['task_uuid'] = task.uuid
task.cache = message
return message
def fake_cast(queue, messages, **kwargs):
def make_thread(message, join_to=None):
thread = FAKE_THREADS[message['method']](
data=message,
params=kwargs,
join_to=join_to
)
logger.debug("Fake thread called: data: %s, params: %s",
message, kwargs)
thread.start()
thread.name = message['method'].upper()
return thread
def make_thread_task_in_orchestrator(message):
task_in_orchestrator = {
'args': {'task_uuid': message['args'].get('task_uuid')},
'respond_to': 'task_in_orchestrator',
'method': 'task_in_orchestrator'
}
make_thread(task_in_orchestrator)
if isinstance(messages, (list,)):
thread = None
for m in messages:
thread = make_thread(m, join_to=thread)
make_thread_task_in_orchestrator(m)
else:
make_thread(messages)
make_thread_task_in_orchestrator(messages)
class DeploymentTask(object):
"""Task for applying changes to cluster
LOGIC
Use cases:
1. Cluster exists, node(s) added
If we add one node to an existing OpenStack cluster, other nodes may require
updates (redeployment), but they don't require full system
reinstallation.
How to: run deployment for all nodes which system type is target.
Run provisioning first and then deployment for nodes which are in
discover system type.
Q: Should we care about node status (provisioning, error, deploying)?
A: offline - when node doesn't respond (agent doesn't run, not
implemented); let's say user should remove this node from
cluster before deployment.
ready - target OS is loaded and node is Ok, we redeploy
ready nodes only if cluster has pending changes i.e.
network or cluster attrs were changed
discover - in discovery mode, provisioning is required
provisioning - at the time of task execution there should not be such
case. If there is - previous provisioning has failed.
Possible solution would be to try again to provision
deploying - the same as provisioning, but stuck in a previous deploy;
solution - try to deploy again. May lose some data if reprovisioned.
error - recognized error in deployment or provisioning... We have to
know where the error was. If in deployment - reprovisioning
may not be a solution (can lose data).
If in provisioning - can do provisioning & deployment again
2. New cluster, just added nodes
Provision first, and run deploy as second
3. Remove some nodes and add some other nodes
Deletion task will run first and will actually remove the nodes, including
removal from the DB. However, removal from the DB happens when remove_nodes_resp
is run. This means we have to filter nodes and not run deployment on
those which are prepared for removal.
"""
@classmethod
def _get_deployment_method(cls, cluster, ignore_task_deploy=False):
"""Get deployment method name based on cluster version
:param cluster: Cluster db object
:param ignore_task_deploy: do not check whether task-based deploy is enabled
:returns: string - deploy/granular_deploy
"""
if not ignore_task_deploy and \
objects.Cluster.is_task_deploy_enabled(cluster):
return "task_deploy"
if objects.Release.is_granular_enabled(cluster.release):
return 'granular_deploy'
return 'deploy'
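# Summary of the selection above: task-based deployment enabled -> 'task_deploy';
# granular deployment supported by the release -> 'granular_deploy';
# otherwise -> 'deploy'.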
@classmethod
def message(cls, task, nodes, deployment_tasks=None,
reexecutable_filter=None):
logger.debug("DeploymentTask.message(task=%s)" % task.uuid)
task_ids = deployment_tasks or []
objects.NodeCollection.lock_nodes(nodes)
for n in nodes:
if n.pending_roles:
n.roles = n.roles + n.pending_roles
n.pending_roles = []
# If receiver for some reasons didn't update
# node's status to provisioned when deployment
# started, we should do it in nailgun
if n.status in (consts.NODE_STATUSES.deploying,):
n.status = consts.NODE_STATUSES.provisioned
n.progress = 0
db().flush()
deployment_mode = cls._get_deployment_method(task.cluster)
while True:
try:
message = getattr(cls, deployment_mode)(
task, nodes, task_ids, reexecutable_filter
)
break
except errors.TaskBaseDeploymentNotAllowed:
deployment_mode = cls._get_deployment_method(
task.cluster, True
)
logger.warning("fallback to %s deploy.", deployment_mode)
# After serialization set pending_addition to False
for node in nodes:
node.pending_addition = False
rpc_message = make_astute_message(
task,
deployment_mode,
'deploy_resp',
message
)
db().flush()
return rpc_message
@classmethod
def granular_deploy(cls, task, nodes, task_ids, reexecutable_filter):
orchestrator_graph = deployment_graph.AstuteGraph(task.cluster)
orchestrator_graph.only_tasks(task_ids)
orchestrator_graph.reexecutable_tasks(reexecutable_filter)
# NOTE(dshulyak) At this point parts of the orchestration can be empty,
# it should not cause any issues with deployment/progress and was
# done by design
role_resolver = RoleResolver(nodes)
serialized_cluster = deployment_serializers.serialize(
orchestrator_graph, task.cluster, nodes)
pre_deployment = stages.pre_deployment_serialize(
orchestrator_graph, task.cluster, nodes,
role_resolver=role_resolver)
post_deployment = stages.post_deployment_serialize(
orchestrator_graph, task.cluster, nodes,
role_resolver=role_resolver)
return {
'deployment_info': serialized_cluster,
'pre_deployment': pre_deployment,
'post_deployment': post_deployment
}
deploy = granular_deploy
@classmethod
def task_deploy(cls, task, nodes, task_ids, reexecutable_filter):
deployment_tasks = objects.Cluster.get_deployment_tasks(task.cluster)
serialized_cluster = deployment_serializers.serialize(
None, task.cluster, nodes
)
serialized_tasks = task_based_deployment.TasksSerializer.serialize(
task.cluster, nodes, deployment_tasks, task_ids
)
return {
"deployment_info": serialized_cluster,
"deployment_tasks": serialized_tasks
}
class UpdateNodesInfoTask(object):
"""Task for updating nodes.yaml and /etc/hosts on all slaves
The task is intended to be used in order to update both nodes.yaml and
/etc/hosts on all slaves. This task isn't going to manage node or cluster
statuses, and should be used only in one case - when we remove some node
and don't add anything new (if some new node is added, these tasks will
be executed without any additional help).
"""
# the following post deployment tasks are used to update nodes
# information on all slaves
_tasks = [
tasks_serializer.UploadNodesInfo.identity,
tasks_serializer.UpdateHosts.identity,
]
@classmethod
def message(cls, task):
orchestrator_graph = deployment_graph.AstuteGraph(task.cluster)
orchestrator_graph.only_tasks(cls._tasks)
rpc_message = make_astute_message(
task,
'execute_tasks',
'deploy_resp',
{
'tasks': orchestrator_graph.post_tasks_serialize([])
}
)
db().flush()
return rpc_message
class UpdateTask(object):
@classmethod
def message(cls, task, nodes):
logger.debug("%s.message(task=%s)", cls.__class__.__name__, task.uuid)
for n in nodes:
if n.pending_roles:
n.roles += n.pending_roles
n.pending_roles = []
n.status = 'provisioned'
n.progress = 0
orchestrator_graph = deployment_graph.AstuteGraph(task.cluster)
serialized_cluster = deployment_serializers.serialize(
orchestrator_graph, task.cluster, nodes)
# After serialization set pending_addition to False
for node in nodes:
node.pending_addition = False
rpc_message = make_astute_message(
task,
'deploy',
'deploy_resp',
{
'deployment_info': serialized_cluster
}
)
db().flush()
return rpc_message
class ProvisionTask(object):
@classmethod
def _get_provision_method(cls, cluster):
"""Get provision method name based on cluster attributes
:param cluster: Cluster db object
:returns: string - an Astute callable
"""
cluster_attrs = objects.Attributes.merged_attrs_values(
cluster.attributes)
provision_method = cluster_attrs.get('provision', {}).get(
'method', consts.PROVISION_METHODS.cobbler)
# NOTE(kozhukalov):
#
# Map provisioning method to Astute callable.
if provision_method == consts.PROVISION_METHODS.cobbler:
return 'native_provision'
return 'image_provision'
@classmethod
def message(cls, task, nodes_to_provisioning):
logger.debug("ProvisionTask.message(task=%s)" % task.uuid)
task = objects.Task.get_by_uid(
task.id,
fail_if_not_found=True,
lock_for_update=True
)
objects.NodeCollection.lock_nodes(nodes_to_provisioning)
serialized_cluster = provisioning_serializers.serialize(
task.cluster, nodes_to_provisioning)
for node in nodes_to_provisioning:
if settings.FAKE_TASKS or settings.FAKE_TASKS_AMQP:
continue
logs_utils.prepare_syslog_dir(node)
rpc_message = make_astute_message(
task,
cls._get_provision_method(task.cluster),
'provision_resp',
{
'provisioning_info': serialized_cluster
}
)
db().commit()
return rpc_message
class DeletionTask(object):
@classmethod
def format_node_to_delete(cls, node, mclient_remove=True):
"""Convert node to dict for deletion.
:param node: Node object
:param mclient_remove: Boolean flag telling Astute whether to also
remove node from mclient (True by default). For offline nodes this
can be set to False to avoid long retrying unsuccessful deletes.
:return: Dictionary in format accepted by Astute.
"""
return {
'id': node.id,
'uid': node.id,
'roles': node.roles,
'slave_name': objects.Node.get_slave_name(node),
'mclient_remove': mclient_remove,
}
# TODO(ikalnitsky): Get rid of this, maybe move to fake handlers?
@classmethod
def format_node_to_restore(cls, node):
"""Convert node to dict for restoring, works only in fake mode.
Fake mode can optionally restore the removed node (this simulates
the node being rediscovered). This method creates the appropriate
input for that procedure.
:param node:
:return: dict
"""
# only fake tasks
if cls.use_fake():
new_node = {}
reset_attrs = (
'id',
'cluster_id',
'roles',
'pending_deletion',
'pending_addition',
'group_id',
'hostname',
)
for prop in object_mapper(node).iterate_properties:
if isinstance(
prop, ColumnProperty
) and prop.key not in reset_attrs:
new_node[prop.key] = getattr(node, prop.key)
return new_node
# /only fake tasks
@classmethod
def prepare_nodes_for_task(cls, nodes, mclient_remove=True):
"""Format | |
# packages/main/tests/python/test_tables.py
import os
import tempfile
from collections import namedtuple, OrderedDict
from contextlib import contextmanager
from pathlib import Path
import pytest
from RPA.Tables import Table, Tables, Dialect
RESOURCES = Path(__file__).parent / ".." / "resources"
DATA_COLUMNS = ["one", "two", "three", "four"]
TUPLE_THREE = namedtuple("Three", ["one", "two", "three"])
TUPLE_FOUR = namedtuple("Four", ["one", "two", "three", "four"])
TUPLE_SPARSE = namedtuple("Sparse", ["one", "two", "four"])
TUPLE_EMPTY = namedtuple("Empty", [])
DATA_NAMEDTUPLE = [
TUPLE_THREE(1, 2, 3),
TUPLE_THREE("a", "b", "c"),
TUPLE_SPARSE(1, 2, 4),
TUPLE_EMPTY(),
TUPLE_FOUR(1, 2, 3, 4),
TUPLE_EMPTY(),
]
DATA_DICT_LIST = {
"one": [1, "a", 1, None, 1, None],
"two": [2, "b", 2, None, 2],
"three": [3, "c", None, None, 3, None],
"four": [None, None, 4, None, 4],
}
DATA_LIST_DICT = [
{"one": 1, "two": 2, "three": 3},
{"one": "a", "two": "b", "three": "c"},
{"one": 1, "two": 2, "four": 4},
{},
{"one": 1, "two": 2, "three": 3, "four": 4},
{},
]
DATA_LIST_LIST = [[1, 2, 3], ["a", "b", "c"], [1, 2, None, 4], [], [1, 2, 3, 4], []]
DATA_FIXTURE = {
"dict-list": (DATA_DICT_LIST, None),
"list-dict": (DATA_LIST_DICT, None),
"list-list": (DATA_LIST_LIST, DATA_COLUMNS),
"namedtuple": (DATA_NAMEDTUPLE, None),
}
@contextmanager
def temppath():
with tempfile.NamedTemporaryFile() as fd:
path = fd.name
try:
yield path
finally:
os.unlink(path)
@pytest.fixture
def library():
return Tables()
@pytest.fixture(params=DATA_FIXTURE)
def table(request):
data, columns = DATA_FIXTURE[request.param]
return Table(data, columns)
def test_table_repr(table):
assert str(table) == "Table(columns=['one', 'two', 'three', 'four'], rows=6)"
def test_table_compare(table):
assert table == Table(DATA_NAMEDTUPLE)
assert table != "not-comparable"
def test_table_from_table(table):
copy = Table(table)
assert copy.columns == table.columns
assert copy.data == table.data
copy = Table(table, columns=["first", "second", "third", "fourth"])
assert copy.columns == ["first", "second", "third", "fourth"]
assert copy.data == table.data
def test_table_from_dict():
copy = Table(DATA_DICT_LIST)
assert copy.columns == ["one", "two", "three", "four"]
assert len(copy.data) == 6
copy = Table(DATA_DICT_LIST, columns=["one", "two"])
assert copy.columns == ["one", "two"]
assert copy.data == Table(DATA_DICT_LIST).get(columns=["one", "two"]).data
def test_table_invalid_data():
with pytest.raises(TypeError):
Table("cool")
def test_table_columns(table):
assert table.columns == ["one", "two", "three", "four"]
def test_table_index(table):
assert table.index == [0, 1, 2, 3, 4, 5]
def test_table_pad_short(table):
assert table[0] == [1, 2, 3, None]
def test_table_pad_sparse(table):
assert table[2] == [1, 2, None, 4]
def test_table_empty_row(table):
assert table[3] == [None, None, None, None]
def test_table_negative_row_index(table):
assert table[-1] == [None, None, None, None]
assert table[-2] == [1, 2, 3, 4]
assert table[-3] == [None, None, None, None]
def test_table_negative_column_index(table):
assert table[0, 1] == 2
assert table[0, -1] == None
assert table[0, -2] == 3
def test_table_slice_index(table):
assert table[1:3] == [["a", "b", "c", None], [1, 2, None, 4]]
def test_table_length(table):
assert len(table) == 6
def test_table_invalid_column(table):
with pytest.raises(ValueError):
table.get_column("not_exist")
def test_table_range_columns():
table = Table(DATA_LIST_LIST)
assert table.columns == [0, 1, 2, 3]
def test_table_named_columns():
table = Table(DATA_NAMEDTUPLE, columns=["two", "four"])
assert table.columns == ["two", "four"]
assert table.index == [0, 1, 2, 3, 4, 5]
assert table[0] == [2, None]
assert table[4] == [2, 4]
def test_table_too_short_columns():
with pytest.raises(ValueError):
Table(DATA_LIST_LIST, columns=["two", "four"])
def test_table_duplicate_columns():
with pytest.raises(ValueError):
Table(DATA_NAMEDTUPLE, columns=["two", "four", "two"])
def test_table_iterate_tuples():
table = Table(
[{"valid_key": 1, "invalid-key1": 2, "invalid/key2": 3, "123invalidkey3": 4}]
)
assert table.columns == [
"valid_key",
"invalid-key1",
"invalid/key2",
"123invalidkey3",
]
rows = list(table.iter_tuples(with_index=False))
assert len(rows) == 1
assert rows[0] == (1, 2, 3, 4)
assert rows[0]._fields == (
"valid_key",
"invalid_key1",
"invalid_key2",
"invalidkey3",
)
def test_table_iterate_tuples_invalid():
table = Table([{"one": 1, "two": 2, "assert": 3, "": 4}])
assert table.columns == [
"one",
"two",
"assert",
"",
]
with pytest.raises(ValueError):
list(table.iter_tuples(with_index=False))
@pytest.mark.parametrize(
"data, columns", DATA_FIXTURE.values(), ids=DATA_FIXTURE.keys()
)
def test_keyword_create_table(data, columns, library):
table = library.create_table(data)
assert len(table) == 6
def test_keyword_export_table_as_list(library, table):
exported = library.export_table(table)
assert exported == [
{"one": 1, "two": 2, "three": 3, "four": None},
{"one": "a", "two": "b", "three": "c", "four": None},
{"one": 1, "two": 2, "three": None, "four": 4},
{"one": None, "two": None, "three": None, "four": None},
{"one": 1, "two": 2, "three": 3, "four": 4},
{"one": None, "two": None, "three": None, "four": None},
]
def test_keyword_export_table_as_dict(library, table):
exported = library.export_table(table, with_index=True, as_list=False)
assert exported == OrderedDict(
{
"index": [0, 1, 2, 3, 4, 5],
"one": [1, "a", 1, None, 1, None],
"two": [2, "b", 2, None, 2, None],
"three": [3, "c", None, None, 3, None],
"four": [None, None, 4, None, 4, None],
}
)
def test_keyword_copy_table(library, table):
copied = library.copy_table(table)
assert copied == table
def test_keyword_clear_table(library, table):
library.clear_table(table)
assert len(table) == 0
assert len(table.index) == 0
assert table.columns == DATA_COLUMNS
def test_merge_tables(library):
prices = {"Name": ["Egg", "Cheese", "Ham"], "Price": [10.0, 15.0, 20.0]}
stock = {"Name": ["Egg", "Cheese", "Ham", "Spider"], "Stock": [12, 99, 0, 1]}
merged = library.merge_tables(Table(prices), Table(stock))
assert len(merged) == 7
assert merged.columns == ["Name", "Price", "Stock"]
assert merged[None, "Name"] == [
"Egg",
"Cheese",
"Ham",
"Egg",
"Cheese",
"Ham",
"Spider",
]
merged = library.merge_tables(Table(prices), Table(stock), index="Name")
assert len(merged) == 4
assert merged.get_row(0) == {"Name": "Egg", "Price": 10.0, "Stock": 12}
assert merged.get_row(1) == {"Name": "Cheese", "Price": 15.0, "Stock": 99}
assert merged.get_row(2) == {"Name": "Ham", "Price": 20.0, "Stock": 0}
assert merged.get_row(3) == {"Name": "Spider", "Price": None, "Stock": 1}
def test_keyword_get_table_dimensions(library, table):
rows, columns = library.get_table_dimensions(table)
assert rows == 6
assert columns == 4
def test_keyword_rename_table_columns(library, table):
library.rename_table_columns(table, ["a", "b", "c", "d"])
assert table.columns == ["a", "b", "c", "d"]
assert table.get_column("a", as_list=True) == [1, "a", 1, None, 1, None]
library.rename_table_columns(table, ["1", None, "2"])
assert table.columns == ["1", "b", "2", "d"]
def test_keyword_add_table_column(library, table):
library.add_table_column(table, name="five")
assert table.columns == ["one", "two", "three", "four", "five"]
assert table[0] == [1, 2, 3, None, None]
def test_keyword_add_table_rows(library, table):
library.add_table_row(table, ["x", "y", "z"])
assert len(table) == 7
assert table.index[-2] == 5
assert table[-1] == ["x", "y", "z", None]
def test_keyword_add_table_rows_too_long(library, table):
library.add_table_row(table, ["x", "y", "z", "i", "j", "k"])
assert len(table) == 7
assert table.index[-2] == 5
assert table[-1] == ["x", "y", "z", "i"]
def test_keyword_get_table_row(library, table):
assert library.get_table_row(table, 0) == {
"one": 1,
"two": 2,
"three": 3,
"four": None,
}
def test_keyword_get_table_column(library, table):
assert library.get_table_column(table, 0) == [1, "a", 1, None, 1, None]
def test_keyword_set_table_row(library, table):
assert table[1] == ["a", "b", "c", None]
library.set_table_row(table, 1, ["w", "x", "y", "z"])
assert table[1] == ["w", "x", "y", "z"]
def test_keyword_set_table_column(library, table):
library.set_table_column(table, "one", "NaN")
for row in table:
assert row["one"] == "NaN"
def test_keyword_pop_table_row(library, table):
assert len(table) == 6
assert table[0] == [1, 2, 3, None]
row = library.pop_table_row(table, row=0, as_list=True)
assert len(table) == 5
assert table[0] == ["a", "b", "c", None]
assert row == [1, 2, 3, None]
def test_keyword_pop_table_column(library, table):
library.pop_table_column(table, "two")
assert table.columns == ["one", "three", "four"]
assert len(table) == 6
assert table[0] == [1, 3, None]
def test_keyword_get_table_slice(library, table):
result = library.get_table_slice(table)
assert result == table
result = library.get_table_slice(table, start=3)
assert len(result) == 3
result = library.get_table_slice(table, end=2)
assert len(result) == 2
result = library.get_table_slice(table, end=-1)
assert len(result) == 5
result = library.get_table_slice(table, start=2, end=3)
assert len(result) == 1
result = library.get_table_slice(table, start=3, end=2)
assert len(result) == 0
def test_keyword_find_table_rows(library, table):
matches = library.find_table_rows(table, "three", "==", 3)
assert len(matches) == 2
matches = library.find_table_rows(table, "four", "is", None)
assert len(matches) == 4
def test_keyword_set_row_as_column_names(library, table):
assert table.columns == ["one", "two", "three", "four"]
assert len(table) == 6
library.set_row_as_column_names(table, 4)
assert table.columns == [1, 2, 3, 4]
assert len(table) == 5
def test_keyword_table_head(library, table):
head = library.table_head(table, count=3)
assert isinstance(head, Table)
assert len(head) == 3
assert head[0] == table[0]
assert head[-1] == table[2]
def test_keyword_table_head_list(library, table):
head = library.table_head(table, count=3, as_list=True)
assert isinstance(head, list)
assert len(head) == 3
assert head[0] == table[0]
assert head[-1] == table[2]
def test_keyword_table_tail(library, table):
tail = library.table_tail(table, count=2)
assert len(tail) == 2
assert tail[-1] == table[-1]
def test_keyword_get_table_cell(library, table):
assert library.get_table_cell(table, 0, 0) == 1
assert library.get_table_cell(table, 2, 3) == 4
def test_keyword_set_table_cell_existing(library, table):
library.set_table_cell(table, 0, 0, 123)
assert table[0, 0] == 123
library.set_table_cell(table, 1, "one", 321)
assert table[1, 0] == 321
def test_keyword_set_table_cell_new(library, table):
assert table.dimensions == (6, 4)
library.set_table_cell(table, 9, 7, ">9000")
assert table.dimensions == (10, 8)
assert table[9, 7] == ">9000"
def test_keyword_sort_table_by_column(library, table):
library.add_table_column(table, name="five", values=["bbb", 2, 3, 1, 3, "aaa"])
library.sort_table_by_column(table, "five", ascending=True)
assert library.get_table_column(table, "five") == [1, 2, 3, 3, "aaa", "bbb"]
assert library.get_table_column(table, "one") == [None, "a", 1, 1, None, 1]
library.sort_table_by_column(table, "five", ascending=False)
assert library.get_table_column(table, "five") == ["bbb", "aaa", 3, 3, 2, 1]
assert library.get_table_column(table, "one") == [1, None, 1, 1, "a", None]
def test_keyword_group_table_by_column(library, table):
groups = library.group_table_by_column(table, "three")
assert len(groups) == 3
for group in groups:
column = library.get_table_column(group, "three")
assert len(set(column)) == 1
def test_keyword_filter_table_by_column(library, table):
library.filter_table_by_column(table, "two", "==", 2)
assert len(table) == 3
assert all(row["two"] == 2 for row in table)
def test_keyword_filter_table_by_column_in(library, table):
library.filter_table_by_column(table, "two", "in", ["b", None])
assert len(table) == 3
assert all(row["two"] != 2 for | |
when "
"requesting a CHILD object."))
read_kwargs['child_id'] = uuid
# Accept parent_type as either EntryWrapper subclass or string
if not isinstance(parent_type, str):
parent_type = parent_type.schema_type
return adapter.read(parent_type, root_id=parent_uuid,
child_type=cls.schema_type, **read_kwargs)
@classmethod
def get_by_href(cls, adapter, href, **rbh_kwargs):
"""Get a wrapper or feed given a URI.
This can be useful for retrieving wrappers "associated" with other
wrappers, where the association is provided via an atom link. Some
examples are TrunkAdapter.associated_vswitch_uri and
VNICBackDev.vios_href.
:param adapter: A pypowervm.adapter.Adapter instance for REST API
communication.
:param href: The string URI (including scheme://host:port/) of the
entry or feed to retrieve.
:param rbh_kwargs: Keyword arguments to be passed directly to Adapter's
read_by_href method.
:return: EntryWrapper subclass of the appropriate type, or a list
thereof, representing the entry/feed associated with the href
parameter.
"""
return cls.wrap(adapter.read_by_href(href, **rbh_kwargs))
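    # Illustrative usage sketch (not part of the original module; the wrapper
    # names are assumptions for the example only). Given a wrapper `bdev` that
    # exposes an associated URI, the linked object is fetched with the wrapper
    # class corresponding to that URI:
    #
    #     vios_w = VIOS.get_by_href(adapter, bdev.vios_href)
    #
    # A feed URI returns a list of wrappers instead of a single one.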
@classmethod
def search(cls, adapter, negate=False, xag=None, parent_type=None,
parent_uuid=None, one_result=False, parent=None, **kwargs):
"""Performs a REST API search.
Searches for object(s) of the type indicated by cls having (or not
having) the key/value indicated by the (single) kwarg.
Regular expressions, comparators, and logical operators are not
supported.
:param cls: A subclass of EntryWrapper. The wrapper class may define
a search_keys member, which is a dictionary mapping a
@property getter method name to a search key supported by
the REST API for that object type. To retrieve an XML
report of the supported search keys for object Foo,
perform: read('Foo', suffix_type='search').
If the wrapper class does not define a search_keys member,
                    OR if xag is non-None, or a CHILD search is requested,
                    the fallback search algorithm performs a
GET of the entire feed of the object type and loops through
it looking for (mis)matches on the @property indicated by
the search key.
:param adapter: The pypowervm.adapter.Adapter instance through which to
perform the search.
:param negate: If True, the search is negated - we find all objects of
the indicated type where the search key does *not* equal
the search value.
:param xag: List of extended attribute group names.
:param parent_type: If searching for CHILD objects, specify either
the parent parameter or BOTH parent_type and
parent_uuid. This parameter indicates the parent
ROOT object. It may be either the string schema
type or the corresponding EntryWrapper subclass.
:param parent_uuid: If searching for CHILD objects, specify either
the parent parameter or BOTH parent_type and
parent_uuid. This parameter specifies the UUID of
the parent ROOT object. If parent_type is
specified, but parent_uuid is None, all parents of
the ROOT type will be searched. This may result in
a slow response time.
:param one_result: Use when expecting (at most) one search result. If
True, this method will return the first element of
the search result list, or None if the search
produced no results.
:param parent: If searching for CHILD objects, specify either the
parent parameter or BOTH parent_type and parent_uuid.
This parameter is an EntryWrapper instance indicating
the parent ROOT object.
:param kwargs: Exactly one key=value. The key must correspond to a key
in cls.search_keys and/or the name of a getter @property
on the EntryWrapper subclass. Due to limitations of
the REST API, if specifying xags or searching for a
CHILD, the key must be the name of a getter @property.
The value is the value to match.
:return: If one_result=False (the default), a list of instances of the
cls. The list may be empty (no results were found). It may
contain more than one instance (e.g. for a negated search, or
for one where the key does not represent a unique property of
the object). If one_result=True, returns a single instance of
cls, or None if the search produced no results.
"""
def list_or_single(results, single):
"""Returns either the results list or its first entry.
:param results: The list of results from the search. May be empty.
Must not be None.
:param single: If False, return results unchanged. If True, return
only the first entry in the results list, or None if
results is empty.
"""
if not single:
return results
return results[0] if results else None
try:
parent_type, parent_uuid = util.parent_spec(parent, parent_type,
parent_uuid)
except ValueError:
# Special case where we allow parent_type without parent_uuid. The
# reverse is caught by the check below.
if parent_type is not None and type(parent_type) is not str:
parent_type = parent_type.schema_type
# parent_uuid makes no sense without parent_type
if parent_type is None and parent_uuid is not None:
raise ValueError(_('Parent UUID specified without parent type.'))
if len(kwargs) != 1:
raise ValueError(_('The search() method requires exactly one '
'key=value argument.'))
key, val = kwargs.popitem()
try:
# search API does not support xag or CHILD
if xag is not None or parent_type is not None:
# Cheater's way to cause _search_by_feed to be invoked
raise AttributeError()
search_key = cls.search_keys[key]
except (AttributeError, KeyError):
# Fallback search by [GET feed] + loop
return list_or_single(
cls._search_by_feed(adapter, cls.schema_type, negate, key, val,
xag, parent_type, parent_uuid), one_result)
op = '!=' if negate else '=='
quote = urllib.parse.quote if six.PY3 else urllib.quote
search_parm = "(%s%s'%s')" % (search_key, op, quote(str(val), safe=''))
# Let this throw HttpError if the caller got it wrong.
# Note that this path will only be hit for ROOTs.
return list_or_single(
cls.wrap(cls._read_parent_or_child(
adapter, cls.schema_type, parent_type, parent_uuid,
suffix_type='search', suffix_parm=search_parm)),
one_result)
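    # Illustrative usage sketch (not part of the original module): `LPAR` below
    # stands in for any EntryWrapper subclass, and `name` for one of its search
    # keys or @property getters.
    #
    #     matches = LPAR.search(adapter, name='my-lpar')               # always a list
    #     one = LPAR.search(adapter, one_result=True, name='my-lpar')  # wrapper or None
    #     others = LPAR.search(adapter, negate=True, name='my-lpar')   # everything else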
@classmethod
def _search_by_feed(cls, adapter, target_type, negate, key, val, xag,
parent_type, parent_uuid):
if not hasattr(cls, key):
raise ValueError(_("Wrapper class %(class)s does not support "
"search key '%(key)s'.") %
{'class': cls.__name__, 'key': key})
feedwrap = cls.wrap(cls._read_parent_or_child(adapter, target_type,
parent_type, parent_uuid,
xag=xag))
retlist = []
val = str(val)
for entry in feedwrap:
entval = str(getattr(entry, key, None))
include = (entval != val) if negate else (entval == val)
if include:
retlist.append(entry)
return retlist
@staticmethod
def _read_parent_or_child(adapter, target_type, parent_type, parent_uuid,
**kwargs):
if parent_type is None:
# ROOT feed search
return adapter.read(target_type, **kwargs)
if parent_uuid is not None:
# CHILD of a specific ROOT
return adapter.read(parent_type, root_id=parent_uuid,
child_type=target_type, **kwargs)
# Search all ROOTs of the specified type.
ret = None
# Wishing there was a quick URI to get all UUIDs.
# Let EntryWrapper.wrap figure out the wrapper type. Whatever it
# is, the uuid @property is available.
for parent in EntryWrapper.wrap(adapter.read(parent_type)):
resp = adapter.read(
parent_type, root_id=parent.uuid, child_type=target_type,
**kwargs)
# This is a bit of a cheat. Technically extending the feed of
# a Response doesn't result in a legal Response (the rest of
# the metadata won't accurately reflect the feed). However
# this is guaranteed only to be used immediately by wrap() to
# extract the Entrys.
if ret is None:
ret = resp
else:
ret.feed.entries.extend(resp.feed.entries)
return ret
def create(self, parent_type=None, parent_uuid=None, timeout=-1,
parent=None):
"""Performs an adapter.create (REST API PUT) with this wrapper.
:param parent_type: If creating a CHILD, specify either the parent
parameter or BOTH parent_type and parent_uuid.
This parameter may be either the schema_type or the
EntryWrapper subclass of the parent ROOT object.
:param parent_uuid: If creating a CHILD, specify either the parent
parameter or BOTH parent_type and parent_uuid.
This parameter indicates the UUID of the parent
ROOT object.
:param timeout: (Optional) Integer number of seconds after which to
time out the PUT request. -1, the default, causes the
request to use the timeout value configured on the
Session belonging to the Adapter.
:param parent: If creating a CHILD, specify either the parent parameter
or BOTH parent_type and parent_uuid. This parameter is
an EntryWrapper representing the parent ROOT object of
the CHILD to be created.
:return: New EntryWrapper of the invoking class representing the PUT
response.
"""
service = pc.SERVICE_BY_NS[self.schema_ns]
parent_type, parent_uuid = util.parent_spec(parent, parent_type,
parent_uuid)
if parent_type is None and parent_uuid is None:
# ROOT
resp = self.adapter.create(self, self.schema_type, service=service,
timeout=timeout)
else:
# CHILD
resp = self.adapter.create(
self, parent_type, root_id=parent_uuid,
child_type=self.schema_type, service=service, timeout=timeout)
return self.wrap(resp)
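    # Illustrative usage sketch (not part of the original module): a ROOT create
    # needs no parent, while a CHILD create names its parent either as a wrapper
    # or as a (type, uuid) pair; `ParentWrapper`, `parent_w` and `p_uuid` are
    # placeholders.
    #
    #     new_root = wrapper.create()
    #     new_child = wrapper.create(parent=parent_w)
    #     new_child = wrapper.create(parent_type=ParentWrapper, parent_uuid=p_uuid)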
def delete(self):
"""Performs an adapter.delete (REST API DELETE) with this wrapper."""
self.adapter.delete_by_href(self.href, etag=self.etag)
# TODO(IBM): Remove deprecated xag parameter
def update(self, xag='__DEPRECATED__', timeout=-1, force=False):
"""Performs adapter.update of this wrapper.
        :param xag: DEPRECATED -
return metrics[key]
else:
return threshold_default
def _determine_threshold(self, threshold_default: float = .5) -> float:
if type(self.model) in [TabNetClassifier, MLPClassifier]:
return self._determine_threshold_nn(threshold_default)
        elif isinstance(self.model, RandomForest):
            return self._determine_threshold_rf(threshold_default)
        elif isinstance(self.model, GBDT):
            return self._determine_threshold_gbdt(threshold_default)
else:
return threshold_default
def _attribution_tabnet(self, data_loader: DataLoader, method: str = "default", **method_kwargs) -> Tuple[
np.ndarray, np.ndarray, np.ndarray]:
"""gets the tabnet attribution using the defined data_loader"""
attribution, probs = [], []
for inputs, labels in data_loader:
_, _probs, _, _mask, *_ = self.model(inputs.to(self.device))
attribution.append(_mask)
probs.append(_probs)
attribution = torch.cat(attribution).detach().cpu().numpy()
probs = torch.cat(probs)
preds = (probs > self.threshold).float()
preds = preds.detach().cpu().numpy()
preds = preds[:, self.label_idx] if preds.ndim > 1 else preds
postprocess = method_kwargs.pop("postprocess", None)
if method == "tabnet":
            # the "tabnet" method uses the model's attention mask as the attribution
attributions = attribution
elif method == "integrated_gradients":
attributions = NNAttribution(self.model).integrated_gradients(data_loader, **method_kwargs)
elif method == "noise_tunnel_ig":
attributions = NNAttribution(self.model).noise_tunnel_ig(data_loader, **method_kwargs)
elif "saliency" in method:
attributions = NNAttribution(self.model).saliency(data_loader, **method_kwargs)
elif method == "input_x_gradient":
attributions = NNAttribution(self.model).input_x_gradient(data_loader, **method_kwargs)
elif method == "occlusion":
attributions = NNAttribution(self.model).occlusion(data_loader, **method_kwargs)
elif method == "shapley_value_sampling":
attributions = NNAttribution(self.model).shapley_value_sampling(data_loader, **method_kwargs)
elif method == "permutation":
attributions = NNAttribution(self.model).permutation(data_loader, **method_kwargs)
elif method == "lrp":
# attributions = NNAttribution(self.model).lrp(data_loader, **method_kwargs)
raise ValueError(f"lrp not implemented yet")
elif method == "deeplift":
# attributions = NNAttribution(self.model).deeplift(data_loader, **method_kwargs)
raise ValueError(f"deeplift not working properly with tabnet due to parameter sharing")
else:
raise ValueError(f"unknown attribution method {method}")
attributions = _postprocess(attributions, postprocess)
return attributions, preds, probs.detach().cpu().numpy()
def _attribution_rf(self, data_loader: DataLoader, method: str = "default", **method_kwargs) -> Tuple[
np.ndarray, np.ndarray, np.ndarray]:
preds, probs = self.model.predict(data_loader)
postprocess = method_kwargs.pop("postprocess", None)
if method == "impurity":
attributions = RFAttribution(self.model).impurity(data_loader)
elif method == "treeinterpreter":
attributions = RFAttribution(self.model).treeinterpreter(data_loader, **method_kwargs)
indices = np.expand_dims(preds, axis=(1, 2))
attributions = np.take_along_axis(attributions, indices, axis=2).squeeze()
elif method == "permutation":
attributions = RFAttribution(self.model).permutation(data_loader, **method_kwargs)
elif method == "input_x_impurity":
attributions = RFAttribution(self.model).input_x_impurity(data_loader)
elif method == "occlusion":
attributions = RFAttribution(self.model).occlusion(data_loader, **method_kwargs)
elif method == "shapley_value_sampling":
attributions = RFAttribution(self.model).shapley_value_sampling(data_loader, **method_kwargs)
else:
raise ValueError(f"unknown type {method}")
attributions = _postprocess(attributions, postprocess)
pos_probs = probs[:, -1]
return attributions, preds, pos_probs
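    # Note on the treeinterpreter branch above (added for clarity; the shape is
    # an assumption inferred from how the result is indexed): `attributions` is
    # presumably (n_samples, n_features, n_classes), so expanding `preds` to
    # (n_samples, 1, 1) and applying np.take_along_axis on axis=2 selects, for
    # each sample, the per-feature contributions of its predicted class.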
def _attribution_mlp(self, data_loader: DataLoader, method: str = "default", **method_kwargs) -> Tuple[
np.ndarray, np.ndarray, np.ndarray]:
"""gets the mlp attribution using the defined data_loader"""
probs = []
for inputs, labels in data_loader:
_, _probs, _ = self.model(inputs.to(self.device))
probs.append(_probs)
probs = torch.cat(probs)
preds = (probs > self.threshold).float()
preds = preds.detach().cpu().numpy()
preds = preds[:, self.label_idx] if preds.ndim > 1 else preds
postprocess = method_kwargs.pop("postprocess", None)
if method == "integrated_gradients":
attributions = NNAttribution(self.model).integrated_gradients(data_loader, **method_kwargs)
elif method == "noise_tunnel_ig":
attributions = NNAttribution(self.model).noise_tunnel_ig(data_loader, **method_kwargs)
elif "saliency" in method:
attributions = NNAttribution(self.model).saliency(data_loader, **method_kwargs)
elif method == "input_x_gradient":
attributions = NNAttribution(self.model).input_x_gradient(data_loader, **method_kwargs)
elif method == "occlusion":
attributions = NNAttribution(self.model).occlusion(data_loader, **method_kwargs)
elif method == "shapley_value_sampling":
attributions = NNAttribution(self.model).shapley_value_sampling(data_loader, **method_kwargs)
elif method == "permutation":
attributions = NNAttribution(self.model).permutation(data_loader, **method_kwargs)
elif method == "lrp":
# attributions = NNAttribution(self.model).lrp(data_loader, **method_kwargs)
raise ValueError(f"lrp is not implemented - custom rules needs to be added")
elif method == "deeplift":
attributions = NNAttribution(self.model).deeplift(data_loader, **method_kwargs)
else:
raise ValueError(f"unknown attribution method {method}")
attributions = _postprocess(attributions, postprocess)
return attributions, preds, probs.detach().cpu().numpy()
def _attribution_gbdt(self, data_loader: DataLoader, method: str = "default", **method_kwargs) -> Tuple[
np.ndarray, np.ndarray, np.ndarray]:
preds, probs = self.model.predict(data_loader)
#preds = np.where(probs > self.threshold, 1, 0)
if method == "feature_importances":
attributions = GBDTAttribution(self.model).feature_importances(data_loader)
elif method == "shap":
attributions = GBDTAttribution(self.model).shap(data_loader, **method_kwargs)
else:
raise ValueError(f"unknown attribution method {method}")
return attributions, preds, probs
def _attribution(self, data_loader: DataLoader, method: str, **method_kwargs) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
if isinstance(self.model, TabNetClassifier):
return self._attribution_tabnet(data_loader, method=method, **method_kwargs)
elif isinstance(self.model, RandomForest):
return self._attribution_rf(data_loader, method=method, **method_kwargs)
elif isinstance(self.model, MLPClassifier):
return self._attribution_mlp(data_loader, method=method, **method_kwargs)
elif isinstance(self.model, GBDT):
return self._attribution_gbdt(data_loader, method=method, **method_kwargs)
else:
raise ValueError(f"model type is not supported {type(self.model)}")
def _attribute(self, data_type: str, method: str, **method_kwargs) -> Tuple[Dict, List[Dict], str, str]:
def _filter_mappings(featurizer_atomic_mappings, indices):
featurizer_atomic_mappings_filtered = []
for atomic_mappings in featurizer_atomic_mappings:
featurizer_atomic_mappings_filtered.append([atomic_mappings[idx] for idx in indices])
return featurizer_atomic_mappings_filtered
featurizer_atomic_mappings = None
if data_type == "train":
data_loader = self.dm.train_dataloader()
data = self.dm.data.iloc[self.dm.train_indices]
if self.dm.featurizer_atomic_mappings is not None:
featurizer_atomic_mappings = _filter_mappings(self.dm.featurizer_atomic_mappings, self.dm.train_indices)
elif data_type == "val":
data_loader = self.dm.val_dataloader()
data = self.dm.data.iloc[self.dm.val_indices]
if self.dm.featurizer_atomic_mappings is not None:
featurizer_atomic_mappings = _filter_mappings(self.dm.featurizer_atomic_mappings, self.dm.val_indices)
elif data_type == "test":
data_loader = self.dm.test_dataloader()
data = self.dm.data.iloc[self.dm.test_indices]
if self.dm.featurizer_atomic_mappings is not None:
featurizer_atomic_mappings = _filter_mappings(self.dm.featurizer_atomic_mappings, self.dm.test_indices)
else:
raise ValueError(f"unknown data type {data_type}")
smiles = data["smiles"].tolist()
labels = data[self.label].tolist()
attribution, preds, _ = self._attribution(data_loader, method=method, **method_kwargs)
labels = np.array(labels)
if self.nr_samples:
if featurizer_atomic_mappings is not None:
for i in range(len(featurizer_atomic_mappings)):
featurizer_atomic_mappings[i] = featurizer_atomic_mappings[i][:self.nr_samples]
smiles = smiles[:self.nr_samples]
labels = labels[:self.nr_samples, ...]
preds = preds[:self.nr_samples, ...]
attribution = attribution[:self.nr_samples, ...]
atomic_attributions = self.dm.atomic_attributions(
smiles_or_mappings=smiles if featurizer_atomic_mappings is None else featurizer_atomic_mappings,
feature_attributions=attribution)
result, reference_results, df = calculate_ranking_scores(
smiles=smiles,
references=self.references,
atomic_attributions=atomic_attributions,
labels=labels,
preds=preds,
)
out_fname = method + "-" + data_type + "_dataset" if not self.out_fname else self.out_fname
out_name = self.out_dir + out_fname + "-" + "attribution_details-" + self.label
out_path_df = out_name + ".tsv"
df.to_csv(out_path_df, sep="\t")
out_path_results = out_name + ".json"
with open(out_path_results, "w") as f:
results = [{"attribution_results": result}] + reference_results
f.write(json.dumps(results))
return result, reference_results, out_path_df, out_path_results
def attribute(self, verbose: bool = False) -> Dict:
value = lambda v: v if len(str(v)) <= 250 else "...value too long for mlflow - not inserted"
metrics = {}
for t, m in itertools.product(self.data_types, self.methods):
method_name = next(iter(m))
method_kwargs = m[method_name] if m[method_name] is not None else {}
result, reference_results, out_path_df, out_path_results = self._attribute(data_type=t, method=method_name, **method_kwargs)
if self.logger:
self.logger.experiment.log_artifact(run_id=self.logger._run_id, local_path=out_path_results)
if verbose:
self.logger.experiment.log_artifact(run_id=self.logger._run_id, local_path=out_path_df)
for i, reference_result in enumerate(reference_results):
reference_smile, reference_result_values = next(iter(reference_result.items()))
if self.logger:
try:
self.logger.experiment.log_param(run_id=self.logger._run_id, key="train/threshold-t" + str(self.label_idx),
value=value(self.threshold))
self.logger.experiment.log_param(run_id=self.logger._run_id, key="smile" + str(i), value=value(reference_smile))
except RestException as re:
print(re)
for k, v in reference_result_values.items():
key = t + "/" + "smile" + str(i) + "/" + k
key = key + "/" + method_name if method_name != "default" else key
if key in self.track_metrics and self.logger:
self.logger.experiment.log_metric(run_id=self.logger._run_id, key=key, value=v)
metrics[key] = v
for k, v in result.items():
                key = t + "/" + "mean" + "/" + k
key = key + "/" + method_name if method_name != "default" else key
if key in self.track_metrics and self.logger:
self.logger.experiment.log_metric(run_id=self.logger._run_id, key=key, value=v)
metrics[key] = v
return metrics
def _normalize(data: np.ndarray) -> np.ndarray:
_min = np.expand_dims(np.min(data, axis=1), axis=1)
_max = np.expand_dims(np.max(data, axis=1), axis=1)
data = (data - _min) / (_max - _min)
return data
def _postprocess(attributions: np.ndarray, postprocess: Optional[str] = None) -> np.ndarray:
if postprocess == "normalize":
attributions = _normalize(attributions)
elif postprocess == "positive":
attributions[attributions < .0] = .0
elif postprocess == "relative":
attributions[attributions > .0] += attributions[attributions > .0] * 2
attributions[attributions < .0] *= -1
elif postprocess == "absolute":
attributions = np.abs(attributions)
elif postprocess == "flip_sign":
attributions *= -1
return attributions
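# Illustrative sketch of the postprocess modes above (comments only, not
# executed; each call is assumed to get a fresh array, since several modes
# modify their input in place):
#
#     a = np.array([[-1.0, 0.5, 2.0]])
#     _postprocess(a, "absolute")   # -> [[1.0, 0.5, 2.0]]
#     _postprocess(a, "positive")   # -> [[0.0, 0.5, 2.0]]  (negatives zeroed)
#     _postprocess(a, "normalize")  # -> [[0.0, 0.5, 1.0]]  (row-wise min-max)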
def attribution_fn(args: Namespace):
model = TabNetClassifier.load_from_checkpoint(args.checkpoint_path + args.checkpoint_name, strict=False)
# model = MLPClassifier.load_from_checkpoint(args.checkpoint_path + args.checkpoint_name, strict=False)
args = Namespace(**dict(model.hparams_initial, **vars(args)))
_experiment = MlflowClient(args.tracking_uri).get_experiment_by_name(args.experiment_name)
mlf_logger = MLFlowLogger(
experiment_name=args.experiment_name,
tracking_uri=args.tracking_uri,
artifact_location=_experiment.artifact_location if _experiment is not None else None
)
if getattr(args, "run_id", None) is not None:
mlf_logger._run_id = args.run_id
dm = HERGClassifierDataModule(
batch_size=args.batch_size,
num_workers=multiprocessing.cpu_count(),
cache_dir=args.cache_dir,
split_seed=model.hparams.split_seed,
split_type=args.split_type,
split_size=args.split_size,
use_labels=model.hparams.use_labels,
featurizer_name=model.hparams.featurizer_name,
featurizer_kwargs=model.hparams.featurizer_kwargs,
featurizer_n_jobs=args.featurizer_n_jobs,
featurizer_mp_context=args.featurizer_mp_context,
featurizer_chunksize=args.featurizer_chunksize,
)
dm.prepare_data()
dm.setup()
attributor = Attribution(
model=model,
dm=dm,
logger=mlf_logger,
**args.attribution_kwargs
)
metrics = attributor.attribute()
return metrics
def manual_args(args: Namespace) -> Namespace:
"""function only called if no arguments have been passed to the script - mostly used for dev/debugging"""
args.track_metrics = []
args.track_metrics += [
# "test/mean/avg_score_pred_active",
"test/mean/avg_score_pred_inactive/tabnet",
"test/mean/avg_score_pred_inactive/integrated_gradients",
"test/mean/avg_score_pred_inactive/saliency",
"test/mean/avg_score_pred_inactive/saliency-absolute",
"test/mean/avg_score_pred_inactive/input_x_gradient",
"test/mean/avg_score_pred_inactive/occlusion",
"test/mean/avg_score_pred_inactive/deeplift",
"test/mean/avg_score_pred_inactive/shapley_value_sampling",
"test/mean/avg_score_pred_inactive/noise_tunnel_ig",
"test/mean/avg_score_pred_active/tabnet",
"test/mean/avg_score_pred_active/integrated_gradients",
"test/mean/avg_score_pred_active/saliency",
"test/mean/avg_score_pred_active/saliency-absolute",
"test/mean/avg_score_pred_active/input_x_gradient",
"test/mean/avg_score_pred_active/occlusion",
"test/mean/avg_score_pred_active/deeplift",
"test/mean/avg_score_pred_active/shapley_value_sampling",
"test/mean/avg_score_pred_active/noise_tunnel_ig",
]
# args.track_metrics += ["test" + "/" + "smile" + str(i) + "/" + "avg_score_true_active" for i in range(20)]
# args.track_metrics += ["test" + "/" + "smile" + str(i) + "/" + "avg_score_true_inactive" for i in range(20)]
# attribution params
args.attribution_kwargs = {
"data_types": ["test"],
"methods": [
{"tabnet": {
"postprocess": None
}},
# {"deeplift": {
# "postprocess": None
# }},
{"integrated_gradients": {
"postprocess": None
}},
{"saliency": {
"postprocess": None,
"abs": False, # Returns absolute value of gradients if set to True
}},
{"saliency-absolute": {
"postprocess": None,
"abs": True,
}},
{"input_x_gradient": {
"postprocess": None
}},
{"occlusion": {
"sliding_window_shapes": (1,),
"perturbations_per_eval": 1,
"show_progress": True,
"postprocess": None
}},
{"shapley_value_sampling": {
"n_samples": 10, # The number of feature permutations tested
"perturbations_per_eval": 1,
"show_progress": | |
# File: guangdong/workspace/tools/generate_result.py
import os
import json
import os.path as osp
from tqdm import tqdm
import torch
from mmcv import Config, DictAction
from mmcv.cnn import fuse_conv_bn
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import (get_dist_info, init_dist, load_checkpoint,
wrap_fp16_model)
from mmdet.apis import multi_gpu_test, single_gpu_test
from mmdet.datasets import (build_dataloader, build_dataset,
replace_ImageToTensor)
from mmdet.models import build_detector
import argparse
import warnings
from ensemble_boxes import *
import cv2
import numpy as np
def parse_args():
parser = argparse.ArgumentParser(
description='MMDet test (and eval) a model')
parser.add_argument('config1', help='test config file path')
parser.add_argument('config2', help='test config file path')
parser.add_argument('config3', help='test config file path')
parser.add_argument('checkpoint1', help='checkpoint file')
parser.add_argument('checkpoint2', help='checkpoint file')
parser.add_argument('checkpoint3', help='checkpoint file')
parser.add_argument('json_name', help='save json_name')
parser.add_argument('flag', help='which part to generate')
# parser.add_argument('task', help='which part to generate')
parser.add_argument('--out', help='output result file in pickle format')
parser.add_argument(
'--fuse-conv-bn',
action='store_true',
help='Whether to fuse conv and bn, this will slightly increase'
'the inference speed')
parser.add_argument(
'--format-only',
action='store_true',
help='Format the output results without perform evaluation. It is'
'useful when you want to format the result to a specific format and '
'submit it to the test server')
parser.add_argument(
'--eval',
type=str,
nargs='+',
help='evaluation metrics, which depends on the dataset, e.g., "bbox",'
' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC')
parser.add_argument('--show', action='store_true', help='show results')
parser.add_argument(
'--show-dir', help='directory where painted images will be saved')
parser.add_argument(
'--show-score-thr',
type=float,
default=0.3,
help='score threshold (default: 0.3)')
parser.add_argument(
'--gpu-collect',
action='store_true',
help='whether to use gpu to collect results.')
parser.add_argument(
'--tmpdir',
help='tmp directory used for collecting results from multiple '
'workers, available when gpu-collect is not specified')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--options',
nargs='+',
action=DictAction,
help='custom options for evaluation, the key-value pair in xxx=yyy '
'format will be kwargs for dataset.evaluate() function (deprecate), '
'change to --eval-options instead.')
parser.add_argument(
'--eval-options',
nargs='+',
action=DictAction,
help='custom options for evaluation, the key-value pair in xxx=yyy '
'format will be kwargs for dataset.evaluate() function')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
if args.options and args.eval_options:
raise ValueError(
'--options and --eval-options cannot be both '
'specified, --options is deprecated in favor of --eval-options')
if args.options:
warnings.warn('--options is deprecated in favor of --eval-options')
args.eval_options = args.options
return args
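# Illustrative invocation sketch (not part of the original script; all paths
# and the trailing `flag` value are placeholders):
#
#     python generate_result.py cfg1.py cfg2.py cfg3.py \
#         ckpt1.pth ckpt2.pth ckpt3.pth result.json testA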
def visulize_result(image_path,result_path,save_path):
results = json.load(open(result_path))
im_bbox = {}
for res in results:
name = res['name']
bbox = res['bbox']
category = res['category']
        if name not in im_bbox:
            im_bbox[name] = []
        im_bbox[name].append([bbox, category])
for im_name in tqdm(im_bbox.keys()):
img_path = osp.join(image_path,im_name)
image = cv2.imread(img_path)
for ann in im_bbox[im_name]:
bbox = ann[0]
cat = ann[1]
            image = cv2.rectangle(image, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), (0, 0, 255), 3)
            image = cv2.putText(image, str(cat), (int(bbox[0]), int(bbox[1])), cv2.FONT_HERSHEY_SIMPLEX, 10, (0, 0, 255), 3)
img_save = osp.join(save_path,im_name)
cv2.imwrite(img_save,image)
def fuse_single(results,save_path):
# results = json.load(open(json_file))
new_result = []
for res in tqdm(results):
name = res['name']
bbox = res['bbox']
if bbox[0]==bbox[2] or bbox[1]==bbox[3]:
continue
m_ind = name.find('M')
str_after_m = name[m_ind+3:-4]
h_index,w_index = str_after_m.split('_', 1)
h_index = int(h_index)
w_index = int(w_index)
fname,ext = osp.splitext(name)
# import pdb
# pdb.set_trace()
father_name = fname[:m_ind+2]
new_name = father_name+ext
bbox = [bbox[0]+w_index*500,bbox[1]+h_index*500,bbox[2]+w_index*500,bbox[3]+h_index*500]
res['name'] = new_name
res['bbox'] = bbox
new_result.append(res)
with open(save_path, 'w') as f:
json.dump(new_result,f,indent=6)
return new_result
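# Illustrative sketch of the tile-offset math above (the file-name layout is an
# assumption; only the arithmetic mirrors the code): for a crop whose name
# encodes h_index=2 and w_index=3 after the 'M' marker, a local box
# [10, 20, 30, 40] maps back to the full image as
# [10 + 3*500, 20 + 2*500, 30 + 3*500, 40 + 2*500] = [1510, 1020, 1530, 1040].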
def py_cpu_nms(dets, thresh):
# import pdb
# pdb.set_trace()
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
areas = (y2 - y1 + 1) * (x2 - x1 + 1)
scores = dets[:, 4]
keep = []
index = scores.argsort()[::-1]
while index.size > 0:
        i = index[0]  # the first index is always the highest-scoring box; keep it
keep.append(i)
x11 = np.maximum(x1[i], x1[index[1:]]) # calculate the points of overlap
y11 = np.maximum(y1[i], y1[index[1:]])
x22 = np.minimum(x2[i], x2[index[1:]])
y22 = np.minimum(y2[i], y2[index[1:]])
        w = np.maximum(0, x22 - x11 + 1)  # the widths of the overlap regions
        h = np.maximum(0, y22 - y11 + 1)  # the heights of the overlap regions
overlaps = w * h
ious = overlaps / (areas[i] + areas[index[1:]] - overlaps)
idx = np.where(ious <= thresh)[0]
        index = index[idx + 1]  # offset by 1 because ious was computed against index[1:]
return keep
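# Illustrative sketch (comments only): two heavily overlapping boxes and one
# separate box; with thresh=0.5 the lower-scoring duplicate is suppressed.
#
#     dets = np.array([[0, 0, 10, 10, 0.9],
#                      [1, 1, 11, 11, 0.8],
#                      [50, 50, 60, 60, 0.7]])
#     py_cpu_nms(dets, 0.5)   # -> [0, 2]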
def fuse_single_new(results,save_path):
# results = json.load(open(json_file))
xmin = xmax = ymin = ymax = 0
# if cam=='CAM1':
# xmin = 1366
# xmax = 6830
# ymin = 400
# ymax = 5600
# elif cam == 'CAM2':
# xmin = 1366
# xmax = 6830
# ymin = 400
# ymax = 5600
# elif cam == 'CMA3':
# xmin = 682
# xmax = 3413
# ymin = 700
# ymax = 3300
final_result = []
new_result = {}
for res in tqdm(results):
name = res['name']
bbox = res['bbox']
category = res['category']
score = res['score']
if bbox[0]==bbox[2] or bbox[1]==bbox[3]:
continue
m_ind = name.find('M')
str_after_m = name[m_ind+3:-4]
x,y = str_after_m.split('_', 1)
x = int(x)
y = int(y)
fname,ext = osp.splitext(name)
father_name = fname[:m_ind+2]
new_name = father_name+ext
if not new_name in new_result.keys():
new_result[new_name] = []
bbox = [bbox[0]+x,bbox[1]+y,bbox[2]+x,bbox[3]+y]
# if score<0.05:
# continue
# if bbox[0]<xmin or bbox[1]<ymin or bbox[2]>xmax or bbox[3]>ymax or score<0.05:
# continue
bbox.append(score)
new_result[new_name].append([bbox,category])
for img_name in new_result.keys():
res = new_result[img_name]
bboxs = np.array([re[0] for re in res])
if bboxs.shape[0]==0:
new_result[img_name] = [[], [],[]]
continue
# keep = py_cpu_nms(bboxs,0.5)
# bbox = bboxs[keep]
# bbox = bboxs
scores = bboxs[:,4].tolist()
bboxs = bboxs[:,:4].tolist()
categories= np.array([re[1] for re in res]).tolist()
new_result[img_name] = [categories,scores,bboxs]
for img_name in new_result.keys():
res = new_result[img_name]
for cat,score,bbox in zip(res[0],res[1],res[2]):
fin_res = {}
fin_res['name'] = img_name
fin_res['category'] = cat
fin_res['bbox'] = bbox
fin_res['score'] = score
final_result.append(fin_res)
with open(save_path, 'w') as f:
json.dump(final_result,f,indent=6)
return final_result
def process_result(result):
result_dict = {}
pre_name_dict = {}
for res in result:
name = res['name']
pre_name_dict[name[:8]] = name
if not res['name'] in result_dict.keys():
result_dict[name] = {}
result_dict[name]['bboxs'] = []
result_dict[name]['scores'] = []
result_dict[name]['labels'] = []
box = res['bbox']
box = [1.0 * box[0] / 8192, 1.0 * box[1] / 6000, 1.0 * box[2] / 8192, 1.0 * box[3] / 6000]
result_dict[name]['bboxs'].append(box)
result_dict[name]['scores'].append(res['score'])
result_dict[name]['labels'].append(res['category'])
return result_dict,pre_name_dict
def process_result_cam3(result):
result_dict = {}
pre_name_dict = {}
for res in result:
name = res['name']
pre_name_dict[name[:8]] = name
if not res['name'] in result_dict.keys():
result_dict[name] = {}
result_dict[name]['bboxs'] = []
result_dict[name]['scores'] = []
result_dict[name]['labels'] = []
box = res['bbox']
box = [1.0 * box[0] / 4096, 1.0 * (box[1]-500) / 3000 if box[1]>500 else 0, 1.0 * box[2] / 4096, 1.0 * (box[3]-500) / 3000 if box[3]>500 else 0]
result_dict[name]['bboxs'].append(box)
result_dict[name]['scores'].append(res['score'])
result_dict[name]['labels'].append(res['category'])
return result_dict,pre_name_dict
def split_result(res_path):
result = json.load(open(res_path))
res1 = []
res2 = []
res3 = []
for ann in result:
if 'CAM1' in ann['name']:
res1.append(ann)
if 'CAM2' in ann['name']:
res2.append(ann)
if 'CAM3' in ann['name']:
res3.append(ann)
return res1,res2,res3
def box_map_cam12(box):
box = [round(box[0]*8192,2),round(box[1]*6000,2),round(box[2]*8192,2),round(box[3]*6000,2)]
return box
def box_map_cam3(box):
box = [round(box[0]*4096,2),round(box[1]*3000+500,2),round(box[2]*4096,2),round(box[3]*3000+500,2)]
return box
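# Note on the helpers above (added for clarity): process_result() normalizes
# CAM1/CAM2 boxes by the full 8192x6000 image size so they can be combined with
# weighted_boxes_fusion, and box_map_cam12() maps a fused box back, e.g.
# [0.5, 0.5, 0.75, 1.0] -> [4096.0, 3000.0, 6144.0, 6000.0]; box_map_cam3()
# additionally undoes the 500-pixel vertical crop offset used for CAM3.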
def fuse_result(res1,res2,res3,save_path):
# import pdb
# pdb.set_trace()
# res1_dict,pre_name_dict1 = process_result(res1)
# res2_dict,pre_name_dict2 = process_result(res2)
# res3_dict,pre_name_dict3 = process_result(res3)
# img1_pre = pre_name_dict1.keys()
# img2_pre = pre_name_dict2.keys()
# img3_pre = pre_name_dict3.keys()
# common_img = set(img1_pre) & set(img2_pre) & set(img3_pre)
# fused_result = {}
# for img_name in tqdm(common_img):
# anno_1 = res1_dict[pre_name_dict1[img_name]]
# anno_2 = res2_dict[pre_name_dict2[img_name]]
# anno_3 = res3_dict[pre_name_dict3[img_name]]
# bbox_res1 = anno_1['bboxs']
# score_res1 = anno_1['scores']
# cat_res1 = anno_1['labels']
# bbox_res2 = anno_2['bboxs']
# score_res2 = anno_2['scores']
# cat_res2 = anno_2['labels']
# bbox_res3 = anno_3['bboxs']
# score_res3 = anno_3['scores']
# cat_res3 = anno_3['labels']
# bbox_list = [bbox_res1,bbox_res2,bbox_res3]
# score_list = [score_res1,score_res2,score_res3]
# cat_list = [cat_res1,cat_res2,cat_res3]
# weights = [2,1,1]
# iou_thr = 0.5
# skip_box_thr = 0.001
# boxes, scores, labels = weighted_boxes_fusion(bbox_list, score_list, cat_list, weights=weights,
# iou_thr=iou_thr, skip_box_thr=skip_box_thr)
# fused_result[img_name] = {}
# fused_result[img_name]['bboxes'] = boxes
# fused_result[img_name]['scores'] = scores
# fused_result[img_name]['labels'] = labels
# for i,ann in enumerate(res1):
# if ann['name'][:8] in common_img:
# res1.pop(i)
# for i, ann in enumerate(res2):
# if ann['name'][:8] in common_img:
# res2.pop(i)
# for i, ann in enumerate(res3):
# if ann['name'][:8] in common_img:
# res3.pop(i)
#
| |
def update_snmp_config(self, context):
"""Update the snmpd configuration"""
personalities = [constants.CONTROLLER]
config_uuid = self._config_update_hosts(context, personalities)
config_dict = {
"personalities": personalities,
"classes": ['platform::snmp::runtime',
'platform::fm::runtime'],
}
self._config_apply_runtime_manifest(context, config_uuid, config_dict)
def get_ceph_pools_config(self, context):
return self._ceph.get_pools_config()
def get_controllerfs_lv_sizes(self, context):
system = self.dbapi.isystem_get_one()
system_dc_role = system.get('distributed_cloud_role', None)
lvdisplay_command = 'lvdisplay --columns --options lv_size,lv_name ' \
'--units g --noheading --nosuffix ' \
'/dev/cgts-vg/pgsql-lv /dev/cgts-vg/backup-lv ' \
'/dev/cgts-vg/platform-lv ' \
'/dev/cgts-vg/scratch-lv ' \
'/dev/cgts-vg/extension-lv ' \
'/dev/cgts-vg/docker-lv ' \
'/dev/cgts-vg/etcd-lv ' \
'/dev/cgts-vg/dockerdistribution-lv '
if (system_dc_role == constants.DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER and
tsc.system_type != constants.TIS_AIO_BUILD):
lvdisplay_command = lvdisplay_command + '/dev/cgts-vg/patch-vault-lv '
lvdisplay_dict = {}
# Execute the command.
try:
lvdisplay_process = subprocess.Popen(lvdisplay_command,
stdout=subprocess.PIPE,
shell=True)
except Exception as e:
LOG.error("Could not retrieve lvdisplay information: %s" % e)
return lvdisplay_dict
lvdisplay_output = lvdisplay_process.communicate()[0]
lvdisplay_dict = cutils.output_to_dict(lvdisplay_output)
LOG.debug("get_controllerfs_lv_sizes lvdisplay_output %s" % lvdisplay_output)
return lvdisplay_dict
def get_cinder_gib_pv_sizes(self, context):
pvs_command = 'pvs --options pv_size,vg_name --units g --noheading ' \
'--nosuffix | grep cinder-volumes'
pvs_dict = {}
# Execute the command.
try:
pvs_process = subprocess.Popen(pvs_command,
stdout=subprocess.PIPE,
shell=True)
except Exception as e:
LOG.error("Could not retrieve pvs information: %s" % e)
return pvs_dict
pvs_output = pvs_process.communicate()[0]
pvs_dict = cutils.output_to_dict(pvs_output)
return pvs_dict
def get_ceph_object_pool_name(self, context):
"""
Get Rados Gateway object data pool name
"""
return self._ceph.get_ceph_object_pool_name()
def get_partition_size(self, context, partition):
# Use the 'blockdev' command for obtaining the size of the partition.
get_size_command = '{0} {1}'.format('blockdev --getsize64',
partition)
partition_size = None
try:
get_size_process = subprocess.Popen(get_size_command,
stdout=subprocess.PIPE,
shell=True)
except Exception as e:
LOG.error("Could not retrieve device information: %s" % e)
return partition_size
partition_size = get_size_process.communicate()[0]
partition_size = partition_size if partition_size else None
if partition_size:
# We also need to add the size of the partition table.
partition_size = int(partition_size) +\
constants.PARTITION_TABLE_SIZE
# Convert bytes to GiB and round to be sure.
partition_size = int(round(
cutils.bytes_to_GiB(partition_size)))
return partition_size
def get_cinder_partition_size(self, context):
# Obtain the active controller.
active_controller = None
hosts = self.dbapi.ihost_get_by_personality(constants.CONTROLLER)
for h in hosts:
if utils.is_host_active_controller(h):
active_controller = h
if not active_controller:
raise exception.SysinvException(_("Unable to obtain active "
"controller."))
# Obtain the cinder disk.
cinder_device = cutils._get_cinder_device(self.dbapi,
active_controller.id)
# Raise exception in case we couldn't get the cinder disk.
if not cinder_device:
raise exception.SysinvException(_(
"Unable to determine the current value of cinder_device for "
"host %s " % active_controller.hostname))
# The partition for cinder volumes is always the first.
cinder_device_partition = '{}{}'.format(cinder_device, '-part1')
cinder_size = self.get_partition_size(context, cinder_device_partition)
return cinder_size
def region_has_ceph_backend(self, context):
"""
Send a request to the primary region to see if ceph is configured
"""
return self._openstack.region_has_ceph_backend()
def get_system_tpmconfig(self, context):
"""
Retrieve the system tpmconfig object
"""
try:
tpmconfig = self.dbapi.tpmconfig_get_one()
if tpmconfig:
return tpmconfig.as_dict()
except exception.NotFound:
# No TPM configuration found
return None
def get_tpmdevice_by_host(self, context, host_id):
"""
Retrieve the tpmdevice object for this host
"""
try:
tpmdevice = self.dbapi.tpmdevice_get_by_host(host_id)
if tpmdevice and len(tpmdevice) == 1:
return tpmdevice[0].as_dict()
except exception.NotFound:
# No TPM device found
return None
def update_tpm_config(self, context, tpm_context, update_file_required=True):
"""Notify agent to configure TPM with the supplied data.
:param context: an admin context.
:param tpm_context: the tpm object context
:param update_file_required: boolean, whether file needs to be updated
"""
LOG.debug("ConductorManager.update_tpm_config: sending TPM update %s "
"to agents" % tpm_context)
rpcapi = agent_rpcapi.AgentAPI()
personalities = [constants.CONTROLLER]
# the original key from which TPM context will be derived
# needs to be present on all agent nodes, as well as
# the public cert
if update_file_required:
for fp in ['cert_path', 'public_path']:
file_name = tpm_context[fp]
with open(file_name, 'r') as content_file:
file_content = content_file.read()
config_dict = {
'personalities': personalities,
'file_names': [file_name],
'file_content': file_content,
}
# TODO(jkung): update public key info
config_uuid = self._config_update_hosts(context, personalities)
rpcapi.iconfig_update_file(context,
iconfig_uuid=config_uuid,
iconfig_dict=config_dict)
rpcapi.apply_tpm_config(context,
tpm_context=tpm_context)
def update_tpm_config_manifests(self, context, delete_tpm_file=None):
"""Apply TPM related runtime manifest changes. """
LOG.info("update_tpm_config_manifests")
personalities = [constants.CONTROLLER]
config_uuid = self._config_update_hosts(context, personalities)
if delete_tpm_file:
# Delete the TPM file from the controllers
rpcapi = agent_rpcapi.AgentAPI()
command = ['rm', '-f', delete_tpm_file]
hosts = self.dbapi.ihost_get_by_personality(constants.CONTROLLER)
for host in hosts:
rpcapi.execute_command(context, host.uuid, command)
config_dict = {
"personalities": personalities,
"classes": ['platform::haproxy::runtime',
'openstack::horizon::runtime']
}
self._config_apply_runtime_manifest(context, config_uuid, config_dict)
def _set_tpm_config_state(self,
ihost, response_dict):
"""Update tpm configuration state. """
try:
existing_tpmdevice = \
self.dbapi.tpmdevice_get_by_host(ihost.uuid)
if (len(existing_tpmdevice) > 1):
LOG.error("Multiple tpmdevice entries found for host %s" %
ihost.uuid)
return
elif not existing_tpmdevice:
LOG.debug("TPM Audit: No tpmdevice entry found while TPM "
"configuration exists.")
return
existing_tpmdevice = existing_tpmdevice[0]
except exception.NotFound:
# No TPM configuration. No need to update status
return
updated_state = None
if response_dict['is_configured']:
updated_state = constants.TPMCONFIG_APPLIED
else:
updated_state = constants.TPMCONFIG_FAILED
if (updated_state and updated_state != existing_tpmdevice.state):
self.dbapi.tpmdevice_update(existing_tpmdevice.uuid,
{'state': updated_state})
def tpm_config_update_by_host(self, context,
host_uuid, response_dict):
"""Get TPM configuration status from Agent host.
This method allows for alarms to be raised for hosts if TPM
is not configured properly.
:param context: an admin context
:param host_uuid: host unique id
:param response_dict: configuration status
:returns: pass or fail
"""
LOG.debug("Entering tpm_config_update_by_host %s %s" %
(host_uuid, response_dict))
        host_uuid = host_uuid.strip()
try:
tpm_host = self.dbapi.ihost_get(host_uuid)
entity_instance_id = ("%s=%s" %
(fm_constants.FM_ENTITY_TYPE_HOST,
tpm_host.hostname))
alarm_id = fm_constants.FM_ALARM_ID_TPM_INIT
if response_dict['is_configured']:
tpmdevice = self.get_tpmdevice_by_host(context, host_uuid)
# apply config manifest for tpm create/update
if (tpmdevice and
tpmdevice['state'] ==
constants.TPMCONFIG_APPLYING):
self.update_tpm_config_manifests(context)
# update the system configuration state
self._set_tpm_config_state(tpm_host, response_dict)
# do a blind clear on any TPM alarm
# for this host.
self.fm_api.clear_fault(alarm_id,
entity_instance_id)
else:
# update the system configuration state
self._set_tpm_config_state(tpm_host, response_dict)
# set an alarm for this host and tell
# mtce to degrade this node
if not self.fm_api.get_fault(alarm_id, entity_instance_id):
fault = fm_api.Fault(
alarm_id=alarm_id,
alarm_state=fm_constants.FM_ALARM_STATE_SET,
entity_type_id=fm_constants.FM_ENTITY_TYPE_HOST,
entity_instance_id=entity_instance_id,
severity=fm_constants.FM_ALARM_SEVERITY_MAJOR,
reason_text="TPM configuration failed "
"or device not found.",
# equipment
alarm_type=fm_constants.FM_ALARM_TYPE_4,
# procedural-error
probable_cause=fm_constants.ALARM_PROBABLE_CAUSE_64,
proposed_repair_action="reinstall HTTPS certificate; "
"if problem persists",
service_affecting=False)
self.fm_api.set_fault(fault)
except Exception:
raise exception.SysinvException(_(
"Invalid host_uuid: %s") % host_uuid)
def tpm_device_update_by_host(self, context,
host_uuid, tpmdevice_dict):
"""Synchronously, have the conductor create or update
a tpmdevice per host.
:param context: request context.
:param host_uuid: uuid or id of the host
        :param tpmdevice_dict: a dictionary of tpm device attributes
:returns tpmdevice object
"""
try:
tpm_host = self.dbapi.ihost_get(host_uuid)
except exception.ServerNotFound:
LOG.error("Cannot find host by id %s" % host_uuid)
return
tpm_devices = self.dbapi.tpmdevice_get_by_host(tpm_host.id)
if tpm_devices:
tpmdevice = self.dbapi.tpmdevice_update(tpm_devices[0].uuid,
tpmdevice_dict)
# update table tpmconfig updated_at as its visible from tpmconfig-show
try:
tpm_obj = self.dbapi.tpmconfig_get_one()
updated_at = timeutils.utcnow()
self.dbapi.tpmconfig_update(tpm_obj.uuid,
{'updated_at': updated_at})
LOG.info("TPM config updated at: %s" % updated_at)
except exception.NotFound:
LOG.error("tpm_device_update_by_host tpmconfig NotFound")
else:
try:
# create new tpmdevice
tpmdevice_dict.update({'host_uuid': tpm_host['uuid']})
tpmdevice = self.dbapi.tpmdevice_create(tpm_host['id'],
tpmdevice_dict)
except Exception:
LOG.exception("Cannot create TPM device for host %s" % host_uuid)
return
return tpmdevice
def cinder_prepare_db_for_volume_restore(self, context):
"""
Send a request to cinder to remove all volume snapshots and set all
volumes to error state in preparation for restoring all volumes.
This is needed for cinder disk replacement.
"""
response = self._openstack.cinder_prepare_db_for_volume_restore(context)
return response
def get_software_upgrade_status(self, context):
"""
Software upgrade status is needed by ceph-manager to take ceph specific
upgrade actions
"""
upgrade = {
'from_version': None,
'to_version': None,
'state': None}
try:
row = self.dbapi.software_upgrade_get_one()
upgrade['from_version'] = row.from_release
upgrade['to_version'] = row.to_release
upgrade['state'] = row.state
except exception.NotFound:
# No upgrade in progress
pass
return upgrade
def distribute_ceph_external_config(self, context, ceph_conf_filename):
"""Notify agent to distribute Ceph configuration file for external
cluster.
"""
LOG.debug("ceph_conf_file: %s" % ceph_conf_filename)
        # Retrieving the ceph config file that is stored in the /opt/platform/config
# during the file upload stage.
opt_ceph_conf_file = os.path.join(tsc.PLATFORM_CEPH_CONF_PATH,
ceph_conf_filename)
if not os.path.exists(opt_ceph_conf_file):
raise exception.SysinvException(
_("Could not find the uploaded ceph config file %s in %s")
% (ceph_conf_filename, tsc.PLATFORM_CEPH_CONF_PATH))
try:
f = open(opt_ceph_conf_file, "r")
f.seek(0, os.SEEK_SET)
contents = f.read()
except IOError:
msg = _("Failed to read ceph config file from %s " %
tsc.PLATFORM_CEPH_CONF_PATH)
raise exception.SysinvException(msg)
ceph_conf_file = os.path.join(constants.CEPH_CONF_PATH,
ceph_conf_filename)
personalities = [constants.CONTROLLER, constants.WORKER]
config_uuid = self._config_update_hosts(context, personalities)
config_dict = {
'personalities': personalities,
'file_names': [ceph_conf_file],
'file_content': contents,
}
self._config_update_file(context, config_uuid, config_dict)
def store_ceph_external_config(self, context, contents, ceph_conf_filename):
"""Store the uploaded external ceph config file in /opt/platform/config
"""
# Once this directory is created at installation time, we can
# remove this code.
if not os.path.exists(tsc.PLATFORM_CEPH_CONF_PATH):
os.makedirs(tsc.PLATFORM_CEPH_CONF_PATH)
opt_ceph_conf_file = os.path.join(tsc.PLATFORM_CEPH_CONF_PATH,
ceph_conf_filename)
# Because user needs root permission to manually delete ceph config file
# from /opt/platform/config/version/ceph-config directory if the file
# Repo: landportal/data
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import json
def getUNM49code(label):
return {
'Africa': "002",
'Americas': "019",
'Asia': "142",
'Caribbean': "029",
'Central Africa': "017",
'Central America': "013",
'Central Asia': "143",
'CIS (Commonwealth of Independent States)': [], #['ARM', 'BLR', 'KAZ', 'KGZ', 'MDA', 'RUS', 'TJK', 'TKM', 'UKR', 'UZB'],
'Eastern Africa': "014",
'Eastern Asia': "030",
'Eastern Europe': "151",
'Europe': "150",
'European Union Countries': [],#["AUT", "BEL", "BGR", "CYP", "CZE", "DEU", "DNK", "ESP", "EST", "FIN", "FRA", "GBR", "GRC", "HRV", "HUN", "IRL", "ITA", "LTU", "LUX", "LVA", "MLT", "NLD", "POL", "PRT", "ROU", "SVK", "SVN", "SWE"],
'Landlocked Developing Countries': [],# ["AFG","ARM","AZE","BDI","BFA","BOL","BTN","BWA","CAF","ETH","KAZ","KGZ","LAO", "LSO","MDA","MKD","MLI","MNG","MWI","NER","NPL","PRY","RWA","SSD","SWZ","TCD","TJK","TKM","UGA","UZB","ZMB","ZWE"],
'Least Developed Countries': None,
'North America': "021",
'North Asia': ['RUS'],
'Northern Africa': "015",
'Northern Europe': "154",
'Oceania': "009",
'Small Island Developing States': [], #["ABW", "AIA", "ASM", "ATG", "BHR", "BHS", "BLZ", "BRB", "COK", "COM", "CPV", "CUB", "CUW", "DMA", "DOM", "FJI", "FSM", "GNB", "GRD", "GUM", "GUY", "HTI", "JAM", "KIR", "KNA", "LCA", "MDV", "MHL", "MNP", "MSR", "NCL", "NIU", "NRU", "PLW", "PNG", "PRI", "PYF", "SGP", "SLB", "STP", "SUR", "SXM", "SYC", "TLS", "TON", "TTO", "TUV", "VCT", "VGB", "VIR", "VUT", "WSM"],
'South America': "005",
'South-Eastern Asia': "035",
'Southern Africa': "018",
'Southern Asia': "034",
'Southern Europe': "039",
'Western Africa': "011",
'Western Asia': "145",
'Middle East': "145",
'Western Europe': "155",
}[label]
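# Illustrative lookups against the mapping above (a KeyError is raised for any
# label that is not in the dict):
#   getUNM49code('Eastern Africa')            -> "014"    (UN M49 area code)
#   getUNM49code('North Asia')                -> ['RUS']  (ISO3 list instead of a code)
#   getUNM49code('Least Developed Countries') -> None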
def get_publisher(label):
if "austlii.edu.au" in label:
label = "www.austlii.edu.au"
return {
"www.legis-palop.org" : u"Projecto Apoio ao Desenvolvimento dos Sistemas Judiciários",
"www.qbz.gov.al" : u"Government of Albania",
"www.qpz.gov.al" : u"Government of Albania",
"www.bopa.ad" : u"Government of Andorra",
"www.laws.gov.ag" : u"Government of Antigua and Barbuda",
"www.ris.bka.gv.at" : u"Austrian Federal Government",
"www.e-qanun.az" : u"Government of Azerbaijan",
"http://www.presidence.gov.bi/" : u"Government of Burundi",
"www.assemblee.bi" : u"Government of Burundi",
"www.just.fgov.be" : u"Government of Belgium",
"www.droit-afrique" : u"Droit-Afrique",
"www.legiburkina.bf" : u"Government of Burkina Faso",
"bdlaws.minlaw.gov.bd" : u"Government of Bangladesh",
"www.parliament.bg" : u"Government of Bulgaria",
"www.legalaffairs.gov.bh" : u"Government of Bahrain",
"www.bahamas.gov.bs" : u"Government of the Bahamas",
"laws.bahamas.gov.bs" : u"Government of the Bahamas",
"www.skupstinabd.ba" : u"Government of Bosnia and Herzegovina",
"http://president.gov.by" : u"Government of Belarus",
"pravo.by" : u"Government of Belarus",
"www.bermudalaws.bm" : u"Government of Bermuda",
"www.planalto.gov.br" : u"Government of Brazil",
"www.senado.gov.br" : u"Government of Brazil",
"www.caricomlaw.org" : u"Legal Services of the Caribbean Community",
"www.agc.gov.bn" : u"Government of the Sultanate of Brunei Darussalam",
"www.nab.gov.bt" : u"Royal Government of Bhutan",
"www.laws.gov.bw" : u"Government of Botswana",
"www.gc.ca" : u"Government of Canada",
"www.admin.ch" : u"Government of Switzerland",
"www.gov.cn" : u"Government of the People's Republic of China",
"www.chinaacc.com" : u"Government of the People's Republic of China",
"www.chinalawedu.com" : u"Government of the People's Republic of China",
"www.leganet.cd" : u"Partenariat pour le développement social",
"www.paclii.org" : u"Pacific Islands Legal Information Institute",
"www.imprenta.gov.co" : u"Government of Colombia",
"www.legis-palop.org" : u"Projecto Apoio ao Desenvolvimento dos Sistemas Judiciários",
"www.gaceta.go.cr" : u"Government of Costa Rica",
"www.imprentanacional.go.cr" : u"Government of Costa Rica",
"www.gacetaoficial.cu" : u"Government of Cuba",
"www.gaceteaoficial.cu" : u"Government of Cuba",
"www.gov.ky" : u"Government of the Cayman Islands",
"www.cylaw.org" : u"CyLaw",
"www.bgbl.de" : u"Government of Germany",
"www.retsinfo.dk" : u"Government of Denmark",
"www.retsinformation.dk" : u"Government of Denmark",
"www.suprema.gov.do" : u"Government of the Dominican Republic",
"www.joradp.dz" : u"Government of Algeria",
"www.registroficial.gob.ec" : u"Government of Ecuador",
"www.registroficial.gob.ecu" : u"Government of Ecuador",
"www.registroficial.gov.ec" : u"Government of Ecuador",
"www.tribunalconstitucional.gov.ec" : u"Government of Ecuador",
"www.boe.es" : u"Government of Spain",
"www.ethiopar.net" : u"Government of Ethiopia",
"www.finlex.fi" : u"FINLEX",
"www.fiji.gov.fj" : u"Government of Fiji",
"www.paclii.org" : u"Pacific Islands Legal Information Institute",
"www.legifrance.gouv.fr" : u"Government of France",
"www.opsi.gov.uk" : u"Government of the United Kingdom",
"www.scotland-legislation.hmso.gov.uk" : u"Government of the United Kingdom",
"www.epa.gov.gh" : u"Environmental Protection Agency Ghana",
"www.nawasa.gd" : u"Grenada National Water & Sewerage Authority",
"www.guamcourts.org" : u"Government of Guam",
"www.guamlegislature.com" : u"Government of Guam",
"legalaffairs.gov.gy" : u"Government of Guyana",
"www.legalaffairs.gov.gy" : u"Government of Guyana",
"www.hah.hr" : u"Government of Croatia",
"www.nn.hr" : u"Government of Croatia",
"http://njt.hu/" : u"Government of Hungary",
"www.magyarorszag.hu" : u"Government of Hungary",
"http://www.indolaw.org/" : u"Government of Indonesia",
"www.downtoearth-indonesia.org" : u"Government of Indonesia",
"www.indolaw.org" : u"Government of Indonesia",
"india.gov.in" : u"Government of India",
"www.commonlii.org" : u"Government of India",
"www.india.gov.in" : u"Government of India",
"www.agriculture.gov.ie" : u"Government of Ireland",
"www.bailii.org" : u"British and Irish Legal Information Institute",
"www.bailli.org" : u"British and Irish Legal Information Institute",
"www.irishstatutebook.ie" : u"Government of Ireland",
"www.oireachtas.ie" : u"Government of Ireland",
"http://rc.majlis.ir/fa" : u"Government of Ireland",
"http://www.dastour.ir/" : u"Government of Ireland",
"www.japarliament.gov.jm" : u"Government of Jamaica",
"www.jerseylaw.je" : u"Jersey Legal Information Board",
"www.japaneselawtranslation.go.jp" : u"Government of Japan",
"http://adilet.zan.kz" : u"Government of the Republic of Kazakhstan",
"http://www.government.kz" : u"Government of the Republic of Kazakhstan",
"zher.kz" : u"Government of the Republic of Kazakhstan",
"kenyalaw.org" : u"Kenya Law",
"www.kenyalaw.org" : u"Kenya Law",
"http://cbd.minjust.gov.kg" : u"Government of Kyrgyzstan",
"www.minjust.gov.kg" : u"Government of Kyrgyzstan",
"www.klri.re.kr" : u"Government of South Korea",
"http://erml.moe.gov.lb" : u"Government of Lebanon",
"http://jo.pcm.gov.lb" : u"Government of Lebanon",
"www.moe.gov.lb" : u"Government of Lebanon",
"fornis.net" : u"Forestry Research Network of Sub-Saharan Africa",
"legislature.gov.lr" : u"Government of Republic of Liberia",
"www.fda.gov.lr" : u"Forestry Development Authority (FDA) - Republic of Liberia",
"www.gesetze.li" : u"Government of Liechtenstein",
"www.commonlii.org" : u"Commonwealth Legal Information Institute",
"www.documents.gov.lk" : u"Government of Sri Lanka",
"www.legilux.lu" : u"Government of Luxembourg",
"http://www.sgg.gov.ma/Législation/BulletinsOfficiels.aspx" : u"Government of Morocco",
"www.sgg.gov.ma" : u"Government of Morocco",
"http://www.cnlegis.gov.mg/" : u"Government of Madagascar",
"www.assemblee-nationale.mg" : u"Government of Madagascar",
"dof.gob.mx/" : u"Government of Mexico",
"www.vanuatu.usp.ac.fj" : u"University of the South Pacific",
"http://mali.eregulations.org" : u"Government of Mali",
"http://www.journal-officiel.ml/1/56/fr/journal-officiel/le-journal-officiel.html" : u"Government of Mali",
"http://www.sgg-mali.ml/JO/2016/mali-jo-2016-16.pdf" : u"Government of Mali",
"www.journal-officiel.ml/1/56/fr/journal-officiel/le-journal-officiel.html" : u"Government of Mali",
"www.sgg-mali.ml" : u"Government of Mali",
"www.doi.gov.mt" : u"Government of Malta",
"www.mrt.gov.me" : u"Government of Montenegro",
"www.cnmileg.gov.mp" : u"Government of the Northern Mariana Islands",
"www.legis-palop.org" : u"Projecto Apoio ao Desenvolvimento dos Sistemas Judiciários",
"http://www.rimgerddes.org/" : u"Groupe d'Etudes et de Recherches sur la Démocratie et le Développement Economique et Social en Mauritanie",
"agc.gov.ms" : u"Government of Montserrat",
"montserrat.worldlegislation.com" : u"Government of Montserrat",
"attorneygeneral.govmu.org" : u"Government of Mauritius",
"mauritiusassembly.govmu.org" : u"Government of Mauritius",
"supremecourt.intnet.mu" : u"Government of Mauritius",
"www1.gov.mu" : u"Government of Mauritius",
"www.africanlawlibrary.net" : u"African Law Library",
"www.gov.mu" : u"Government of Mauritius",
"www.malawilii.org" : u"Malawi Legal Information Institute",
"www.cljlaw.com" : u"CLJ Legal Network Malaysia",
"www.lac.org.na" : u"Legal Assistance Center Namibia",
"www.parliament.gov.na" : u"Government of the Republic of Namibia",
"www.saflii.org" : u"South African Legal Information Institute",
"www.droit-afrique.com" : u"Droit-Afrique",
"www.aksgonline.com" : u"Government of Nigeria",
"www.oyohouseofassembly.org" : u"Government of Nigeria",
"www.placng.org" : u"Government of Nigeria",
"www.asamblea.gob.ni" : u"Government of Nicaragua",
"www.lagaceta.gob.ni" : u"Government of Nicaragua",
"www.vanuatu.usp.ac.fj" : u"University of the South Pacific",
"www.overheid.nl" : u"Government of the Netherlands",
"www.lovdata.no" : u"Government of Norway",
"www.moad.gov.np" : u"Government of Nepal",
"www.legislation.govt.nz" : u"Government of New Zealand",
"www.asamblea.gob.pa" : u"Government of Panama",
"www.gaceetaoficial.gob.pa" : u"Government of Panama",
"www.gacetaoficial.gob.pa" : u"Government of Panama",
"www.minjus.gob.pe" : u"Government of Peru",
"www.chanrobles.com" : u"<NAME>",
"www.denr.gov.ph" : u"Government of Philippines",
"www.gov.ph" : u"Government of Philippines",
"www.paclii.org" : u"Pacific Islands Legal Information Institute",
"www.pacli.org" : u"Pacific Islands Legal Information Institute",
"www.paclii.org" : u"Pacific Islands Legal Information Institute",
"www.lexjuris.com" : u"Government of Puerto Rico",
"www.dre.pt" : u"Government of Portugal",
"dzr.nnov.ru" : u"Government of Russia",
"http://7law.info" : u"Government of Russia",
"http://admlip.ru" : u"Government of Russia",
"http://arhangelsklaw.ru" : u"Government of Russia",
"http://docipedia.ru" : u"Government of Russia",
"http://docs.cntd.ru" : u"Government of Russia",
"http://info-ecology.ru" | |
"""
The context object for evaluating script code. Most of the implementation
of TworldPy lives in the EvalPropContext module.
"""
import re
import random
import ast
import operator
import itertools
import tornado.gen
import bson
from bson.objectid import ObjectId
import motor
import twcommon.misc
from twcommon.excepts import MessageException, ErrorMessageException
from twcommon.excepts import SymbolError, ExecRunawayException, ExecSandboxException
from twcommon.excepts import ReturnException, LoopBodyException, BreakException, ContinueException
import two.task
# Options for evaluating a thingy -- what kind of thingy is it?
EVALTYPE_SYMBOL = 0 # name of a symbol to look up
EVALTYPE_RAW = 1 # raw value
EVALTYPE_CODE = 2 # string containing code
EVALTYPE_TEXT = 3 # string containing marked-up text
EVALTYPE_GENTEXT = 4 # string containing gentext code
# Bitmask capability flags
EVALCAP_RUN = 0x01 # do anything at all
EVALCAP_DATAMOD = 0x02 # cause data changes
EVALCAP_MOVE = 0x04 # move the player
EVALCAP_ALL = 0x07 # all of above
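# Illustrative combination of the capability bits above: a context created with
# forbid=EVALCAP_MOVE keeps EVALCAP_RUN|EVALCAP_DATAMOD (0x03) and cannot move
# the player; EvalPropContext.__init__ below additionally strips DATAMOD and
# MOVE whenever the level is below LEVEL_EXECUTE.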
# Evaluation levels
LEVEL_EXECUTE = 5
LEVEL_DISPSPECIAL = 4
LEVEL_DISPLAY = 3
LEVEL_MESSAGE = 2
LEVEL_FLAT = 1
LEVEL_RAW = 0
# Regexp: Check whether a string starts with a vowel.
re_vowelstart = re.compile('^[aeiou]', re.IGNORECASE)
class EvalPropFrame:
"""One stack frame in the EvalPropContext. Note that depth starts at 1.
The locals map, if provided, is used "live" (not copied).
We add a stack frame for every function call, {code} invocation, and
{text} interpolation. Nested sub-contexts have their own stack
list, so we don't create a frame in that case, but the sub-context
parentdepth field will be one higher than our total depth.
"""
def __init__(self, depth, locals=None):
self.depth = depth
if locals is None:
self.locals = {}
else:
self.locals = locals
def __repr__(self):
return '<EvalPropFrame depth=%d>' % (self.depth,)
class EvalPropContext(object):
"""EvalPropContext is a context for evaluating one symbol, piece of code,
or piece of marked-up text, during a task.
("EvalPropContext" is a misnomer at this point. The item being evaluated
may not be a property.)
When setting up an EvalPropContext you must provide a LocContext, which
is the identity and location of the player who is the center of the
action. (Sorry about all the "context"s.) Or you can provide an existing
EvalPropContext to clone.
"""
# We'll push contexts on here as we nest them. (It is occasionally
# necessary to find the "current" context without a handy reference.)
context_stack = []
@staticmethod
def get_current_context():
if not EvalPropContext.context_stack:
raise Exception('get_current_context: no current context!')
return EvalPropContext.context_stack[-1]
# Used as a long-running counter in build_action_key.
link_code_counter = 0
@staticmethod
def build_action_key():
"""Return a random (hex digit) string which will never repeat.
Okay, it is vastly unlikely to repeat.
"""
EvalPropContext.link_code_counter = EvalPropContext.link_code_counter + 1
return str(EvalPropContext.link_code_counter) + hex(random.getrandbits(32))[2:]
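# Illustrative output of build_action_key(): a decimal counter prefix followed by
# the random 32-bit value in hex, e.g. '1' + '9f3a07bc' -> '19f3a07bc' (hex()
# drops leading zeroes, so the random suffix may be shorter than eight digits).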
def __init__(self, task, parent=None, loctx=None, parentdepth=0, forbid=None, level=LEVEL_MESSAGE):
"""Caller must provide either parent (an EvalPropContext) or
a loctx and parentdepth. If there is an effective parent context,
parentdepth should be ctx.parentdepth+ctx.depth+1. If not, leave
it as zero.
The forbid argument is a bitmask of EVALCAPs which this context
cannot do. The parent's restrictions are also inherited.
### A way to pass in argument bindings?
"""
self.task = task
self.app = self.task.app
assert (parent or loctx)
assert not (parent and loctx)
if parent is not None:
assert self.task == parent.task
self.parentdepth = parent.parentdepth + parent.depth + 1
self.loctx = parent.loctx
self.uid = parent.uid
self.caps = parent.caps
elif loctx is not None:
self.parentdepth = parentdepth
self.loctx = loctx
self.uid = loctx.uid
self.caps = EVALCAP_ALL
# What kind of evaluation is going on.
self.level = level
# Any caps modifications.
if forbid:
self.caps &= (~forbid)
if level < LEVEL_EXECUTE:
self.caps &= (~(EVALCAP_DATAMOD|EVALCAP_MOVE))
# Execution context state.
self.frame = None
self.frames = None
self.accum = None
self.cooked = False
self.textstate = RunOnNode
# Text generation state.
self.gentexting = False
self.genseed = None
self.gencount = None
self.genparams = None
# Accumulating the state dependencies and action keys for the
# client.
self.linktargets = None
self.dependencies = None
@property
def depth(self):
"""Shortcut implementation of ctx.depth.
"""
if self.frame:
assert len(self.frames) == self.frame.depth
else:
assert len(self.frames) == 0
return len(self.frames)
@depth.setter
def depth(self, val):
raise Exception('EvalPropContext.depth is immutable')
def updateacdepends(self, ctx):
"""Merge in the actions and dependencies from a subcontext.
"""
assert self.accum is not None, 'EvalPropContext.accum should not be None here'
if ctx.linktargets:
self.linktargets.update(ctx.linktargets)
if ctx.dependencies:
self.dependencies.update(ctx.dependencies)
@tornado.gen.coroutine
def eval(self, key, evaltype=EVALTYPE_SYMBOL, locals=None):
"""Look up and return a symbol, in this context. If EVALTYPE_TEXT,
the argument is treated as an already-looked-up {text} value
(a string with interpolations). If EVALTYPE_CODE, the argument
is treated as a snippet of {code}. If EVALTYPE_RAW, the argument
must be a dict object with a meaningful type field.
The locals (if provided) form the initial locals dict for any
invoked stack frame. These currently must be symbols beginning
with underscore. ###generalize for function {code} args?
The locals dict is used "live", not copied.
This is the top-level entry point to Doing Stuff in this context.
After the call, dependencies will contain the symbol (and any
others checked when putting together the result).
The result type depends on the level:
RAW: Python object direct from Mongo.
FLAT: A string. ({text} objects produce strings directly, without
interpolation or interpretation.)
MESSAGE: A string. ({text} objects produce strings from the flattened,
de-styled, de-linked description.)
DISPLAY: A string or, for {text} objects, a description.
DISPSPECIAL: A string; for {text}, a description; for other {}
objects, special client objects. (Used only for focus.)
EXECUTE: The returned type or, for {text} objects, a description.
(A description is an array of strings and tag-arrays, JSONable and
passable to the client.)
For the {text} case, this also accumulates a set of link-targets
which are found in links in the description. Dependencies are
also accumulated.
"""
self.task.tick()
if not (self.caps & EVALCAP_RUN):
raise Exception('EvalPropContext does not have permissions to do anything!')
# Initialize per-invocation fields.
self.accum = None
self.cooked = False
self.textstate = RunOnNode
self.linktargets = None
self.dependencies = set()
self.wasspecial = False
# These will be filled in if and when a gentext starts.
self.gentexting = False
self.genseed = None
self.gencount = None
self.genparams = None
# We start with no frames and a depth of zero. (When we add frames,
# the self.frame will always be the current stack frame, which is
# the last entry of self.frames.)
self.frame = None
self.frames = []
try:
EvalPropContext.context_stack.append(self)
res = yield self.evalobj(key, evaltype=evaltype, locals=locals)
finally:
assert (self.depth == 0) and (self.frame is None), 'EvalPropContext did not pop all the way!'
assert (EvalPropContext.context_stack[-1] is self), 'EvalPropContext.context_stack did not nest properly!'
EvalPropContext.context_stack.pop()
# At this point, if the value was a {text}, the accum will contain
# the desired description.
if (self.level == LEVEL_RAW):
return res
if (self.level == LEVEL_FLAT):
if twcommon.misc.is_typed_dict(res, 'text'):
res = res.get('text', '')
return str_or_null(res)
if (self.level == LEVEL_MESSAGE):
if self.accum:
# Skip all styles, links, etc. Just paste together strings.
return ''.join([ val for val in self.accum if type(val) is str ]) + str_or_null(res)
return str_or_null(res)
if (self.level == LEVEL_DISPLAY):
if self.accum:
if not (res is None or res == ''):
self.accum.append(str(res))
optimize_accum(self.accum)
return self.accum
return str_or_null(res)
if (self.level == LEVEL_DISPSPECIAL):
if self.wasspecial:
return res
if self.accum:
if not (res is None or res == ''):
self.accum.append(str(res))
optimize_accum(self.accum)
return self.accum
return str_or_null(res)
if (self.level == LEVEL_EXECUTE):
if self.accum:
if not (res is None or res == ''):
self.accum.append(str(res))
optimize_accum(self.accum)
return self.accum
return res
raise Exception('unrecognized eval level: %d' % (self.level,))
@tornado.gen.coroutine
def evalobj(self, key, evaltype=EVALTYPE_SYMBOL, symbol=None, locals=None):
"""Look up a symbol, adding it to the accumulated content. If the
result contains interpolated strings, this calls itself recursively.
For EVALTYPE_SYMBOL, the key is the symbol (and the symbol argument
is ignored). For other types, the symbol may be provided as handy
context.
Returns an object, or fills out a description array and returns that.
(The latter only at MESSAGE/DISPLAY/DISPSPECIAL/EXECUTE level.)
The top-level call to evalobj() may set up the description accumulator
and linktargets. Lower-level calls use the existing ones.
A call to here will increment the stack depth *if* it goes into a
# code/text interpolation. For static
if self.format == 1:
print(" format = %d" % (self.format))
gid = 1
for r in self.Range1:
names = []
for _ in range(r.nLeft + 1):
names.append(self.glyph_names[gid])
gid += 1
print(f" ({r.first}, {r.nLeft}): {', '.join(names)}")
elif self.format == 2:
print(" format = %d" % (self.format))
gid = 1
for r in self.Range2:
names = []
for _ in range(r.nLeft + 1):
names.append(self.glyph_names[gid])
gid += 1
print(f" ({r.first}, {r.nLeft}): {', '.join(names)}")
else:
raise
class CffCharsetsRange1(object):
def __init__(self, buf):
self.buf = self.parse(buf)
def parse(self, buf):
self.first, buf = ValUtil.ushortpop(buf)
self.nLeft, buf = ValUtil.ucharpop(buf)
return buf
def __str__(self):
return f"first={self.first} nLeft={self.nLeft}"
class CffCharsetsRange2(CffCharsetsRange1):
def __init__(self, buf):
super().__init__(buf)
def parse(self, buf):
self.first, buf = ValUtil.ushortpop(buf)
self.nLeft, buf = ValUtil.ushortpop(buf)
return buf
# 5176.CFF.pdf p.10
class CffDecorder(object):
@staticmethod
def decodeInteger(buf, b0 = None):
if b0 is None:
b0, buf = ValUtil.ucharpop(buf)
if 32 <= b0 <= 246:
return b0-139, buf
else:
b1, buf = ValUtil.ucharpop(buf)
if 247 <= b0 <= 250:
return (b0-247)*256+b1+108, buf
elif 251 <= b0 <= 254:
return -(b0-251)*256-b1-108, buf
else:
b2, buf = ValUtil.ucharpop(buf)
if b0 == 28:
return ValUtil.signed(b1)<<8|b2, buf
elif b0 == 29:
b3, buf = ValUtil.ucharpop(buf)
b4, buf = ValUtil.ucharpop(buf)
return ValUtil.signed(b1)<<24|b2<<16|b3<<8|b4, buf
else:
raise
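# Illustrative decodings for the operand ranges handled above (the values follow
# directly from the arithmetic in decodeInteger):
#   b0 = 139                 -> 0      (32..246 encode b0 - 139, i.e. -107..107)
#   b0 = 247, b1 = 0         -> 108    ((b0-247)*256 + b1 + 108)
#   b0 = 251, b1 = 0         -> -108   (-(b0-251)*256 - b1 - 108)
#   b0 = 28,  b1, b2         -> signed 16-bit value (b1 << 8 | b2)
#   b0 = 29,  b1, b2, b3, b4 -> signed 32-bit value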
# 5176.CFF.pdf 19 FDSelect (p.28)
class CffFDSelect(object):
def __init__(self, buf, nGlyphs):
self.nGlyphs = nGlyphs
self.buf = self.parse(buf)
def parse(self, buf):
format = ValUtil.uchar(buf)
if format == 0:
self.select = FDSelect0(buf, self.nGlyphs)
elif format == 3:
self.select = FDSelect3(buf)
else:
self.select = FDSelectDummy(buf)
def show(self):
self.select.show()
class FDSelectDummy(object):
def __init__(self, buf):
self.buf = self.parse(buf)
def parse(self, buf):
self.format, buf = ValUtil.ucharpop(buf)
return buf
def show(self):
print(" [FDSelect%d]" % (self.format))
print(" format = %d" % (self.format))
class FDSelect0(object):
def __init__(self, buf, nGlyphs):
self.nGlyphs = nGlyphs
self.buf = self.parse(buf)
def parse(self, buf):
self.format, buf = ValUtil.ucharpop(buf)
self.fds, buf = ValUtil.bytespop(buf, self.nGlyphs)
return buf
def show(self):
print(" [FDSelect0]")
print(" format = %d" % (self.format))
print(" fds = {0}".format(self.fds))
class FDSelect3(object):
def __init__(self, buf):
self.buf = self.parse(buf)
def parse(self, buf):
self.format, buf = ValUtil.ucharpop(buf)
self.nRanges, buf = ValUtil.ushortpop(buf)
self.Range3 = []
for i in range(self.nRanges):
ran3 = CffFontSelectRange3(buf)
self.Range3.append(ran3)
buf = ran3.buf
self.sentinel, buf = ValUtil.ushortpop(buf)
return buf
def show(self):
print(" [FDSelect3]")
print(" format = %d" % (self.format))
print(" nRanges = %d" % (self.nRanges))
print(" Range3 = {0}".format([(r.first, r.fd) for r in self.Range3]))
print(" sentinel = %d" % (self.sentinel))
class CffFontSelectRange3(object):
def __init__(self, buf):
self.buf = self.parse(buf)
def parse(self, buf):
self.first, buf = ValUtil.ushortpop(buf)
self.fd, buf = ValUtil.ucharpop(buf)
return buf
class FontDictIndex(CffINDEXData):
def __init__(self, buf):
self.fontDict = []
super(FontDictIndex, self).__init__(buf, "Font DICT INDEX")
def parse(self, buf):
buf = super(FontDictIndex, self).parse(buf)
self.fontDict = [CffDictData(data) for data in self.data]
return buf
def show(self, stringIndex = None):
super(FontDictIndex, self).show()
if self.count != 0:
for fontDict in self.fontDict:
print(" -----")
print(" [Font DICT]")
for op, args in fontDict.items():
if stringIndex is None:
print(" {0} = {1}".format(TopDictOp.to_s(op), args))
else:
if op == TopDictOp.FontName:
s = stringIndex.data[args[0] - StdStr.nStdStr] if args[0] >= StdStr.nStdStr else StdStr.to_s(args[0])
print(" {0} = {1} << {2} >>".format(TopDictOp.to_s(op), args, s))
else:
print(" {0} = {1}".format(TopDictOp.to_s(op), args))
# CFF
##################################################
# GPOS
# https://www.microsoft.com/typography/otspec/gpos.htm
# http://partners.adobe.com/public/developer/opentype/index_table_formats2.html
class GposTable(Table):
def __init__(self, buf, tag):
super(GposTable, self).__init__(buf, tag)
def parse(self, buf):
super(GposTable, self).parse(buf)
self.header = GposHeader(buf)
self.scriptList = ScriptList(buf[self.header.ScriptList:])
def show(self):
print("[Table(%s)]" % (self.tag))
self.header.show()
self.scriptList.show()
class GposHeader(object):
def __init__(self, buf):
self.buf = self.parse(buf)
def parse(self, buf):
self.Version, buf = OTData.Fixed(buf)
self.ScriptList, buf = OTData.Offset(buf)
self.FeatureList, buf = OTData.Offset(buf)
self.LookupList, buf = OTData.Offset(buf)
return buf
def show(self):
print(" [GposHeader]")
print(" Version = 0x%08x" % (self.Version))
print(" ScriptList = %d" % (self.ScriptList))
print(" FeatureList = %d" % (self.FeatureList))
print(" LookupList = %d" % (self.LookupList))
# https://www.microsoft.com/typography/otspec/chapter2.htm
# http://partners.adobe.com/public/developer/opentype/index_table_formats.html
class ScriptList(object):
def __init__(self, buf):
self.buf_head = buf
self.parse(buf)
def parse(self, buf):
self.ScriptCount, buf = ValUtil.ushortpop(buf)
self.ScriptRecord = []
for i in range(self.ScriptCount):
record = ScriptRecord(buf, self.buf_head)
buf = record.buf
self.ScriptRecord.append(record)
def show(self):
print(" [ScriptList]")
print(" ScriptCount = %d" % (self.ScriptCount))
for record in self.ScriptRecord:
record.show()
class ScriptRecord(object):
def __init__(self, buf, scriptListHead = None):
self.buf = self.parse(buf, scriptListHead)
def parse(self, buf, scriptListHead = None):
self.ScriptTag, buf = OTData.Tag(buf)
self.Script, buf = OTData.Offset(buf)
self.ScriptTable = None
if scriptListHead:
self.ScriptTable = ScriptTable(scriptListHead[self.Script:])
return buf
def show(self):
print(" [ScriptRecord]")
print(" ScriptTag = %s" % (self.ScriptTag))
print(" Script = %d" % (self.Script))
if self.ScriptTable:
self.ScriptTable.show()
class ScriptTable(object):
def __init__(self, buf):
self.buf_head = buf
self.parse(buf)
def parse(self, buf):
self.DefaultLangSys, buf = OTData.Offset(buf)
self.LangSysCount, buf = ValUtil.ushortpop(buf)
self.DefaultLangSysTable = None
if not OTData.isNullOffset(self.DefaultLangSys):
self.DefaultLangSysTable = LangSysTable(self.buf_head[self.DefaultLangSys:])
self.LangSysTable = []
for i in range(self.LangSysCount):
langSysTable = LangSysRecord(buf, self.buf_head)
buf = langSysTable.buf
self.LangSysTable.append(langSysTable)
return buf
def show(self):
print(" [ScriptTable]")
print(" DefaultLangSys = %s" % (self.DefaultLangSys))
print(" LangSysCount = %d" % (self.LangSysCount))
if self.DefaultLangSysTable:
print(" [DefaultLangSysTable]")
self.DefaultLangSysTable.show()
for langSysTable in self.LangSysTable:
langSysTable.show()
class LangSysRecord(object):
def __init__(self, buf, scriptTableHead = None):
self.buf = self.parse(buf, scriptTableHead)
def parse(self, buf, scriptTableHead):
self.LangSysTag, buf = OTData.Tag(buf)
self.LangSys, buf = OTData.Offset(buf)
self.LangSysTable = None
if scriptTableHead:
self.LangSysTable = LangSysTable(scriptTableHead[self.LangSys:])
return buf
def show(self):
print(" [LangSysRecord]")
print(" LangSysTag = %s" % (self.LangSysTag))
print(" LangSys = %d" % (self.LangSys))
if self.LangSysTable:
self.LangSysTable.show()
class LangSysTable(object):
def __init__(self, buf):
self.buf = self.parse(buf)
def parse(self, buf):
self.LookupOrder, buf = OTData.Offset(buf)
self.ReqFeatureIndex, buf = ValUtil.ushortpop(buf)
self.FeatureCount, buf = ValUtil.ushortpop(buf)
self.FeatureIndex = []
for i in range(self.FeatureCount):
index, buf = ValUtil.ushortpop(buf)
self.FeatureIndex.append(index)
return buf
def show(self):
print(" [LangSysTable]")
print(" LookupOrder = %d" % (self.LookupOrder))
print(" ReqFeatureIndex = 0x%04x" % (self.ReqFeatureIndex))
print(" FeatureCount = %d" % (self.FeatureCount))
if self.FeatureIndex:
print(" FeatureIndex = {0}".format(", ".join([str(index) for index in self.FeatureIndex])))
# GPOS
##################################################
# GSUB
class GsubTable(Table):
def __init__(self, buf, tag):
super(GsubTable, self).__init__(buf, tag)
def parse(self, buf):
super(GsubTable, self).parse(buf)
def show(self):
print("[Table(%s)]" % (self.tag))
print("%s" % (self.buf))
# GSUB
##################################################
## TTF has a loca table
class LocaTable(Table):
def __init__(self, buf, tag):
super(LocaTable, self).__init__(buf, tag)
def show(self):
print("[Table(%s)]" % (self.tag))
print(" abbreviated...")
def parse(self, buf):
super(LocaTable, self).parse(buf)
##################################################
## TTF has a glyf table
class GlypTable(Table):
def __init__(self, buf, tag):
super(GlypTable, self).__init__(buf, tag)
def show(self):
print("[Table(%s)]" % (self.tag))
print(" abbreviated...")
def parse(self, buf):
super(GlypTable, self).parse(buf)
##################################################
# Debug
class Pen(object):
def __init__(self):
pass
def init(self):
pass
def term(self):
pass
def moveto(self, x, y):
raise MyError("MUST implement")
def closepath(self):
raise MyError("MUST implement")
def lineto(self, x, y):
raise MyError("MUST implement")
def hlineto(self, x):
raise MyError("MUST implement")
def vlineto(self, y):
raise MyError("MUST implement")
def curveto(self, x1, y1, x2, y2, x, y):
raise MyError("MUST implement")
class SvgPen(Pen):
def __init__(self):
super(SvgPen, self).__init__()
self.pos = [0, 0]
self.path = ""
def init(self):
pass
def term(self):
self.path = self.path.rstrip()
print("<path d=\"{0}\" stroke=\"black\" stroke-width=\"1\">".format(self.path))
self.path = ""
def moveto(self, x, y):
self.path += "M {0} {1} ".format(x, y)
self.pos = [x, y]
def closepath(self):
if self.path:
self.path += "Z "
def lineto(self, x, y):
self.path += "L {0} {1} ".format(x, y)
self.pos = [x, y]
def hlineto(self, x):
self.path += "H {0} ".format(x)
self.pos[0] = x
def vlineto(self, y):
self.path += "V {0} ".format(y)
self.pos[1] = y
def curveto(self, x1, y1, x2, y2, x, y):
self.path += "C {0} {1} {2} {3} {4} {5} ".format(x1, y1, x2, y2, x, y)
self.pos = [x, y]
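# Minimal illustrative use of the Pen interface with SvgPen (the coordinates are
# arbitrary):
#   pen = SvgPen()
#   pen.init()
#   pen.moveto(0, 0); pen.hlineto(100); pen.vlineto(100); pen.closepath()
#   pen.term()  # prints <path d="M 0 0 H 100 V 100 Z" stroke="black" stroke-width="1">
# PsPen below implements the same interface, so the same glyph-walking code can
# emit PostScript instead of SVG.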
class PsPen(Pen):
def __init__(self):
super(PsPen, self).__init__()
self.pos = [0, 0]
def init(self):
print("% color -> black")
print("0 setgray")
print("")
def term(self):
print("fill")
print("")
print("showpage")
def moveto(self, x, y):
print("{0} {1} moveto".format(x, y))
self.pos = [x, y]
def closepath(self):
print("closepath")
print("")
def lineto(self, x, y):
print("{0} {1} lineto".format(x, y))
self.pos = [x, y]
def hlineto(self, x):
self.lineto(x, self.pos[1])
def vlineto(self, y):
self.lineto(self.pos[0], y)
def curveto(self, x1, y1, x2, y2, x, y):
print("{0} {1} {2} {3} {4} {5} curveto".format(x1, y1, x2, y2, x, y))
self.pos = [x, y]
# Debug
##################################################
## main class of otfparser
class OtfParser(object):
def __init__(self):
self.__header = None
self.__table_record = []
self.__table = []
self.numberOfHMetrics = 0
self.numOfLongVerMetrics = 0
self.numGlyphs = 0
def parse(self, file):
self.__parse(file)
def show(self):
self.__header.show()
print("--------------------")
i = 0
for tbl_record in self.__table_record:
tbl_record.show()
tbl = self.__table[i].show()
print("--------------------")
i += 1
def __parse(self, file):
# print("---> " + file)
with open(file, "rb") as infile:
bin_buf = infile.read(12)
self.__header = Header(bin_buf)
num_tables = self.__header.get_num_tables()
self.__table_record = self.__create_table_record(infile, num_tables)
for table_record in self.__table_record:
self.__create_table(table_record, infile)
def __create_table_record(self, infile, num_tables):
record = []
| |
"role":
log_data[
"message"
] = "ARN type not supported for generating resource policy changes."
log.debug(log_data)
return extended_request
resource_policy = {"Version": "2012-10-17", "Statement": []}
resource_policy_sha = sha256(
json.dumps(resource_policy, escape_forward_slashes=False).encode()
).hexdigest()
if not arn_parsed.get("resource_path") or not arn_parsed.get("service"):
return extended_request
primary_principal_resource_model = ResourceModel(
arn=principal_arn,
name=arn_parsed["resource_path"].split("/")[-1],
account_id=role_account_id,
resource_type=arn_parsed["service"],
)
auto_generated_resource_policy_changes = []
# Create resource policy stubs for current resources that are used
for policy_change in extended_request.changes.changes:
if policy_change.change_type == "inline_policy":
policy_change.resources = await get_resources_from_policy_change(
policy_change
)
for resource in policy_change.resources:
resource_account_id = await get_resource_account(resource.arn)
if (
resource_account_id != role_account_id
and resource.resource_type != "iam"
and resource.resource_type in supported_resource_policies
):
# Cross account
auto_generated_resource_policy_changes.append(
ResourcePolicyChangeModel(
arn=resource.arn,
policy=PolicyModel(
policy_document=resource_policy,
policy_sha256=resource_policy_sha,
),
change_type="resource_policy",
principal=extended_request.principal,
status=Status.not_applied,
source_change_id=policy_change.id,
id=str(uuid.uuid4()),
resources=[primary_principal_resource_model],
autogenerated=True,
)
)
elif (
resource_account_id != role_account_id
and resource.resource_type == "iam"
):
resource_added = False
for statement in policy_change.policy.policy_document.get(
"Statement", []
):
if resource.arn in statement.get("Resource"):
# check if action includes supported trust policy permissions
statement_actions = statement.get("Action", [])
statement_actions = (
statement_actions
if isinstance(statement_actions, list)
else [statement_actions]
)
for action in statement_actions:
if action in supported_trust_policy_permissions:
# Cross account sts policy
auto_generated_resource_policy_changes.append(
ResourcePolicyChangeModel(
arn=resource.arn,
policy=PolicyModel(
policy_document=resource_policy,
policy_sha256=resource_policy_sha,
),
change_type="sts_resource_policy",
principal=extended_request.principal,
status=Status.not_applied,
source_change_id=policy_change.id,
id=str(uuid.uuid4()),
resources=[
primary_principal_resource_model
],
autogenerated=True,
)
)
resource_added = True
break
if resource_added:
break
extended_request.changes.changes.extend(auto_generated_resource_policy_changes)
if len(auto_generated_resource_policy_changes) > 0:
extended_request.cross_account = True
log_data["message"] = "Finished generating resource policies"
log_data["request"] = extended_request.dict()
log.debug(log_data)
return extended_request
async def validate_inline_policy_change(
change: InlinePolicyChangeModel, user: str, role: ExtendedAwsPrincipalModel
):
log_data: dict = {
"function": f"{__name__}.{sys._getframe().f_code.co_name}",
"user": user,
"principal": change.principal.dict(),
"policy_name": change.policy_name,
"request": change.dict(),
"message": "Validating inline policy change",
}
log.debug(log_data)
if (
await invalid_characters_in_policy(change.policy.policy_document)
or await invalid_characters_in_policy(change.policy_name)
or await invalid_characters_in_policy(change.policy.version)
):
log_data["message"] = "Invalid characters were detected in the policy."
log.error(log_data)
raise InvalidRequestParameter(log_data["message"])
# Can't detach a new policy
if change.new and change.action == Action.detach:
log_data["message"] = "Can't detach an inline policy that is new."
log.error(log_data)
raise InvalidRequestParameter(log_data["message"])
seen_policy_name = False
for existing_policy in role.inline_policies:
# Check if a new policy is being created, ensure that we don't overwrite another policy with same name
if change.new and change.policy_name == existing_policy.get("PolicyName"):
log_data[
"message"
] = f"Inline Policy with the name {change.policy_name} already exists."
log.error(log_data)
raise InvalidRequestParameter(log_data["message"])
# Check if policy being updated is the same as existing policy.
if (
not change.new
and change.policy.policy_document == existing_policy.get("PolicyDocument")
and change.policy_name == existing_policy.get("PolicyName")
and change.action == Action.attach
):
log_data[
"message"
] = f"No changes were found between the updated and existing policy for policy {change.policy_name}."
log.error(log_data)
raise InvalidRequestParameter(log_data["message"])
if change.policy_name == existing_policy.get("PolicyName"):
seen_policy_name = True
# Trying to detach inline policy with name that isn't attached
if change.action == Action.detach and not seen_policy_name:
log_data[
"message"
] = f"An inline policy named '{seen_policy_name}' is not attached, so we cannot remove it"
log.error(log_data)
raise InvalidRequestParameter(log_data["message"])
if change.action == Action.attach and not seen_policy_name and not change.new:
log_data[
"message"
] = f"Inline policy {change.policy_name} not seen but request claims change is not new"
log.error(log_data)
raise InvalidRequestParameter(log_data["message"])
# TODO: check sha in the request (future feature)
# If here, then that means inline policy is validated
async def validate_permissions_boundary_change(
change: PermissionsBoundaryChangeModel, user: str, role: ExtendedAwsPrincipalModel
):
log_data: dict = {
"function": f"{__name__}.{sys._getframe().f_code.co_name}",
"user": user,
"principal": change.principal.dict(),
"request": change.dict(),
"message": "Validating permissions boundary change",
}
log.info(log_data)
policy_name = change.arn.split("/")[-1]
if await invalid_characters_in_policy(policy_name):
log_data["message"] = "Invalid characters were detected in the policy name."
log.error(log_data)
raise InvalidRequestParameter(log_data["message"])
if change.action == Action.attach:
if not role.permissions_boundary:
return
log_data["message"] = (
"A permissions boundary is already attached to this role. "
"Only one permission boundary can be attached to a role."
)
log.error(log_data)
raise InvalidRequestParameter(
"A permissions boundary is already attached to this role. "
"Only one permission boundary can be attached to a role."
)
elif change.action == Action.detach:
# check to make sure permissions boundary is actually attached to the role
if change.arn == role.permissions_boundary.get("PermissionsBoundaryArn"):
return
log_data[
"message"
] = "The Permissions Boundary you are trying to detach is not attached to this role."
log.error(log_data)
raise InvalidRequestParameter(
f"{change.arn} is not attached to this role as a permissions boundary"
)
async def validate_managed_policy_change(
change: ManagedPolicyChangeModel, user: str, role: ExtendedAwsPrincipalModel
):
log_data: dict = {
"function": f"{__name__}.{sys._getframe().f_code.co_name}",
"user": user,
"principal": change.principal.dict(),
"request": change.dict(),
"message": "Validating managed policy change",
}
log.info(log_data)
policy_name = change.arn.split("/")[-1]
if await invalid_characters_in_policy(policy_name):
log_data["message"] = "Invalid characters were detected in the policy name."
log.error(log_data)
raise InvalidRequestParameter(log_data["message"])
if change.action == Action.attach:
# check to make sure managed policy is not already attached
for existing_policy in role.managed_policies:
if change.arn == existing_policy.get("PolicyArn"):
log_data[
"message"
] = "Managed Policy with that ARN already attached to this role."
log.error(log_data)
raise InvalidRequestParameter(
f"{change.arn} already attached to this role"
)
elif change.action == Action.detach:
# check to make sure managed policy is actually attached to role
seen = False
for existing_policy in role.managed_policies:
if change.arn == existing_policy.get("PolicyArn"):
seen = True
break
if not seen:
log_data[
"message"
] = "The Managed Policy you are trying to detach is not attached to this role."
log.error(log_data)
raise InvalidRequestParameter(f"{change.arn} is not attached to this role")
# TODO: check that the policy name matches what the ARN claims
async def validate_managed_policy_resource_change(
change: ManagedPolicyResourceChangeModel,
policy_name: str,
user: str,
managed_policy_resource: Dict,
):
log_data: dict = {
"function": f"{__name__}.{sys._getframe().f_code.co_name}",
"user": user,
"principal": change.principal.dict(),
"request": change.dict(),
"message": "Validating managed policy resource change",
}
log.info(log_data)
if await invalid_characters_in_policy(
policy_name
) or await invalid_characters_in_policy(change.policy.policy_document):
log_data[
"message"
] = "Invalid characters were detected in the policy name or document."
log.error(log_data)
raise InvalidRequestParameter(log_data["message"])
if change.new and managed_policy_resource:
# change is claiming to be a new policy, but it already exists in AWS
log_data["message"] = "Managed policy with that ARN already exists"
log.error(log_data)
raise InvalidRequestParameter(log_data["message"])
elif not change.new and not managed_policy_resource:
# change is claiming to update policy, but it doesn't exist in AWS
log_data["message"] = "Managed policy with that ARN doesn't exist"
log.error(log_data)
raise InvalidRequestParameter(log_data["message"])
if not change.new:
if change.policy.policy_document == managed_policy_resource:
log_data[
"message"
] = "No changes detected between current and proposed policy"
log.error(log_data)
raise InvalidRequestParameter(log_data["message"])
async def validate_resource_tag_change(
change: ResourceTagChangeModel, user: str, role: ExtendedAwsPrincipalModel
):
log_data: dict = {
"function": f"{__name__}.{sys._getframe().f_code.co_name}",
"user": user,
"principal": change.principal.dict(),
"request": change.dict(),
"role": role,
"message": "Validating resource tag change",
}
log.debug(log_data)
# TODO: Add validation here
return
async def validate_assume_role_policy_change(
change: AssumeRolePolicyChangeModel, user: str, role: ExtendedAwsPrincipalModel
):
log_data: dict = {
"function": f"{__name__}.{sys._getframe().f_code.co_name}",
"user": user,
"principal": change.principal.dict(),
"request": change.dict(),
"message": "Validating assume role policy change",
}
log.debug(log_data)
if await invalid_characters_in_policy(
change.policy.policy_document
) or await invalid_characters_in_policy(change.policy.version):
log_data["message"] = "Invalid characters were detected in the policy."
log.error(log_data)
raise InvalidRequestParameter(log_data["message"])
# Check if policy being updated is the same as existing policy.
if change.policy.policy_document == role.assume_role_policy_document:
log_data[
"message"
] = "No changes were found between the updated and existing assume role policy."
log.error(log_data)
raise InvalidRequestParameter(log_data["message"])
async def apply_changes_to_role(
extended_request: ExtendedRequestModel,
response: Union[RequestCreationResponse, PolicyRequestModificationResponseModel],
user: str,
specific_change_id: str = None,
) -> None:
"""
Applies the changes in the request's changes array to a role, in a best-effort manner.
Caution: this method applies changes blindly; it assumes that the changes have already
been validated and authorized before this method is called.
:param extended_request: ExtendedRequestModel
:param user: Str - requester's email address
:param response: RequestCreationResponse
:param specific_change_id: if provided, only this specific change is applied;
otherwise, all supported, non-autogenerated changes are applied
"""
log_data: dict = {
"function": f"{__name__}.{sys._getframe().f_code.co_name}",
"user": user,
"request": extended_request.dict(),
"message": "Applying request changes",
"specific_change_id": specific_change_id,
}
log.info(log_data)
arn_parsed = parse_arn(extended_request.principal.principal_arn)
# Principal ARN must be a role for this function
if arn_parsed["service"] != "iam" or arn_parsed["resource"] not in ["role", "user"]:
log_data[
"message"
] = "Resource not found, or ARN type not supported for inline/managed/assume role policy changes."
log.error(log_data)
response.errors += 1
response.action_results.append(
ActionResult(status="error", message=log_data["message"])
)
return
principal_name = arn_parsed["resource_path"].split("/")[-1]
account_id = await get_resource_account(extended_request.principal.principal_arn)
iam_client = await sync_to_async(boto3_cached_conn)(
"iam",
service_type="client",
account_number=account_id,
region=config.region,
assume_role=config.get("policies.role_name"),
session_name="principal-updater-v2-" + user,
retry_max_attempts=2,
sts_client_kwargs=dict(
region_name=config.region,
endpoint_url=f"https://sts.{config.region}.amazonaws.com",
),
client_kwargs=config.get("boto3.client_kwargs", {}),
)
for change in extended_request.changes.changes:
if change.status == Status.applied:
# This change has already been applied, this can happen in the future when we have a multi-change request
# that an | |
# File: robotframework-ls/src/robotframework_ls/impl/keyword_argument_analysis.py
from robotframework_ls.impl.protocols import (
IKeywordArg,
IRobotToken,
)
from typing import Optional, List, Deque, Iterator, Dict, Union, Sequence
import itertools
from robocorp_ls_core.lsp import Error
from robocorp_ls_core.constants import Null, NULL
class _Match:
def __init__(
self, definition_arg: IKeywordArg, token_definition_id_to_index: Dict[int, int]
):
self._definition_arg = definition_arg
self._token_definition_id_to_index = token_definition_id_to_index
def get_active_parameter_in_definition(self):
return self._token_definition_id_to_index.get(id(self._definition_arg), -1)
class SkipAnalysisControlFlowException(Exception):
pass
class UsageInfoForKeywordArgumentAnalysis:
def __init__(self, node, token_to_report_missing_argument, argument_usage_index=-1):
self.node = node
self._token_to_report_missing_argument = token_to_report_missing_argument
self.argument_usage_index = argument_usage_index
def get_token_to_report_argument_missing(self):
return self._token_to_report_missing_argument
class KeywordArgumentAnalysis:
def __init__(self, keyword_args: Sequence[IKeywordArg]) -> None:
args = self._keyword_args = keyword_args
self.found_star_arg: Optional[IKeywordArg] = None
self.found_keyword_arg: Optional[IKeywordArg] = None
self._star_arg_index = -1
self._keyword_arg_index = -1
for i, arg in enumerate(args):
if arg.is_star_arg:
self.found_star_arg = arg
self._star_arg_index = i
elif arg.is_keyword_arg:
self.found_keyword_arg = arg
self._keyword_arg_index = i
def _compute_active_parameter_fallback(
self,
usage_info_argument_index: int,
usage_info_arg_tokens: Sequence[IRobotToken],
) -> int:
"""
If we didn't have an exact match (because the current call would
probably be inconsistent as the user may be typing it), provide a
fallback which works better is such situations.
"""
from robotframework_ls.impl.text_utilities import is_variable_text
from robotframework_ls.impl.text_utilities import normalize_robot_name
if usage_info_argument_index < 0:
return -1
if usage_info_argument_index >= len(usage_info_arg_tokens):
# This happens when the user is starting to type an argument.
return usage_info_argument_index
active_parameter: int = usage_info_argument_index
if self._keyword_arg_index == -1 and self._star_arg_index >= 0:
if active_parameter >= self._star_arg_index:
return self._star_arg_index
caller_arg_value = usage_info_arg_tokens[active_parameter].value
definition_keyword_args = self._keyword_args
# Now, it's also possible that we're dealing with an assign here... let's
# see if this is the case.
eq: int = caller_arg_value.find("=")
if eq != -1:
name = normalize_robot_name(caller_arg_value[:eq])
for i, keyword_arg in enumerate(definition_keyword_args):
arg_name = keyword_arg.original_arg
if is_variable_text(arg_name):
arg_name = arg_name[2:-1]
arg_name = normalize_robot_name(arg_name)
if name == arg_name:
active_parameter = i
break
else:
# We do NOT have a match (match keyword arg / star arg if present...)
if self._keyword_arg_index >= 0:
active_parameter = self._keyword_arg_index
elif self._star_arg_index >= 0:
active_parameter = self._star_arg_index
else:
# This is actually off (error in call).
active_parameter = -1
else:
saw_eq: bool = False
for arg in usage_info_arg_tokens[:active_parameter]:
saw_eq = "=" in arg.value
if saw_eq:
break
if saw_eq and self._keyword_arg_index >= 0:
return self._keyword_arg_index
# Ok, it does not have an assign; let's inspect the original signature
# to detect where this argument should go (positional arg or
# star arg).
for i, definition_arg in enumerate(definition_keyword_args):
if i == active_parameter:
break
if definition_arg.is_star_arg:
active_parameter = i
break
if definition_arg.is_keyword_arg:
# This is actually off (error in call).
active_parameter = -1
break
return active_parameter
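# Illustrative fallback (the keyword names are hypothetical): for a keyword
# defined as "arg1, arg2=2, **kwargs" and a call token "arg2=10", the eq-branch
# above maps the active parameter to the definition index of arg2 (1); a token
# "other=10" that matches no named argument falls back to the **kwargs index.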
def _iter_args(self, tokens, argument_usage_index: int):
from robot.api import Token
for token in tokens:
if token.type == Token.ARGUMENT:
# In a usage such as `Run Keyword    Some Keyword    Arguments`,
# we want to skip the `Some Keyword` (which is itself an argument).
if argument_usage_index > -1:
argument_usage_index -= 1
continue
if token.value.startswith("&{") or token.value.startswith("@{"):
# All bets are off in this case (it may match anything...)
raise SkipAnalysisControlFlowException()
yield token
def _collect_keyword_usage_errors_and_build_definition_map(
self,
usage_info: UsageInfoForKeywordArgumentAnalysis,
usage_token_id_to_definition_arg_match: Union[Dict[int, _Match], Null] = NULL,
collect_errors=True,
) -> Iterator[Error]:
try:
yield from self._collect_keyword_usage_errors_and_build_definition_map_raises_exc(
usage_info, usage_token_id_to_definition_arg_match, collect_errors
)
except SkipAnalysisControlFlowException:
pass
def _collect_keyword_usage_errors_and_build_definition_map_raises_exc(
self,
usage_info: UsageInfoForKeywordArgumentAnalysis,
usage_token_id_to_definition_arg_match: Union[Dict[int, _Match], Null] = NULL,
collect_errors=True,
) -> Iterator[Error]:
"""
In this function we build the contents of usage_token_id_to_definition_arg_match
and collect the errors (if collect_errors=True).
"""
from robotframework_ls.impl.ast_utils import create_error_from_node
from collections import deque
from robotframework_ls.impl.text_utilities import normalize_robot_name
from robotframework_ls.impl.text_utilities import is_variable_text
# Pre-requisite.
keyword_token = usage_info.get_token_to_report_argument_missing()
if not keyword_token:
return
# deque (initially with all args -- args we match are removed
# as we go forward).
definition_keyword_args_deque: Deque[IKeywordArg] = deque()
# id(arg) -> index in the definition.
token_definition_id_to_index: Dict[int, int] = {}
# Contains all names we can match -> the related keyword arg.
definition_keyword_name_to_arg: Dict[str, IKeywordArg] = {}
# The ones that are matched are filled as we go.
definition_arg_matched: Dict[IKeywordArg, bool] = {}
# Fill our basic structures.
for i, definition_arg in enumerate(self._keyword_args):
definition_keyword_args_deque.append(definition_arg)
token_definition_id_to_index[id(definition_arg)] = i
if definition_arg.is_star_arg:
# Skip not matched by name
continue
if definition_arg.is_keyword_arg:
# Skip not matched by name
continue
arg_name = definition_arg.arg_name
if is_variable_text(arg_name):
arg_name = arg_name[2:-1]
definition_keyword_name_to_arg[
normalize_robot_name(arg_name)
] = definition_arg
tokens_args_to_iterate = self._iter_args(
usage_info.node.tokens, usage_info.argument_usage_index
)
# Fill positional args
for token_arg in tokens_args_to_iterate:
if not definition_keyword_args_deque:
# No more arguments to consume...
# Add it back as it still wasn't consumed (this is an error).
tokens_args_to_iterate = itertools.chain(
iter([token_arg]), tokens_args_to_iterate
)
break
eq_index = token_arg.value.find("=")
if eq_index == -1:
matched_keyword_arg: IKeywordArg = (
definition_keyword_args_deque.popleft()
)
if matched_keyword_arg.is_star_arg:
# Add star arg back as we may keep on matching it.
definition_keyword_args_deque.appendleft(matched_keyword_arg)
usage_token_id_to_definition_arg_match[id(token_arg)] = _Match(
matched_keyword_arg, token_definition_id_to_index
)
continue
if matched_keyword_arg.is_keyword_arg:
if collect_errors:
error = create_error_from_node(
usage_info.node,
f"Unexpected positional argument: {token_arg.value}",
tokens=[token_arg],
)
yield error
# Add it (just because the user may be typing it...)
usage_token_id_to_definition_arg_match[id(token_arg)] = _Match(
matched_keyword_arg, token_definition_id_to_index
)
# Finish as it's inconsistent now.
return
definition_arg_matched[matched_keyword_arg] = True
usage_token_id_to_definition_arg_match[id(token_arg)] = _Match(
matched_keyword_arg, token_definition_id_to_index
)
else:
if self.found_keyword_arg is not None:
# Something with '=' always matches keyword args, even if
# no named args would be matched. Add it back to go to
# the part where we match arguments by name.
tokens_args_to_iterate = itertools.chain(
iter([token_arg]), tokens_args_to_iterate
)
break
name = normalize_robot_name(token_arg.value[:eq_index])
found_definition_arg = definition_keyword_name_to_arg.get(name, None)
if found_definition_arg is not None:
if definition_arg_matched.get(found_definition_arg):
error = create_error_from_node(
usage_info.node,
f"Multiple values for argument: {name}",
tokens=[token_arg],
)
yield error
return
# First with eq (named argument) that matched something. Add
# it back to go to the part where we match arguments by name.
tokens_args_to_iterate = itertools.chain(
iter([token_arg]), tokens_args_to_iterate
)
break
matched_keyword_arg = definition_keyword_args_deque[0]
usage_token_id_to_definition_arg_match[id(token_arg)] = _Match(
matched_keyword_arg, token_definition_id_to_index
)
if matched_keyword_arg.is_star_arg:
# Special-case, if the last thing we have is a star-arg, it'll
# consume everything (even equals) as long as no named arguments
# were matched first.
pass
else:
# Matched some argument (the '=' became a part of the value).
definition_arg_matched[matched_keyword_arg] = True
definition_keyword_args_deque.popleft()
if not definition_keyword_args_deque:
if collect_errors:
# If we have no more args to consume, everything else is an error.
for token_arg in tokens_args_to_iterate:
if collect_errors:
error = create_error_from_node(
usage_info.node,
f"Unexpected argument: {token_arg.value}",
tokens=[token_arg],
)
yield error
return
# Ok, from this point onwards we need to match only by name / stararg / keyword_token arg
# Now, consume all the ones given by name.
for token_arg in tokens_args_to_iterate:
eq_index = token_arg.value.find("=")
if eq_index >= 0:
name = normalize_robot_name(token_arg.value[:eq_index])
found_definition_arg = definition_keyword_name_to_arg.pop(name, None)
if not found_definition_arg:
if self.found_keyword_arg is not None:
usage_token_id_to_definition_arg_match[id(token_arg)] = _Match(
self.found_keyword_arg, token_definition_id_to_index
)
else:
if collect_errors:
error = create_error_from_node(
usage_info.node,
f"Unexpected named argument: {name}",
tokens=[token_arg],
)
yield error
else:
usage_token_id_to_definition_arg_match[id(token_arg)] = _Match(
found_definition_arg, token_definition_id_to_index
)
else:
if collect_errors:
error = create_error_from_node(
usage_info.node,
f"Positional argument not allowed after named arguments: {token_arg.value}",
tokens=[token_arg],
)
yield error
return
if collect_errors:
# To finish, give errors on unmatched arguments.
for (
definition_name,
definition_arg,
) in definition_keyword_name_to_arg.items():
if (
not definition_arg.is_default_value_set()
and not definition_arg_matched.get(definition_arg)
):
error = create_error_from_node(
usage_info.node,
f"Mandatory argument missing: {definition_name}",
tokens=[keyword_token],
)
yield error
# --------------------------------------------------------------- Public API
def compute_active_parameter(
self, usage_info: UsageInfoForKeywordArgumentAnalysis, lineno: int, col: int
) -> int:
token_to_report_argument_missing = (
usage_info.get_token_to_report_argument_missing()
)
if token_to_report_argument_missing.lineno - 1 > lineno or (
token_to_report_argument_missing.lineno - 1 == lineno
and token_to_report_argument_missing.end_col_offset >= col
):
return -1
from robot.api import Token
usage_info_argument_index: int = 0
# We need to find out the current arg/separator.
after_last_arg: List[Token] = []
usage_info_arg_tokens: List[Token] = []
for token in usage_info.node.tokens:
if token.type == Token.ARGUMENT:
usage_info_arg_tokens.append(token)
usage_info_argument_index += 1
del after_last_arg[:]
elif token.type in (Token.SEPARATOR, Token.EOL, Token.EOS):
after_last_arg.append(token)
else:
# Keyword name token
del after_last_arg[:]
if token.lineno - 1 == lineno:
if (token.end_col_offset - 1) >= col:
break
if token.type == Token.ARGUMENT:
usage_info_argument_index -= 1
elif after_last_arg:
# Check if we are in prev/next based on the number of spaces found
# up to the current cursor position.
# i.e.: in `Call arg ` we still need to have an usage_info_argument_index == 0
# i.e.: in `Call arg ` we need to have an usage_info_argument_index == 1
whitespaces_found | |
:meth:`pikepdf.Pdf.get_warnings()` to
retrieve warnings.
attempt_recovery: If True (default), attempt to recover
from PDF parsing errors.
inherit_page_attributes: If True (default), push attributes
set on a group of pages to individual pages
access_mode: If ``.default``, pikepdf will
decide how to access the file. Currently, it will always
select stream access. To attempt memory mapping and fall back
to stream access if memory mapping fails, use ``.mmap``. Use
``.mmap_only`` to require memory mapping or fail
(this is expected to only be useful for testing). Applications
should be prepared to handle the SIGBUS signal on POSIX in
the event that the file is successfully mapped but later goes
away.
allow_overwriting_input: If True, allows calling ``.save()``
to overwrite the input file. This is performed by loading the
entire input file into memory at open time; this will use more
memory and may reduce performance, especially when the opened
file will not be modified.
Raises:
pikepdf.PasswordError: If the password failed to open the
file.
pikepdf.PdfError: If for other reasons we could not open
the file.
TypeError: If the type of ``filename_or_stream`` is not
usable.
FileNotFoundError: If the file was not found.
Note:
When *filename_or_stream* is a stream and the stream is located on a
network, pikepdf assumes that the stream uses buffering and read caches
to achieve reasonable performance. Streams that fetch data over a network
in response to every read or seek request, no matter how small, will
perform poorly. It may be easier to download a PDF from network to
temporary local storage (such as ``io.BytesIO``), manipulate it, and
then re-upload it.
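Example:
    A minimal sketch of reopening a file for in-place saving; the file name
    ``'sample.pdf'`` is illustrative only, not part of this API:
    >>> from pikepdf import Pdf
    >>> pdf = Pdf.open('sample.pdf', allow_overwriting_input=True)
    >>> pdf.save('sample.pdf')  # permitted because the input was buffered at open time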
.. versionchanged:: 3.0
Keyword arguments now mandatory for everything except the first
argument.
"""
if isinstance(filename_or_stream, bytes) and filename_or_stream.startswith(
b'%PDF-'
):
warn(
"It looks like you called with Pdf.open(data) with a bytes-like object "
"containing a PDF. This will probably fail because this function "
"expects a filename or opened file-like object. Instead, please use "
"Pdf.open(BytesIO(data))."
)
tmp_stream, original_filename = None, False
if allow_overwriting_input:
try:
Path(filename_or_stream)
except TypeError as error:
raise ValueError(
'"allow_overwriting_input=True" requires "open" first argument '
'to be a file path'
) from error
original_filename = Path(filename_or_stream)
with open(original_filename, 'rb') as pdf_file:
tmp_stream = BytesIO()
shutil.copyfileobj(pdf_file, tmp_stream)
pdf = Pdf._open(
tmp_stream or filename_or_stream,
password=password,
hex_password=hex_password,
ignore_xref_streams=ignore_xref_streams,
suppress_warnings=suppress_warnings,
attempt_recovery=attempt_recovery,
inherit_page_attributes=inherit_page_attributes,
access_mode=access_mode,
)
pdf._tmp_stream = tmp_stream
pdf._original_filename = original_filename
return pdf
@augments(_ObjectMapping)
class Extend_ObjectMapping:
def get(self, key, default=None) -> Object:
try:
return self[key]
except KeyError:
return default
def keys(self):
return KeysView(self)
def values(self):
return (v for _k, v in self.items())
def check_is_box(obj) -> None:
try:
if obj.is_rectangle:
return
except AttributeError:
pass
try:
pdfobj = Array(obj)
if pdfobj.is_rectangle:
return
except Exception as e:
raise ValueError("object is not a rectangle") from e
raise ValueError("object is not a rectangle")
@augments(Page)
class Extend_Page:
@property
def mediabox(self):
"This page's /MediaBox, in PDF units."
return self._get_mediabox(True)
@mediabox.setter
def mediabox(self, value):
check_is_box(value)
self.obj['/MediaBox'] = value
@property
def cropbox(self):
"""This page's effective /CropBox, in PDF units.
If the /CropBox is not defined, the /MediaBox is returned.
"""
return self._get_cropbox(True)
@cropbox.setter
def cropbox(self, value):
check_is_box(value)
self.obj['/CropBox'] = value
@property
def trimbox(self):
"""This page's effective /TrimBox, in PDF units.
If the /TrimBox is not defined, the /CropBox is returned (and if
/CropBox is not defined, /MediaBox is returned).
"""
return self._get_trimbox(True)
@trimbox.setter
def trimbox(self, value):
check_is_box(value)
self.obj['/TrimBox'] = value
@property
def images(self) -> _ObjectMapping:
"""Return all images associated with this page."""
return self._images
@property
def resources(self) -> Dictionary:
"""Return this page's resources dictionary."""
return self.obj['/Resources']
def add_resource(
self,
res: Object,
res_type: Name,
name: Optional[Name] = None,
*,
prefix: str = '',
replace_existing: bool = True,
) -> Name:
"""Adds a new resource to the page's Resources dictionary.
If the Resources dictionaries do not exist, they will be created.
Args:
self: The object to add to the resources dictionary.
res: The dictionary object to insert into the resources
dictionary.
res_type: Should be one of the following Resource dictionary types:
ExtGState, ColorSpace, Pattern, Shading, XObject, Font, Properties.
name: The name of the object. If omitted, a random name will be
generated with enough randomness to be globally unique.
prefix: A prefix for the name of the object. Allows convenient
namespacing when using random names, e.g. prefix="Im" for images.
Mutually exclusive with name parameter.
replace_existing: If the name already exists in one of the resource
dictionaries, remove it.
Returns:
The name of the object.
Example:
>>> resource_name = pdf.pages[0].add_resource(formxobj, Name.XObject)
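    A prefixed random name can be requested instead; ``image_xobj`` here is a
    hypothetical Form XObject or image created elsewhere:
    >>> pdf.pages[0].add_resource(image_xobj, Name.XObject, prefix='Im')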
.. versionadded:: 2.3
.. versionchanged:: 2.14
If *res* does not belong to the same `Pdf` that owns this page,
a copy of *res* is automatically created and added instead. In previous
versions, it was necessary to handle this case manually.
"""
if Name.Resources not in self.obj:
self.obj.Resources = Dictionary()
elif not isinstance(self.obj.Resources, Dictionary):
raise TypeError("Page /Resources exists but is not a dictionary")
resources = self.obj.Resources
if res_type not in resources:
resources[res_type] = Dictionary()
if name is not None and prefix:
raise ValueError("Must specify one of name= or prefix=")
if name is None:
name = Name.random(prefix=prefix)
for res_dict in resources.as_dict().values():
if not isinstance(res_dict, Dictionary):
continue
if name in res_dict:
if replace_existing:
del res_dict[name]
else:
raise ValueError(f"Name {name} already exists in page /Resources")
resources[res_type][name] = res.with_same_owner_as(self.obj)
return name
def _over_underlay(
self, other, rect: Optional[Rectangle], under: bool = True
) -> None:
formx = None
if isinstance(other, Page):
page = other
formx = other.as_form_xobject()
elif isinstance(other, Dictionary) and other.get(Name.Type) == Name.Page:
page = Page(other)
formx = page.as_form_xobject()
elif (
isinstance(other, Stream)
and other.get(Name.Type) == Name.XObject
and other.get(Name.Subtype) == Name.Form
):
formx = other
if formx is None:
raise TypeError("other object is not something we can convert to FormX")
if rect is None:
rect = Rectangle(page.trimbox)
formx_placed_name = self.add_resource(formx, Name.XObject)
cs = self.calc_form_xobject_placement(formx, formx_placed_name, rect)
self.contents_add(cs, prepend=under)
def add_overlay(self, other: Union[Object, Page], rect: Optional[Rectangle] = None):
"""Overlay another object on this page.
Overlays will be drawn after all previous content, potentially drawing on top
of existing content.
Args:
other: A Page or Form XObject to render as an overlay on top of this
page.
rect: The PDF rectangle (in PDF units) in which to draw the overlay.
If omitted, this page's trimbox, cropbox or mediabox will be used.
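Example:
    A minimal sketch; ``pdf`` and ``watermark`` are assumed to be two
    already opened ``Pdf`` objects:
    >>> from pikepdf import Page
    >>> target = Page(pdf.pages[0])
    >>> target.add_overlay(Page(watermark.pages[0]))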
.. versionadded:: 2.14
"""
return self._over_underlay(other, rect, under=False)
def add_underlay(
self, other: Union[Object, Page], rect: Optional[Rectangle] = None
):
"""Underlay another object beneath this page.
Underlays will be drawn before all other content, so they may be overdrawn
partially or completely.
Args:
other: A Page or Form XObject to render as an underlay underneath this
page.
rect: The PDF rectangle (in PDF units) in which to draw the underlay.
If omitted, this page's MediaBox will be used.
.. versionadded:: 2.14
"""
return self._over_underlay(other, rect, under=True)
def contents_add(self, contents: Union[Stream, bytes], *, prepend: bool = False):
"""Append or prepend to an existing page's content stream.
Args:
contents: An existing content stream to append or prepend.
prepend: Prepend if true, append if false (default).
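Example:
    A minimal sketch; ``extra_cs`` is a hypothetical content stream, e.g. the
    bytes returned by :meth:`calc_form_xobject_placement`:
    >>> page.contents_add(extra_cs, prepend=True)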
.. versionadded:: 2.14
"""
return self._contents_add(contents, prepend=prepend)
def __getattr__(self, name):
return getattr(self.obj, name)
@augment_override_cpp
def __setattr__(self, name, value):
if hasattr(self.__class__, name):
return object.__setattr__(self, name, value)
setattr(self.obj, name, value)
@augment_override_cpp
def __delattr__(self, name):
if hasattr(self.__class__, name):
return object.__delattr__(self, name)
delattr(self.obj, name)
def __getitem__(self, key):
return self.obj[key]
def __setitem__(self, key, value):
self.obj[key] = value
def __delitem__(self, key):
del self.obj[key]
def __contains__(self, key):
return key in self.obj
def __eq__(self, other):
return self.obj == other.obj
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def emplace(self, other: Page, retain=(Name.Parent,)):
return self.obj.emplace(other.obj, retain=retain)
def __repr__(self):
return (
repr(self.obj)
.replace('Dictionary', 'Page', 1)
.replace('(Type="/Page")', '', 1)
)
def _repr_mimebundle_(self, include=None, exclude=None):
data = {}
bundle = {'application/pdf', 'image/png'}
if include:
bundle = {k for k in bundle if k in include}
if exclude:
bundle = {k for k in bundle if k not in exclude}
pagedata = _single_page_pdf(self.obj)
if 'application/pdf' in bundle:
data['application/pdf'] = pagedata
if 'image/png' in bundle:
try:
data['image/png'] = _mudraw(pagedata, 'png')
except (FileNotFoundError, RuntimeError):
pass
return data
assert class_mock is not user_mock
assert_equal("instance", user.method())
assert_equal("class", user2.method())
def test_should_call_with_class_default_attributes(self):
"""Flexmock should not allow mocking class default attributes like
__call__ on an instance.
"""
class WithCall:
def __call__(self, a):
return a
instance = WithCall()
with assert_raises(
FlexmockError,
re.compile(r".+<locals>\.WithCall object at 0x.+> does not have attribute '__call__'"),
):
flexmock(instance).should_call("__call__")
def test_should_call_on_class_mock(self):
class User:
def __init__(self):
self.value = "value"
def foo(self):
return "class"
def bar(self):
return self.value
# Access class-level method
user1 = User()
user2 = User()
flexmock(User).should_call("foo").once()
with assert_raises(
MethodCallError, "foo() expected to be called exactly 1 time, called 0 times"
):
self._tear_down()
flexmock(User).should_call("foo").twice()
assert_equal("class", user1.foo())
assert_equal("class", user2.foo())
# Access instance attributes
flexmock(User).should_call("bar").once()
with assert_raises(
MethodCallError, "bar() expected to be called exactly 1 time, called 0 times"
):
self._tear_down()
flexmock(User).should_call("bar").twice()
assert_equal("value", user1.bar())
assert_equal("value", user2.bar())
# Try resetting the expectation
flexmock(User).should_call("bar").once()
assert_equal("value", user1.bar())
def test_mock_proxied_class(self):
# pylint: disable=not-callable
SomeClassProxy = Proxy(SomeClass)
flexmock(SomeClassProxy).should_receive("class_method").and_return(2).twice()
assert SomeClassProxy().class_method() == 2
assert SomeClassProxy.class_method() == 2
flexmock(SomeClassProxy).should_receive("static_method").and_return(3).twice()
assert SomeClassProxy().static_method() == 3
assert SomeClassProxy.static_method() == 3
instance = SomeClassProxy()
flexmock(instance).should_receive("instance_method").and_return(4).once()
assert instance.instance_method() == 4
def test_mock_proxied_class_with_args(self):
# pylint: disable=not-callable
SomeClassProxy = Proxy(SomeClass)
flexmock(SomeClassProxy).should_receive("class_method_with_args").with_args("a").and_return(
2
).twice()
assert SomeClassProxy().class_method_with_args("a") == 2
assert SomeClassProxy.class_method_with_args("a") == 2
flexmock(SomeClassProxy).should_receive("static_method_with_args").with_args(
"b"
).and_return(3).twice()
assert SomeClassProxy().static_method_with_args("b") == 3
assert SomeClassProxy.static_method_with_args("b") == 3
instance = SomeClassProxy()
flexmock(instance).should_receive("instance_method_with_args").with_args("c").and_return(
4
).once()
assert instance.instance_method_with_args("c") == 4
def test_spy_proxied_class(self):
# pylint: disable=not-callable
SomeClassProxy = Proxy(SomeClass)
flexmock(SomeClassProxy).should_call("class_method").and_return("class_method").twice()
assert SomeClassProxy().class_method() == "class_method"
assert SomeClassProxy.class_method() == "class_method"
flexmock(SomeClassProxy).should_call("static_method").and_return("static_method").twice()
assert SomeClassProxy().static_method() == "static_method"
assert SomeClassProxy.static_method() == "static_method"
instance = SomeClassProxy()
flexmock(instance).should_call("instance_method").and_return("instance_method").once()
assert instance.instance_method() == "instance_method"
def test_spy_proxied_class_with_args(self):
# pylint: disable=not-callable
SomeClassProxy = Proxy(SomeClass)
flexmock(SomeClassProxy).should_call("class_method_with_args").with_args("a").and_return(
"a"
).twice()
assert SomeClassProxy().class_method_with_args("a") == "a"
assert SomeClassProxy.class_method_with_args("a") == "a"
flexmock(SomeClassProxy).should_call("static_method_with_args").with_args("b").and_return(
"b"
).twice()
assert SomeClassProxy().static_method_with_args("b") == "b"
assert SomeClassProxy.static_method_with_args("b") == "b"
instance = SomeClassProxy()
flexmock(instance).should_call("instance_method_with_args").with_args("c").and_return(
"c"
).once()
assert instance.instance_method_with_args("c") == "c"
def test_mock_proxied_derived_class(self):
# pylint: disable=not-callable
DerivedClassProxy = Proxy(DerivedClass)
flexmock(DerivedClassProxy).should_receive("class_method").and_return(2).twice()
assert DerivedClassProxy().class_method() == 2
assert DerivedClassProxy.class_method() == 2
flexmock(DerivedClassProxy).should_receive("static_method").and_return(3).twice()
assert DerivedClassProxy().static_method() == 3
assert DerivedClassProxy.static_method() == 3
instance = DerivedClassProxy()
flexmock(instance).should_receive("instance_method").and_return(4).once()
assert instance.instance_method() == 4
def test_mock_proxied_module_function(self):
# pylint: disable=not-callable
some_module_proxy = Proxy(some_module)
flexmock(some_module_proxy).should_receive("module_function").and_return(3).once()
assert some_module_proxy.module_function() == 3
def test_spy_proxied_module_function(self):
# pylint: disable=not-callable
some_module_proxy = Proxy(some_module)
flexmock(some_module_proxy).should_receive("module_function").and_return(0).once()
assert some_module_proxy.module_function(2, 2) == 0
def test_mock_proxied_derived_class_with_args(self):
# pylint: disable=not-callable
DerivedClassProxy = Proxy(DerivedClass)
flexmock(DerivedClassProxy).should_receive("class_method_with_args").with_args(
"a"
).and_return(2).twice()
assert DerivedClassProxy().class_method_with_args("a") == 2
assert DerivedClassProxy.class_method_with_args("a") == 2
flexmock(DerivedClassProxy).should_receive("static_method_with_args").with_args(
"b"
).and_return(3).twice()
assert DerivedClassProxy().static_method_with_args("b") == 3
assert DerivedClassProxy.static_method_with_args("b") == 3
instance = DerivedClassProxy()
flexmock(instance).should_receive("instance_method_with_args").with_args("c").and_return(
4
).once()
assert instance.instance_method_with_args("c") == 4
def test_spy_proxied_derived_class(self):
# pylint: disable=not-callable
DerivedClassProxy = Proxy(DerivedClass)
flexmock(DerivedClassProxy).should_call("class_method").and_return("class_method").twice()
assert DerivedClassProxy().class_method() == "class_method"
assert DerivedClassProxy.class_method() == "class_method"
flexmock(DerivedClassProxy).should_call("static_method").and_return("static_method").twice()
assert DerivedClassProxy().static_method() == "static_method"
assert DerivedClassProxy.static_method() == "static_method"
instance = DerivedClassProxy()
flexmock(instance).should_call("instance_method").and_return("instance_method").once()
assert instance.instance_method() == "instance_method"
def test_spy_proxied_derived_class_with_args(self):
# pylint: disable=not-callable
DerivedClassProxy = Proxy(DerivedClass)
flexmock(DerivedClassProxy).should_call("class_method_with_args").with_args("a").and_return(
"a"
).twice()
assert DerivedClassProxy().class_method_with_args("a") == "a"
assert DerivedClassProxy.class_method_with_args("a") == "a"
flexmock(DerivedClassProxy).should_call("static_method_with_args").with_args(
"b"
).and_return("b").twice()
assert DerivedClassProxy().static_method_with_args("b") == "b"
assert DerivedClassProxy.static_method_with_args("b") == "b"
instance = DerivedClassProxy()
flexmock(instance).should_call("instance_method_with_args").with_args("c").and_return(
"c"
).once()
assert instance.instance_method_with_args("c") == "c"
def test_with_args_with_instance_method(self):
flexmock(SomeClass).should_receive("instance_method_with_args").with_args("red").once()
flexmock(SomeClass).should_receive("instance_method_with_args").with_args("blue").once()
instance = SomeClass()
instance.instance_method_with_args("red")
instance.instance_method_with_args("blue")
def test_with_args_with_class_method(self):
flexmock(SomeClass).should_receive("class_method_with_args").with_args("red").once()
flexmock(SomeClass).should_receive("class_method_with_args").with_args("blue").once()
SomeClass.class_method_with_args("red")
SomeClass.class_method_with_args("blue")
def test_with_args_with_static_method(self):
flexmock(SomeClass).should_receive("static_method_with_args").with_args("red").once()
flexmock(SomeClass).should_receive("static_method_with_args").with_args("blue").once()
SomeClass.static_method_with_args("red")
SomeClass.static_method_with_args("blue")
def test_mock_class_method_on_derived_class(self):
flexmock(DerivedClass).should_receive("class_method").and_return(2).twice()
assert DerivedClass().class_method() == 2
assert DerivedClass.class_method() == 2
def test_mock_class_method_on_derived_class_after_mocking_base_class(self):
flexmock(SomeClass).should_receive("class_method").and_return(1).once()
assert SomeClass.class_method() == 1
flexmock(DerivedClass).should_receive("class_method").and_return(2).twice()
assert DerivedClass().class_method() == 2
assert DerivedClass.class_method() == 2
def test_mock_static_method_on_derived_class(self):
flexmock(DerivedClass).should_receive("static_method").and_return(4).twice()
assert DerivedClass().static_method() == 4
assert DerivedClass.static_method() == 4
def test_mock_static_method_on_derived_class_after_mocking_base_class(self):
flexmock(SomeClass).should_receive("static_method").and_return(3).once()
assert SomeClass.static_method() == 3
flexmock(DerivedClass).should_receive("static_method").and_return(4).twice()
assert DerivedClass().static_method() == 4
assert DerivedClass.static_method() == 4
def test_mock_class_method_with_args_on_derived_class(self):
flexmock(DerivedClass).should_receive("class_method_with_args").with_args(2).and_return(
3
).twice()
assert DerivedClass().class_method_with_args(2) == 3
assert DerivedClass.class_method_with_args(2) == 3
def test_mock_class_method_with_args_on_derived_class_after_mocking_base_class(self):
flexmock(SomeClass).should_receive("class_method_with_args").with_args(1).and_return(
2
).once()
assert SomeClass.class_method_with_args(1) == 2
flexmock(DerivedClass).should_receive("class_method_with_args").with_args(2).and_return(
3
).twice()
assert DerivedClass().class_method_with_args(2) == 3
assert DerivedClass.class_method_with_args(2) == 3
def test_mock_static_method_with_args_on_derived_class(self):
flexmock(DerivedClass).should_receive("static_method_with_args").with_args(4).and_return(
5
).twice()
assert DerivedClass().static_method_with_args(4) == 5
assert DerivedClass.static_method_with_args(4) == 5
def test_mock_static_method_with_args_on_derived_class_after_mocking_base_class(self):
flexmock(SomeClass).should_receive("static_method_with_args").with_args(2).and_return(
3
).once()
assert SomeClass.static_method_with_args(2) == 3
flexmock(DerivedClass).should_receive("static_method_with_args").with_args(4).and_return(
5
).twice()
assert DerivedClass().static_method_with_args(4) == 5
assert DerivedClass.static_method_with_args(4) == 5
def test_spy_class_method_on_derived_class(self):
flexmock(DerivedClass).should_call("class_method").and_return("class_method").twice()
assert DerivedClass().class_method() == "class_method"
assert DerivedClass.class_method() == "class_method"
def test_spy_class_method_on_derived_class_after_spying_base_class(self):
flexmock(SomeClass).should_call("class_method").and_return("class_method").times(
3
) # TODO: Should be once #80
assert SomeClass.class_method() == "class_method"
flexmock(DerivedClass).should_call("class_method").and_return("class_method").twice()
assert DerivedClass().class_method() == "class_method"
assert DerivedClass.class_method() == "class_method"
def test_spy_static_method_on_derived_class(self):
flexmock(DerivedClass).should_call("static_method").and_return("static_method").twice()
assert DerivedClass().static_method() == "static_method"
assert DerivedClass.static_method() == "static_method"
def test_spy_static_method_on_derived_class_after_spying_base_class(self):
flexmock(SomeClass).should_call("static_method").and_return("static_method").times(
3
) # TODO: Should be once #80
assert SomeClass.static_method() == "static_method"
flexmock(DerivedClass).should_call("static_method").and_return("static_method").twice()
assert DerivedClass().static_method() == "static_method"
assert DerivedClass.static_method() == "static_method"
def test_spy_class_method_with_args_on_derived_class(self):
flexmock(DerivedClass).should_call("class_method_with_args").with_args(2).and_return(2)
assert DerivedClass().class_method_with_args(2) == 2
assert DerivedClass.class_method_with_args(2) == 2
@assert_raises(MethodSignatureError, match=None) # TODO: Should not raise exception #79
def test_spy_class_method_with_args_on_derived_class_after_spying_base_class(self):
flexmock(SomeClass).should_call("class_method_with_args").with_args(1).and_return(1)
assert SomeClass.class_method_with_args(1) == 1
flexmock(DerivedClass).should_call("class_method_with_args").with_args(2).and_return(2)
assert DerivedClass().class_method_with_args(2) == 2
assert DerivedClass.class_method_with_args(2) == 2
def test_spy_static_method_with_args_on_derived_class(self):
flexmock(DerivedClass).should_call("static_method_with_args").with_args(4).and_return(
4
).twice()
assert DerivedClass().static_method_with_args(4) == 4
assert DerivedClass.static_method_with_args(4) == 4
@assert_raises(MethodSignatureError, match=None) # TODO: Should not raise exception #79
def test_spy_static_method_with_args_on_derived_class_after_spying_base_class(self):
flexmock(SomeClass).should_call("static_method_with_args").with_args(2).and_return(2).once()
assert SomeClass.static_method_with_args(2) == 2
flexmock(DerivedClass).should_call("static_method_with_args").with_args(4).and_return(
4
).once() # should be twice
assert DerivedClass().static_method_with_args(4) == 4
assert DerivedClass.static_method_with_args(4) == 4
def test_flexmock_should_not_blow_up_on_should_call_for_class_methods(self):
class User:
@classmethod
def foo(cls):
return "class"
flexmock(User).should_call("foo")
assert_equal("class", User.foo())
def test_flexmock_should_not_blow_up_on_should_call_for_static_methods(self):
class User:
@staticmethod
def foo():
return "static"
flexmock(User).should_call("foo")
assert_equal("static", User.foo())
def test_flexmock_should_mock_new_instances_with_multiple_params(self):
class User:
pass
class Group:
def __init__(self, arg, arg2):
pass
user = User()
flexmock(Group).new_instances(user)
assert user is Group(1, 2)
def test_flexmock_should_revert_new_instances_on_teardown(self):
class User:
pass
class Group:
pass
user = User()
group = Group()
flexmock(Group).new_instances(user)
assert user is Group()
self._tear_down()
assert_equal(group.__class__, Group().__class__)
def test_flexmock_should_cleanup_added_methods_and_attributes(self):
class Group:
pass
group = Group()
flexmock(Group)
assert "should_receive" in Group.__dict__
assert "should_receive" not in group.__dict__
flexmock(group)
assert "should_receive" in group.__dict__
self._tear_down()
for method in UPDATED_ATTRS:
assert method not in Group.__dict__
assert method not in group.__dict__
def test_class_attributes_are_unchanged_after_mocking(self):
class Base:
@classmethod
def class_method(cls):
pass
@staticmethod
def static_method():
pass
def instance_method(self):
pass
class Child(Base):
pass
instance = Base()
base_attrs = list(vars(Base).keys())
instance_attrs = list(vars(instance).keys())
child_attrs = list(vars(Child).keys())
flexmock(Base).should_receive("class_method").once()
flexmock(Base).should_receive("static_method").once()
Base.class_method()
Base.static_method()
flexmock(instance).should_receive("class_method").once()
flexmock(instance).should_receive("static_method").once()
flexmock(instance).should_receive("instance_method").once()
instance.class_method()
instance.static_method()
instance.instance_method()
flexmock(Child).should_receive("class_method").once()
flexmock(Child).should_receive("static_method").once()
Child.class_method()
Child.static_method()
self._tear_down()
assert base_attrs == list(vars(Base).keys())
assert instance_attrs == list(vars(instance).keys())
assert child_attrs == list(vars(Child).keys())
def test_class_attributes_are_unchanged_after_spying(self):
class Base:
@classmethod
def class_method(cls):
pass
@staticmethod
def static_method():
pass
def instance_method(self):
pass
class Child(Base):
pass
instance = Base()
base_attrs = list(vars(Base).keys())
instance_attrs = list(vars(instance).keys())
child_attrs = list(vars(Child).keys())
flexmock(Base).should_call("class_method").times(3) # TODO: should be once #80
flexmock(Base).should_call("static_method").times(3) # TODO: should be once #80
Base.class_method()
Base.static_method()
flexmock(instance).should_call("class_method").once()
flexmock(instance).should_call("static_method").once()
flexmock(instance).should_call("instance_method").once()
instance.class_method()
instance.static_method()
instance.instance_method()
flexmock(Child).should_call("class_method").once()
flexmock(Child).should_call("static_method").once()
Child.class_method()
Child.static_method()
self._tear_down()
assert base_attrs == list(vars(Base).keys())
assert instance_attrs == list(vars(instance).keys())
assert child_attrs == list(vars(Child).keys())
def test_flexmock_should_cleanup_after_exception(self):
class User:
def method2(self):
pass
class Group:
def method1(self):
pass
flexmock(Group)
flexmock(User)
Group.should_receive("method1").once()
User.should_receive("method2").once()
with assert_raises(
MethodCallError, "method1() expected to be called exactly 1 time, called 0 times"
):
self._tear_down()
for method in UPDATED_ATTRS:
assert method not in dir(Group)
for method in UPDATED_ATTRS:
assert method not in dir(User)
def test_flexmock_should_call_respects_matched_expectations(self):
class Group:
def method1(self, arg1, arg2="b"):
return f"{arg1}:{arg2}"
def method2(self, arg):
return arg
group = Group()
flexmock(group).should_call("method1").twice()
assert_equal("a:c", group.method1("a", arg2="c"))
assert_equal("a:b", group.method1("a"))
group.should_call("method2").once().with_args("c")
assert_equal("c", group.method2("c"))
self._tear_down()
def test_flexmock_should_call_respects_unmatched_expectations(self):
class Group:
def method1(self, arg1, arg2="b"):
return f"{arg1}:{arg2}"
def method2(self, a):
pass
group = Group()
flexmock(group).should_call("method1").at_least().once()
with assert_raises(
MethodCallError, "method1() expected to be called at least 1 time, called 0 times"
):
self._tear_down()
flexmock(group)
group.should_call("method2").with_args("a").once()
group.should_receive("method2").with_args("not a")
group.method2("not a")
with assert_raises(
MethodCallError, 'method2(a="a") expected to be called exactly 1 time, called 0 times'
):
self._tear_down()
def test_flexmock_doesnt_error_on_properly_ordered_expectations(self):
class Foo:
def foo(self):
pass
def method1(self, a):
pass
def bar(self):
pass
def baz(self):
pass
foo = Foo()
flexmock(foo).should_receive("foo")
flexmock(foo).should_receive("method1").with_args("a").ordered()
flexmock(foo).should_receive("bar")
flexmock(foo).should_receive("method1").with_args("b").ordered()
flexmock(foo).should_receive("baz")
foo.bar()
foo.method1("a")
foo.method1("b")
foo.baz()
foo.foo()
def test_flexmock_errors_on_improperly_ordered_expectations(self):
class Foo:
def method1(self, a):
pass
foo = Foo()
flexmock(foo)
foo.should_receive("method1").with_args("a").ordered()
foo.should_receive("method1").with_args("b").ordered()
with assert_raises(CallOrderError, 'method1("b") called before method1(a="a")'):
foo.method1("b")
def test_flexmock_should_accept_multiple_return_values(self):
class Foo:
def method1(self):
pass
foo = Foo()
flexmock(foo).should_receive("method1").and_return(1, 5).and_return(2)
assert_equal((1, 5), foo.method1())
assert_equal(2, foo.method1())
assert_equal((1, 5), foo.method1())
assert_equal(2, foo.method1())
def test_flexmock_should_accept_multiple_return_values_with_shortcut(self):
class Foo:
def method1(self):
pass
foo = Foo()
flexmock(foo).should_receive("method1").and_return(1, 2).one_by_one()
assert_equal(1, foo.method1())
assert_equal(2, foo.method1())
assert_equal(1, foo.method1())
assert_equal(2, foo.method1())
def test_flexmock_should_accept_multiple_return_values_with_one_by_one(self):
mocked = flexmock()
flexmock(mocked).should_receive("method1").and_return(2).and_return(3).one_by_one()
assert_equal(2, mocked.method1())
assert_equal(3, mocked.method1())
assert_equal(2, mocked.method1())
assert_equal(3, mocked.method1())
def test_one_by_one_called_before_and_return_multiple_values(self):
mocked = flexmock()
mocked.should_receive("method1").one_by_one().and_return(3, 4)
assert_equal(3, mocked.method1())
assert_equal(4, mocked.method1())
assert_equal(3, mocked.method1())
assert_equal(4, mocked.method1())
def test_one_by_one_called_before_and_return_one_value(self):
mocked = flexmock()
mocked.should_receive("method1").one_by_one().and_return(4).and_return(5)
assert_equal(4, mocked.method1())
assert_equal(5, mocked.method1())
assert_equal(4, mocked.method1())
assert_equal(5, mocked.method1())
def test_flexmock_should_mix_multiple_return_values_with_exceptions(self):
class Foo:
def method1(self):
pass
foo = Foo()
flexmock(foo).should_receive("method1").and_return(1).and_raise(Exception)
assert_equal(1, foo.method1())
with assert_raises(Exception, ""):
foo.method1()
assert_equal(1, foo.method1())
with assert_raises(Exception, ""):
foo.method1()
def test_flexmock_should_match_types_on_multiple_arguments(self):
class Foo:
def method1(self, a, b):
pass
# Source file: video_tools/pympi/Elan.py (repo: happyharrycn/vatic_fpv)
# -*- coding: utf-8 -*-
from xml.etree import cElementTree as etree
import os
import re
import sys
import time
VERSION = '1.69'
class Eaf:
"""Read and write Elan's Eaf files.
.. note:: All times are in milliseconds and can't have decimals.
:var dict adocument: Annotation document TAG entries.
:var list licenses: Licences included in the file of the form:
``(name, url)``.
:var dict header: XML header.
:var list media_descriptors: Linked files, where every file is of the
form: ``{attrib}``.
:var list properties: Properties, where every property is of the form:
``(key, value)``.
:var list linked_file_descriptors: Secondary linked files, where every
linked file is of the form: ``{attrib}``.
:var dict timeslots: Timeslot data of the form: ``{id -> time(ms)}``.
:var dict tiers: Tiers, where every tier is of the form:
``{tier_name -> (aligned_annotations, reference_annotations,
attributes, ordinal)}``,
aligned_annotations of the form: ``[{id -> (begin_ts, end_ts, value,
svg_ref)}]``,
reference annotations of the form: ``[{id -> (reference, value,
previous, svg_ref)}]``.
:var list linguistic_types: Linguistic types, where every type is of the
form: ``{id -> attrib}``.
:var dict locales: Locales, of the form:
``{lancode -> (countrycode, variant)}``.
:var dict languages: Languages, of the form:
``{langid -> (langdef, langlabel)}``.
:var dict constraints: Constraints, every constraint is of the form:
``{stereotype -> description}``.
:var dict controlled_vocabularies: Controlled vocabulary, where every
controlled vocabulary is of the form: ``{id -> (descriptions, entries,
ext_ref)}``,
descriptions of the form: ``[(value, lang_ref, description)]``,
entries of the form: ``{id -> (values, ext_ref)}``,
values of the form: ``[(lang_ref, description, text)]``.
:var list external_refs: External references of the form:
``{id -> (type, value)}``.
:var list lexicon_refs: Lexicon references, where every reference is of
the form: ``{id -> {attribs}}``.
:var dict annotations: Dictionary of annotations of the form:
``{id -> tier}``, this is only used internally.
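Example (a minimal sketch; the tier name, annotation value and output file
name are illustrative, and ``to_file`` is assumed to be defined elsewhere in
this class)::
    eaf = Eaf()
    eaf.add_tier('speaker1')
    eaf.add_annotation('speaker1', 0, 1500, 'hello world')
    eaf.to_file('example.eaf')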
"""
ETYPES = {'iso12620', 'ecv', 'cve_id', 'lexen_id', 'resource_url'}
CONSTRAINTS = {
'Time_Subdivision': "Time subdivision of parent annotation's time inte"
'rval, no time gaps allowed within this interval',
'Symbolic_Subdivision': 'Symbolic subdivision of a parent annotation. '
'Annotations refering to the same parent are ordered',
'Symbolic_Association': '1-1 association with a parent annotation',
'Included_In': 'Time alignable annotations within the parent annotatio'
"n's time interval, gaps are allowed"}
MIMES = {'wav': 'audio/x-wav', 'mpg': 'video/mpeg', 'mpeg': 'video/mpg',
'xml': 'text/xml'}
def __init__(self, file_path=None, author='pympi'):
"""Construct either a new Eaf file or read on from a file/stream.
:param str file_path: Path to read from, - for stdin. If ``None`` an
empty Eaf file will be created.
:param str author: Author of the file.
"""
ctz = -time.altzone if time.localtime(time.time()).tm_isdst and\
time.daylight else -time.timezone
self.maxts = 1
self.maxaid = 1
self.adocument = {
'AUTHOR': author,
'DATE': time.strftime('%Y-%m-%dT%H:%M:%S{:0=+3d}:{:0=2d}').format(
ctz // 3600, ctz % 3600),
'VERSION': '2.8',
'FORMAT': '2.8',
'xmlns:xsi': 'http://www.w3.org/2001/XMLSchema-instance',
'xsi:noNamespaceSchemaLocation':
'http://www.mpi.nl/tools/elan/EAFv2.8.xsd'}
self.annotations = {}
self.constraints = {}
self.controlled_vocabularies = {}
self.external_refs = {}
self.header = {}
self.languages = {}
self.lexicon_refs = {}
self.linguistic_types = {}
self.locales = {}
self.tiers = {}
self.timeslots = {}
self.licenses = []
self.linked_file_descriptors = []
self.media_descriptors = []
self.properties = []
if file_path is None:
self.add_linguistic_type('default-lt')
self.constraints = self.CONSTRAINTS.copy()
self.properties.append(('lastUsedAnnotation', 0))
self.add_tier('default')
else:
parse_eaf(file_path, self)
def add_annotation(self, id_tier, start, end, value='', svg_ref=None):
"""Add an annotation.
:param str id_tier: Name of the tier.
:param int start: Start time of the annotation.
:param int end: End time of the annotation.
:param str value: Value of the annotation.
:param str svg_ref: Svg reference.
:raises KeyError: If the tier is non-existent.
:raises ValueError: If one of the values is negative or start is bigger
than end, or if the tier already contains ref
annotations.
"""
if self.tiers[id_tier][1]:
raise ValueError('Tier already contains ref annotations...')
if start == end:
raise ValueError('Annotation length is zero...')
if start > end:
raise ValueError('Annotation length is negative...')
if start < 0:
raise ValueError('Start is negative...')
start_ts = self.generate_ts_id(start)
end_ts = self.generate_ts_id(end)
aid = self.generate_annotation_id()
self.annotations[aid] = id_tier
self.tiers[id_tier][0][aid] = (start_ts, end_ts, value, svg_ref)
def add_controlled_vocabulary(self, cv_id, ext_ref=None):
"""Add a controlled vocabulary. This will initialize the controlled
vocabulary without entries.
:param str cv_id: Name of the controlled vocabulary.
:param str ext_ref: External reference.
"""
self.controlled_vocabularies[cv_id] = ([], {}, ext_ref)
def add_cv_entry(self, cv_id, cve_id, values, ext_ref=None):
"""Add an entry to a controlled vocabulary.
:param str cv_id: Name of the controlled vocabulary to add an entry.
:param str cve_id: Name of the entry.
:param list values: List of values of the form:
``(value, lang_ref, description)`` where description can be
``None``.
:param str ext_ref: External reference.
:throws KeyError: If there is no controlled vocabulary with that id.
:throws ValueError: If a language in one of the entries doesn't exist.
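Example (illustrative ids; the language has to be added first)::
    eaf.add_language('eng', 'eng', 'English')
    eaf.add_controlled_vocabulary('cv_yesno')
    eaf.add_cv_entry('cv_yesno', 'cve_yes', [('yes', 'eng', None)])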
"""
for value, lang_ref, description in values:
if lang_ref not in self.languages:
raise ValueError('Language not present: {}'.format(lang_ref))
self.controlled_vocabularies[cv_id][1][cve_id] = (values, ext_ref)
def add_cv_description(self, cv_id, lang_ref, description=None):
"""Add a description to a controlled vocabulary.
:param str cv_id: Name of the controlled vocabulary to add the
description.
:param str lang_ref: Language reference.
:param str description: Description, this can be none.
:throws KeyError: If there is no controlled vocabulary with that id.
:throws ValueError: If the language provided doesn't exist.
"""
if lang_ref not in self.languages:
raise ValueError('Language not present: {}'.format(lang_ref))
self.controlled_vocabularies[cv_id][0].append((lang_ref, description))
def add_external_ref(self, eid, etype, value):
"""Add an external reference.
:param str eid: Name of the external reference.
:param str etype: Type of the external reference, has to be in
``['iso12620', 'ecv', 'cve_id', 'lexen_id', 'resource_url']``.
:param str value: Value of the external reference.
:throws KeyError: if etype is not in the list of possible types.
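Example (illustrative id and URL)::
    eaf.add_external_ref('er1', 'resource_url', 'http://example.com/cv.ecv')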
"""
if etype not in self.ETYPES:
raise KeyError('etype not in {}'.format(self.ETYPES))
self.external_refs[eid] = (etype, value)
def add_language(self, lang_id, lang_def=None, lang_label=None):
"""Add a language.
:param str lang_id: ID of the language.
:param str lang_def: Definition of the language (preferably ISO-639-3).
:param str lang_label: Label of the language.
"""
self.languages[lang_id] = (lang_def, lang_label)
def add_lexicon_ref(self, lrid, name, lrtype, url, lexicon_id,
lexicon_name, datcat_id=None, datcat_name=None):
"""Add lexicon reference.
:param str lrid: Lexicon reference internal ID.
:param str name: Lexicon reference display name.
:param str lrtype: Lexicon reference service type.
:param str url: Lexicon reference service location
:param str lexicon_id: Lexicon reference service id.
:param str lexicon_name: Lexicon reference service name.
:param str datcat_id: Lexicon reference identifier of data category.
:param str datcat_name: Lexicon reference name of data category.
"""
self.lexicon_refs[lrid] = {
'LEX_REF_ID': lrid,
'NAME': name,
'TYPE': lrtype,
'URL': url,
'LEXICON_ID': lexicon_id,
'LEXICON_NAME': lexicon_name,
'DATCAT_ID': datcat_id,
'DATCAT_NAME': datcat_name
}
def add_license(self, name, url):
"""Add a license
:param str name: Name of the license.
:param str url: URL of the license.
"""
self.licenses.append((name, url))
def add_linguistic_type(self, lingtype, constraints=None,
timealignable=True, graphicreferences=False,
extref=None, param_dict=None):
"""Add a linguistic type.
:param str lingtype: Name of the linguistic type.
:param str constraints: Constraint name.
:param bool timealignable: Flag for time alignable.
:param bool graphicreferences: Flag for graphic references.
:param str extref: External reference.
:param dict param_dict: TAG attributes, when this is not ``None`` it
will ignore all other options. Please only use
dictionaries coming from the
:func:`get_parameters_for_linguistic_type`
:raises KeyError: If a constraint is not defined
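Example (illustrative type names; the constraint must be one of the keys of
:attr:`CONSTRAINTS`)::
    eaf.add_linguistic_type('orthography')
    eaf.add_linguistic_type('words', constraints='Time_Subdivision')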
"""
if param_dict:
self.linguistic_types[lingtype] = param_dict
else:
if constraints:
self.constraints[constraints]
self.linguistic_types[lingtype] = {
'LINGUISTIC_TYPE_ID': lingtype,
'TIME_ALIGNABLE': str(timealignable).lower(),
'GRAPHIC_REFERENCES': str(graphicreferences).lower(),
'CONSTRAINTS': constraints}
if extref is not None:
self.linguistic_types[lingtype]['EXT_REF'] = extref
def add_linked_file(self, file_path, relpath=None, mimetype=None,
time_origin=None, ex_from=None):
"""Add a linked file.
:param str file_path: Path of the file.
:param str relpath: Relative path of the file.
:param str mimetype: Mimetype of the file, if ``None`` it tries to
guess it according to the file extension which currently only works
for wav, mpg, mpeg and xml.
:param int time_origin: Time origin for the media file.
:param str ex_from: Extracted from field.
:raises KeyError: If mimetype had to be guessed and a non standard
extension or an unknown mimetype.
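Example (illustrative path; the mimetype is guessed from the ``wav``
extension)::
    eaf.add_linked_file('/data/recording.wav')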
"""
if mimetype is None:
mimetype = self.MIMES[file_path.split('.')[-1]]
self.media_descriptors.append({
'MEDIA_URL': file_path, 'RELATIVE_MEDIA_URL': relpath,
'MIME_TYPE': mimetype, 'TIME_ORIGIN': time_origin,
'EXTRACTED_FROM': ex_from})
def add_locale(self, language_code, country_code=None, variant=None):
"""Add a locale.
:param str language_code: The language code of the locale.
:param str country_code: The country code of the locale.
from __future__ import absolute_import, division, print_function
"""
Author : Lyubimov, A.Y.
Created : 10/10/2014
Last Changed: 10/31/2019
Description : IOTA I/O module. Reads PHIL input, also creates reasonable IOTA
and PHIL defaults if selected. PHILFixer can be used to ensure
backwards compatibility with older param sets.
"""
import os
from multiprocessing import cpu_count
import iotbx.phil as ip
from iota.components.iota_utils import convert_phil_to_text
from iota.components.iota_processing import cctbx_str
master_phil_str = """
description = None
.type = str
.help = Run description (optional).
.alias = Description
input = None
.type = path
.multiple = True
.help = Path to folder with raw data in pickle format, list of files or single file
.help = Can be a tree with folders
.style = input_list
output = $PWD
.type = path
.multiple = False
.help = Base output directory
.alias = Project Folder
.style = path:folder permissions:read
data_selection
{
image_triage
.help = Discard images that lack minimum number of Bragg spots
.style = grid:auto has_scope_switch
{
flag_on = True
.type = bool
.help = Perform a round of spotfinding to see if image has diffraction
.alias = Perform image triage
.style = scope_switch
minimum_Bragg_peaks = 10
.type = int
.help = Minimum number of Bragg peaks to establish diffraction
.alias = Min. number of Bragg Peaks
strong_sigma = 5.0
.type = float
.help = Sigma level to define a strong reflection
.alias = Strong reflection sigma
}
image_range
.help = Use a range of images, e.g. 5 - 1000
.expert_level = 0
.style = grid:auto has_scope_switch
{
flag_on = False
.type = bool
.style = scope_switch
range = None
.type = str
.alias = Image range
.help = Can input multiple ranges, e.g. "5, 60-100, 200-1500"
}
random_sample
.help = Use a randomized subset of images (or -r <number> option)
.expert_level = 0
.style = grid:auto has_scope_switch
{
flag_on = False
.type = bool
.style = scope_switch
number = 0
.type = int
.alias = Random subset
.help = Number of images in random sample
}
}
image_import
.help = Parameters for image importing (& triage)
.alias = Import Options
{
mask = None
.type = path
.help = Mask for ignored pixels
.style = path:file permissions:read
.alias = Beamstop Mask
beamstop = 0
.type = float
.help = Beamstop shadow threshold, zero to skip
.alias = Shadow threshold
.expert_level = 1
distance = 0
.type = float
.help = Alternate crystal-to-detector distance (set to zero to leave the same)
.alias = Detector distance
.expert_level = 1
beam_center
.help = Alternate beam center coordinates (in PIXELS)
.help = Set to zero to leave the same
.alias = Beam Center (pixels)
.style = grid:2x1
.expert_level = 1
{
x = 0
.type = int
.alias = X
.expert_level = 1
y = 0
.type = int
.alias = Y
.expert_level = 1
}
estimate_gain = False
.type = bool
.help = Estimates detector gain (helps improve spotfinding)
.alias = Estimate detector gain
.expert_level = 1
}
analysis
.help = "Analysis / visualization options."
.alias = Analysis Options
{
clustering
.help = Hierarchical clustering of integration results
.alias = Clustering
.style = grid:auto has_scope_switch
{
flag_on = False
.type = bool
.help = Set to True to turn on hierarchical clustering of unit cells
.style = scope_switch
threshold = 5000
.type = int
.help = threshold value for unit cell clustering
.alias = Threshold
n_images = None
.type = int
.help = How many images to cluster? (Useful for huge datasets.)
.alias = No. images
limit = 5
.type = int
.help = "Major clusters" are defined as clusters with over n members
.alias = Max. clusters
write_files = True
.type = bool
.help = Set to True to write lists of images belonging to major clusters
.alias = Write files
}
viz = None *no_visualization integration cv_vectors
.type = choice
.help = Set to "integration" to visualize spotfinding and integration results.
.help = Set to "cv_vectors" to visualize accuracy of CV vectors
.alias = Visualization
charts = False
.type = bool
.help = If True, outputs PDF files w/ charts of mosaicity, rmsd, etc.
.alias = Output integration charts
summary_graphs = False
.type = bool
.help = If True: spot-finding heatmap, res. histogram and beamXY graph
.alias = Output summary charts
}
advanced
.help = "Advanced, debugging and experimental options."
.alias = Advanced
{
processing_backend = *cctbx_xfel ha14
.type = choice
.help = Choose image processing backend software
.expert_level = 1
.alias = Processing Backend
.optional = False
prime_prefix = prime
.type = str
.help = Prefix for the PRIME script filename
.expert_level = 0
.alias = PRIME Prefix
temporary_output_folder = None
.type = path
.help = If None, temp output goes to <output>/integration/###/tmp/
.alias = Temp Folder
.expert_level = 1
reference_geometry = None
.type = path
.help = Detector geometry from ensemble refinement
.alias = Reference Geometry
.expert_level = 1
}
mp
.help = Multiprocessing options
.alias = Multiprocessing
{
n_processors = 0
.type = int
.help = No. of processing units
.alias = No. Processors
method = *multiprocessing mpi lsf torq custom
.type = choice
.help = Multiprocessing method
.alias = Method
.optional = False
queue = None
.type = str
.help = Multiprocessing queue
.alias = Queue
submit_command = None
.type = str
.help = Command to submit IOTA job to a queue
.alias = Submit Command
kill_command = None
.type = str
.help = Command to kill the current IOTA job submitted to a queue
.alias = Kill Command
}
"""
master_phil = ip.parse(master_phil_str + cctbx_str, process_includes=True)
def get_input_phil(paramfile=None, phil_args=None, ha14=False, gui=False):
"""Generate PHIL from file, master, and/or command arguments.
:param paramfile: file with input settings in PHIL format
:param phil_args: PHIL settings as command line arguments
:param ha14: if True, include the legacy ha14 backend PHIL scope
:param gui: if True, adopt the GUI PHIL scope
:return: working PHIL object and a list of rejected command line arguments
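Example (an illustrative override; the scope path follows master_phil_str
defined above):
    >>> working_phil, bad_args = get_input_phil(phil_args=['mp.n_processors=4'])
    >>> params = working_phil.extract()  # params.mp.n_processors is now 4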
"""
from libtbx.phil.command_line import argument_interpreter
from libtbx.utils import Sorry
# Depending on mode, either read input from file, or generate defaults
if paramfile:
with open(paramfile, "r") as inpf:
user_phil = ip.parse(inpf.read())
phil_fixer = PHILFixer()
working_phil = phil_fixer.run(old_phil=user_phil, write_file=True)
else:
if ha14:
from iota.components.iota_cctbx_ha14 import ha14_str
working_phil = ip.parse(master_phil_str + ha14_str, process_includes=True)
else:
working_phil = master_phil
if gui:
from libtbx.phil import find_scope
if not find_scope(working_phil, "gui"):
from iota.components.gui.base import gui_phil
working_phil.adopt_scope(gui_phil)
# Parse in-line params into phil
bad_args = []
if phil_args:
argument_interpreter = argument_interpreter(master_phil=working_phil)
for arg in phil_args:
try:
command_line_params = argument_interpreter.process(arg=arg)
working_phil = working_phil.fetch(sources=[command_line_params])
except Sorry:
bad_args.append(arg)
# Self-fetch to resolve variables
working_phil = working_phil.fetch(source=working_phil)
return working_phil, bad_args
def process_ui_input(args, phil_args, paramfile, mode="auto"):
"""Read and parse parameter file and/or command-line args for IOTA GUI.
:param args: command-line arguments upon launch
:param phil_args: command-line arguments pertaining to IOTA parameters
:param paramfile: text file with IOTA parameters
:return:
"""
working_phil, bad_args = get_input_phil(
phil_args=phil_args, paramfile=paramfile, gui=True
)
params = working_phil.extract()
# Check for -r option and set random subset parameter
if args.random > 0:
params.data_selection.random_sample.flag_on = True
params.data_selection.random_sample.number = args.random[0]
if args.range:
params.data_selection.image_range.flag_on = True
params.data_selection.image_range.range = args.range
if args.watch > 0:
params.gui.monitor_mode = True
params.gui.monitor_mode_timeout = True
params.gui.monitor_mode_timeout_length = args.watch[0]
if args.tmp is not None:
params.advanced.temporary_output_folder = args.tmp[0]
# Check for -n option and set number of processors override
# (for parallel map only, for now)
max_proc = cpu_count() - 2
if args.nproc > 0:
if args.nproc >= max_proc:
params.mp.n_processors = max_proc
else:
params.mp.n_processors = args.nproc[0]
elif params.mp.method == "multiprocessing":
if params.mp.n_processors >= max_proc or params.mp.n_processors == 0:
params.mp.n_processors = int(max_proc / 2)
return working_phil.format(python_object=params), bad_args
def process_input(args, phil_args, paramfile=None, gui=False, write_files=False):
"""Read and parse parameter file and/or command-line args; if none found,
create a default parameter object.
:param args: command-line arguments upon launch
:param phil_args: command-line arguments pertaining to IOTA parameters
:param input_source: text file with IOTA parameters (if 'file' mode) or
source of images (if 'auto' mode)
:param mode: Mode of XtermIOTA run. See the InitAll base class
:param gui: Set to True to initialize GUI parameters
:return: PHIL-formatted parameters
"""
working_phil, bad_args = get_input_phil(
phil_args=phil_args, ha14=args.ha14, paramfile=paramfile, gui=gui
)
# Perform command line check and modify params accordingly
params = working_phil.extract()
if
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# =================================================================
# =================================================================
"""
IBMPowerVMBaseVIFDriver
"""
import time
import traceback
from nova import context as ctx
from nova.openstack.common import lockutils
from nova.openstack.common import log as logging
from nova.db.sqlalchemy import api as db_session
from oslo.config import cfg
#from powervc_k2.k2operator import K2Error
from paxes_nova.db import api as dom_api
from paxes_nova.db.network import models as dom
from paxes_nova.virt.ibmpowervm.vif.common import exception as excp
from paxes_nova.virt.ibmpowervm.vif.common import ras
from paxes_nova.virt.ibmpowervm.vif.common import utils
from paxes_nova.network.ibmpowervm import adapter_mapping as mapping_task
from pprint import pformat
from paxes_nova import _
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class IBMPowerVMBaseVIFDriver(object):
"""
This is the IBMPowerVM base VIF driver class.
The intended way to set up the IBM nova VIF driver is to instantiate the real
VIF driver in the powervm driver's __init__ method based on the specific
VIF driver configuration defined in the nova.conf. Then the vif plug/unplug
method will be ready for use.
"""
def __init__(self):
"""
Base initialization of driver variables
"""
self.current_topo = None
self.current_topo_time = None
@lockutils.synchronized('powervm_vifs', 'nova-vif-powervm-')
def plug(self, instance, vifs, dom_factory=dom.DOM_Factory()):
"""
IBMPowerVMVlanVIFDriver's plug method. Will plug each VIF into the
VIOS SEA.
:param instance: network instance
:param vifs: The list of VIFs that will be plugged. Each should have
a vlan attribute that tells us what is being plugged.
:param dom_factory: Used to create DOM objects.
"""
LOG.debug('Enter plug with vifs: %s' % vifs)
# Walk through each VIF entry in the list for the deploy
succesful_vif_plugs = []
host = None
try:
host = self._get_current_topo()
# Get all the seas from the host_dom
all_seas = host.find_all_primary_seas()
except Exception as e:
# This will happen in a scenario where Paxes is restarted, at
# the same time as a POWER CEC is down. Should ignore.
pass
if host is None:
LOG.debug("Plug is passing due to Host being none. May be due to"
" CEC being powered off upon reboot.")
return
try:
for vif_entry in vifs:
# Call _parse_vif in a try block so we catch an RMC busy
# exception and can retry topo collection
try:
vswitch, data_vlan_id, sea, needs_provision =\
self._parse_vif(host, vif_entry, True)
except excp.IBMPowerVMRMCBusy:
# Give it one retry. If THIS fails, we're done.
LOG.warn(_('RMC was busy, retrying topo'))
host = self._get_current_topo()
all_seas = host.find_all_primary_seas()
vswitch, data_vlan_id, sea, needs_provision =\
self._parse_vif(host, vif_entry, True)
# First, see if we have some fixing up of SEAs to do. If
# we're not going to deploy the vlan because it's already
# there, we want to be sure we still fix up the config if
# the user altered it outside Paxes.
self._check_and_correct_vlan_config(data_vlan_id, dom_factory,
host, sea)
# If _parse_vif determined this vlan doesn't need provisioning,
# or a vswitch wasn't returned, skip this vif
if not needs_provision or vswitch is None:
LOG.debug('Skipping provision (needs_provision: %s, '
'vswitch: %s) for vif_entry: %s',
needs_provision, vswitch, vif_entry)
continue
start_tb = time.time()
# We only need to pass the first SEA in the chain. IVM does
# not support redundant VIOS (therefore, no chain) and HMC will
# automatically handle the redundant SEA.
self._plug_data_vlan_into_sea(host=host,
sea_dev=sea,
vlan_id=data_vlan_id,
dom_factory=dom_factory)
ras.trace(LOG, __name__, ras.TRACE_INFO,
_('Successfully plugged data vlan %(vid)d into '
'SEA') % {'vid': data_vlan_id}, start_tb)
succesful_vif_plugs.append(vif_entry)
except Exception as e:
if 'host' in locals() and host is not None:
LOG.error(_('host:'))
LOG.error(pformat(host.to_dictionary()))
if not 'data_vlan_id' in locals():
data_vlan_id = -1
# If this was a K2 error, we want to log more info about it
# if isinstance(e, K2Error) and e.k2response is not None: Bug0002104,NameError: global name 'K2Error' is not defined
# LOG.error(_('Request headers:'))
# LOG.error(e.k2response.reqheaders)
# LOG.error(_('Request body:'))
# LOG.error(e.k2response.reqbody)
# LOG.error(_('Response headers:'))
# LOG.error(e.k2response.headers)
# LOG.error(_('Response body:'))
# LOG.error(e.k2response.body)
ras.trace(LOG, __name__, ras.TRACE_ERROR,
ras.msg('error', 'SEA_FAILTOPLUG') %
{'vid': data_vlan_id})
ras.trace(LOG, __name__, ras.TRACE_ERROR, traceback.format_exc())
# Do a clean up by calling unplug
for vif_entry in succesful_vif_plugs:
vswitch, failed_vlan_id, sea, needs_provision = \
self._parse_vif(host, vif_entry)
if needs_provision:
self._unplug_data_vlan_from_sea(host=host,
sea=sea,
vlan_id=failed_vlan_id,
dom_factory=dom_factory)
# Reraise the exception that got us here
raise
@lockutils.synchronized('powervm_vifs', 'nova-vif-powervm-')
def unplug(self, instance, vifs, dom_factory=dom.DOM_Factory()):
"""
IBMPowerVMVlanVIFDriver unplug method. Will unplug each VIF from the
VIOS SEA
:param instance: network instance
:param vifs: The list of VIFs that will be unplugged. Each should have
a bridge attribute that contains the vSwitch/VLAN
:param dom_factory: Used to create DOM objects.
"""
LOG.debug('Enter unplug with vifs: %s' % vifs)
try:
# Walk through each VIF entry in the list for the deploy
host = self._get_current_topo()
except Exception as exc:
# This can happen if K2 throws an error on the VIOS read, the
# Client Network Adapter read or some other K2 error.
# As part of the 'do not block unplug' strategy, we should catch
# this error, log and return.
LOG.warn(_('Ignoring Unplug Error: %s') % exc)
return
try:
for vif_entry in vifs:
vswitch, data_vlan_id, sea, needs_provision =\
self._parse_vif(host, vif_entry)
if not needs_provision or vswitch is None:
LOG.debug('Skipping provision (needs_provision: %s, '
'vswitch: %s) for vif_entry: %s',
needs_provision, vswitch, vif_entry)
continue
force = vif_entry['meta'].get('force', False)
start_tb = time.time()
# We only need to pass the first SEA in the chain. IVM does
# not support redundant VIOS (therefore, no chain) and HMC will
# automatically handle the redundant SEA.
self._unplug_data_vlan_from_sea(host=host,
sea=sea,
vlan_id=data_vlan_id,
dom_factory=dom_factory,
force=force)
ras.trace(LOG, __name__, ras.TRACE_INFO,
_('Successfully unplugged data vlan %(vid)d from '
'SEA') % {'vid': data_vlan_id}, start_tb)
except Exception as e:
if 'host' in locals() and host is not None:
LOG.error(_('host:'))
LOG.error(pformat(host.to_dictionary()))
# If this was a K2 error, we want to log more info about it
# if isinstance(e, K2Error) and e.k2response is not None: Bug0002104,NameError: global name 'K2Error' is not defined
# LOG.error(_('Request headers:'))
# LOG.error(e.k2response.reqheaders)
# LOG.error(_('Request body:'))
# LOG.error(e.k2response.reqbody)
# LOG.error(_('Response headers:'))
# LOG.error(e.k2response.headers)
# LOG.error(_('Response body:'))
# LOG.error(e.k2response.body)
#
# # Reraise the exception that got us here
# raise
def _get_current_topo(self):
"""
Return the system topology. If the cached value has not been
invalidated and is younger than the allowed window, return the cached
value. Otherwise fetch a fresh topology from the system.
"""
# If the time was set, and the time is less than X seconds, we can
# just return the current topology.
if self.current_topo_time and\
time.time() - self.current_topo_time < 10:
LOG.debug(self.current_topo.to_dictionary())
ras.trace(LOG, __name__, ras.TRACE_INFO,
_('Using cached system topology'))
ras.trace(LOG, __name__, ras.TRACE_DEBUG, 'Cached host:\n%s' %
self.current_topo.to_dictionary())
return self.current_topo
# Either the topo wasn't set or it's too old. Let's go get it again.
# Always record the time AFTER fetching the topo, since the fetch can
# take several seconds. Note that thread safety is provided by the
# invokers of this method.
self.current_topo = self._topo.get_current_config()
self.current_topo_time = time.time()
return self.current_topo
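# Illustrative cache contract (sketch): a caller that needs a fresh read can
# force one explicitly, e.g.
#     self._invalidate_topo_cache()
#     host = self._get_current_topo()   # bypasses the 10-second cache window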
def _invalidate_topo_cache(self):
"""
Will invalidate the topology cache from _get_current_topo()
"""
self.current_topo = None
self.current_topo_time = None
def _get_ctx(self):
"""
Returns the context that should be used for database calls.
"""
return ctx.get_admin_context()
def _get_host(self):
"""
Returns the host that this process is running on.
"""
return CONF.host
def _get_host_display_name(self):
"""
Returns the display name of the host that this process is running on.
"""
return CONF.host_display_name
def _parse_vif(self, host, vif, here_for_plug=False):
"""
Parses out the VIF information to return the vSwitch and VLAN
:param host: The Host DOM object that represents the endpoint (IVM or
HMC)
:param vif: The VIF that will contain the VLAN and vSwitch via the
bridge attribute.
:param here_for_plug: Boolean to indicate whether the caller is plug or
unplug
:return vSwitch: the vSwitch attribute (string)
:return vlan: the VLAN for the element (int)
:return sea: The Primary SEA to provision against
:return needs_provision: True if the VLAN still needs to be provisioned
"""
LOG.debug('Enter _parse_vif with vif: %s' % vif)
if not vif:
LOG.warn(_('No vif passed in'))
return None, None, None, False
try:
context = self._get_ctx()
host_name = self._get_host()
host_display_name = self._get_host_display_name()
meta = vif['meta']
netmeta = vif['network']['meta']
if meta and ('vlan' in meta):
vlan_string = meta['vlan']
elif netmeta and ('vlan' in netmeta):
vlan_string = str(netmeta['vlan'])
else:
# Occasionally happens, shouldn't fail but just continue
LOG.warn(_('Failed to find vlan in vif: %s') % pformat(vif))
return None, None, None, False
# First try to get the network-mapping
net_id = vif.get('network').get('id')
session = db_session.get_session()
with session.begin():
net_assn = dom_api.network_association_find(context, host_name,
net_id, session)
# Check to see if the | |
'numpy.ndarray'>
"""
data = ndarray.ravel(self._data)
if self._mask is not nomask:
data = data.compress(np.logical_not(ndarray.ravel(self._mask)))
return data
def compress(self, condition, axis=None, out=None):
"""
Return `a` where condition is ``True``.
If condition is a `MaskedArray`, missing (masked) values are treated as ``False``.
Parameters
----------
condition : array_like
Boolean 1-D array selecting which entries to return. If len(condition)
is less than the size of `a` along the given axis, then the output is
truncated to the length of the condition array.
axis : {None, int}, optional
Axis along which the operation must be performed.
out : {None, ndarray}, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type will be cast if
necessary.
Returns
-------
result : MaskedArray
A :class:`MaskedArray` object.
Warnings
--------
Please note the difference with :meth:`compressed`!
The output of :meth:`compress` has a mask, the output of :meth:`compressed` does not.
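Examples
--------
A minimal illustrative sketch (exact output formatting may vary):
>>> x = np.ma.array([1, 2, 3], mask=[0, 1, 0])
>>> print x.compress([1, 0, 1])
[1 3]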
"""
# Get the basic components
(_data, _mask) = (self._data, self._mask)
# Force the condition to a regular ndarray (forget the missing values...)
condition = np.array(condition, copy=False, subok=False)
#
_new = _data.compress(condition, axis=axis, out=out).view(type(self))
_new._update_from(self)
if _mask is not nomask:
_new._mask = _mask.compress(condition, axis=axis)
return _new
#............................................
def __str__(self):
"""String representation.
"""
if masked_print_option.enabled():
f = masked_print_option
if self is masked:
return str(f)
m = self._mask
if m is nomask:
res = self._data
else:
if m.shape == ():
if m.dtype.names:
m = m.view((bool, len(m.dtype)))
if m.any():
r = np.array(self._data.tolist(), dtype=object)
np.putmask(r, m, f)
return str(tuple(r))
else:
return str(self._data)
elif m:
return str(f)
else:
return str(self._data)
# convert to object array to make filled work
names = self.dtype.names
if names is None:
res = self._data.astype("|O8")
res[m] = f
else:
rdtype = [list(_) for _ in self.dtype.descr]
for r in rdtype:
r[1] = '|O8'
rdtype = [tuple(_) for _ in rdtype]
res = self._data.astype(rdtype)
for field in names:
np.putmask(res[field], m[field], f)
else:
res = self.filled(self.fill_value)
return str(res)
def __repr__(self):
"""Literal string representation.
"""
with_mask = """\
masked_%(name)s(data =
%(data)s,
mask =
%(mask)s,
fill_value=%(fill)s)
"""
with_mask1 = """\
masked_%(name)s(data = %(data)s,
mask = %(mask)s,
fill_value=%(fill)s)
"""
with_mask_flx = """\
masked_%(name)s(data =
%(data)s,
mask =
%(mask)s,
fill_value=%(fill)s,
dtype=%(dtype)s)
"""
with_mask1_flx = """\
masked_%(name)s(data = %(data)s,
mask = %(mask)s,
fill_value=%(fill)s
dtype=%(dtype)s)
"""
n = len(self.shape)
name = repr(self._data).split('(')[0]
parameters = dict(name=name, data=str(self), mask=str(self._mask),
fill=str(self.fill_value), dtype=str(self.dtype))
if self.dtype.names:
if n <= 1:
return with_mask1_flx % parameters
return with_mask_flx % parameters
elif n <= 1:
return with_mask1 % parameters
return with_mask % parameters
#............................................
def __add__(self, other):
"Add other to self, and return a new masked array."
return add(self, other)
#
def __radd__(self, other):
"Add other to self, and return a new masked array."
return add(self, other)
#
def __sub__(self, other):
"Subtract other to self, and return a new masked array."
return subtract(self, other)
#
def __rsub__(self, other):
"Subtract other to self, and return a new masked array."
return subtract(other, self)
#
def __mul__(self, other):
"Multiply other by self, and return a new masked array."
return multiply(self, other)
#
def __rmul__(self, other):
"Multiply other by self, and return a new masked array."
return multiply(other, self)
#
def __div__(self, other):
"Divide other into self, and return a new masked array."
return divide(self, other)
#
def __truediv__(self, other):
"Divide other into self, and return a new masked array."
return true_divide(self, other)
#
def __floordiv__(self, other):
"Divide other into self, and return a new masked array."
return floor_divide(self, other)
#
def __pow__(self, other):
"Raise self to the power other, masking the potential NaNs/Infs"
return power(self, other)
#............................................
def __iadd__(self, other):
"Add other to self in-place."
ndarray.__iadd__(self._data, getdata(other))
m = getmask(other)
if self._mask is nomask:
self._mask = m
elif m is not nomask:
self._mask += m
return self
#....
def __isub__(self, other):
"Subtract other from self in-place."
ndarray.__isub__(self._data, getdata(other))
m = getmask(other)
if self._mask is nomask:
self._mask = m
elif m is not nomask:
self._mask += m
return self
#....
def __imul__(self, other):
"Multiply self by other in-place."
ndarray.__imul__(self._data, getdata(other))
m = getmask(other)
if self._mask is nomask:
self._mask = m
elif m is not nomask:
self._mask += m
return self
#....
def __idiv__(self, other):
"Divide self by other in-place."
other_data = getdata(other)
dom_mask = _DomainSafeDivide().__call__(self._data, other_data)
other_mask = getmask(other)
new_mask = mask_or(other_mask, dom_mask)
# The following 3 lines control the domain filling
if dom_mask.any():
(_, fval) = ufunc_fills[np.divide]
other_data = np.where(dom_mask, fval, other_data)
ndarray.__idiv__(self._data, other_data)
self._mask = mask_or(self._mask, new_mask)
return self
#...
def __ipow__(self, other):
"Raise self to the power other, in place"
_data = self._data
other_data = getdata(other)
other_mask = getmask(other)
ndarray.__ipow__(_data, other_data)
invalid = np.logical_not(np.isfinite(_data))
new_mask = mask_or(other_mask, invalid)
self._mask = mask_or(self._mask, new_mask)
# The following line is potentially problematic, as we change _data...
np.putmask(self._data,invalid,self.fill_value)
return self
#............................................
def __float__(self):
"Convert to float."
if self.size > 1:
raise TypeError("Only length-1 arrays can be converted "\
"to Python scalars")
elif self._mask:
warnings.warn("Warning: converting a masked element to nan.")
return np.nan
return float(self.item())
def __int__(self):
"Convert to int."
if self.size > 1:
raise TypeError("Only length-1 arrays can be converted "\
"to Python scalars")
elif self._mask:
raise MAError, 'Cannot convert masked element to a Python int.'
return int(self.item())
#............................................
def get_imag(self):
result = self._data.imag.view(type(self))
result.__setmask__(self._mask)
return result
imag = property(fget=get_imag,doc="Imaginary part")
def get_real(self):
result = self._data.real.view(type(self))
result.__setmask__(self._mask)
return result
real = property(fget=get_real,doc="Real part")
#............................................
def count(self, axis=None):
"""Count the non-masked elements of the array along the given
axis.
Parameters
----------
axis : int, optional
Axis along which to count the non-masked elements. If
not given, all the non-masked elements are counted.
Returns
-------
result : MaskedArray
A masked array where the mask is True where all data are
masked. If axis is None, returns either a scalar or the
masked singleton if all values are masked.
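Examples
--------
A minimal illustrative sketch:
>>> a = np.ma.array([1, 2, 3], mask=[0, 1, 0])
>>> a.count()
2
>>> b = np.ma.array([[1, 2, 3], [4, 5, 6]], mask=[[0, 1, 0], [0, 0, 1]])
>>> b.count(axis=0)
array([2, 1, 1])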
"""
m = self._mask
s = self.shape
ls = len(s)
if m is nomask:
if ls == 0:
return 1
if ls == 1:
return s[0]
if axis is None:
return self.size
else:
n = s[axis]
t = list(s)
del t[axis]
return np.ones(t) * n
n1 = np.size(m, axis)
n2 = m.astype(int).sum(axis)
if axis is None:
return (n1-n2)
else:
return narray(n1 - n2)
#............................................
flatten = _arraymethod('flatten')
#
def ravel(self):
"""
Returns a 1D version of self, as a view.
Returns
-------
MaskedArray
Output view is of shape ``(self.size,)`` (or
``(np.ma.product(self.shape),)``).
Examples
--------
>>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
>>> print x
[[1 -- 3]
[-- 5 --]
[7 -- 9]]
>>> print x.ravel()
[1 -- 3 -- 5 -- 7 -- 9]
"""
r = ndarray.ravel(self._data).view(type(self))
r._update_from(self)
if self._mask is not nomask:
r._mask = ndarray.ravel(self._mask).reshape(r.shape)
else:
r._mask = nomask
return r
#
repeat = _arraymethod('repeat')
#
def reshape (self, *s, **kwargs):
"""
Returns a masked array containing the data of a, but with a new shape.
The result is a view to the original array; if this is not possible,
a ValueError is raised.
Parameters
----------
shape : shape tuple or int
The new shape should be compatible with the original shape. If an
integer, then the result will be a 1D array of that length.
order : {'C', 'F'}, optional
Determines whether the array data should be viewed as in C
(row-major) order or FORTRAN (column-major) order.
Returns
-------
reshaped_array : array
A new view to the array.
Notes
-----
If you want to modify the shape in place, please use ``a.shape = s``
Examples
--------
>>> x = np.ma.array([[1,2],[3,4]], mask=[1,0,0,1])
>>> print x
[[-- 2]
[3 --]]
>>> x = x.reshape((4,1))
>>> print x
[[--]
[2]
[3]
[--]]
"""
kwargs.update(order=kwargs.get('order','C'))
result = self._data.reshape(*s, **kwargs).view(type(self))
result._update_from(self)
mask = self._mask
if mask is not nomask:
result._mask = mask.reshape(*s, **kwargs)
return result
#
def resize(self, newshape, refcheck=True, order=False):
"""Attempt to modify the size and the shape of the array in place.
The array must own its own memory and not be referenced by
other arrays.
Returns
-------
None.
"""
try:
self._data.resize(newshape, refcheck, order)
if self.mask is | |
from __future__ import absolute_import
import wx
from wx.lib.pubsub import pub
import matplotlib
from matplotlib.figure import Figure
from matplotlib.widgets import AxesWidget
try:
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg
except IOError:
# on some linux installations this import needs to be done twice as the first time raises an error:
# IOError: [Errno 2] No such file or directory: '/tmp/matplotlib-parallels/fontList.cache'
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg
from matplotlib.patches import Circle, Wedge
try:
from scipy.optimize import curve_fit
scipy_install_is_ok = True
except ImportError, e:
scipy_install_is_ok = False
from .quick_phot import centroid, aperture_phot
from .ztv_wx_lib import validate_textctrl_str, textctrl_output_only_background_color, set_textctrl_background_color
from .ztv_lib import send_to_stream
from astropy import units
import numpy as np
import sys
class PhotPlotPanel(wx.Panel):
def __init__(self, parent, dpi=None, **kwargs):
wx.Panel.__init__(self, parent, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, **kwargs)
self.ztv_frame = self.GetTopLevelParent()
self.figure = Figure(dpi=None, figsize=(1.,1.))
self.axes = self.figure.add_subplot(111)
self.canvas = FigureCanvasWxAgg(self, -1, self.figure)
self.Bind(wx.EVT_SIZE, self._onSize)
self.axes_widget = AxesWidget(self.figure.gca())
self.axes_widget.connect_event('motion_notify_event', self.on_motion)
self.axes_widget.connect_event('button_press_event', self.on_button_press)
self.axes_widget.connect_event('button_release_event', self.on_button_release)
self.axes_widget.connect_event('figure_leave_event', self.on_cursor_leave)
self.button_down = False
def on_button_press(self, event):
self.aper_names = ['aprad', 'skyradin', 'skyradout']
self.aper_last_radii = np.array([self.ztv_frame.phot_panel.aprad,
self.ztv_frame.phot_panel.skyradin,
self.ztv_frame.phot_panel.skyradout])
self.button_press_xdata = event.xdata
self.cur_aper_index = np.abs(self.aper_last_radii - event.xdata).argmin()
self.cur_aper_name = self.aper_names[self.cur_aper_index]
# but, the click must be within +/-20 pixels of an aperture radius to be valid
if np.abs(event.xdata - self.aper_last_radii[self.cur_aper_index]) <= 20:
self.button_down = True
def on_motion(self, event):
if self.button_down:
if event.xdata is not None:
if self.cur_aper_name == 'aprad':
self.ztv_frame.phot_panel.aprad = (self.aper_last_radii[self.cur_aper_index] +
(event.xdata - self.button_press_xdata))
self.ztv_frame.phot_panel.aprad_textctrl.SetValue('{0:.2f}'.format(self.ztv_frame.phot_panel.aprad))
set_textctrl_background_color(self.ztv_frame.phot_panel.aprad_textctrl, 'ok')
elif self.cur_aper_name == 'skyradin':
self.ztv_frame.phot_panel.skyradin = (self.aper_last_radii[self.cur_aper_index] +
(event.xdata - self.button_press_xdata))
self.ztv_frame.phot_panel.skyradin_textctrl.SetValue('{0:.2f}'.format(
self.ztv_frame.phot_panel.skyradin))
set_textctrl_background_color(self.ztv_frame.phot_panel.skyradin_textctrl, 'ok')
elif self.cur_aper_name == 'skyradout':
self.ztv_frame.phot_panel.skyradout = (self.aper_last_radii[self.cur_aper_index] +
(event.xdata - self.button_press_xdata))
self.ztv_frame.phot_panel.skyradout_textctrl.SetValue('{0:.2f}'.format(
self.ztv_frame.phot_panel.skyradout))
set_textctrl_background_color(self.ztv_frame.phot_panel.skyradout_textctrl, 'ok')
self.ztv_frame.phot_panel.recalc_phot()
def on_button_release(self, event):
if self.button_down:
if event.xdata is not None:
if self.cur_aper_name == 'aprad':
self.ztv_frame.phot_panel.aprad = (self.aper_last_radii[self.cur_aper_index] +
(event.xdata - self.button_press_xdata))
self.ztv_frame.phot_panel.aprad_textctrl.SetValue('{0:.2f}'.format(self.ztv_frame.phot_panel.aprad))
set_textctrl_background_color(self.ztv_frame.phot_panel.aprad_textctrl, 'ok')
elif self.cur_aper_name == 'skyradin':
self.ztv_frame.phot_panel.skyradin = (self.aper_last_radii[self.cur_aper_index] +
(event.xdata - self.button_press_xdata))
self.ztv_frame.phot_panel.skyradin_textctrl.SetValue('{0:.2f}'.format(
self.ztv_frame.phot_panel.skyradin))
set_textctrl_background_color(self.ztv_frame.phot_panel.skyradin_textctrl, 'ok')
elif self.cur_aper_name == 'skyradout':
self.ztv_frame.phot_panel.skyradout = (self.aper_last_radii[self.cur_aper_index] +
(event.xdata - self.button_press_xdata))
self.ztv_frame.phot_panel.skyradout_textctrl.SetValue('{0:.2f}'.format(
self.ztv_frame.phot_panel.skyradout))
set_textctrl_background_color(self.ztv_frame.phot_panel.skyradout_textctrl, 'ok')
self.ztv_frame.phot_panel.recalc_phot()
self.button_down = False
def on_cursor_leave(self, event):
if self.button_down:
if self.cur_aper_name == 'aprad':
self.ztv_frame.phot_panel.aprad = self.aper_last_radii[self.cur_aper_index]
self.ztv_frame.phot_panel.aprad_textctrl.SetValue('{0:.2f}'.format(self.ztv_frame.phot_panel.aprad))
set_textctrl_background_color(self.ztv_frame.phot_panel.aprad_textctrl, 'ok')
elif self.cur_aper_name == 'skyradin':
self.ztv_frame.phot_panel.skyradin = self.aper_last_radii[self.cur_aper_index]
self.ztv_frame.phot_panel.skyradin_textctrl.SetValue('{0:.2f}'.format(
self.ztv_frame.phot_panel.skyradin))
set_textctrl_background_color(self.ztv_frame.phot_panel.skyradin_textctrl, 'ok')
elif self.cur_aper_name == 'skyradout':
self.ztv_frame.phot_panel.skyradout = self.aper_last_radii[self.cur_aper_index]
self.ztv_frame.phot_panel.skyradout_textctrl.SetValue('{0:.2f}'.format(
self.ztv_frame.phot_panel.skyradout))
set_textctrl_background_color(self.ztv_frame.phot_panel.skyradout_textctrl, 'ok')
self.ztv_frame.phot_panel.recalc_phot()
self.button_down=False
def _onSize(self, event):
self._SetSize()
def _SetSize(self):
pixels = tuple(self.GetClientSize())
self.SetSize(pixels)
self.canvas.SetSize(pixels)
self.figure.set_size_inches(float(pixels[0])/self.figure.get_dpi(), float(pixels[1])/self.figure.get_dpi())
def fixed_gauss(x, fwhm, peakval):
"""
Gaussian fixed at center 0 with a baseline of 0; the free parameters are the FWHM and the peak value.
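Illustrative usage sketch (assumes scipy's curve_fit imported above is available):
>>> r = np.array([0., 0.5, 1., 1.5, 2.])
>>> y = fixed_gauss(r, 2.0, 10.0)
>>> popt, pcov = curve_fit(fixed_gauss, r, y, p0=[1., y.max()])  # popt ~ [2.0, 10.0]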
"""
c = fwhm / (2. * np.sqrt(2. * np.log(2.)))
xc = 0.
return peakval * np.exp(-((x - xc)**2) / (2.*c**2))
class PhotPanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize)
self.ztv_frame = self.GetTopLevelParent()
self.ztv_frame.primary_image_panel.popup_menu_cursor_modes.append('Phot')
self.ztv_frame.primary_image_panel.available_cursor_modes['Phot'] = {
'set-to-mode':self.set_cursor_to_phot_mode,
'on_button_press':self.on_button_press}
self.star_center_patch = None
self.star_aperture_patch = None
self.sky_aperture_patch = None
self.last_string_values = {'aprad':'', 'skyradin':'', 'skyradout':''}
self.xclick = None
self.yclick = None
self.xcentroid = 0.
self.ycentroid = 0.
self.aprad = 10.
self.skyradin = 20.
self.skyradout = 30.
self.phot_info = None
self.aprad_color = 'blue'
self.skyrad_color = 'red'
self.alpha = 0.25
self._need_to_recalc_phot_on_next_activation = False
textentry_font = wx.Font(14, wx.FONTFAMILY_MODERN, wx.NORMAL, wx.FONTWEIGHT_LIGHT, False)
values_sizer = wx.FlexGridSizer( 3, 5, 0, 0 )
values_sizer.SetFlexibleDirection( wx.BOTH )
values_sizer.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
self.aprad_static_text = wx.StaticText( self, wx.ID_ANY, u"Aperture radius", wx.DefaultPosition, wx.DefaultSize, wx.ALIGN_RIGHT )
self.aprad_static_text.Wrap( -1 )
values_sizer.Add(self.aprad_static_text, 0, wx.ALL|wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL, 0)
self.aprad_textctrl = wx.TextCtrl(self, wx.ID_ANY, str(self.aprad), wx.DefaultPosition, wx.DefaultSize,
wx.TE_PROCESS_ENTER)
self.aprad_textctrl.SetFont(textentry_font)
values_sizer.Add(self.aprad_textctrl, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 2)
self.aprad_textctrl.Bind(wx.EVT_TEXT, self.aprad_textctrl_changed)
self.aprad_textctrl.Bind(wx.EVT_TEXT_ENTER, self.aprad_textctrl_entered)
values_sizer.AddSpacer((30,0), 0, wx.EXPAND)
self.x_static_text = wx.StaticText(self, wx.ID_ANY, u"x", wx.DefaultPosition, wx.DefaultSize,
wx.ALIGN_CENTER_HORIZONTAL )
self.x_static_text.Wrap( -1 )
values_sizer.Add(self.x_static_text, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL|wx.ALIGN_BOTTOM, 0)
self.y_static_text = wx.StaticText(self, wx.ID_ANY, u"y", wx.DefaultPosition, wx.DefaultSize,
wx.ALIGN_CENTER_HORIZONTAL )
self.y_static_text.Wrap( -1 )
values_sizer.Add(self.y_static_text, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL|wx.ALIGN_BOTTOM, 0)
self.skyradin_static_text = wx.StaticText(self, wx.ID_ANY, u"Sky inner radius",
wx.DefaultPosition, wx.DefaultSize, wx.ALIGN_RIGHT )
self.skyradin_static_text.Wrap( -1 )
values_sizer.Add(self.skyradin_static_text, 0, wx.ALL|wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL, 0)
self.skyradin_textctrl = wx.TextCtrl(self, wx.ID_ANY, str(self.skyradin), wx.DefaultPosition, wx.DefaultSize,
wx.TE_PROCESS_ENTER)
self.skyradin_textctrl.SetFont(textentry_font)
values_sizer.Add(self.skyradin_textctrl, 0, wx.ALL|wx.EXPAND|wx.ALIGN_CENTER_VERTICAL, 2)
self.skyradin_textctrl.Bind(wx.EVT_TEXT, self.skyradin_textctrl_changed)
self.skyradin_textctrl.Bind(wx.EVT_TEXT_ENTER, self.skyradin_textctrl_entered)
self.clicked_static_text = wx.StaticText(self, wx.ID_ANY, u"Clicked", wx.DefaultPosition,
wx.DefaultSize, wx.ALIGN_RIGHT )
self.clicked_static_text.Wrap( -1 )
values_sizer.Add(self.clicked_static_text, 0, wx.ALL|wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL, 0)
self.xclick_textctrl = wx.TextCtrl(self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize,
wx.TE_READONLY)
self.xclick_textctrl.SetFont(textentry_font)
self.xclick_textctrl.SetBackgroundColour(textctrl_output_only_background_color)
values_sizer.Add(self.xclick_textctrl, 0, wx.ALL|wx.EXPAND|wx.ALIGN_CENTER_VERTICAL, 2)
self.yclick_textctrl = wx.TextCtrl(self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize,
wx.TE_READONLY)
self.yclick_textctrl.SetFont(textentry_font)
self.yclick_textctrl.SetBackgroundColour(textctrl_output_only_background_color)
values_sizer.Add(self.yclick_textctrl, 0, wx.ALL|wx.EXPAND|wx.ALIGN_CENTER_VERTICAL, 2)
self.skyradout_static_text = wx.StaticText(self, wx.ID_ANY, u"Sky outer radius", wx.DefaultPosition,
wx.DefaultSize, wx.ALIGN_RIGHT )
self.skyradout_static_text.Wrap( -1 )
values_sizer.Add(self.skyradout_static_text, 0, wx.ALL|wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL, 0)
self.skyradout_textctrl = wx.TextCtrl(self, wx.ID_ANY, str(self.skyradout), wx.DefaultPosition, wx.DefaultSize,
wx.TE_PROCESS_ENTER)
self.skyradout_textctrl.SetFont(textentry_font)
values_sizer.Add(self.skyradout_textctrl, 0, wx.ALL|wx.EXPAND|wx.ALIGN_CENTER_VERTICAL, 2)
self.skyradout_textctrl.Bind(wx.EVT_TEXT, self.skyradout_textctrl_changed)
self.skyradout_textctrl.Bind(wx.EVT_TEXT_ENTER, self.skyradout_textctrl_entered)
self.centroid_static_text = wx.StaticText(self, wx.ID_ANY, u"Centroid", wx.DefaultPosition,
wx.DefaultSize, wx.ALIGN_RIGHT )
self.centroid_static_text.Wrap( -1 )
values_sizer.Add(self.centroid_static_text, 0, wx.ALL|wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL, 0)
self.xcentroid_textctrl = wx.TextCtrl(self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize,
wx.TE_READONLY)
self.xcentroid_textctrl.SetFont(textentry_font)
self.xcentroid_textctrl.SetBackgroundColour(textctrl_output_only_background_color)
values_sizer.Add(self.xcentroid_textctrl, 0, wx.ALL|wx.EXPAND|wx.ALIGN_CENTER_VERTICAL, 2)
self.ycentroid_textctrl = wx.TextCtrl(self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize,
wx.TE_READONLY)
self.ycentroid_textctrl.SetFont(textentry_font)
self.ycentroid_textctrl.SetBackgroundColour(textctrl_output_only_background_color)
values_sizer.Add(self.ycentroid_textctrl, 0, wx.ALL|wx.EXPAND|wx.ALIGN_CENTER_VERTICAL, 2)
v_sizer1 = wx.BoxSizer(wx.VERTICAL)
v_sizer1.Add(values_sizer, 0, wx.ALIGN_CENTER_HORIZONTAL)
# v_sizer1.AddSpacer((0, 1), 0, wx.EXPAND)
v_sizer1.Add(wx.StaticLine(self, -1, style=wx.LI_HORIZONTAL), 0, wx.EXPAND|wx.ALL, 5)
# v_sizer1.AddSpacer((0, 1), 0, wx.EXPAND)
h_sizer1 = wx.BoxSizer(wx.HORIZONTAL)
self.sky_static_text = wx.StaticText( self, wx.ID_ANY, u"Sky", wx.DefaultPosition, wx.DefaultSize, wx.ALIGN_RIGHT )
self.sky_static_text.Wrap( -1 )
h_sizer1.Add(self.sky_static_text, 0, wx.ALL|wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL, 0)
self.sky_textctrl = wx.TextCtrl(self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize,
wx.TE_READONLY)
self.sky_textctrl.SetFont(textentry_font)
self.sky_textctrl.SetBackgroundColour(textctrl_output_only_background_color)
h_sizer1.Add(self.sky_textctrl, 0, wx.ALL|wx.EXPAND|wx.ALIGN_CENTER_VERTICAL, 2)
# TODO: look up how to do nice plus/minus symbol
self.pm_static_text = wx.StaticText( self, wx.ID_ANY, u"+-", wx.DefaultPosition, wx.DefaultSize, wx.ALIGN_RIGHT )
self.pm_static_text.Wrap( -1 )
h_sizer1.Add(self.pm_static_text, 0, wx.ALL|wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL, 0)
self.skyerr_textctrl = wx.TextCtrl(self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize,
wx.TE_READONLY)
self.skyerr_textctrl.SetFont(textentry_font)
self.skyerr_textctrl.SetBackgroundColour(textctrl_output_only_background_color)
h_sizer1.Add(self.skyerr_textctrl, 0, wx.ALL|wx.EXPAND|wx.ALIGN_CENTER_VERTICAL, 2)
self.perpixel_static_text = wx.StaticText( self, wx.ID_ANY, u"cts/pixel", wx.DefaultPosition, wx.DefaultSize, wx.ALIGN_RIGHT )
self.perpixel_static_text.Wrap( -1 )
h_sizer1.Add(self.perpixel_static_text, 0, wx.ALL|wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL, 0)
v_sizer1.Add(h_sizer1, 0, wx.ALIGN_LEFT)
h_sizer2 = wx.BoxSizer(wx.HORIZONTAL)
self.flux_static_text = wx.StaticText( self, wx.ID_ANY, u"Flux", wx.DefaultPosition, wx.DefaultSize, wx.ALIGN_RIGHT )
self.flux_static_text.Wrap( -1 )
h_sizer2.Add(self.flux_static_text, 0, wx.ALL|wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL, 0)
self.flux_textctrl = wx.TextCtrl(self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize,
wx.TE_READONLY)
self.flux_textctrl.SetFont(textentry_font)
self.flux_textctrl.SetBackgroundColour(textctrl_output_only_background_color)
h_sizer2.Add(self.flux_textctrl, 0, wx.ALL|wx.EXPAND|wx.ALIGN_CENTER_VERTICAL, 2)
self.cts_static_text = wx.StaticText( self, wx.ID_ANY, u"cts with FWHM", wx.DefaultPosition, wx.DefaultSize, wx.ALIGN_RIGHT )
self.cts_static_text.Wrap( -1 )
h_sizer2.Add(self.cts_static_text, 0, wx.ALL|wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL, 0)
self.fwhm_textctrl = wx.TextCtrl(self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize,
wx.TE_READONLY)
self.fwhm_textctrl.SetFont(textentry_font)
self.fwhm_textctrl.SetBackgroundColour(textctrl_output_only_background_color)
h_sizer2.Add(self.fwhm_textctrl, 0, wx.ALL|wx.EXPAND|wx.ALIGN_CENTER_VERTICAL, 2)
self.pix_static_text = wx.StaticText( self, wx.ID_ANY, u"pix", wx.DefaultPosition, wx.DefaultSize, wx.ALIGN_RIGHT )
self.pix_static_text.Wrap( -1 )
h_sizer2.Add(self.pix_static_text, 0, wx.ALL|wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL, 0)
v_sizer1.Add(h_sizer2, 0, wx.ALIGN_LEFT)
h_sizer3 = wx.BoxSizer(wx.HORIZONTAL)
self.radec_static_text = wx.StaticText( self, wx.ID_ANY, u"RADec", wx.DefaultPosition, wx.DefaultSize, wx.ALIGN_RIGHT )
self.radec_static_text.Wrap( -1 )
h_sizer3.Add(self.radec_static_text, 0, wx.ALL|wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL, 0)
self.radec_textctrl = wx.TextCtrl(self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize,
wx.TE_READONLY)
self.radec_textctrl.SetFont(textentry_font)
self.radec_textctrl.SetBackgroundColour(textctrl_output_only_background_color)
h_sizer3.Add(self.radec_textctrl, 1, wx.ALL|wx.EXPAND|wx.ALIGN_CENTER_VERTICAL, 2)
h_sizer3.AddSpacer([30, 0], 0, 0)
self.hideshow_button = wx.Button(self, wx.ID_ANY, u"Hide", wx.DefaultPosition, wx.DefaultSize, 0)
h_sizer3.Add(self.hideshow_button, 0, wx.ALL|wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL, 2)
self.hideshow_button.Bind(wx.EVT_BUTTON, self.on_hideshow_button)
v_sizer1.Add(h_sizer3, 0, wx.ALIGN_LEFT)
self.plot_panel = PhotPlotPanel(self)
v_sizer1.Add(self.plot_panel, 1, wx.LEFT | wx.TOP | wx.EXPAND)
self.SetSizer(v_sizer1)
pub.subscribe(self.queue_recalc_phot, 'recalc-display-image-called')
pub.subscribe(self._set_aperture_phot_parameters, 'set-aperture-phot-parameters')
pub.subscribe(self.publish_aperture_phot_info_to_stream, 'get-aperture-phot-info')
def publish_aperture_phot_info_to_stream(self, msg=None):
phot_info = self.phot_info.copy()
phot_info.pop('distances', None)
wx.CallAfter(send_to_stream, sys.stdout, ('aperture-phot-info', phot_info))
def on_button_press(self, event):
self.select_panel()
self.update_phot_xy((event.xdata, event.ydata))
def set_cursor_to_phot_mode(self, event):
self.ztv_frame.primary_image_panel.cursor_mode = 'Phot'
self.select_panel()
self.highlight_panel()
def queue_recalc_phot(self, msg=None):
"""
Wrapper that schedules recalc_phot via wx.CallAfter so the GUI stays as responsive as possible.
"""
wx.CallAfter(self.recalc_phot, msg=None)
def on_hideshow_button(self, evt):
if self.hideshow_button.GetLabel() == 'Hide':
self.remove_overplot_on_image()
else:
self.redraw_overplot_on_image()
def remove_overplot_on_image(self, *args):
if self.star_center_patch is not None:
self.ztv_frame.primary_image_panel.axes.patches.remove(self.star_center_patch)
self.star_center_patch = None
if self.star_aperture_patch is not None:
self.ztv_frame.primary_image_panel.axes.patches.remove(self.star_aperture_patch)
self.star_aperture_patch = None
if self.sky_aperture_patch is not None:
self.ztv_frame.primary_image_panel.axes.patches.remove(self.sky_aperture_patch)
self.sky_aperture_patch = None
self.ztv_frame.primary_image_panel.figure.canvas.draw()
self.hideshow_button.SetLabel(u"Show")
def redraw_overplot_on_image(self, *args):
if self.star_center_patch is not None:
self.ztv_frame.primary_image_panel.axes.patches.remove(self.star_center_patch)
if self.star_aperture_patch is not None:
self.ztv_frame.primary_image_panel.axes.patches.remove(self.star_aperture_patch)
if self.sky_aperture_patch is not None:
self.ztv_frame.primary_image_panel.axes.patches.remove(self.sky_aperture_patch)
self.star_center_patch = Circle([self.xcentroid, self.ycentroid], 0.125, color=self.aprad_color)
self.ztv_frame.primary_image_panel.axes.add_patch(self.star_center_patch)
self.star_aperture_patch = Circle([self.xcentroid, self.ycentroid], self.aprad, color=self.aprad_color, alpha=self.alpha)
self.ztv_frame.primary_image_panel.axes.add_patch(self.star_aperture_patch)
self.sky_aperture_patch = Wedge([self.xcentroid, self.ycentroid], self.skyradout, 0., 360.,
width=self.skyradout - self.skyradin, color=self.skyrad_color, alpha=self.alpha)
self.ztv_frame.primary_image_panel.axes.add_patch(self.sky_aperture_patch)
self.ztv_frame.primary_image_panel.figure.canvas.draw()
self.hideshow_button.SetLabel(u"Hide")
def _set_aperture_phot_parameters(self, msg):
if msg['xclick'] is not None:
self.xclick = msg['xclick']
if msg['yclick'] is not None:
self.yclick = msg['yclick']
if msg['radius'] is not None:
self.aprad = msg['radius']
if msg['inner_sky_radius'] is not None:
self.skyradin = msg['inner_sky_radius']
if msg['outer_sky_radius'] is not None:
self.skyradout = msg['outer_sky_radius']
self.recalc_phot()
if msg['show_overplot'] is not None:
if msg['show_overplot']:
self.redraw_overplot_on_image()
else:
self.remove_overplot_on_image()
send_to_stream(sys.stdout, ('set-aperture-phot-parameters-done', True))
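# Illustrative message payload (sketch of the keys this handler reads; values are made up):
#     {'xclick': 101.5, 'yclick': 224.0, 'radius': 8.0,
#      'inner_sky_radius': 15.0, 'outer_sky_radius': 25.0, 'show_overplot': True}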
def update_phot_xy(self, msg):
self.xclick, self.yclick = msg
self.recalc_phot()
def on_activate(self, msg=None):
wx.CallAfter(self.recalc_phot, 'called-from-on-activate')
def recalc_phot(self, msg=None):
if self.xclick is None or self.yclick is None:
self.xclick_textctrl.SetValue('None')
self.yclick_textctrl.SetValue('None')
return
if not isinstance(self.ztv_frame.control_panels[self.ztv_frame.controls_notebook.GetSelection()], PhotPanel):
self._need_to_recalc_phot_on_next_activation = True
return # do not recalculate if phot_panel is not visible
if msg == 'called-from-on-activate':
if not self._need_to_recalc_phot_on_next_activation:
return
self.xclick_textctrl.SetValue("{:8.2f}".format(self.xclick))
self.yclick_textctrl.SetValue("{:8.2f}".format(self.yclick))
self.xcentroid,self.ycentroid = centroid(self.ztv_frame.display_image, self.xclick, self.yclick)
self.xcentroid_textctrl.SetValue("{:8.2f}".format(self.xcentroid))
self.ycentroid_textctrl.SetValue("{:8.2f}".format(self.ycentroid))
self.phot_info = aperture_phot(self.ztv_frame.display_image, self.xcentroid, self.ycentroid,
self.aprad, self.skyradin, self.skyradout, return_distances=True)
self.phot_info['xclick'] = self.xclick
self.phot_info['yclick'] = self.yclick
self.phot_info['xcentroid'] = self.xcentroid
self.phot_info['ycentroid'] = self.ycentroid
self.flux_textctrl.SetValue("{:0.6g}".format(self.phot_info['flux']))
self.sky_textctrl.SetValue("{:0.6g}".format(self.phot_info['sky_per_pixel']))
| |
import datetime
import logging
import uuid
from dataclasses import dataclass, field
from typing import Dict, Any, Optional, Iterator, Union, List
from marshmallow import fields, post_load, Schema
from sqlalchemy import (
Table,
MetaData,
Column,
String,
ForeignKey,
Boolean,
DateTime,
select,
Integer,
func,
UniqueConstraint,
Index,
and_,
exists,
or_,
insert,
)
from sqlalchemy.dialects.postgresql import UUID, JSONB
from sqlalchemy.exc import DatabaseError
from stopcovid import db
from ..dialog.models.events import (
DrillStarted,
ReminderTriggered,
UserValidated,
UserValidationFailed,
CompletedPrompt,
FailedPrompt,
AdvancedToNextPrompt,
DrillCompleted,
OptedOut,
NextDrillRequested,
DialogEvent,
DialogEventBatch,
)
from ..drills.drills import get_all_drill_slugs
metadata = MetaData()
users = Table(
"users",
metadata,
Column("user_id", UUID, primary_key=True),
Column("seq", String, nullable=False),
Column("profile", JSONB, nullable=False),
Column("last_interacted_time", DateTime(timezone=True), index=True),
)
phone_numbers = Table(
"phone_numbers",
metadata,
Column("id", UUID, primary_key=True),
Column("phone_number", String, nullable=False, unique=True),
Column("user_id", UUID, ForeignKey("users.user_id"), nullable=False),
Column("is_primary", Boolean, nullable=False),
)
drill_statuses = Table(
"drill_statuses",
metadata,
Column("id", UUID, primary_key=True),
Column("user_id", UUID, ForeignKey("users.user_id"), nullable=False),
Column("drill_instance_id", UUID, nullable=True, index=True),
Column("drill_slug", String, nullable=False),
Column("place_in_sequence", Integer, nullable=False),
Column("started_time", DateTime(timezone=True)),
Column("completed_time", DateTime(timezone=True)),
UniqueConstraint("user_id", "place_in_sequence"),
UniqueConstraint("user_id", "drill_slug"),
Index("user_id_started", "user_id", "started_time"),
Index("user_id_completed", "user_id", "completed_time"),
)
drill_instances = Table(
"drill_instances",
metadata,
Column("drill_instance_id", UUID, primary_key=True),
Column("user_id", UUID, ForeignKey("users.user_id"), nullable=False),
Column("phone_number", String, nullable=False),
Column("drill_slug", String, nullable=False),
Column("current_prompt_slug", String, nullable=True),
Column("current_prompt_start_time", DateTime(timezone=True), nullable=True),
Column("current_prompt_last_response_time", DateTime(timezone=True), nullable=True),
Column("completion_time", DateTime(timezone=True), nullable=True),
Column("is_valid", Boolean, nullable=False, default=True),
)
class DrillProgressSchema(Schema):
phone_number = fields.String(required=True)
user_id = fields.UUID(required=True)
first_unstarted_drill_slug = fields.String(allow_none=True)
first_incomplete_drill_slug = fields.String(allow_none=True)
@post_load
def make_drill_progress(self, data, **kwargs):
return DrillProgress(**data)
@dataclass
class DrillProgress:
phone_number: str
user_id: uuid.UUID
first_unstarted_drill_slug: Optional[str] = None
first_incomplete_drill_slug: Optional[str] = None
def next_drill_slug_to_trigger(self) -> Optional[str]:
if self.first_unstarted_drill_slug:
return self.first_unstarted_drill_slug
return self.first_incomplete_drill_slug
def to_dict(self):
return DrillProgressSchema().dump(self)
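# Illustrative round trip (sketch):
#     d = DrillProgress(phone_number="+15551234567", user_id=uuid.uuid4()).to_dict()
#     restored = DrillProgressSchema().load(d)  # -> DrillProgress via make_drill_progress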
@dataclass
class User:
seq: str
user_id: UUID = field(default_factory=uuid.uuid4)
profile: Dict[str, Any] = field(default_factory=dict)
last_interacted_time: Optional[datetime.datetime] = None
@dataclass
class PhoneNumber:
phone_number: str
user_id: UUID
is_primary: bool = True
id: UUID = field(default_factory=uuid.uuid4)
@dataclass
class DrillStatus:
id: uuid.UUID
user_id: uuid.UUID
# Why are drill instance IDs nullable? We add a drill status row for each known drill before
# any of them have started. At that time, the drill instance IDs haven't yet been created.
drill_instance_id: Optional[uuid.UUID]
drill_slug: str
place_in_sequence: int
started_time: datetime.datetime
completed_time: datetime.datetime
@dataclass
class DrillInstance:
drill_instance_id: uuid.UUID
user_id: uuid.UUID
phone_number: str
drill_slug: str
current_prompt_slug: Optional[str] = None
current_prompt_start_time: Optional[datetime.datetime] = None
current_prompt_last_response_time: Optional[datetime.datetime] = None
completion_time: Optional[datetime.datetime] = None
is_valid: bool = True
class DrillProgressRepository:
def __init__(self, engine_factory=db.get_sqlalchemy_engine):
self.engine_factory = engine_factory
self.engine = engine_factory()
def get_user(self, user_id: uuid.UUID) -> Optional[User]:
result = self.engine.execute(
select([users]).where(users.c.user_id == func.uuid(str(user_id)))
)
row = result.fetchone()
if row is None:
return None
return User(
user_id=uuid.UUID(row["user_id"]),
profile=row["profile"],
last_interacted_time=row["last_interacted_time"],
seq=row["seq"],
)
def get_drill_status(self, user_id: uuid.UUID, drill_slug: str) -> Optional[DrillStatus]:
result = self.engine.execute(
select([drill_statuses]).where(
and_(
drill_statuses.c.user_id == func.uuid(str(user_id)),
drill_statuses.c.drill_slug == drill_slug,
)
)
)
row = result.fetchone()
if row is None:
return None
drill_instance_id = (
uuid.UUID(row["drill_instance_id"]) if row["drill_instance_id"] else None
)
return DrillStatus(
id=uuid.UUID(row["id"]),
user_id=uuid.UUID(row["user_id"]),
drill_instance_id=drill_instance_id,
drill_slug=row["drill_slug"],
place_in_sequence=row["place_in_sequence"],
started_time=row["started_time"],
completed_time=row["completed_time"],
)
def update_user( # noqa: C901
self, batch: DialogEventBatch, ensure_user_id: Optional[uuid.UUID] = None
) -> uuid.UUID:
logging.info(f"Updating {batch.phone_number} at seq {batch.seq}")
with self.engine.connect() as connection:
with connection.begin():
user = self.get_user_for_phone_number(batch.phone_number, connection)
if user is not None and int(user.seq) >= int(batch.seq):
logging.info(
f"Ignoring batch at {batch.seq} because a more recent user exists "
f"(seq {user.seq})"
)
return user.user_id
# also updates sequence number for the user, which won't be committed unless the
# transaction succeeds
user_id = self._create_or_update_user(batch, ensure_user_id, connection)
for event in batch.events:
self._mark_interacted_time(user_id, event, connection)
if isinstance(event, UserValidated):
self._reset_drill_statuses(user_id, connection)
self._invalidate_prior_drills(user_id, connection)
elif isinstance(event, DrillStarted):
self._mark_drill_started(user_id, event, connection)
self._record_new_drill_instance(user_id, event, connection)
elif isinstance(event, DrillCompleted):
self._mark_drill_completed(event, connection)
self._mark_drill_instance_complete(event, connection)
elif isinstance(event, OptedOut):
if event.drill_instance_id is not None:
self._unmark_drill_started(event, connection)
self._invalidate_drill_instance(event.drill_instance_id, connection)
elif isinstance(event, CompletedPrompt):
self._update_current_prompt_response_time(event, connection)
elif isinstance(event, FailedPrompt):
self._update_current_prompt_response_time(event, connection)
elif isinstance(event, AdvancedToNextPrompt):
self._update_current_prompt(event, connection)
elif (
isinstance(event, ReminderTriggered)
or isinstance(event, UserValidationFailed)
or isinstance(event, NextDrillRequested)
):
logging.info(f"Ignoring event of type {event.event_type}")
else:
raise ValueError(f"Unknown event type {event.event_type}")
return user_id
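# Illustrative call sketch (a real DialogEventBatch comes from the dialog layer;
# event construction details are omitted here):
#     repo = DrillProgressRepository()
#     user_id = repo.update_user(batch)  # no-op for stale batch.seq values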
def get_progress_for_users_who_need_drills(self, inactivity_minutes) -> Iterator[DrillProgress]:
ds1 = drill_statuses.alias()
ds2 = drill_statuses.alias()
time_threshold = datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta(
minutes=inactivity_minutes
)
stmt = (
select([drill_statuses, phone_numbers.c.phone_number])
.select_from(
drill_statuses.join(users, users.c.user_id == drill_statuses.c.user_id).join(
phone_numbers, phone_numbers.c.user_id == drill_statuses.c.user_id
)
)
.where(
and_(
phone_numbers.c.is_primary.is_(True),
# haven't interacted recently
or_(
users.c.last_interacted_time.is_(None),
users.c.last_interacted_time <= time_threshold,
),
# there's at least one started drill
exists().where(
and_(ds2.c.user_id == users.c.user_id, ds2.c.started_time.isnot(None))
),
# and at least one incomplete drill
exists().where(
and_(ds1.c.user_id == users.c.user_id, ds1.c.completed_time.is_(None))
),
)
)
.order_by(drill_statuses.c.user_id, drill_statuses.c.place_in_sequence)
)
cur_drill_progress = None
for row in self.engine.execute(stmt):
user_id = uuid.UUID(row["user_id"])
if cur_drill_progress is None or cur_drill_progress.user_id != user_id:
if cur_drill_progress is not None:
yield cur_drill_progress
cur_drill_progress = DrillProgress(
phone_number=row["phone_number"], user_id=user_id
)
if (
cur_drill_progress.first_incomplete_drill_slug is None
and row["completed_time"] is None
):
cur_drill_progress.first_incomplete_drill_slug = row["drill_slug"]
if (
cur_drill_progress.first_unstarted_drill_slug is None
and row["started_time"] is None
):
cur_drill_progress.first_unstarted_drill_slug = row["drill_slug"]
if cur_drill_progress is not None:
yield cur_drill_progress
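# Illustrative usage sketch (trigger_drill is a hypothetical caller, not part of this module):
#     repo = DrillProgressRepository()
#     for progress in repo.get_progress_for_users_who_need_drills(60 * 24):
#         slug = progress.next_drill_slug_to_trigger()
#         if slug is not None:
#             trigger_drill(progress.phone_number, slug)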
def get_progress_for_user(self, phone_number: str) -> DrillProgress:
user = self.get_user_for_phone_number(phone_number)
user_id = user.user_id
result = self.engine.execute(
select([drill_statuses])
.where(drill_statuses.c.user_id == func.uuid((str(user_id))))
.order_by(drill_statuses.c.place_in_sequence)
)
progress = DrillProgress(phone_number=phone_number, user_id=user_id)
for row in result:
if progress.first_incomplete_drill_slug is None and row["completed_time"] is None:
progress.first_incomplete_drill_slug = row["drill_slug"]
if progress.first_unstarted_drill_slug is None and row["started_time"] is None:
progress.first_unstarted_drill_slug = row["drill_slug"]
return progress
def delete_user_info(self, phone_number: str) -> Optional[uuid.UUID]:
# useful for backfills and rebuilding users. Shouldn't be called regularly.
with self.engine.connect() as connection:
with connection.begin():
user = self.get_user_for_phone_number(phone_number, connection)
if user is None:
logging.info(f"No user exists for {phone_number}")
return None
connection.execute(
phone_numbers.delete().where(
phone_numbers.c.user_id == func.uuid(str(user.user_id))
)
)
connection.execute(
drill_statuses.delete().where(
drill_statuses.c.user_id == func.uuid(str(user.user_id))
)
)
connection.execute(
drill_instances.delete().where(
drill_instances.c.user_id == func.uuid(str(user.user_id))
)
)
connection.execute(
users.delete().where(users.c.user_id == func.uuid(str(user.user_id)))
)
return user.user_id
def get_user_for_phone_number(self, phone_number: str, connection=None) -> Optional[User]:
if connection is None:
connection = self.engine
result = connection.execute(
select([users])
.select_from(users.join(phone_numbers, users.c.user_id == phone_numbers.c.user_id))
.where(phone_numbers.c.phone_number == phone_number)
)
row = result.fetchone()
if row is None:
return None
return User(
user_id=uuid.UUID(row["user_id"]),
profile=row["profile"],
last_interacted_time=row["last_interacted_time"],
seq=row["seq"],
)
def _create_or_update_user(
self, batch: DialogEventBatch, ensure_user_id: Optional[uuid.UUID], connection
) -> uuid.UUID:
event = batch.events[-1]
phone_number = event.phone_number
profile = event.user_profile.to_dict()
result = connection.execute(
select([phone_numbers]).where(phone_numbers.c.phone_number == phone_number)
)
row = result.fetchone()
if row is None:
logging.info(f"No record of {phone_number}. Creating a new entry.")
user_record = User(profile=profile, seq=batch.seq)
if ensure_user_id:
user_record.user_id = ensure_user_id
phone_number_record = PhoneNumber(
phone_number=phone_number, user_id=user_record.user_id
)
connection.execute(
users.insert().values(
user_id=str(user_record.user_id), profile=user_record.profile, seq=batch.seq
)
)
connection.execute(
phone_numbers.insert().values(
id=str(phone_number_record.id),
user_id=str(phone_number_record.user_id),
is_primary=phone_number_record.is_primary,
phone_number=phone_number_record.phone_number,
)
)
for i, slug in enumerate(get_all_drill_slugs()):
connection.execute(
drill_statuses.insert().values(
id=str(uuid.uuid4()),
user_id=str(user_record.user_id),
drill_slug=slug,
place_in_sequence=i,
)
)
logging.info(f"New user ID for {phone_number} is {user_record.user_id}")
return user_record.user_id
phone_number_record = PhoneNumber(**row)
user_record = self.get_user(phone_number_record.user_id)
if int(user_record.seq) >= int(batch.seq):
logging.info(
f"Ignoring batch at {batch.seq} because a more recent user exists "
f"(seq {user_record.seq}"
)
return phone_number_record.user_id
connection.execute(
users.update()
.where(users.c.user_id == func.uuid(str(phone_number_record.user_id)))
.values(profile=profile, seq=batch.seq)
)
return phone_number_record.user_id
@staticmethod
def _reset_drill_statuses(user_id: uuid.UUID, connection):
connection.execute(
drill_statuses.update()
.where(drill_statuses.c.user_id == func.uuid(str(user_id)))
.values(started_time=None, completed_time=None, drill_instance_id=None)
)
@staticmethod
def _mark_drill_started(user_id: uuid.UUID, event: DrillStarted, connection):
connection.execute(
drill_statuses.update()
.where(
and_(
drill_statuses.c.user_id == func.uuid(str(user_id)),
drill_statuses.c.drill_slug == event.drill.slug,
)
)
.values(started_time=event.created_time, drill_instance_id=str(event.drill_instance_id))
)
@staticmethod
def _unmark_drill_started(event: OptedOut, connection):
connection.execute(
drill_statuses.update()
.where(drill_statuses.c.drill_instance_id == func.uuid(str(event.drill_instance_id)))
.values(started_time=None)
)
@staticmethod
def _mark_drill_completed(event: DrillCompleted, connection):
connection.execute(
drill_statuses.update()
.where((drill_statuses.c.drill_instance_id == func.uuid(str(event.drill_instance_id))))
.values(completed_time=event.created_time)
)
@staticmethod
def _mark_interacted_time(user_id, event: DialogEvent, connection):
connection.execute(
users.update()
.where(users.c.user_id == func.uuid(str(user_id)))
.values(last_interacted_time=event.created_time)
)
@staticmethod
def _invalidate_prior_drills(user_id: uuid.UUID, connection):
connection.execute(
drill_instances.update()
.where(
and_(
drill_instances.c.user_id == func.uuid(str(user_id)),
drill_instances.c.is_valid.is_(True),
)
)
.values(is_valid=False)
)
@staticmethod
def _invalidate_drill_instance(drill_instance_id: Optional[uuid.UUID], connection):
if drill_instance_id is None:
return
connection.execute(
drill_instances.update()
.where(drill_instances.c.drill_instance_id == func.uuid(str(drill_instance_id)))
.values(is_valid=False)
)
def _record_new_drill_instance(self, user_id: uuid.UUID, event: DrillStarted, connection):
drill_instance = DrillInstance(
drill_instance_id=event.drill_instance_id,
user_id=user_id,
phone_number=event.phone_number,
drill_slug=event.drill.slug,
current_prompt_slug=event.first_prompt.slug,
current_prompt_start_time=event.created_time,
)
self._save_drill_instance(drill_instance, connection)
@staticmethod
def _mark_drill_instance_complete(event: DrillCompleted, connection):
connection.execute(
drill_instances.update()
.where(drill_instances.c.drill_instance_id == func.uuid(str(event.drill_instance_id)))
.values(
completion_time=event.created_time,
current_prompt_slug=None,
current_prompt_start_time=None,
current_prompt_last_response_time=None,
)
)
@staticmethod
def _update_current_prompt_response_time(
event: Union[FailedPrompt, CompletedPrompt], connection
):
connection.execute(
drill_instances.update()
.where(drill_instances.c.drill_instance_id == func.uuid(str(event.drill_instance_id)))
.values(current_prompt_last_response_time=event.created_time)
)
@staticmethod
def _update_current_prompt(event: AdvancedToNextPrompt, connection):
connection.execute(
drill_instances.update()
.where(drill_instances.c.drill_instance_id == func.uuid(str(event.drill_instance_id)))
.values(
current_prompt_last_response_time=None,
current_prompt_start_time=event.created_time,
current_prompt_slug=event.prompt.slug,
)
)
@staticmethod
def _deserialize(row):
return DrillInstance(
drill_instance_id=uuid.UUID(row["drill_instance_id"]),
user_id=uuid.UUID(row["user_id"]),
phone_number=row["phone_number"],
drill_slug=row["drill_slug"],
current_prompt_slug=row["current_prompt_slug"],
current_prompt_start_time=row["current_prompt_start_time"],
current_prompt_last_response_time=row["current_prompt_last_response_time"],
completion_time=row["completion_time"],
is_valid=row["is_valid"],
)
def get_drill_instance(
self, drill_instance_id: uuid.UUID, connection=None
) -> Optional[DrillInstance]:
if connection is None:
connection = self.engine
result = connection.execute(
select([drill_instances]).where(
drill_instances.c.drill_instance_id == func.uuid(str(drill_instance_id))
)
)
row = result.fetchone()
if row is None:
return None
return self._deserialize(row)
def _save_drill_instance(self, drill_instance: DrillInstance, connection=None):
if connection is None:
connection = self.engine
stmt = insert(drill_instances).values(
drill_instance_id=str(drill_instance.drill_instance_id),
user_id=str(drill_instance.user_id),
phone_number=drill_instance.phone_number,
drill_slug=str(drill_instance.drill_slug),
current_prompt_slug=str(drill_instance.current_prompt_slug),
current_prompt_start_time=drill_instance.current_prompt_start_time,
current_prompt_last_response_time=drill_instance.current_prompt_last_response_time,
completion_time=drill_instance.completion_time,
is_valid=drill_instance.is_valid,
)
connection.execute(stmt)
def get_incomplete_drills(
self, inactive_for_minutes_floor=None, inactive_for_minutes_ceil=None
) -> List[DrillInstance]:
stmt = select([drill_instances]).where(
and_(drill_instances.c.completion_time.is_(None), drill_instances.c.is_valid.is_(True))
) # noqa: E711
if inactive_for_minutes_floor is | |
#Mainly intended as a private method although kept public, and
#facilitated the transformation of the compliance matrix to another
#coordinate system.
#:Args:
#- `th (1x3 Array[float])`: The angles about which the material can be
#rotated when it is initialized. In degrees.
#:Returns:
#- `Sp`: The transformed compliance matrix.
#per AeroComBAT
#"""
## Method to return the compliance matrix
#rh = RotationHelper()
#Sp = rh.transformCompl(self.Smat,th)
#return Sp
class MAT2(AnisotropicMaterial):
"""
Defines the material properties for linear anisotropic materials for
two-dimensional elements.
+------+-------+-----+-----+------+-----+------+-----+-----+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
+======+=======+=====+=====+======+=====+======+=====+=====+
| MAT2 | MID | G11 | G12 | G13 | G22 | G23 | G33 | RHO |
+------+-------+-----+-----+------+-----+------+-----+-----+
| | A1 | A2 | A3 | TREF | GE | ST | SC | SS |
+------+-------+-----+-----+------+-----+------+-----+-----+
| | MCSID | | | | | | | |
+------+-------+-----+-----+------+-----+------+-----+-----+
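A minimal illustrative construction (values below are made up for the sketch):
>>> mat = MAT2(mid=10, G11=2.0e7, G12=6.0e6, G13=0., G22=2.0e7,
...            G23=0., G33=7.0e6, rho=2.83e-4)
>>> fields = mat.raw_fields()  # ['MAT2', 10, 20000000.0, ...]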
"""
type = 'MAT2'
_field_map = {
1: 'mid', 2:'G11', 3:'G12', 4:'G13', 5: 'G22', 6:'G23', 7:'G33',
8:'rho', 9:'a1', 10:'a2', 11:'a3', 12:'tref', 13:'ge',
14: 'St', 15:'Sc', 16:'Ss', 17:'mcsid',
}
mp_name_map = {
'G11' : 'G11',
'G12' : 'G12',
'G13' : 'G13',
'G22' : 'G22',
'G23' : 'G23',
'G33' : 'G33',
'RHO' : 'rho',
# TODO: is this correct...I doubt it...
'A1' : 'a1',
'A2' : 'a2',
'A3' : 'a3',
#'A4' : 'A[3]',
#'A5' : 'A[4]',
#'A6' : 'A[5]',
'TREF' : 'tref', #8 : 'tref',
#'GE' : 'ge', #9 : 'ge',
}
_properties = ['_field_map', 'mp_name_map']
def __init__(self, mid, G11, G12, G13, G22, G23, G33,
rho=0., a1=None, a2=None, a3=None, tref=0., ge=0.,
St=None, Sc=None, Ss=None, mcsid=None, comment=''):
AnisotropicMaterial.__init__(self)
self.matt2_ref = None
if comment:
self.comment = comment
self.mid = mid
self.G11 = G11
self.G12 = G12
self.G13 = G13
self.G22 = G22
self.G23 = G23
self.G33 = G33
self.rho = rho
self.a1 = a1
self.a2 = a2
self.a3 = a3
self.tref = tref
self.ge = ge
self.St = St
self.Sc = Sc
self.Ss = Ss
self.mcsid = mcsid
@classmethod
def export_to_hdf5(cls, h5_file, model, mids):
"""exports the materials in a vectorized way"""
comments = []
G = []
rho = []
a = []
tref = []
ge = []
St = []
Sc = []
Ss = []
mcsid = []
for mid in mids:
material = model.materials[mid]
#comments.append(element.comment)
Gi = [
material.G11, material.G22, material.G33,
material.G12, material.G13, material.G23,
]
G.append(Gi)
rho.append(material.rho)
ai = [ai if ai is not None else np.nan
for ai in [material.a1, material.a2, material.a3]]
a.append(ai)
tref.append(material.tref)
ge.append(material.ge)
St.append(material.St)
Sc.append(material.Sc)
Ss.append(material.Ss)
if material.mcsid is None:
mcsid.append(-1)
else:
mcsid.append(material.mcsid)
#h5_file.create_dataset('_comment', data=comments)
St = [value if value is not None else np.nan for value in St]
Sc = [value if value is not None else np.nan for value in Sc]
Ss = [value if value is not None else np.nan for value in Ss]
h5_file.create_dataset('mid', data=mids)
h5_file.create_dataset('G', data=G)
h5_file.create_dataset('A', data=a)
h5_file.create_dataset('rho', data=rho)
h5_file.create_dataset('tref', data=tref)
h5_file.create_dataset('ge', data=ge)
h5_file.create_dataset('St', data=St)
h5_file.create_dataset('Sc', data=Sc)
h5_file.create_dataset('Ss', data=Ss)
h5_file.create_dataset('mcsid', data=mcsid)
@classmethod
def add_card(cls, card, comment=''):
"""
Adds a MAT2 card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
mid = integer(card, 1, 'mid')
G11 = double_or_blank(card, 2, 'G11', 0.0)
G12 = double_or_blank(card, 3, 'G12', 0.0)
G13 = double_or_blank(card, 4, 'G13', 0.0)
G22 = double_or_blank(card, 5, 'G22', 0.0)
G23 = double_or_blank(card, 6, 'G23', 0.0)
G33 = double_or_blank(card, 7, 'G33', 0.0)
rho = double_or_blank(card, 8, 'rho', 0.0)
a1 = double_or_blank(card, 9, 'a1') # blank?
a2 = double_or_blank(card, 10, 'a2') # blank?
a3 = double_or_blank(card, 11, 'a3') # blank?
tref = double_or_blank(card, 12, 'tref', 0.0)
ge = double_or_blank(card, 13, 'ge', 0.0)
St = double_or_blank(card, 14, 'St') # or blank?
Sc = double_or_blank(card, 15, 'Sc') # or blank?
Ss = double_or_blank(card, 16, 'Ss') # or blank?
mcsid = integer_or_blank(card, 17, 'mcsid')
assert len(card) <= 18, 'len(MAT2 card) = %i\ncard=%s' % (len(card), card)
return MAT2(mid, G11, G12, G13, G22, G23, G33,
rho, a1, a2, a3, tref, ge, St, Sc, Ss, mcsid,
comment=comment)
@classmethod
def add_op2_data(cls, data, comment=''):
"""
Adds a MAT2 card from the OP2
Parameters
----------
data : List[varies]
a list of fields defined in OP2 format
comment : str; default=''
a comment for the card
"""
mid = data[0]
G11 = data[1]
G12 = data[2]
G13 = data[3]
G22 = data[4]
G23 = data[5]
G33 = data[6]
rho = data[7]
a1 = data[8]
a2 = data[9]
a3 = data[10]
tref = data[11]
ge = data[12]
St = data[13]
Sc = data[14]
Ss = data[15]
mcsid = data[16]
return MAT2(mid, G11, G12, G13, G22, G23, G33,
rho, a1, a2, a3, tref, ge, St, Sc, Ss, mcsid,
comment=comment)
def get_density(self):
return self.rho
def Rho(self):
return self.rho
def cross_reference(self, model):
"""
Cross links the card so referenced cards can be extracted directly
Parameters
----------
model : BDF()
the BDF object
"""
msg = ', which is required by MAT2 mid=%s' % self.mid
if self.mid in model.MATT2:
self.matt2_ref = model.MATT2[self.mid] # not using a method...
def uncross_reference(self):
self.matt2_ref = None
def _verify(self, xref):
"""
Verifies all methods for this object work
Parameters
----------
xref : bool
has this model been cross referenced
"""
pass
def Dsolid(self):
"""
Eq 9.4.7 in Finite Element Method using Matlab
"""
D = zeros((6, 6))
E = self.E()
nu12 = self.nu12
nu = nu12
mu = 1. - nu12 * nu12 # *E11/E22 # not necessary b/c they're equal
Emu = E / mu
D[0, 0] = Emu # E/(1-nu^2)
D[1, 1] = Emu
D[2, 2] = Emu
D[0, 1] = nu * Emu # nu*E/(1-nu^2)
# nu*E/(1-nu^2)
D[1, 2] = D[2, 1] = D[0, 2] = D[2, 0] = D[1, 0] = D[0, 1]
# (1.-nu)/2.*E/(1-nu^2)
D[3, 3] = (1. - nu) * 0.5 * Emu
# (1.-nu)/2.*E/(1-nu^2)
        D[5, 5] = D[4, 4] = D[3, 3]
        return D
def Dplate(self):
"""
Eq 9.1.6 in Finite Element Method using Matlab
"""
E = self.E()
nu12 = self.Nu()
nu = nu12
#G12 = self.G()
D = zeros((3, 3))
mu = 1. - nu12 * nu12 # *E11/E22 # not necessary b/c they're equal
Emu = E / mu
D[0, 0] = Emu
D[1, 1] = Emu
D[0, 1] = nu * Emu
D[1, 0] = D[0, 1]
        D[2, 2] = (1. - nu) / 2. * Emu  # (1.-nu)/2.*E/(1-nu^2)
#D[4,4] = #: .. todo:: verify
#D[5,5] = G22
#D[6,6] = G33
return D
def raw_fields(self):
list_fields = ['MAT2', self.mid, self.G11, self.G12, self.G13, self.G22,
self.G23, self.G33, self.rho, self.a1, self.a2, self.a3,
self.tref, self.ge, self.St, self.Sc, self.Ss,
self.mcsid]
return list_fields
def repr_fields(self):
"""
Gets the fields in their simplified form
Returns
-------
fields : [varies, ...]
the fields that define the card
"""
G11 = set_blank_if_default(self.G11, 0.0)
G12 = set_blank_if_default(self.G12, 0.0)
G13 = set_blank_if_default(self.G13, 0.0)
G22 = set_blank_if_default(self.G22, 0.0)
G23 = set_blank_if_default(self.G23, 0.0)
G33 = set_blank_if_default(self.G33, 0.0)
rho = set_blank_if_default(self.rho, 0.0)
tref = set_blank_if_default(self.tref, 0.0)
ge = set_blank_if_default(self.ge, 0.0)
list_fields = ['MAT2', self.mid, G11, G12, G13, G22, G23, G33, rho,
self.a1, self.a2, self.a3, tref, ge,
self.St, self.Sc, self.Ss, self.mcsid]
return list_fields
def write_card(self, size=8, is_double=False):
card = self.repr_fields()
if size == 8:
return self.comment + print_card_8(card)
return self.comment + print_card_16(card)
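# Hedged usage sketch (not part of pyNastran): the helper below shows how a MAT2
# entry defined above might be constructed and written back out as an 8-field
# card. The stiffness/density values are made-up illustration numbers, and the
# base-class plumbing (comment handling, set_blank_if_default, print_card_8)
# imported at the top of this module is assumed to be available.
def _example_mat2_roundtrip():
    """Build an example MAT2 and return its small-field card image."""
    mat = MAT2(mid=100, G11=1.0e7, G12=3.0e6, G13=0.0,
               G22=1.0e7, G23=0.0, G33=4.0e6, rho=0.05)
    # write_card() delegates to repr_fields()/print_card_8 defined above.
    return mat.write_card(size=8)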
class MAT3(OrthotropicMaterial):
"""
Defines the material properties for linear orthotropic materials used by
the CTRIAX6 element entry.
+------+-----+----+-----+----+-------+-------+------+-----+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
+======+=====+====+=====+====+=======+=======+======+=====+
| MAT3 | MID | EX | ETH | EZ | NUXTH | NUTHZ | NUZX | RHO |
+------+-----+----+-----+----+-------+-------+------+-----+
| | | | GZX | AX | ATH | AZ | TREF | GE |
+------+-----+----+-----+----+-------+-------+------+-----+
"""
type = 'MAT3'
_field_map = {
1: 'mid', 2:'ex', 3:'eth', 4:'ez', 5: 'nuxth', 6:'nuthz', 7:'nuzx',
8:'rho', 11:'gzx', 12:'ax', 13:'ath', 14:'az', 15:'tref',
16: 'ge',
}
def __init__(self, mid, ex, eth, ez, nuxth, nuthz, nuzx, rho=0.0, gzx=None,
ax=0., ath=0., az=0., tref=0., ge=0., comment=''):
OrthotropicMaterial.__init__(self)
import os
import numpy as np
from d3m import container
from collections import OrderedDict
from d3m import container, utils
from common_primitives import utils as comUtils
from d3m.metadata import base as metadata_base
from d3m import metrics
from common_primitives.dataset_to_dataframe import DatasetToDataFramePrimitive
from common_primitives import ndarray_to_dataframe
from common_primitives.extract_columns_semantic_types import ExtractColumnsBySemanticTypesPrimitive
from common_primitives.column_parser import ColumnParserPrimitive
from common_primitives.unseen_label_encoder import UnseenLabelEncoderPrimitive
from common_primitives.unseen_label_decoder import UnseenLabelDecoderPrimitive
from common_primitives import construct_predictions
from d3m.primitives.evaluation import compute_scores
from common_primitives import extract_columns_semantic_types, column_parser, utils
#from common_primitives import dataset_remove_columns
#from sklearn.ensemble import RandomForestClassifier
import pandas as pd
from sklearn.metrics import f1_score, accuracy_score
import time
#from rpi_d3m_primitives.JMIplus import JMIplus
from rpi_d3m_primitives.JMIplus_auto import JMIplus_auto
from rpi_d3m_primitives.STMBplus_auto import STMBplus_auto
from rpi_d3m_primitives.S2TMBplus import S2TMBplus
import d3m.primitives.data_cleaning.imputer as Imputer
import d3m.primitives.classification.random_forest as RF
import d3m.primitives.classification.bagging as Bagging
import d3m.primitives.classification.gradient_boosting as GB
import d3m.primitives.classification.extra_trees as ET
from rpi_d3m_primitives.TreeAugmentedNB_BayesianInf import TreeAugmentedNB_BayesianInf as TAN_BAY
import d3m.primitives.data_preprocessing.robust_scaler as Robustscaler
import d3m.primitives.data_preprocessing.min_max_scaler as MMscaler #SKlearn
from common_primitives.extract_columns import ExtractColumnsPrimitive
from common_primitives.simple_profiler import SimpleProfilerPrimitive
from common_primitives.remove_semantic_types import RemoveSemanticTypesPrimitive
# dataset_name = '38_sick' # target_index = 30 metric= f1 posLabel= sick
# dataset_name = '57_hypothyroid' #target = 30 metric = f1 macro SCORE/dataset_TEST
# dataset_name = '27_wordLevels' #target = 13 metric = f1 macro SCORE/dataset_TEST
# dataset_name = '313_spectrometer' # target = 2 metric = f1 macro SCORE/dataset_TEST remove col 1 JMI-counting
# dataset_name = 'LL0_1100_popularkids' #target = 7 metric = f1 macro SCORE/dataset_TEST JMI-counting
# dataset_name = '1491_one_hundred_plants_margin' # target = 65, metric = f1 macro SCORE/dataset_TEST
#dataset_name = 'LL0_186_braziltourism' #target = 9 metric = f1 macro SCORE/dataset_SCORE
# dataset_name = 'LL0_acled_reduced' # target = 6 metric = accuracy SCORE/dataset_TEST JMI-pseudoBayesian -> remove col 7 8 10 13
# dataset_name = '299_libras_move' #target = 91 metric = accuracy SCORE/dataset_TEST
# dataset_name = 'LL1_336_MS_Geolife_transport_mode_prediction' #target = 7 metric = accuracy SCORE/dataset_SCORE remove col 1,4
# dataset_name = '1567_poker_hand' #target = 11 metric = f1_macro SCORE/dataset_TEST
# dataset_name = '185_baseball' #target = 18 metric = f1 macro SCORE/dataset_TEST
# dataset_name = '38_sick_MIN_METADATA' # target_index = 30 nbins, n_estimator = 9, 10, 27, 28 pseudo bagging 0.003125
# dataset_name = '57_hypothyroid_MIN_METADATA' #target = 30 metric = f1 macro SCORE/dataset_TEST
# dataset_name = '27_wordLevels_MIN_METADATA' #target = 13 metric = f1 macro SCORE/dataset_TEST
# dataset_name = '313_spectrometer_MIN_METADATA' # target = 2 metric = f1 macro SCORE/dataset_TEST remove col 1 JMI-counting
# dataset_name = 'LL0_1100_popularkids_MIN_METADATA' #target = 7 metric = f1 macro SCORE/dataset_TEST JMI-counting
# dataset_name = '1491_one_hundred_plants_margin_MIN_METADATA' # target = 65, metric = f1 macro SCORE/dataset_TEST
# dataset_name = 'LL0_186_braziltourism_MIN_METADATA' #target = 9 metric = f1 macro SCORE/dataset_SCORE
# dataset_name = 'LL0_acled_reduced_MIN_METADATA' # target = 6 metric = accuracy SCORE/dataset_TEST JMI-pseudoBayesian -> remove col 7 8 10 13
# dataset_name = '299_libras_move_MIN_METADATA' #target = 91 metric = accuracy SCORE/dataset_SCORE
# dataset_name = 'LL1_336_MS_Geolife_transport_mode_prediction_MIN_METADATA' #target = 7 metric = accuracy SCORE/dataset_SCORE remove col 1,4
# dataset_name = '1567_poker_hand_MIN_METADATA' #target = 11 metric = f1_macro SCORE/dataset_TEST
dataset_name = '185_baseball_MIN_METADATA' #target = 18, metric = f1 macro SCORE/dataset_SCORE
target_index = 18
score_file_name = "dataset_SCORE"
if dataset_name in ['38_sick','DA_fifa2018_manofmatch', 'uu4_SPECT', 'uu5_heartstatlog', 'uu6_hepatitis', 'uu7_pima_diabetes']:
metric = 'F1'
elif dataset_name in ['299_libras_move','LL0_acled_reduced','LL1_336_MS_Geolife_transport_mode_prediction', 'LL1_multilearn_emotions', 'DA_global_terrorism']:
metric = 'ACCURACY'
else:
metric = 'F1_MACRO'
if dataset_name == "38_sick":
poslabel = 'sick'
elif dataset_name in ["DA_fifa2018_manofmatch", 'uu4_SPECT', 'uu5_heartstatlog', 'uu6_hepatitis', 'uu7_pima_diabetes']:
poslabel = '1'
else:
poslabel = None
filename = dataset_name + '-tan.txt'
fs_nbins_lower_bound = 4
fs_nbins_upper_bound = 5
nbins_lower_bound, nbins_upper_bound, N0_lower_bound, N0_upper_bound = 7, 8, 2, 3
feal_list = ['non']
method = {'STMB':['pseudoBayesian'], 'S2TMB':[], 'JMI': ['counting'], 'non':[]}
print('\ndataset to dataframe')
# step 1: dataset to dataframe
# path = os.path.join('/home/naiyu/Desktop/D3M_seed_datasets', dataset_name,'TRAIN/dataset_TRAIN/datasetDoc.json')
path = os.path.join('/Users/naiyuyin/Desktop/datasets/seed_datasets_current', dataset_name,'TRAIN/dataset_TRAIN/datasetDoc.json')
dataset = container.Dataset.load('file://{uri}'.format(uri=path))
#==============================training dataset================================
dataset.metadata = dataset.metadata.add_semantic_type(('learningData', metadata_base.ALL_ELEMENTS, target_index), 'https://metadata.datadrivendiscovery.org/types/Target')
dataset.metadata = dataset.metadata.add_semantic_type(('learningData', metadata_base.ALL_ELEMENTS, target_index), 'https://metadata.datadrivendiscovery.org/types/TrueTarget')
# dataset.metadata = dataset.metadata.remove_semantic_type(('learningData', metadata_base.ALL_ELEMENTS, target_index), 'https://metadata.datadrivendiscovery.org/types/Attribute')
# dataset.metadata = dataset.metadata.remove_semantic_type(('learningData', metadata_base.ALL_ELEMENTS, 15), 'https://metadata.datadrivendiscovery.org/types/Attribute')
# dataset.metadata = dataset.metadata.remove_semantic_type(('learningData', metadata_base.ALL_ELEMENTS, 1), 'https://metadata.datadrivendiscovery.org/types/Attribute')
# dataset.metadata = dataset.metadata.remove_semantic_type(('learningData', metadata_base.ALL_ELEMENTS, 10), 'https://metadata.datadrivendiscovery.org/types/Attribute')
# dataset.metadata = dataset.metadata.remove_semantic_type(('learningData', metadata_base.ALL_ELEMENTS, 13), 'https://metadata.datadrivendiscovery.org/types/Attribute')
print('\nDataset to Dataframe')
hyperparams_class = DatasetToDataFramePrimitive.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
primitive = DatasetToDataFramePrimitive(hyperparams=hyperparams_class.defaults())
call_metadata = primitive.produce(inputs=dataset)
dataframe = call_metadata.value
print('\n metadata generation')
hyperparams_class = SimpleProfilerPrimitive.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
profile_primitive = SimpleProfilerPrimitive(hyperparams=hyperparams_class.defaults().replace({'detect_semantic_types': ['https://metadata.datadrivendiscovery.org/types/CategoricalData',
'http://schema.org/Integer', 'http://schema.org/Float', 'http://schema.org/Text', 'https://metadata.datadrivendiscovery.org/types/Attribute','https://metadata.datadrivendiscovery.org/types/PrimaryKey']}))
profile_primitive.set_training_data(inputs = dataframe)
profile_primitive.fit()
call_metadata = profile_primitive.produce(inputs=dataframe)
dataframe = call_metadata.value
print('\n remove semantic type')
# dataframe.metadata = dataframe.metadata.remove_semantic_type(('learningData', metadata_base.ALL_ELEMENTS, target_index), 'https://metadata.datadrivendiscovery.org/types/Attribute')
hyperparams_class = RemoveSemanticTypesPrimitive.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
primitive = RemoveSemanticTypesPrimitive(hyperparams = hyperparams_class.defaults().replace({'columns': [target_index, 1], 'semantic_types': ['https://metadata.datadrivendiscovery.org/types/Attribute']}))
call_metadata = primitive.produce(inputs=dataframe)
dataframe = call_metadata.value
print('\nColumn Parser')
hyperparams_class = column_parser.ColumnParserPrimitive.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
primitive = column_parser.ColumnParserPrimitive(hyperparams=hyperparams_class.defaults())
dataframe = primitive.produce(inputs=dataframe).value
print('\nExtract Attributes')
# hyperparams_class = ExtractColumnsPrimitive.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
# primitive = ExtractColumnsPrimitive(hyperparams=hyperparams_class.defaults().replace({'columns': att_columns}))
# call_metadata = primitive.produce(inputs=dataframe)
# trainD = call_metadata.value
hyperparams_class = extract_columns_semantic_types.ExtractColumnsBySemanticTypesPrimitive.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
primitive = extract_columns_semantic_types.ExtractColumnsBySemanticTypesPrimitive(hyperparams=hyperparams_class.defaults().replace({'semantic_types': ['https://metadata.datadrivendiscovery.org/types/Attribute']}))
call_metadata = primitive.produce(inputs=dataframe)
trainD = call_metadata.value
# print('\nImpute trainD')
# hyperparams_class = Imputer.SKlearn.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
# Imputer_primitive = Imputer.SKlearn(hyperparams=hyperparams_class.defaults().replace({'strategy':'most_frequent'}))
# Imputer_primitive.set_training_data(inputs=trainD)
# Imputer_primitive.fit()
# trainD = Imputer_primitive.produce(inputs=trainD).value
# print('\nRobust Scaler')
# hyperparams_class = Robustscaler.SKlearn.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
# scale_primitive = Robustscaler.SKlearn(hyperparams=hyperparams_class.defaults())
# scale_primitive.set_training_data(inputs=trainD)
# scale_primitive.fit()
# trainD = scale_primitive.produce(inputs=trainD).value
print('\nExtract Targets')
hyperparams_class = extract_columns_semantic_types.ExtractColumnsBySemanticTypesPrimitive.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
primitive = extract_columns_semantic_types.ExtractColumnsBySemanticTypesPrimitive(hyperparams=hyperparams_class.defaults().replace({'semantic_types':['https://metadata.datadrivendiscovery.org/types/TrueTarget']}))
call_metadata = primitive.produce(inputs=dataframe)
trainL = call_metadata.value
#==============================testing dataset=================================
print ('\nLoad testing dataset')
# path = os.path.join('/home/naiyu/Desktop/D3M_seed_datasets/', dataset_name,'TEST/dataset_TEST/datasetDoc.json')
path = os.path.join('/Users/naiyuyin/Desktop/datasets/seed_datasets_current', dataset_name,'TEST/dataset_TEST/datasetDoc.json')
dataset = container.Dataset.load('file://{uri}'.format(uri=path))
dataset.metadata = dataset.metadata.add_semantic_type(('learningData', metadata_base.ALL_ELEMENTS, target_index), 'https://metadata.datadrivendiscovery.org/types/Target')
dataset.metadata = dataset.metadata.add_semantic_type(('learningData', metadata_base.ALL_ELEMENTS, target_index), 'https://metadata.datadrivendiscovery.org/types/TrueTarget')
# dataset.metadata = dataset.metadata.remove_semantic_type(('learningData', metadata_base.ALL_ELEMENTS, target_index), 'https://metadata.datadrivendiscovery.org/types/Attribute')
# dataset.metadata = dataset.metadata.remove_semantic_type(('learningData', metadata_base.ALL_ELEMENTS, 15), 'https://metadata.datadrivendiscovery.org/types/Attribute')
# dataset.metadata = dataset.metadata.remove_semantic_type(('learningData', metadata_base.ALL_ELEMENTS, 1), 'https://metadata.datadrivendiscovery.org/types/Attribute')
# dataset.metadata = dataset.metadata.remove_semantic_type(('learningData', metadata_base.ALL_ELEMENTS, 10), 'https://metadata.datadrivendiscovery.org/types/Attribute')
# dataset.metadata = dataset.metadata.remove_semantic_type(('learningData', metadata_base.ALL_ELEMENTS, 13), 'https://metadata.datadrivendiscovery.org/types/Attribute')
print('\nDataset to Dataframe')
hyperparams_class = DatasetToDataFramePrimitive.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
primitive = DatasetToDataFramePrimitive(hyperparams=hyperparams_class.defaults())
call_metadata = primitive.produce(inputs=dataset)
dataframe = call_metadata.value
print('\n metadata generation')
call_metadata = profile_primitive.produce(inputs=dataframe)
dataframe = call_metadata.value
print('\n remove semantic type')
# dataframe.metadata = dataframe.metadata.remove_semantic_type(('learningData', metadata_base.ALL_ELEMENTS, target_index), 'https://metadata.datadrivendiscovery.org/types/Attribute')
hyperparams_class = RemoveSemanticTypesPrimitive.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
primitive = RemoveSemanticTypesPrimitive(hyperparams = hyperparams_class.defaults().replace({'columns': [target_index, 1], 'semantic_types': ['https://metadata.datadrivendiscovery.org/types/Attribute']}))
call_metadata = primitive.produce(inputs=dataframe)
dataframe = call_metadata.value
print('\nColumn Parser')
hyperparams_class = column_parser.ColumnParserPrimitive.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
primitive = column_parser.ColumnParserPrimitive(hyperparams=hyperparams_class.defaults())
dataframe = primitive.produce(inputs=dataframe).value
print('\nExtract Attributes')
# hyperparams_class = ExtractColumnsPrimitive.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
# primitive = ExtractColumnsPrimitive(hyperparams=hyperparams_class.defaults().replace({'columns': att_columns}))
# call_metadata = primitive.produce(inputs=dataframe)
# testD = call_metadata.value
hyperparams_class = extract_columns_semantic_types.ExtractColumnsBySemanticTypesPrimitive.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
primitive = extract_columns_semantic_types.ExtractColumnsBySemanticTypesPrimitive(hyperparams=hyperparams_class.defaults().replace({'semantic_types': ['https://metadata.datadrivendiscovery.org/types/Attribute']}))
call_metadata = primitive.produce(inputs=dataframe)
testD = call_metadata.value
# print('\nImpute testD')
# testD = Imputer_primitive.produce(inputs=testD).value
# print('\nScale')
# testD = scale_primitive.produce(inputs=testD).value
print('\nExtract Suggested Target')
hyperparams_class = extract_columns_semantic_types.ExtractColumnsBySemanticTypesPrimitive.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
primitive = extract_columns_semantic_types.ExtractColumnsBySemanticTypesPrimitive(hyperparams=hyperparams_class.defaults().replace({'semantic_types': ['https://metadata.datadrivendiscovery.org/types/TrueTarget']}))
call_metadata = primitive.produce(inputs=dataframe)
testL = call_metadata.value
print('\nGet Target Name')
column_metadata = testL.metadata.query((metadata_base.ALL_ELEMENTS, 0))
TargetName = column_metadata.get('name',[])
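# Hedged sketch (not part of the original pipeline): the nested loops below run a
# small grid search over (fs_nbins, nbins, N0) using the bounds defined near the
# top of this script. The helper only enumerates that grid so the search space is
# easy to inspect; it reuses the module-level bound variables and adds no behaviour.
import itertools
def _parameter_grid():
    """Yield every (fs_nbins, nbins, N0) combination covered by the loops below."""
    return itertools.product(
        range(fs_nbins_lower_bound, fs_nbins_upper_bound),
        range(nbins_lower_bound, nbins_upper_bound),
        range(N0_lower_bound, N0_upper_bound),
    )
# Example: with the bounds set above, list(_parameter_grid()) == [(4, 7, 2)].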
for f in feal_list:
if len(method[f]) == 0:
best_score = 0
best_param = ""
str_line = ""
# with open(os.path.join('/home/naiyu/Desktop/D3M_seed_datasets/',dataset_name, file_name),"w+") as f_output:
with open(os.path.join('/Users/naiyuyin/Desktop/datasets/seed_datasets_current',dataset_name, filename),"w+") as f_output:
f_output.write('Method: ' + f + '\n')
f_output.write("feat_sel_nbins \t feat_sel_idx \t feat_sel_num \t nbins \t classifier \t F1_score\n")
trainD_org, trainL_org, testD_org, testL_org = trainD, trainL, testD, testL
for fs_nbins in range(fs_nbins_lower_bound,fs_nbins_upper_bound,1):
str_fnbins =str_line + str(fs_nbins)+'\t'
trainD_c, trainL_c, testD_c, testL_c = trainD_org, trainL_org, testD_org, testL_org
if f == 'non':
print('Oops! No Feature Selection.')
str_feal = str_fnbins + 'ALL\t'
str_num = str_feal + str(1) + '\t'
elif f == 'S2TMB':
print('S2TMB Feature Selection Initiated')
hyperparams_class = S2TMBplus.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
FSmodel = S2TMBplus(hyperparams=hyperparams_class.defaults().replace({'nbins':fs_nbins, 'strategy':'uniform'}))
                    FSmodel.set_training_data(inputs=trainD_c, outputs=trainL_c)
                    FSmodel.fit()  # fit before reading the selected feature index below
print('\nSelected Feature Index')
print(FSmodel._index)
print('\n')
                    if FSmodel._index is not None and len(FSmodel._index) != 0:
trainD_c = FSmodel.produce(inputs=trainD_c)
trainD_c = trainD_c.value
print('\nSubset of testing data')
testD_c = FSmodel.produce(inputs=testD_c)
testD_c = testD_c.value
str_feal = str_fnbins + str(FSmodel._index) + '\t'
str_num = str_feal + str(len(FSmodel._index)) + '/' + str(np.shape(trainD_c)[1]) + '\t'
else:
str_feal = str_fnbins + 'ALL\t'
str_num = str_feal + str(1) + '\t'
for nbins in range(nbins_lower_bound,nbins_upper_bound,1):
str_nbins = str_num + str(nbins) + '\t'
str_class = str_nbins + 'TAN' + '\t'
print('The nbins is %d\n'%nbins)
for N0 in range(N0_lower_bound,N0_upper_bound,1):
str_n0 = str_class + str(N0) + '\t'
hyperparams_class = TAN_BAY.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
classifier = TAN_BAY(hyperparams=hyperparams_class.defaults().replace({'nbins':nbins,'N0':N0, 'strategy': 'uniform'}))
classifier.set_training_data(inputs=trainD_c, outputs=trainL_c)
classifier.fit()
predictedTargets = classifier.produce(inputs=testD_c)
predictedTargets = predictedTargets.value
print('\nConstruct Predictions')
hyperparams_class = construct_predictions.ConstructPredictionsPrimitive.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
construct_primitive = construct_predictions.ConstructPredictionsPrimitive(hyperparams=hyperparams_class.defaults())
call_metadata = construct_primitive.produce(inputs=predictedTargets, reference=dataframe)
dataframe = call_metadata.value
print('\ncompute scores')
# path = os.path.join('/home/naiyu/Desktop/D3M_seed_datasets/', dataset_name, 'SCORE', score_file, 'datasetDoc.json')
path = os.path.join('/Users/naiyuyin/Desktop/datasets/seed_datasets_current', dataset_name, 'SCORE', score_file_name,'datasetDoc.json')
dataset = container.Dataset.load('file://{uri}'.format(uri=path))
dataset.metadata = dataset.metadata.add_semantic_type(('learningData', metadata_base.ALL_ELEMENTS, target_index), 'https://metadata.datadrivendiscovery.org/types/Target')
dataset.metadata = dataset.metadata.add_semantic_type(('learningData', metadata_base.ALL_ELEMENTS, target_index), 'https://metadata.datadrivendiscovery.org/types/TrueTarget')
hyperparams_class = compute_scores.Core.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
metrics_class = hyperparams_class.configuration['metrics'].elements
primitive = compute_scores.Core(hyperparams=hyperparams_class.defaults().replace({
'metrics': [metrics_class({
'metric': metric,
'pos_label': poslabel,
'k': None,
})],
'add_normalized_scores': False,
}))
scores = primitive.produce(inputs=dataframe, score_dataset=dataset).value
str_line_final = str_n0 + str(scores.iat[0,1])+'\t\n'
f_output.write(str_line_final)
if scores.iat[0,1] > best_score:
best_score = scores.iat[0,1]
best_param = str_line_final
f_output.write("the best\n")
f_output.write(best_param)
f_output.close()
elif len(method[f]) != 0:
for m in method[f]:
# with open(os.path.join('/home/naiyu/Desktop/D3M_seed_datasets/',dataset_name, file_name),"w+") as f_output:
with open(os.path.join('/Users/naiyuyin/Desktop/datasets/seed_datasets_current',dataset_name, filename),"w+") as f_output:
best_score = 0
best_param = ""
str_line = ""
f_output.write("Method: " + f + '-' + m + '\t\n')
f_output.write("feat_sel_nbins \t feat_sel_idx \t feat_sel_num \t nbins \t classifier \t F1_score\n")
trainD_org, trainL_org, testD_org, testL_org = trainD, trainL, testD, testL
for fs_nbins in range(fs_nbins_lower_bound,fs_nbins_upper_bound,1): #fs_nbins is the nbins for feature selection
str_fnbins =str_line + str(fs_nbins)+'\t'
trainD_c, trainL_c, testD_c, testL_c = trainD_org, trainL_org, testD_org, testL_org
if f == 'STMB':
print('The STMB Feature Selection Method Initiated.')
hyperparams_class = STMBplus_auto.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
FSmodel = STMBplus_auto(hyperparams=hyperparams_class.defaults().replace({'nbins':fs_nbins, 'method': m, 'strategy':'uniform'}))
elif f == 'JMI':
print('The JMI Feature Selection Method Initiated.')
hyperparams_class = JMIplus_auto.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
FSmodel = JMIplus_auto(hyperparams=hyperparams_class.defaults().replace({'nbins':fs_nbins, 'method': m}))
FSmodel.set_training_data(inputs=trainD_c, outputs=trainL_c)
FSmodel.fit()
print('\nSelected Feature Index')
print(FSmodel._index)
print(len(FSmodel._index))
print('\n')
# idx = []?
                    if FSmodel._index is not None and len(FSmodel._index) != 0:
trainD_c = FSmodel.produce(inputs=trainD_c)
trainD_c = trainD_c.value
print('\nSubset of testing data')
testD_c = FSmodel.produce(inputs=testD_c)
testD_c = testD_c.value
str_feal = str_fnbins + str(FSmodel._index) + '\t'
str_num = str_feal + str(len(FSmodel._index)) + '/' + str(np.shape(trainD)[1])+ '\t'
else:
str_feal = str_fnbins + 'ALL\t'
str_num = str_feal + str(1) + '\t'
# print('\nImpute trainD')
# hyperparams_class = Imputer.SKlearn.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
# Imputer_primitive = Imputer.SKlearn(hyperparams=hyperparams_class.defaults().replace({'strategy':'most_frequent'}))
# Imputer_primitive.set_training_data(inputs=trainD_c)
# Imputer_primitive.fit()
# trainD_c = Imputer_primitive.produce(inputs=trainD_c).value
# print('\nImpute testD')
# testD_c = Imputer_primitive.produce(inputs=testD_c).value
for nbins in range(nbins_lower_bound,nbins_upper_bound,1): #n_bins is for the TAN classifier
print(nbins)
str_nbins =str_num + str(nbins) + '\t'
str_class =str_nbins + 'TAN' + '\t'
for N0 in range(N0_lower_bound,N0_upper_bound,1):
print(N0)
str_n0 = str_class + str(N0) + '\t'
hyperparams_class = TAN_BAY.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
classifier = TAN_BAY(hyperparams=hyperparams_class.defaults().replace({'nbins':nbins,'N0':N0, 'strategy': 'quantile'}))
classifier.set_training_data(inputs=trainD_c, outputs=trainL_c)
classifier.fit()
predictedTargets = classifier.produce(inputs=testD_c)
predictedTargets = predictedTargets.value
print('\nConstruct Predictions')
hyperparams_class = construct_predictions.ConstructPredictionsPrimitive.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
construct_primitive = construct_predictions.ConstructPredictionsPrimitive(hyperparams=hyperparams_class.defaults())
call_metadata = construct_primitive.produce(inputs=predictedTargets, reference=dataframe)
dataframe = call_metadata.value
print('\ncompute scores')
# path = os.path.join('/home/naiyu/Desktop/D3M_seed_datasets/', dataset_name, 'SCORE',score_file,'datasetDoc.json')
path = os.path.join('/Users/naiyuyin/Desktop/datasets/seed_datasets_current', dataset_name, | |
if any value is such a
value.
:return: The computed sample standard deviation.
"""
return sd(data=self, ignore_nodata=ignore_nodata)
def sgn(self) -> 'ProcessBuilder':
"""
Signum
:param self: A number.
:return: The computed signum value of `x`.
"""
return sgn(x=self)
def sin(self) -> 'ProcessBuilder':
"""
Sine
:param self: An angle in radians.
:return: The computed sine of `x`.
"""
return sin(x=self)
def sinh(self) -> 'ProcessBuilder':
"""
Hyperbolic sine
:param self: An angle in radians.
:return: The computed hyperbolic sine of `x`.
"""
return sinh(x=self)
def sort(self, asc=UNSET, nodata=UNSET) -> 'ProcessBuilder':
"""
Sort data
:param self: An array with data to sort.
:param asc: The default sort order is ascending, with smallest values first. To sort in reverse
(descending) order, set this parameter to `false`.
:param nodata: Controls the handling of no-data values (`null`). By default, they are removed. If set
to `true`, missing values in the data are put last; if set to `false`, they are put first.
:return: The sorted array.
"""
return sort(data=self, asc=asc, nodata=nodata)
def sqrt(self) -> 'ProcessBuilder':
"""
Square root
:param self: A number.
:return: The computed square root.
"""
return sqrt(x=self)
def subtract(self, y) -> 'ProcessBuilder':
"""
Subtraction of two numbers
:param self: The minuend.
:param y: The subtrahend.
:return: The computed result.
"""
return subtract(x=self, y=y)
def sum(self, ignore_nodata=UNSET) -> 'ProcessBuilder':
"""
Compute the sum by adding up numbers
:param self: An array of numbers.
:param ignore_nodata: Indicates whether no-data values are ignored or not. Ignores them by default.
Setting this flag to `false` considers no-data values so that `null` is returned if any value is such a
value.
:return: The computed sum of the sequence of numbers.
"""
return sum(data=self, ignore_nodata=ignore_nodata)
def tan(self) -> 'ProcessBuilder':
"""
Tangent
:param self: An angle in radians.
:return: The computed tangent of `x`.
"""
return tan(x=self)
def tanh(self) -> 'ProcessBuilder':
"""
Hyperbolic tangent
:param self: An angle in radians.
:return: The computed hyperbolic tangent of `x`.
"""
return tanh(x=self)
def text_begins(self, pattern, case_sensitive=UNSET) -> 'ProcessBuilder':
"""
Text begins with another text
:param self: Text in which to find something at the beginning.
:param pattern: Text to find at the beginning of `data`. Regular expressions are not supported.
:param case_sensitive: Case sensitive comparison can be disabled by setting this parameter to `false`.
        :return: `true` if `data` begins with `pattern`, `false` otherwise.
"""
return text_begins(data=self, pattern=pattern, case_sensitive=case_sensitive)
def text_contains(self, pattern, case_sensitive=UNSET) -> 'ProcessBuilder':
"""
Text contains another text
        :param self: Text in which to find something.
:param pattern: Text to find in `data`. Regular expressions are not supported.
:param case_sensitive: Case sensitive comparison can be disabled by setting this parameter to `false`.
        :return: `true` if `data` contains the `pattern`, `false` otherwise.
"""
return text_contains(data=self, pattern=pattern, case_sensitive=case_sensitive)
def text_ends(self, pattern, case_sensitive=UNSET) -> 'ProcessBuilder':
"""
Text ends with another text
:param self: Text in which to find something at the end.
:param pattern: Text to find at the end of `data`. Regular expressions are not supported.
:param case_sensitive: Case sensitive comparison can be disabled by setting this parameter to `false`.
        :return: `true` if `data` ends with `pattern`, `false` otherwise.
"""
return text_ends(data=self, pattern=pattern, case_sensitive=case_sensitive)
def text_merge(self, separator=UNSET) -> 'ProcessBuilder':
"""
Concatenate elements to a single text
:param self: A set of elements. Numbers, boolean values and null values get converted to their (lower
case) string representation. For example: `1` (integer), `-1.5` (number), `true` / `false` (boolean
values)
:param separator: A separator to put between each of the individual texts. Defaults to an empty string.
:return: A string containing a string representation of all the array elements in the same order, with
the separator between each element.
"""
return text_merge(data=self, separator=separator)
def trim_cube(self) -> 'ProcessBuilder':
"""
Remove dimension labels with no-data values
:param self: A raster data cube to trim.
:return: A trimmed raster data cube with the same dimensions. The dimension properties name, type,
reference system and resolution remain unchanged. The number of dimension labels may decrease.
"""
return trim_cube(data=self)
def variance(self, ignore_nodata=UNSET) -> 'ProcessBuilder':
"""
Variance
:param self: An array of numbers.
:param ignore_nodata: Indicates whether no-data values are ignored or not. Ignores them by default.
Setting this flag to `false` considers no-data values so that `null` is returned if any value is such a
value.
:return: The computed sample variance.
"""
return variance(data=self, ignore_nodata=ignore_nodata)
def xor(self, y) -> 'ProcessBuilder':
"""
Logical XOR (exclusive or)
:param self: A boolean value.
:param y: A boolean value.
:return: Boolean result of the logical XOR.
"""
return xor(x=self, y=y)
# Public shortcut
process = ProcessBuilder.process
# Private shortcut that has lower chance to collide with a process argument named `process`
_process = ProcessBuilder.process
def absolute(x) -> ProcessBuilder:
"""
Absolute value
:param x: A number.
:return: The computed absolute value.
"""
return _process('absolute', x=x)
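# Hedged usage sketch (not part of the generated module): the module-level helpers
# here and the ProcessBuilder methods above are interchangeable ways of building
# process-graph nodes; nothing is evaluated eagerly. The exact graph structure
# returned by ProcessBuilder.process is an assumption, not documented here.
def _example_absolute_then_sqrt(x):
    """Build `sqrt(absolute(x))` as a chained process graph."""
    return absolute(x).sqrt()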
def add(x, y) -> ProcessBuilder:
"""
Addition of two numbers
:param x: The first summand.
:param y: The second summand.
:return: The computed sum of the two numbers.
"""
return _process('add', x=x, y=y)
def add_dimension(data, name, label, type=UNSET) -> ProcessBuilder:
"""
Add a new dimension
:param data: A data cube to add the dimension to.
:param name: Name for the dimension.
:param label: A dimension label.
:param type: The type of dimension, defaults to `other`.
:return: The data cube with a newly added dimension. The new dimension has exactly one dimension label. All
other dimensions remain unchanged.
"""
return _process('add_dimension', data=data, name=name, label=label, type=type)
def aggregate_spatial(data, geometries, reducer, target_dimension=UNSET, context=UNSET) -> ProcessBuilder:
"""
Zonal statistics for geometries
:param data: A raster data cube. The data cube must have been reduced to only contain two spatial
dimensions and a third dimension the values are aggregated for, for example the temporal dimension to get a
time series. Otherwise, this process fails with the `TooManyDimensions` exception. The data cube
implicitly gets restricted to the bounds of the geometries as if ``filter_spatial()`` would have been used
with the same values for the corresponding parameters immediately before this process.
:param geometries: Geometries as GeoJSON on which the aggregation will be based. One value will be
computed per GeoJSON `Feature`, `Geometry` or `GeometryCollection`. For a `FeatureCollection` multiple
values will be computed, one value per contained `Feature`. For example, a single value will be computed
for a `MultiPolygon`, but two values will be computed for a `FeatureCollection` containing two polygons. -
For **polygons**, the process considers all pixels for which the point at the pixel center intersects with
the corresponding polygon (as defined in the Simple Features standard by the OGC). - For **points**, the
process considers the closest pixel center. - For **lines** (line strings), the process considers all the
pixels whose centers are closest to at least one point on the line. Thus, pixels may be part of multiple
geometries and be part of multiple aggregations. To maximize interoperability, a nested
`GeometryCollection` should be avoided. Furthermore, a `GeometryCollection` composed of a single type of
geometries should be avoided in favour of the corresponding multi-part type (e.g. `MultiPolygon`).
:param reducer: A reducer to be applied on all values of each geometry. A reducer is a single process such
as ``mean()`` or a set of processes, which computes a single value for a list of values, see the category
'reducer' for such processes.
:param target_dimension: The new dimension name to be used for storing the results. Defaults to `result`.
:param context: Additional data to be passed to the reducer.
:return: A vector data cube with the computed results and restricted to the bounds of the geometries. The
computed value is used for the dimension with the name that was specified in the parameter
`target_dimension`. The computation also stores information about the total count of pixels (valid +
invalid pixels) and the number of valid pixels (see ``is_valid()``) for each geometry. These values are
added as a new dimension with a dimension name derived from `target_dimension` by adding | |
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import jsonschema
import mock
import six
from st2actions.container.base import RunnerContainer
from st2common.constants import action as action_constants
from st2common.exceptions import action as action_exc
from st2common.exceptions import actionrunner as runner_exc
from st2common.models.db.liveaction import LiveActionDB
from st2common.models.api.action import RunnerTypeAPI, ActionAPI
from st2common.models.system.common import ResourceReference
from st2common.persistence.action import Action
from st2common.persistence.liveaction import LiveAction
from st2common.persistence.runner import RunnerType
from st2common.runners import utils as runners_utils
from st2common.services import action as action_service
from st2common.services import executions
from st2common.transport.publishers import PoolPublisher
from st2common.util import isotime
from st2common.util import action_db
from st2tests import DbTestCase
from six.moves import range
RUNNER = {
'name': 'local-shell-script',
'description': 'A runner to execute local command.',
'enabled': True,
'runner_parameters': {
'hosts': {'type': 'string'},
'cmd': {'type': 'string'},
'sudo': {'type': 'boolean', 'default': False}
},
'runner_module': 'remoterunner'
}
RUNNER_ACTION_CHAIN = {
'name': 'action-chain',
'description': 'AC runner.',
'enabled': True,
'runner_parameters': {
},
'runner_module': 'remoterunner'
}
ACTION = {
'name': 'my.action',
'description': 'my test',
'enabled': True,
'entry_point': '/tmp/test/action.sh',
'pack': 'default',
'runner_type': 'local-shell-script',
'parameters': {
'arg_default_value': {
'type': 'string',
'default': 'abc'
},
'arg_default_type': {
}
},
'notify': {
'on-complete': {
'message': 'My awesome action is complete. Party time!!!',
'routes': ['notify.slack']
}
}
}
ACTION_WORKFLOW = {
'name': 'my.wf_action',
'description': 'my test',
'enabled': True,
'entry_point': '/tmp/test/action.sh',
'pack': 'default',
'runner_type': 'action-chain'
}
ACTION_OVR_PARAM = {
'name': 'my.sudo.default.action',
'description': 'my test',
'enabled': True,
'entry_point': '/tmp/test/action.sh',
'pack': 'default',
'runner_type': 'local-shell-script',
'parameters': {
'sudo': {
'default': True
}
}
}
ACTION_OVR_PARAM_MUTABLE = {
'name': 'my.sudo.mutable.action',
'description': 'my test',
'enabled': True,
'entry_point': '/tmp/test/action.sh',
'pack': 'default',
'runner_type': 'local-shell-script',
'parameters': {
'sudo': {
'immutable': False
}
}
}
ACTION_OVR_PARAM_IMMUTABLE = {
'name': 'my.sudo.immutable.action',
'description': 'my test',
'enabled': True,
'entry_point': '/tmp/test/action.sh',
'pack': 'default',
'runner_type': 'local-shell-script',
'parameters': {
'sudo': {
'immutable': True
}
}
}
ACTION_OVR_PARAM_BAD_ATTR = {
'name': 'my.sudo.invalid.action',
'description': 'my test',
'enabled': True,
'entry_point': '/tmp/test/action.sh',
'pack': 'default',
'runner_type': 'local-shell-script',
'parameters': {
'sudo': {
'type': 'number'
}
}
}
ACTION_OVR_PARAM_BAD_ATTR_NOOP = {
'name': 'my.sudo.invalid.noop.action',
'description': 'my test',
'enabled': True,
'entry_point': '/tmp/test/action.sh',
'pack': 'default',
'runner_type': 'local-shell-script',
'parameters': {
'sudo': {
'type': 'boolean'
}
}
}
PACK = 'default'
ACTION_REF = ResourceReference(name='my.action', pack=PACK).ref
ACTION_WORKFLOW_REF = ResourceReference(name='my.wf_action', pack=PACK).ref
ACTION_OVR_PARAM_REF = ResourceReference(name='my.sudo.default.action', pack=PACK).ref
ACTION_OVR_PARAM_MUTABLE_REF = ResourceReference(name='my.sudo.mutable.action', pack=PACK).ref
ACTION_OVR_PARAM_IMMUTABLE_REF = ResourceReference(name='my.sudo.immutable.action', pack=PACK).ref
ACTION_OVR_PARAM_BAD_ATTR_REF = ResourceReference(name='my.sudo.invalid.action', pack=PACK).ref
ACTION_OVR_PARAM_BAD_ATTR_NOOP_REF = ResourceReference(
name='my.sudo.invalid.noop.action', pack=PACK).ref
USERNAME = 'stanley'
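# Hedged illustration (not part of the st2 test suite): the tests below share a
# common lifecycle: request a LiveAction, optionally mark it RUNNING, then ask for
# cancellation, pause, or resume. A minimal request-and-cancel flow, using only
# the service APIs already imported in this module, looks roughly like this:
def _example_request_and_cancel():
    liveaction = LiveActionDB(action=ACTION_REF, context={'user': USERNAME},
                              parameters={'hosts': '127.0.0.1', 'cmd': 'uname -a'})
    liveaction, _ = action_service.request(liveaction)
    liveaction = action_db.get_liveaction_by_id(str(liveaction.id))
    liveaction, _ = action_service.request_cancellation(liveaction, USERNAME)
    return action_db.get_liveaction_by_id(liveaction.id)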
@mock.patch.object(runners_utils, 'invoke_post_run', mock.MagicMock(return_value=None))
@mock.patch.object(PoolPublisher, 'publish', mock.MagicMock())
class TestActionExecutionService(DbTestCase):
@classmethod
def setUpClass(cls):
super(TestActionExecutionService, cls).setUpClass()
cls.runner = RunnerTypeAPI(**RUNNER)
cls.runnerdb = RunnerType.add_or_update(RunnerTypeAPI.to_model(cls.runner))
runner_api = RunnerTypeAPI(**RUNNER_ACTION_CHAIN)
RunnerType.add_or_update(RunnerTypeAPI.to_model(runner_api))
cls.actions = {
ACTION['name']: ActionAPI(**ACTION),
ACTION_WORKFLOW['name']: ActionAPI(**ACTION_WORKFLOW),
ACTION_OVR_PARAM['name']: ActionAPI(**ACTION_OVR_PARAM),
ACTION_OVR_PARAM_MUTABLE['name']: ActionAPI(**ACTION_OVR_PARAM_MUTABLE),
ACTION_OVR_PARAM_IMMUTABLE['name']: ActionAPI(**ACTION_OVR_PARAM_IMMUTABLE),
ACTION_OVR_PARAM_BAD_ATTR['name']: ActionAPI(**ACTION_OVR_PARAM_BAD_ATTR),
ACTION_OVR_PARAM_BAD_ATTR_NOOP['name']: ActionAPI(**ACTION_OVR_PARAM_BAD_ATTR_NOOP)
}
cls.actiondbs = {name: Action.add_or_update(ActionAPI.to_model(action))
for name, action in six.iteritems(cls.actions)}
cls.container = RunnerContainer()
@classmethod
def tearDownClass(cls):
for actiondb in cls.actiondbs.values():
Action.delete(actiondb)
RunnerType.delete(cls.runnerdb)
super(TestActionExecutionService, cls).tearDownClass()
def _submit_request(self, action_ref=ACTION_REF):
context = {'user': USERNAME}
parameters = {'hosts': '127.0.0.1', 'cmd': 'uname -a'}
req = LiveActionDB(action=action_ref, context=context, parameters=parameters)
req, _ = action_service.request(req)
ex = action_db.get_liveaction_by_id(str(req.id))
return req, ex
def _submit_cancellation(self, execution):
execution, _ = action_service.request_cancellation(execution, USERNAME)
execution = action_db.get_liveaction_by_id(execution.id)
return execution
def _submit_pause(self, execution):
execution, _ = action_service.request_pause(execution, USERNAME)
execution = action_db.get_liveaction_by_id(execution.id)
return execution
def _submit_resume(self, execution):
execution, _ = action_service.request_resume(execution, USERNAME)
execution = action_db.get_liveaction_by_id(execution.id)
return execution
def _create_nested_executions(self, depth=2):
"""Utility function for easily creating nested LiveAction and ActionExecutions for testing
returns (childmost_liveaction_db, parentmost_liveaction_db)
"""
if depth <= 0:
raise Exception("Please provide a depth > 0")
root_liveaction_db = LiveActionDB()
root_liveaction_db.status = action_constants.LIVEACTION_STATUS_PAUSED
root_liveaction_db.action = ACTION_WORKFLOW_REF
root_liveaction_db = LiveAction.add_or_update(root_liveaction_db)
root_ex = executions.create_execution_object(root_liveaction_db)
last_id = root_ex['id']
# Create children to the specified depth
for i in range(depth):
# Childmost liveaction should use ACTION_REF, everything else
# should use ACTION_WORKFLOW_REF
            # range(depth) yields 0..depth-1, so the childmost iteration is depth - 1
            if i == depth - 1:
action = ACTION_REF
else:
action = ACTION_WORKFLOW_REF
child_liveaction_db = LiveActionDB()
child_liveaction_db.status = action_constants.LIVEACTION_STATUS_PAUSED
child_liveaction_db.action = action
child_liveaction_db.context = {
"parent": {
"execution_id": last_id
}
}
child_liveaction_db = LiveAction.add_or_update(child_liveaction_db)
parent_ex = executions.create_execution_object(child_liveaction_db)
last_id = parent_ex.id
# Return the last-created child as well as the root
return (child_liveaction_db, root_liveaction_db)
def test_req_non_workflow_action(self):
actiondb = self.actiondbs[ACTION['name']]
req, ex = self._submit_request(action_ref=ACTION_REF)
self.assertIsNotNone(ex)
self.assertEqual(ex.action_is_workflow, False)
self.assertEqual(ex.id, req.id)
self.assertEqual(ex.action, '.'.join([actiondb.pack, actiondb.name]))
self.assertEqual(ex.context['user'], req.context['user'])
self.assertDictEqual(ex.parameters, req.parameters)
self.assertEqual(ex.status, action_constants.LIVEACTION_STATUS_REQUESTED)
self.assertTrue(ex.notify is not None)
# mongoengine DateTimeField stores datetime only up to milliseconds
self.assertEqual(isotime.format(ex.start_timestamp, usec=False),
isotime.format(req.start_timestamp, usec=False))
def test_req_workflow_action(self):
actiondb = self.actiondbs[ACTION_WORKFLOW['name']]
req, ex = self._submit_request(action_ref=ACTION_WORKFLOW_REF)
self.assertIsNotNone(ex)
self.assertEqual(ex.action_is_workflow, True)
self.assertEqual(ex.id, req.id)
self.assertEqual(ex.action, '.'.join([actiondb.pack, actiondb.name]))
self.assertEqual(ex.context['user'], req.context['user'])
self.assertDictEqual(ex.parameters, req.parameters)
self.assertEqual(ex.status, action_constants.LIVEACTION_STATUS_REQUESTED)
def test_req_invalid_parameters(self):
parameters = {'hosts': '127.0.0.1', 'cmd': 'uname -a', 'arg_default_value': 123}
liveaction = LiveActionDB(action=ACTION_REF, parameters=parameters)
self.assertRaises(jsonschema.ValidationError, action_service.request, liveaction)
def test_req_optional_parameter_none_value(self):
parameters = {'hosts': '127.0.0.1', 'cmd': 'uname -a', 'arg_default_value': None}
req = LiveActionDB(action=ACTION_REF, parameters=parameters)
req, _ = action_service.request(req)
def test_req_optional_parameter_none_value_no_default(self):
parameters = {'hosts': '127.0.0.1', 'cmd': 'uname -a', 'arg_default_type': None}
req = LiveActionDB(action=ACTION_REF, parameters=parameters)
req, _ = action_service.request(req)
def test_req_override_runner_parameter(self):
parameters = {'hosts': '127.0.0.1', 'cmd': 'uname -a'}
req = LiveActionDB(action=ACTION_OVR_PARAM_REF, parameters=parameters)
req, _ = action_service.request(req)
parameters = {'hosts': '127.0.0.1', 'cmd': 'uname -a', 'sudo': False}
req = LiveActionDB(action=ACTION_OVR_PARAM_REF, parameters=parameters)
req, _ = action_service.request(req)
def test_req_override_runner_parameter_type_attribute_value_changed(self):
parameters = {'hosts': '127.0.0.1', 'cmd': 'uname -a'}
req = LiveActionDB(action=ACTION_OVR_PARAM_BAD_ATTR_REF, parameters=parameters)
with self.assertRaises(action_exc.InvalidActionParameterException) as ex_ctx:
req, _ = action_service.request(req)
expected = ('The attribute "type" for the runner parameter "sudo" in '
'action "default.my.sudo.invalid.action" cannot be overridden.')
self.assertEqual(str(ex_ctx.exception), expected)
def test_req_override_runner_parameter_type_attribute_no_value_changed(self):
parameters = {'hosts': '127.0.0.1', 'cmd': 'uname -a'}
req = LiveActionDB(action=ACTION_OVR_PARAM_BAD_ATTR_NOOP_REF, parameters=parameters)
req, _ = action_service.request(req)
def test_req_override_runner_parameter_mutable(self):
parameters = {'hosts': '127.0.0.1', 'cmd': 'uname -a'}
req = LiveActionDB(action=ACTION_OVR_PARAM_MUTABLE_REF, parameters=parameters)
req, _ = action_service.request(req)
parameters = {'hosts': '127.0.0.1', 'cmd': 'uname -a', 'sudo': True}
req = LiveActionDB(action=ACTION_OVR_PARAM_MUTABLE_REF, parameters=parameters)
req, _ = action_service.request(req)
def test_req_override_runner_parameter_immutable(self):
parameters = {'hosts': '127.0.0.1', 'cmd': 'uname -a'}
req = LiveActionDB(action=ACTION_OVR_PARAM_IMMUTABLE_REF, parameters=parameters)
req, _ = action_service.request(req)
parameters = {'hosts': '127.0.0.1', 'cmd': 'uname -a', 'sudo': True}
req = LiveActionDB(action=ACTION_OVR_PARAM_IMMUTABLE_REF, parameters=parameters)
self.assertRaises(ValueError, action_service.request, req)
def test_req_nonexistent_action(self):
parameters = {'hosts': '127.0.0.1', 'cmd': 'uname -a'}
action_ref = ResourceReference(name='i.action', pack='default').ref
ex = LiveActionDB(action=action_ref, parameters=parameters)
self.assertRaises(ValueError, action_service.request, ex)
def test_req_disabled_action(self):
actiondb = self.actiondbs[ACTION['name']]
actiondb.enabled = False
Action.add_or_update(actiondb)
try:
parameters = {'hosts': '127.0.0.1', 'cmd': 'uname -a'}
ex = LiveActionDB(action=ACTION_REF, parameters=parameters)
self.assertRaises(ValueError, action_service.request, ex)
except Exception as e:
raise e
finally:
actiondb.enabled = True
Action.add_or_update(actiondb)
def test_req_cancellation(self):
req, ex = self._submit_request()
self.assertIsNotNone(ex)
self.assertEqual(ex.id, req.id)
self.assertEqual(ex.status, action_constants.LIVEACTION_STATUS_REQUESTED)
# Update ex status to RUNNING.
action_service.update_status(ex, action_constants.LIVEACTION_STATUS_RUNNING, False)
ex = action_db.get_liveaction_by_id(ex.id)
self.assertEqual(ex.status, action_constants.LIVEACTION_STATUS_RUNNING)
# Request cancellation.
ex = self._submit_cancellation(ex)
self.assertEqual(ex.status, action_constants.LIVEACTION_STATUS_CANCELING)
def test_req_cancellation_uncancelable_state(self):
req, ex = self._submit_request()
self.assertIsNotNone(ex)
self.assertEqual(ex.id, req.id)
self.assertEqual(ex.status, action_constants.LIVEACTION_STATUS_REQUESTED)
# Update ex status to FAILED.
action_service.update_status(ex, action_constants.LIVEACTION_STATUS_FAILED, False)
ex = action_db.get_liveaction_by_id(ex.id)
self.assertEqual(ex.status, action_constants.LIVEACTION_STATUS_FAILED)
# Request cancellation.
self.assertRaises(Exception, action_service.request_cancellation, ex)
def test_req_cancellation_on_idle_ex(self):
req, ex = self._submit_request()
self.assertIsNotNone(ex)
self.assertEqual(ex.id, req.id)
self.assertEqual(ex.status, action_constants.LIVEACTION_STATUS_REQUESTED)
# Request cancellation.
ex = self._submit_cancellation(ex)
self.assertEqual(ex.status, action_constants.LIVEACTION_STATUS_CANCELED)
def test_req_pause_unsupported(self):
req, ex = self._submit_request()
self.assertIsNotNone(ex)
self.assertEqual(ex.id, req.id)
self.assertEqual(ex.status, action_constants.LIVEACTION_STATUS_REQUESTED)
# Update ex status to RUNNING.
action_service.update_status(ex, action_constants.LIVEACTION_STATUS_RUNNING, False)
ex = action_db.get_liveaction_by_id(ex.id)
self.assertEqual(ex.status, action_constants.LIVEACTION_STATUS_RUNNING)
# Request pause.
self.assertRaises(
runner_exc.InvalidActionRunnerOperationError,
self._submit_pause,
ex
)
def test_req_pause(self):
# Add the runner type to the list of runners that support pause and resume.
action_constants.WORKFLOW_RUNNER_TYPES.append(ACTION['runner_type'])
try:
req, ex = self._submit_request()
self.assertIsNotNone(ex)
self.assertEqual(ex.id, req.id)
self.assertEqual(ex.status, action_constants.LIVEACTION_STATUS_REQUESTED)
# Update ex status to RUNNING.
action_service.update_status(ex, action_constants.LIVEACTION_STATUS_RUNNING, False)
ex = action_db.get_liveaction_by_id(ex.id)
self.assertEqual(ex.status, action_constants.LIVEACTION_STATUS_RUNNING)
# Request pause.
ex = self._submit_pause(ex)
self.assertEqual(ex.status, action_constants.LIVEACTION_STATUS_PAUSING)
finally:
action_constants.WORKFLOW_RUNNER_TYPES.remove(ACTION['runner_type'])
def test_req_pause_not_running(self):
# Add the runner type to the list of runners that support pause and resume.
action_constants.WORKFLOW_RUNNER_TYPES.append(ACTION['runner_type'])
try:
req, ex = self._submit_request()
self.assertIsNotNone(ex)
self.assertEqual(ex.id, req.id)
self.assertEqual(ex.status, action_constants.LIVEACTION_STATUS_REQUESTED)
# Request pause.
self.assertRaises(
runner_exc.UnexpectedActionExecutionStatusError,
self._submit_pause,
ex
)
finally:
action_constants.WORKFLOW_RUNNER_TYPES.remove(ACTION['runner_type'])
def test_req_pause_already_pausing(self):
# Add the runner type to the list of runners that support pause and resume.
action_constants.WORKFLOW_RUNNER_TYPES.append(ACTION['runner_type'])
try:
req, ex = self._submit_request()
self.assertIsNotNone(ex)
self.assertEqual(ex.id, req.id)
self.assertEqual(ex.status, action_constants.LIVEACTION_STATUS_REQUESTED)
# Update ex status to RUNNING.
action_service.update_status(ex, action_constants.LIVEACTION_STATUS_RUNNING, False)
ex = action_db.get_liveaction_by_id(ex.id)
self.assertEqual(ex.status, action_constants.LIVEACTION_STATUS_RUNNING)
# Request pause.
ex = self._submit_pause(ex)
self.assertEqual(ex.status, action_constants.LIVEACTION_STATUS_PAUSING)
# Request pause again.
with mock.patch.object(action_service, 'update_status', return_value=None) as mocked:
ex = self._submit_pause(ex)
self.assertEqual(ex.status, action_constants.LIVEACTION_STATUS_PAUSING)
mocked.assert_not_called()
finally:
action_constants.WORKFLOW_RUNNER_TYPES.remove(ACTION['runner_type'])
def test_req_resume_unsupported(self):
req, ex = self._submit_request()
self.assertIsNotNone(ex)
self.assertEqual(ex.id, req.id)
self.assertEqual(ex.status, action_constants.LIVEACTION_STATUS_REQUESTED)
# Update ex status to RUNNING.
action_service.update_status(ex, action_constants.LIVEACTION_STATUS_RUNNING, False)
ex = action_db.get_liveaction_by_id(ex.id)
self.assertEqual(ex.status, action_constants.LIVEACTION_STATUS_RUNNING)
# | |
from copy import deepcopy
import unittest
from flask import json
from flask.helpers import url_for
from flask_testing.utils import TestCase
from google.appengine.datastore import datastore_stub_util
from google.appengine.ext import testbed, ndb
from mejcrt.util import onlynumbers
from .fixtures import fixture_random
class TestBase(TestCase):
def setUp(self):
super(TestCase, self).setUp()
from .. import controllers
self.ctrl = controllers
self.maxDiff = None
# First, create an instance of the Testbed class.
self.testbed = testbed.Testbed()
# Then activate the testbed, which prepares the service stubs for use.
self.testbed.activate()
policy = datastore_stub_util.PseudoRandomHRConsistencyPolicy(probability=1)
# Next, declare which service stubs you want to use.
self.testbed.init_datastore_v3_stub(consistency_policy=policy)
self.testbed.init_memcache_stub()
# Clear ndb's in-context cache between tests.
# This prevents data from leaking between tests.
# Alternatively, you could disable caching by
# using ndb.get_context().set_cache_policy(False)
ndb.get_context().clear_cache()
@classmethod
def fixtureCreateSomeData(cls):
fixture_random()
def login(self, is_admin=False, email=None, id_=None):
if id_ is None or email is None:
from .. import models
u = models.UserPrefs.query().filter(models.UserPrefs.admin == is_admin).get()
email = u.email
id_ = u.userid
self.testbed.setup_env(
user_email=email,
user_id=id_,
user_is_admin='1' if is_admin else '0',
overwrite=True)
def tearDown(self):
self.testbed.deactivate()
def create_app(self):
from ..app import app
return app
class TestRoot(TestBase):
def testRoot(self):
rv = self.client.get("/")
assert "MEJC RT" in rv.data
class TestPatient(TestBase):
patient_data = {u'blood_type': u'O+',
u'name': u'<NAME>\xe3o',
u'type': u'Rec\xe9m-nascido',
u'code': u'123450', }
def setUp(self):
super(TestPatient, self).setUp()
self.fixtureCreateSomeData()
def testCreate(self):
self.login()
rv = self.client.post(url_for('patient.upinsert'),
data=json.dumps(self.patient_data),
content_type='application/json')
self.assert200(rv)
from ..models import Patient
p = Patient.get_by_code(self.patient_data['code'])
self.assertIsInstance(p, Patient)
def testCreateInvalidCode(self):
self.login()
data = self.patient_data.copy()
data.update(code='12345.00')
data.update(name='John')
# import ipdb;ipdb.set_trace()
rv = self.client.post(url_for('patient.upinsert'),
data=json.dumps(data),
content_type='application/json')
self.assert200(rv)
from ..models import Patient
p = Patient.get_by_code(data['code'])
self.assertIsNone(p)
p = Patient.get_by_code(onlynumbers(data['code']))
self.assertIsInstance(p, Patient)
def testDuplicated(self):
self.login()
rv = self.client.post(url_for('patient.upinsert'),
data=json.dumps(self.patient_data),
content_type='application/json')
self.assert200(rv)
rv = self.client.post(url_for('patient.upinsert'),
data=json.dumps(self.patient_data),
content_type='application/json')
self.assert400(rv)
def testGetKey(self):
self.login()
from ..models import Patient
key = Patient.query().get(keys_only=True).urlsafe()
rv = self.client.get(url_for('patient.get', key=key))
self.assert200(rv)
data = rv.json['data']
self.assertEquals(key, data[0]['key'])
def testGetKeyInvalid(self):
self.login()
rv = self.client.get(url_for('patient.get', key='a'))
self.assert404(rv)
def testGetKeyInvalid2(self):
self.login()
rv = self.client.get(url_for('patient.get',
key='<KEY>'))
self.assert404(rv)
def testGetListQueryCode(self):
from ..models import Patient
self.login()
p = Patient.query().get()
query = {'exact':True, 'q': p.code, 'fields': 'code'}
rv = self.client.get(url_for('patient.get', **query))
self.assert200(rv)
self.assertIsNotNone(rv.json)
data = rv.json['data']
self.assertEquals(len(data), 1)
self.assertEquals(p.key.urlsafe(), data[0]['key'])
def testGetListMax(self):
self.login()
from ..models import Patient
n = Patient.query().count()
query = dict({'max': n / 2})
rv = self.client.get(url_for('patient.get', **query))
self.assert200(rv)
data = rv.json['data']
self.assertEquals(len(data), query['max'])
def testGetListOffset(self):
self.login()
from ..models import Patient
n = Patient.query().count()
# last two
query = dict({'offset': n - 2})
rv = self.client.get(url_for('patient.get', **query))
self.assert200(rv)
data = rv.json['data']
self.assertEquals(len(data), 2)
def testGetListPaginatorNext(self):
self.login()
from ..models import Patient
n = Patient.query().count()
        codes = []
url = url_for('patient.get', **{'offset': 0, 'max': 2})
for _ in range(n):
rv = self.client.get(url)
self.assert200(rv)
data = rv.json['data']
self.assertLessEqual(len(data), 2)
url = rv.json['next']
for o in data:
codes.append(o['code'])
expected_codes = [p.code for p in Patient.query().fetch()]
self.assertEquals(codes, expected_codes)
    def _iteratePaginatorPrev(self, start, max_, count):
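        """Walk backwards through the result pages via the 'prev' links, collecting the patient codes seen along the way."""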
        codes = []
url = url_for('patient.get', **{'offset': start, 'max': max_})
for _ in range(count + 1):
# print url
rv = self.client.get(url)
self.assert200(rv)
data = rv.json['data']
self.assertLessEqual(len(data), max_)
url = rv.json['prev']
for o in data:
codes.append(o['code'])
return codes
def testGetListPaginatorPrevMulti(self):
self.login()
from ..models import Patient
count = Patient.query().count()
expected_codes = sorted([p.code for p in Patient.query().fetch()])
for n in range(1, count):
# print "**** %d " % n
            got_codes = sorted(self._iteratePaginatorPrev(count, n, count))
self.assertEquals(got_codes, expected_codes)
def testDeleteNotAdmin(self):
self.login()
from ..models import Patient
p = Patient.query().get()
rv = self.client.delete(url_for('patient.delete', key=p.key.urlsafe()))
self.assert403(rv)
def testDeleteAdmin(self):
self.login(is_admin=True)
from ..models import Patient
p = Patient.query().get()
rv = self.client.delete(url_for('patient.delete', key=p.key.urlsafe()))
self.assert200(rv)
def testStats(self):
self.login()
from ..models import Patient
c = Patient.query().count()
rv = self.client.get(url_for('patient.stats'))
self.assert200(rv)
data = rv.json['data']
self.assertEquals(c, data['stats']['all'])
def testHistoryCreateGetUpdateGetDeleteGet(self):
self.login(is_admin=True)
data = self.patient_data.copy()
# create
rv = self.client.post(url_for('patient.upinsert'),
data=json.dumps(data),
content_type='application/json')
self.assert200(rv)
key = rv.json['data']['key']
# get
rv = self.client.get(url_for('patient.get', key=key))
got_data = rv.json['data'][0]
self.assert200(rv)
data['key'] = key
self.assertEquals(len(got_data['logs']), 1)
self.assertEquals(got_data['logs'][0]['action'], 'create')
del got_data['logs']
self.assertEquals(data, got_data)
# update
data['name'] = "<NAME>"
rv = self.client.put(url_for('patient.upinsert'),
data=json.dumps(data),
content_type='application/json')
self.assert200(rv)
key = rv.json['data']['key']
self.assertEquals(data['key'], key)
# get
rv = self.client.get(url_for('patient.get', key=key))
got_data = rv.json['data'][0]
self.assert200(rv)
data['key'] = key
self.assertEquals(len(got_data['logs']), 2)
self.assertEquals(got_data['logs'][0]['action'], 'create')
self.assertEquals(got_data['logs'][1]['action'], 'update')
del got_data['logs']
self.assertEquals(data, got_data)
# key = got_data['key']
# delete
rv = self.client.delete(url_for('patient.delete', key=key))
self.assert200(rv)
# get not found
rv = self.client.get(url_for('patient.get', key=key))
self.assert404(rv)
def testGetPatientTypes(self):
self.login()
rv = self.client.get(url_for('patient.types'))
self.assert200(rv)
from .. import models
self.assertListEqual(rv.json['data']['types'], list(models.patient_types))
class TestTransfusion(TestBase):
tr_data = {u'bags': [{u'content': u'CHPLI', u'type': u'O-'}],
u'date': u'2015-05-22',
u'local': u'Sem registro',
u'patient': None,
u'code': u'20900',
u'tags': [u'rt'],
u'text': u'some test'}
def setUp(self):
super(TestTransfusion, self).setUp()
self.fixtureCreateSomeData()
self.data = deepcopy(self.tr_data)
from .. import models
self.data['patient'] = dict(
key=models.Patient.query().get(keys_only=True).urlsafe())
def testStats(self):
self.login()
from ..models import Transfusion
c = Transfusion.query().count()
rv = self.client.get(url_for('transfusion.stats'))
self.assert200(rv)
data = rv.json['data']
self.assertEquals(c, data['stats']['all'])
def testDeleteNotAdmin(self):
self.login()
from ..models import Transfusion
o = Transfusion.query().get()
rv = self.client.delete(url_for('transfusion.delete', key=o.key.urlsafe()))
self.assert403(rv)
def testDeleteAdmin(self):
self.login(is_admin=True)
from ..models import Transfusion
o = Transfusion.query().get()
rv = self.client.delete(url_for('transfusion.delete', key=o.key.urlsafe()))
self.assert200(rv)
def testCreate(self):
self.login()
rv = self.client.post(url_for('transfusion.upinsert'), data=json.dumps(self.data),
content_type='application/json')
self.assert200(rv)
def testCreateInvalidDate(self):
self.login()
data = self.data.copy()
data.update(date="16-01-30T03:43:26.494Z")
rv = self.client.post(url_for('transfusion.upinsert'),
data=json.dumps(data),
content_type='application/json')
self.assert400(rv)
def testDuplicated(self):
self.login()
rv = self.client.post(url_for('transfusion.upinsert'),
data=json.dumps(self.data),
content_type='application/json')
self.assert200(rv)
rv = self.client.post(url_for('transfusion.upinsert'),
data=json.dumps(self.data),
content_type='application/json')
self.assert400(rv)
def testCreateEmpty(self):
self.login()
rv = self.client.post(url_for('transfusion.upinsert'))
self.assert400(rv)
def testCreateNotLogged(self):
rv = self.client.post(url_for('transfusion.upinsert'), data=json.dumps(self.data),
content_type='application/json')
self.assert401(rv)
def testCreateInvalid(self):
self.login()
data = self.data.copy()
data['bags'][0]['type'] = 'invalid'
rv = self.client.post(url_for('transfusion.upinsert'), data=json.dumps(data),
content_type='application/json')
self.assert400(rv)
def testGet(self):
from .. import models
self.login()
key = models.Transfusion.query().get(keys_only=True)
rv = self.client.get(url_for('transfusion.get', key=key.urlsafe()))
got_data = rv.json['data']
self.assertEquals(len(got_data), 1)
self.assertEquals(key.urlsafe(), got_data[0]['key'])
def testGetListQueryFieldCode(self):
from ..models import Transfusion
self.login()
tr = Transfusion.query().get()
query = dict({'q': tr.code, 'fields': 'code'})
rv = self.client.get(url_for('transfusion.get', **query))
self.assert200(rv)
self.assertIsNotNone(rv.json)
data = rv.json['data']
self.assertEquals(len(data), 1)
self.assertEquals(tr.key.urlsafe(), data[0]['key'])
def testGetListQueryTags(self):
self.login()
query = dict({'tags': 'rt', 'max': 100})
rv = self.client.get(url_for('transfusion.get', **query))
self.assert200(rv)
self.assertIsNotNone(rv.json)
data = rv.json['data']
for i, tr in enumerate(data):
self.assertIn('rt', tr['tags'], "'rt' was not found in data[%d]['tags'] = %r" % (i, tr['tags']))
def testGetListQueryInvalidTag(self):
self.login()
query = dict({'tags': 'novalidtag', 'max': 100})
rv = self.client.get(url_for('transfusion.get', **query))
self.assert200(rv)
self.assertIsNotNone(rv.json)
data = rv.json['data']
self.assertEquals(len(data), 0)
def testGetListQueryFieldPatientName(self):
from .. import models
self.login()
p = models.Patient.query().get()
query = dict(q=p.name, fields='patient.name')
rv = self.client.get(url_for('transfusion.get', **query))
self.assert200(rv)
self.assertIsNotNone(rv.json)
data = rv.json['data']
self.assertEquals([k.urlsafe() for k in p.transfusions], [tr['key'] for tr in data])
def testGetListQueryFieldPatientNameDoesNotExist(self):
self.login()
query = dict(fields='patient.name,code,patient.code', max='10', offset=0, q='NonExistentKKKK')
rv = self.client.get(url_for('transfusion.get', **query))
self.assert200(rv)
self.assertIsNotNone(rv.json)
data = rv.json['data']
self.assertEquals(len(data), 0)
def testGetListQueryFieldPatientCode(self):
from .. import models
self.login()
p = models.Patient.query().get()
query = dict(q=p.code, fields='patient.code')
rv = self.client.get(url_for('transfusion.get', **query))
self.assert200(rv)
self.assertIsNotNone(rv.json)
data = rv.json['data']
self.assertEquals([k.urlsafe() for k in p.transfusions], [tr['key'] for tr in data])
def testGetListQueryFieldPatientKey(self):
from .. import models
self.login()
p = models.Patient.query().get()
query = dict(q=p.key.urlsafe(), fields='patient.key')
rv = self.client.get(url_for('transfusion.get', **query))
self.assert200(rv)
self.assertIsNotNone(rv.json)
data = rv.json['data']
self.assertEquals([k.urlsafe() for k in p.transfusions], [tr['key'] for tr in data])
def testGetListQueryFieldPatientKeyInvalid(self):
self.login()
query = dict(q='a', fields='patient.key')
rv = self.client.get(url_for('transfusion.get', **query))
self.assert200(rv)
self.assertIsNotNone(rv.json)
data = rv.json['data']
self.assertEquals(len(data), 0)
def testGetListMax(self):
self.login()
from ..models import Transfusion
n = Transfusion.query().count()
query = dict({'max': n / 2})
rv = self.client.get(url_for('transfusion.get', **query))
self.assert200(rv)
data = rv.json['data']
self.assertEquals(len(data), query['max'])
def testGetListOffset(self):
self.login()
from ..models import Transfusion
n = Transfusion.query().count()
# last two
query = dict({'offset': n - 2})
rv = self.client.get(url_for('transfusion.get', **query))
self.assert200(rv)
data = rv.json['data']
self.assertEquals(len(data), 2)
def testGetListPaginatorNext(self):
self.login()
from ..models import Transfusion
# n = Transfusion.query().count()
        keys = []
url = url_for('transfusion.get', **{'offset': 0, 'max': 2})
for _ in range(5):
            print(url)
rv = self.client.get(url)
self.assert200(rv)
data = rv.json['data']
self.assertLessEqual(len(data), 2)
url = rv.json['next']
for o in data:
keys.append(o['key'])
expected_keys = [k.urlsafe() for k in Transfusion.query().fetch(keys_only=True, limit=10)]
self.assertEquals(keys, expected_keys)
def testGetNotLogged(self):
rv = self.client.get(url_for('transfusion.get', key=123))
self.assert401(rv)
def testHistoryCreateGetUpdateGetDeleteGet(self):
self.login(is_admin=True)
data = self.data
# create
rv = self.client.post(url_for('transfusion.upinsert'), data=json.dumps(data),
content_type='application/json')
self.assert200(rv)
# get
data[u'key'] = rv.json['data']['key']
rv = self.client.get(url_for('transfusion.get', key=data['key']))
self.assert200(rv)
got_data = rv.json['data'][0]
self.assertEquals(len(got_data['logs']), 1)
self.assertEquals(got_data['logs'][0]['action'], 'create')
del got_data['logs']
self.assertEquals(got_data['patient']['key'], data['patient']['key'])
del got_data['patient']
data_without_patient = data.copy()
del data_without_patient['patient']
self.assertDictEqual(got_data, data_without_patient)
# update
data['tags'] = ['semrt']
rv = self.client.put(url_for('transfusion.upinsert'), data=json.dumps(self.data),
content_type='application/json')
self.assert200(rv)
# get
rv = self.client.get(url_for('transfusion.get', key=rv.json['data']['key']))
self.assert200(rv)
got_data = rv.json['data'][0]
self.assertEquals(len(got_data['logs']), 2)
self.assertEquals(got_data['logs'][0]['action'], 'create')
self.assertEquals(got_data['logs'][1]['action'], 'update')
del got_data['logs']
self.assertEquals(got_data['patient']['key'], data['patient']['key'])
del got_data['patient']
data_without_patient = data.copy()
del data_without_patient['patient']
self.assertDictEqual(got_data, data_without_patient)
key = got_data['key']
# delete
rv = self.client.delete(url_for('transfusion.delete', key=key))
self.assert200(rv)
# get not found
rv = self.client.get(url_for('transfusion.get', key=key))
self.assert404(rv)
def testUpdateNotFound(self):
self.login()
        data
(1200, 7, 1000, 1800.0),
(1250, 3, 1100, 2000.0),
(1250, 7, 1200, 2200.0),
], ["time", "id", "volume", "volume_sum"])
assert_same(new_pdf2, expected_pdf2)
def test_summarizeWindows_udf(self):
from ts.flint import udf
from ts.flint import windows
from collections import OrderedDict
from pyspark.sql.types import DoubleType, LongType
vol = self.vol()
w = windows.past_absolute_time('99s')
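        # The window covers the preceding 99 seconds up to and including each row's own time (rows exactly 100s older fall outside it).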
@udf(DoubleType())
def mean(v):
return v.mean()
result7 = vol.summarizeWindows(
w,
{'mean': mean(vol['volume'])},
key='id'
).toPandas()
expected7 = make_pdf([
(1000, 7, 100, 100.0),
(1000, 3, 200, 200.0),
(1050, 3, 300, 250.0),
(1050, 7, 400, 250.0),
(1100, 3, 500, 400.0),
(1100, 7, 600, 500.0),
(1150, 3, 700, 600.0),
(1150, 7, 800, 700.0),
(1200, 3, 900, 800.0),
(1200, 7, 1000, 900.0),
(1250, 3, 1100, 1000.0),
(1250, 7, 1200, 1100.0),
], ['time', 'id', 'volume', 'mean'])
assert_same(result7, expected7)
result8 = vol.summarizeWindows(
w,
{'mean': mean(vol['volume'])}
).toPandas()
expected8 = make_pdf([
(1000, 7, 100, 150.0),
(1000, 3, 200, 150.0),
(1050, 3, 300, 250.0),
(1050, 7, 400, 250.0),
(1100, 3, 500, 450.0),
(1100, 7, 600, 450.0),
(1150, 3, 700, 650.0),
(1150, 7, 800, 650.0),
(1200, 3, 900, 850.0),
(1200, 7, 1000, 850.0),
(1250, 3, 1100, 1050.0),
(1250, 7, 1200, 1050.0),
], ['time', 'id', 'volume', 'mean'])
assert_same(result8, expected8)
def test_summarizeWindows_numpy_udf(self):
from ts.flint import windows
from ts.flint.functions import udf
from pyspark.sql.types import DoubleType, LongType
vol = self.vol()
df = self.flintContext.read.pandas(make_pdf([
(1000, 3, 10.0),
(1000, 7, 20.0),
(1050, 3, 30.0),
(1050, 7, 40.0),
(1100, 3, 50.0),
(1150, 3, 60.0),
(1150, 7, 70.0),
(1200, 3, 80.0),
(1200, 7, 90.0),
(1250, 7, 100.0),
], ['time', 'id', 'v']))
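        # With arg_type='numpy' the UDFs below receive numpy arrays (or numpy scalars) rather than pandas objects, as the asserts check.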
@udf(DoubleType(), arg_type='numpy')
def mean_np(v):
assert isinstance(v, np.ndarray)
return v.mean()
@udf((DoubleType(), LongType()), arg_type='numpy')
def mean_and_sum_np(v):
assert isinstance(v, np.ndarray)
return v.mean(), v.sum()
@udf(DoubleType(), arg_type='numpy')
def mean_np_df(window):
assert isinstance(window, list)
assert isinstance(window[-1], np.ndarray)
return window[-1].mean()
@udf(DoubleType(), arg_type='numpy')
def mean_np_2(v, window):
assert isinstance(v, np.float64)
assert isinstance(window, list)
assert isinstance(window[-1], np.ndarray)
return v + window[-1].mean()
@udf(DoubleType(), arg_type='numpy')
def mean_np_df_2(left, window):
assert isinstance(left, list)
assert isinstance(left[0], np.float64)
assert isinstance(window, list)
assert isinstance(window[-1], np.ndarray)
return window[-1].mean()
w = windows.past_absolute_time('99s')
result1 = vol.summarizeWindows(
w,
{'mean': mean_np(vol['volume'])}
).toPandas()
expected1 = make_pdf([
(1000, 7, 100, 150.0),
(1000, 3, 200, 150.0),
(1050, 3, 300, 250.0),
(1050, 7, 400, 250.0),
(1100, 3, 500, 450.0),
(1100, 7, 600, 450.0),
(1150, 3, 700, 650.0),
(1150, 7, 800, 650.0),
(1200, 3, 900, 850.0),
(1200, 7, 1000, 850.0),
(1250, 3, 1100, 1050.0),
(1250, 7, 1200, 1050.0),
], ['time', 'id', 'volume', 'mean'])
assert_same(result1, expected1)
result2 = vol.summarizeWindows(
w,
{'mean': mean_np(vol['volume'])},
key = 'id'
).toPandas()
expected2 = make_pdf([
(1000, 7, 100, 100.0),
(1000, 3, 200, 200.0),
(1050, 3, 300, 250.0),
(1050, 7, 400, 250.0),
(1100, 3, 500, 400.0),
(1100, 7, 600, 500.0),
(1150, 3, 700, 600.0),
(1150, 7, 800, 700.0),
(1200, 3, 900, 800.0),
(1200, 7, 1000, 900.0),
(1250, 3, 1100, 1000.0),
(1250, 7, 1200, 1100.0),
], ['time', 'id', 'volume', 'mean'])
assert_same(result2, expected2)
result3 = vol.summarizeWindows(
w,
{'mean': mean_np_df(vol[['volume']])},
).toPandas()
expected3 = expected1
assert_same(result3, expected3)
result4 = vol.summarizeWindows(
w,
{'mean': mean_np_df(vol[['time', 'volume']])},
).toPandas()
expected4 = expected1
assert_same(result4, expected4)
result8 = vol.summarizeWindows(
w,
{('mean', 'sum'): mean_and_sum_np(vol['volume'])},
key = 'id'
).toPandas()
expected8 = make_pdf([
(1000, 7, 100, 100.0, 100),
(1000, 3, 200, 200.0, 200),
(1050, 3, 300, 250.0, 500),
(1050, 7, 400, 250.0, 500),
(1100, 3, 500, 400.0, 800),
(1100, 7, 600, 500.0, 1000),
(1150, 3, 700, 600.0, 1200),
(1150, 7, 800, 700.0, 1400),
(1200, 3, 900, 800.0, 1600),
(1200, 7, 1000, 900.0, 1800),
(1250, 3, 1100, 1000.0, 2000),
(1250, 7, 1200, 1100.0, 2200),
], ['time', 'id', 'volume', 'mean', 'sum'])
assert_same(result8, expected8)
def test_addSummaryColumns(self):
from ts.flint import summarizers
vol = self.vol()
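        # addSummaryColumns appends a running (cumulative) aggregate; when a key is given the sum accumulates per key value.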
expected_pdf = make_pdf([
(1000, 7, 100, 100.0),
(1000, 3, 200, 300.0),
(1050, 3, 300, 600.0),
(1050, 7, 400, 1000.0),
(1100, 3, 500, 1500.0),
(1100, 7, 600, 2100.0),
(1150, 3, 700, 2800.0),
(1150, 7, 800, 3600.0),
(1200, 3, 900, 4500.0),
(1200, 7, 1000, 5500.0),
(1250, 3, 1100, 6600.0),
(1250, 7, 1200, 7800.0),
], ["time", "id", "volume", "volume_sum"])
new_pdf = vol.addSummaryColumns(summarizers.sum("volume")).toPandas()
assert_same(new_pdf, expected_pdf)
expected_pdf = make_pdf([
(1000, 7, 100, 100.0),
(1000, 3, 200, 200.0),
(1050, 3, 300, 500.0),
(1050, 7, 400, 500.0),
(1100, 3, 500, 1000.0),
(1100, 7, 600, 1100.0),
(1150, 3, 700, 1700.0),
(1150, 7, 800, 1900.0),
(1200, 3, 900, 2600.0),
(1200, 7, 1000, 2900.0),
(1250, 3, 1100, 3700.0),
(1250, 7, 1200, 4100.0),
], ["time", "id", "volume", "volume_sum"])
new_pdf = vol.addSummaryColumns(summarizers.sum("volume"), "id").toPandas()
assert_same(new_pdf, expected_pdf, "with key")
def test_addColumnsForCycle_udf(self):
from ts.flint import udf
from pyspark.sql.types import DoubleType
from collections import OrderedDict
price2 = self.price2()
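        # addColumnsForCycle applies the UDF to each cycle, i.e. the group of rows sharing the same timestamp.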
result1 = price2.addColumnsForCycle({
'rank': udf(lambda v: v.rank(), DoubleType())(price2['price'])
}).toPandas()
expected1 = make_pdf([
(0, 1, 1.0, 1.0),
(0, 2, 2.0, 2.0),
(1, 1, 3.0, 1.0),
(1, 2, 4.0, 2.0),
(1, 3, 5.0, 3.0),
], ['time', 'id', 'price', 'rank'])
assert_same(result1, expected1)
result2 = price2.addColumnsForCycle(OrderedDict([
('rank', udf(lambda v: v.rank(), DoubleType())(price2['price'])),
('pct_rank', udf(lambda v: v.rank(pct=True), DoubleType())(price2['price']))
])).toPandas()
expected2 = make_pdf([
(0, 1, 1.0, 1.0, 0.5),
(0, 2, 2.0, 2.0, 1.0),
(1, 1, 3.0, 1.0, 0.333333),
(1, 2, 4.0, 2.0, 0.666667),
(1, 3, 5.0, 3.0, 1.0),
], ['time', 'id', 'price', 'rank', 'pct_rank'])
pdt.assert_frame_equal(result2, expected2)
@udf((DoubleType(), DoubleType()))
def rank(v):
return v.rank(), v.rank(pct=True)
result3 = price2.addColumnsForCycle({
('rank', 'pct_rank'): rank(price2['price']),
}).toPandas()
expected3 = expected2
pdt.assert_frame_equal(result3, expected3)
def test_addWindows(self):
from ts.flint import windows
from pyspark.sql import Row
vol = self.vol()
VolRow = Row('time', 'id', 'volume')
id = [VolRow(int(r['time'].strftime('%s')), r['id'], r['volume'])
for r in vol.collect()]
expected_pdf = make_pdf([
(1000, 7, 100, [id[0], id[1]]),
(1000, 3, 200, [id[0], id[1]]),
(1050, 3, 300, [id[0], id[1], id[2], id[3]]),
(1050, 7, 400, [id[0], id[1], id[2], id[3]]),
(1100, 3, 500, [id[2], id[3], id[4], id[5]]),
(1100, 7, 600, [id[2], id[3], id[4], id[5]]),
(1150, 3, 700, [id[4], id[5], id[6], id[7]]),
(1150, 7, 800, [id[4], id[5], id[6], id[7]]),
(1200, 3, 900, [id[6], id[7], id[8], id[9]]),
(1200, 7, 1000, [id[6], id[7], id[8], id[9]]),
(1250, 3, 1100, [id[8], id[9], id[10], id[11]]),
(1250, 7, 1200, [id[8], id[9], id[10], id[11]]),
], ["time", "id", "volume", "window_past_50s"])
new_pdf = vol.addWindows(windows.past_absolute_time("50s")).toPandas()
assert_same(new_pdf, expected_pdf)
def test_shiftTime(self):
price = self.price()
delta = pd.Timedelta('1000s')
expected_pdf = price.toPandas()
expected_pdf.time += delta
new_pdf = price.shiftTime(delta).toPandas()
assert_same(new_pdf, expected_pdf, "forwards")
expected_pdf = price.toPandas()
expected_pdf.time -= delta
new_pdf = price.shiftTime(delta, backwards=True).toPandas()
assert_same(new_pdf, expected_pdf, "backwards")
def test_shiftTime_windows(self):
import datetime
from ts.flint import windows
friday = datetime.datetime(2001, 11, 9, 15, 0).timestamp()
saturday = datetime.datetime(2001, 11, 10, 15, 0).timestamp()
monday = datetime.datetime(2001, 11, 12, 15, 0).timestamp()
tuesday = datetime.datetime(2001, 11, 13, 15, 0).timestamp()
wednesday = datetime.datetime(2001, 11, 14, 15, 0).timestamp()
        thursday = datetime.datetime(2001, 11, 15, 15, 0).timestamp()
dates = self.flintContext.read.pandas(make_pdf([
(friday,),
(monday,),
(tuesday,),
(wednesday,),
], ['time']))
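        # An absolute 1-day shift simply adds 24 hours, so the Friday timestamp lands on Saturday rather than skipping the weekend.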
expected1 = make_pdf([
(saturday,),
(tuesday,),
(wednesday,),
            (thursday,),
], ['time'])
result1 = dates.shiftTime(windows.future_absolute_time('1day')).toPandas()
assert_same(result1, expected1)
def test_uniform_clocks(self):
from ts.flint import clocks
df = clocks.uniform(self.sqlContext, '1d', '0s', '2016-11-07', '2016-11-17')
assert(df.count() == 11)
# the last timestamp should be 17 Nov 2016 00:00:00 GMT
assert(df.collect()[-1]['time'] == make_timestamp(1479340800))
def test_read_uniform_clock(self):
expected_exclusive = pd.date_range('20171116 12:00:05am',
tz='Asia/Tokyo', periods=2880,
freq='30s').tz_convert("UTC").tz_localize(None)
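        # With the 5s offset the first tick is at 00:00:05, and end_inclusive=False drops the tick that falls exactly on the end boundary.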
actual_exclusive = (self.flintContext.read
.range('2017-11-16', '2017-11-17 12:00:05am',
'Asia/Tokyo')
.clock('uniform', '30s', '5s', end_inclusive=False)
.toPandas()['time'])
assert np.all(expected_exclusive == actual_exclusive)
expected_inclusive = pd.date_range('20171116', periods=2881,
freq='30s').tz_localize(None)
actual_inclusive = (self.flintContext.read
.range('2017-11-16', '2017-11-17')
.clock('uniform', '30s')
.toPandas()['time'])
assert np.all(expected_inclusive == actual_inclusive)
def test_groupedData(self):
from pyspark.sql import DataFrame
from pyspark.sql.functions import sum, pandas_udf, PandasUDFType
from ts.flint import TimeSeriesGroupedData
price = self.price()
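        # groupBy/groupby on a TimeSeriesDataFrame should return a TimeSeriesGroupedData whose results match plain Spark groupBy.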
assert(type(price.groupBy('time')) is TimeSeriesGroupedData)
assert(type(price.groupby('time')) is TimeSeriesGroupedData)
result1 = price.groupBy('time').agg(sum(price['price'])).sort('time').toPandas()
expected1 = DataFrame.groupBy(price, 'time').agg(sum(price['price'])).sort('time').toPandas()
assert_same(result1, expected1)
result2 = price.groupBy('time').pivot('id').sum('price').toPandas()
expected2 = DataFrame.groupBy(price, 'time').pivot('id').sum('price').toPandas()
assert_same(result2, expected2)
@pandas_udf(price.schema, PandasUDFType.GROUPED_MAP)
def foo(df):
return df
result3 = price.groupby('time').apply(foo).toPandas()
expected3 = DataFrame.groupBy(price, 'time').apply(foo).toPandas()
assert_same(result3, expected3)
result4 = price.groupby('time').count().toPandas()
expected4 = DataFrame.groupBy(price, 'time').count().toPandas()
assert_same(result4, expected4)
result5 = price.groupby('time').mean('price').toPandas()
expected5 = DataFrame.groupBy(price, 'time').mean('price').toPandas()
assert_same(result5, expected5)
def test_preview(self):
price = self.price()
assert_same(price.limit(10).toPandas(), price.preview())
def test_column_selections(self):
price = self.price()
assert_same(price.select('price').toPandas(), price.toPandas()[['price']])
assert_same(price.select('time').toPandas(), price.toPandas()[['time']])
def test_withColumn_time(self):
from ts.flint import TimeSeriesDataFrame
from pyspark.sql import DataFrame
from tests.test_data import FORECAST_DATA
pdf = make_pdf(FORECAST_DATA, ["time", "id", "forecast"])
df = self.flintContext.read.pandas(pdf)
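        # Overwriting the time column with withColumn is expected to downgrade the result to a plain Spark DataFrame.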
df = df.withColumn("time", df.time)
assert(not isinstance(df, TimeSeriesDataFrame))
assert(isinstance(df, DataFrame))
expected = pdf.assign(time=pdf['time'])
assert_same(df.toPandas(), expected)
def test_describe(self):
from tests.test_data import FORECAST_DATA
df = self.flintContext.read.pandas(make_pdf(FORECAST_DATA, ["time", "id", "forecast"]))
df.describe()
def test_empty_df(self):
from pyspark.sql.types import LongType, TimestampType, StructType, StructField
from ts.flint import summarizers
df = self.sqlContext.createDataFrame(
self.sc.emptyRDD(),
schema=StructType([StructField('time', TimestampType())]))
df2 = self.flintContext.read.dataframe(df)
df3 = df2.summarize(summarizers.count())
assert(df2.count() == 0)
assert(df3.count() == 0)
assert(df2.schema == StructType([StructField('time', | |
# opsdroid/connector/gitlab/tests/test_connector_gitlab.py
import logging
import asyncio
from pathlib import Path
import asynctest.mock as amock
import pytest
import opsdroid.connector.gitlab.events as gitlab_events
from opsdroid.connector.gitlab import ConnectorGitlab
from opsdroid.matchers import match_event
from opsdroid.testing import call_endpoint, running_opsdroid
from opsdroid.const import GITLAB_API_ENDPOINT
from opsdroid.events import Message
@pytest.fixture
async def connector(opsdroid, mock_api_obj):
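    # Configure opsdroid with a gitlab connector and webhook token, load it, and hand back the connector instance.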
opsdroid.config["connectors"] = {"gitlab": {"webhook-token": "<PASSWORD>!"}}
opsdroid.config["web"] = {"base-url": mock_api_obj.base_url}
await opsdroid.load()
return opsdroid.get_connector("gitlab")
def get_response_path(response: str) -> Path:
return Path(__file__).parent / "gitlab_response_payloads" / response
def get_webhook_payload(path: str) -> str:
with open(get_response_path(path), "r") as fh:
return fh.read()
def test_app_init():
"""Test that the connector is initialised properly when using Gitlab"""
connector = ConnectorGitlab({"name": "gitlab", "webhook-token": "<PASSWORD>!"})
assert connector.name == "gitlab"
assert connector.webhook_token == "<PASSWORD>!"
def test_init(opsdroid):
connector = ConnectorGitlab({}, opsdroid=opsdroid)
assert connector.name == "gitlab"
assert connector.opsdroid == opsdroid
assert connector.base_url is None
def test_optional_config(opsdroid):
config = {
"name": "my-gitlab",
"forward-url": "http://my-awesome-url",
"webhook-token": "<PASSWORD>",
}
connector = ConnectorGitlab(config, opsdroid)
assert connector.name == "my-gitlab"
assert connector.base_url == "http://my-awesome-url"
assert connector.webhook_token == "<PASSWORD>"
def test_base_url(opsdroid):
opsdroid.config["web"] = {"base-url": "http://example.com"}
connector = ConnectorGitlab({}, opsdroid)
assert connector.base_url == "http://example.com"
@pytest.mark.asyncio
async def test_gitlab_webhook_handler_exception(caplog):
caplog.set_level(logging.DEBUG)
connector = ConnectorGitlab({"name": "gitlab"})
mocked_request = amock.CoroutineMock()
mocked_request.json.side_effect = Exception()
resp = await connector.gitlab_webhook_handler(mocked_request)
assert resp.status == 400
assert "Unable to get JSON from request" in caplog.text
@pytest.mark.asyncio
async def test_validate_request(opsdroid):
config = {"webhook-token": "<PASSWORD>"}
connector = ConnectorGitlab(config, opsdroid)
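    # A request is valid only when its X-Gitlab-Token header matches the configured webhook token.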
request = amock.CoroutineMock()
request.headers = {"X-Gitlab-Token": "<PASSWORD>"}
is_valid = await connector.validate_request(request)
assert is_valid
fake_request = amock.CoroutineMock()
    fake_request.headers = {}
is_valid = await connector.validate_request(fake_request)
assert not is_valid
@pytest.mark.asyncio
async def test_listen(connector):
assert await connector.listen() is None
@pytest.mark.asyncio
async def test_issue_created(opsdroid, connector, mock_api, caplog):
caplog.set_level(logging.INFO)
@match_event(gitlab_events.GitlabIssueCreated)
async def test_skill(opsdroid, config, event):
url = "https://gitlab.com/FabioRosado/test-project/-/issues/1"
target = f"{GITLAB_API_ENDPOINT}/projects/30456730/issues/1"
assert event.connector.name == "gitlab"
assert event.url == url
assert event.target == target
assert event.project == "test-project"
assert event.user == "FabioRosado"
assert event.title == "New test issue"
assert event.description == "Test description"
assert event.labels == ["test-label"]
logging.getLogger(__name__).info("Test skill complete")
opsdroid.register_skill(test_skill, config={"name": "test"})
async with running_opsdroid(opsdroid):
resp = await call_endpoint(
opsdroid,
"/connector/gitlab",
"POST",
headers={
"X-Gitlab-Token": "secret-stuff!",
"Content-Type": "application/json",
},
data=get_webhook_payload("issue_created.json"),
)
assert resp.status == 200
assert "Test skill complete" in caplog.text
assert "Exception when running skill" not in caplog.text
@pytest.mark.asyncio
async def test_issue_label_updated(opsdroid, connector, mock_api, caplog):
caplog.set_level(logging.INFO)
@match_event(gitlab_events.GitlabIssueLabeled)
async def test_skill(opsdroid, config, event):
url = "https://gitlab.com/FabioRosado/test-project/-/issues/1"
assert event.connector.name == "gitlab"
assert event.url == url
assert event.project == "test-project"
assert event.user == "FabioRosado"
assert event.title == "New test issue"
assert event.description == "This should have been filled"
assert event.labels == ["test-label"]
logging.getLogger(__name__).info("Test skill complete")
opsdroid.register_skill(test_skill, config={"name": "test"})
async with running_opsdroid(opsdroid):
resp = await call_endpoint(
opsdroid,
"/connector/gitlab",
"POST",
headers={
"X-Gitlab-Token": "secret-stuff!",
"Content-Type": "application/json",
},
data=get_webhook_payload("issue_label_update.json"),
)
assert resp.status == 200
assert "Test skill complete" in caplog.text
assert "Exception when running skill" not in caplog.text
@pytest.mark.asyncio
async def test_issue_labeled(opsdroid, connector, mock_api, caplog):
caplog.set_level(logging.INFO)
@match_event(gitlab_events.GitlabIssueLabeled)
async def test_skill(opsdroid, config, event):
url = "https://gitlab.com/FabioRosado/test-project/-/issues/2"
assert event.connector.name == "gitlab"
assert event.url == url
assert event.project == "test-project"
assert event.user == "FabioRosado"
assert event.title == "test"
assert event.description == ""
assert event.labels == ["blah"]
logging.getLogger(__name__).info("Test skill complete")
opsdroid.register_skill(test_skill, config={"name": "test"})
async with running_opsdroid(opsdroid):
resp = await call_endpoint(
opsdroid,
"/connector/gitlab",
"POST",
headers={
"X-Gitlab-Token": "<PASSWORD>!",
"Content-Type": "application/json",
},
data=get_webhook_payload("issue_labeled.json"),
)
assert resp.status == 200
assert "Test skill complete" in caplog.text
assert "Exception when running skill" not in caplog.text
@pytest.mark.asyncio
async def test_issue_edited(opsdroid, connector, mock_api, caplog):
caplog.set_level(logging.INFO)
@match_event(gitlab_events.GitlabIssueEdited)
async def test_skill(opsdroid, config, event):
url = "https://gitlab.com/FabioRosado/test-project/-/issues/1"
assert event.connector.name == "gitlab"
assert event.url == url
assert event.project == "test-project"
assert event.user == "FabioRosado"
assert event.title == "New test issue"
assert event.description == "This should have been filled"
assert event.labels == ["test-label"]
logging.getLogger(__name__).info("Test skill complete")
opsdroid.register_skill(test_skill, config={"name": "test"})
async with running_opsdroid(opsdroid):
resp = await call_endpoint(
opsdroid,
"/connector/gitlab",
"POST",
headers={
"X-Gitlab-Token": "secret-stuff!",
"Content-Type": "application/json",
},
data=get_webhook_payload("issue_message_edited.json"),
)
assert resp.status == 200
assert "Test skill complete" in caplog.text
assert "Exception when running skill" not in caplog.text
@pytest.mark.asyncio
async def test_generic_issue(opsdroid, connector, mock_api, caplog):
caplog.set_level(logging.INFO)
@match_event(gitlab_events.GenericIssueEvent)
async def test_skill(opsdroid, config, event):
url = "https://gitlab.com/FabioRosado/test-project/-/issues/1"
assert event.connector.name == "gitlab"
assert event.url == url
assert event.project == "test-project"
assert event.user == "FabioRosado"
assert event.title == "New test issue"
assert event.description == "This should have been filled"
assert event.labels == ["test-label"]
logging.getLogger(__name__).info("Test skill complete")
opsdroid.register_skill(test_skill, config={"name": "test"})
async with running_opsdroid(opsdroid):
resp = await call_endpoint(
opsdroid,
"/connector/gitlab",
"POST",
headers={
"X-Gitlab-Token": "secret-stuff!",
"Content-Type": "application/json",
},
data=get_webhook_payload("generic_issue.json"),
)
assert resp.status == 200
assert "Test skill complete" in caplog.text
assert "Exception when running skill" not in caplog.text
@pytest.mark.asyncio
async def test_no_token_returns_401(opsdroid, connector, mock_api, caplog):
caplog.set_level(logging.INFO)
@match_event(gitlab_events.GitlabIssueCreated)
async def test_skill(opsdroid, config, event):
url = "https://gitlab.com/FabioRosado/test-project/-/issues/1"
assert event.connector.name == "gitlab"
assert event.url == url
assert event.project == "test-project"
assert event.user == "FabioRosado"
assert event.title == "New test issue"
assert event.description == "Test description"
assert event.labels == ["test-label"]
logging.getLogger(__name__).info("Test skill complete")
opsdroid.register_skill(test_skill, config={"name": "test"})
async with running_opsdroid(opsdroid):
resp = await call_endpoint(
opsdroid,
"/connector/gitlab",
"POST",
headers={
"Content-Type": "application/json",
},
data=get_webhook_payload("issue_created.json"),
)
assert resp.status == 401
assert "Test skill complete" not in caplog.text
assert "Exception when running skill" not in caplog.text
@pytest.mark.asyncio
async def test_issue_closed(opsdroid, connector, mock_api, caplog):
caplog.set_level(logging.INFO)
@match_event(gitlab_events.GitlabIssueClosed)
async def test_skill(opsdroid, config, event):
url = "https://gitlab.com/FabioRosado/test-project/-/issues/1"
assert event.connector.name == "gitlab"
assert event.url == url
assert event.project == "test-project"
assert event.user == "FabioRosado"
assert event.title == "New test issue"
assert event.description == "This should have been filled"
assert event.labels == ["test-label"]
logging.getLogger(__name__).info("Test skill complete")
opsdroid.register_skill(test_skill, config={"name": "test"})
async with running_opsdroid(opsdroid):
resp = await call_endpoint(
opsdroid,
"/connector/gitlab",
"POST",
headers={
"X-Gitlab-Token": "secret-stuff!",
"Content-Type": "application/json",
},
data=get_webhook_payload("issue_closed.json"),
)
assert resp.status == 200
assert "Test skill complete" in caplog.text
assert "Exception when running skill" not in caplog.text
@pytest.mark.asyncio
async def test_generic_issue_event(opsdroid, connector, mock_api, caplog):
caplog.set_level(logging.INFO)
@match_event(gitlab_events.GenericGitlabEvent)
async def test_skill(opsdroid, config, event):
url = "http://example.com/mike/diaspora"
assert event.connector.name == "gitlab"
assert event.url == url
assert event.project == "Diaspora"
assert event.user == "jsmith"
assert event.title is None
assert event.description is None
assert event.labels == []
logging.getLogger(__name__).info("Test skill complete")
opsdroid.register_skill(test_skill, config={"name": "test"})
async with running_opsdroid(opsdroid):
resp = await call_endpoint(
opsdroid,
"/connector/gitlab",
"POST",
headers={
"X-Gitlab-Token": "<PASSWORD>!",
"Content-Type": "application/json",
},
data=get_webhook_payload("push.json"),
)
assert resp.status == 200
assert "Test skill complete" in caplog.text
assert "Exception when running skill" not in caplog.text
@pytest.mark.asyncio
async def test_bad_json_file(opsdroid, connector, mock_api, caplog):
caplog.set_level(logging.DEBUG)
@match_event(gitlab_events.GenericGitlabEvent)
async def test_skill(opsdroid, config, event):
url = "http://example.com/mike/diaspora"
assert event.connector.name == "gitlab"
assert event.url == url
assert event.project == "Diaspora"
assert event.user == "jsmith"
assert event.title is None
assert event.description is None
assert event.labels == []
logging.getLogger(__name__).info("Test skill complete")
opsdroid.register_skill(test_skill, config={"name": "test"})
async with running_opsdroid(opsdroid):
resp = await call_endpoint(
opsdroid,
"/connector/gitlab",
"POST",
headers={
"X-Gitlab-Token": "secret-stuff!",
"Content-Type": "application/json",
},
data=get_webhook_payload("bad_json.json"),
)
assert resp.status == 400
assert "Unable to decode json" in caplog.text
assert "Test skill complete" not in caplog.text
assert "Exception when running skill" not in caplog.text
@pytest.mark.asyncio
async def test_mr_label_update_event(opsdroid, connector, mock_api, caplog):
caplog.set_level(logging.INFO)
@match_event(gitlab_events.MRLabeled)
async def test_skill(opsdroid, config, event):
url = "https://gitlab.com/FabioRosado/test-project/-/merge_requests/1"
assert event.connector.name == "gitlab"
assert event.url == url
assert event.project == "test-project"
assert event.user == "FabioRosado"
assert event.title == "Test MR"
assert event.description == ""
assert event.labels == ["blah"]
logging.getLogger(__name__).info("Test skill complete")
opsdroid.register_skill(test_skill, config={"name": "test"})
async with running_opsdroid(opsdroid):
resp = await call_endpoint(
opsdroid,
"/connector/gitlab",
"POST",
headers={
"X-Gitlab-Token": "secret-stuff!",
"Content-Type": "application/json",
},
data=get_webhook_payload("mr_label_update.json"),
)
assert resp.status == 200
assert "Test skill complete" in caplog.text
assert "Exception when running skill" not in caplog.text
@pytest.mark.asyncio
async def test_mr_opened_event(opsdroid, connector, mock_api, caplog):
caplog.set_level(logging.INFO)
@match_event(gitlab_events.MRCreated)
async def test_skill(opsdroid, config, event):
url = "https://gitlab.com/FabioRosado/test-project/-/merge_requests/1"
assert event.connector.name == "gitlab"
assert event.url == url
assert event.project == "test-project"
assert event.user == "FabioRosado"
assert event.title == "Test MR"
assert event.description == ""
assert event.labels == []
logging.getLogger(__name__).info("Test skill complete")
opsdroid.register_skill(test_skill, config={"name": "test"})
async with running_opsdroid(opsdroid):
resp = await call_endpoint(
opsdroid,
"/connector/gitlab",
"POST",
headers={
"X-Gitlab-Token": "<PASSWORD>!",
"Content-Type": "application/json",
},
data=get_webhook_payload("mr_opened.json"),
)
assert resp.status == 200
assert "Test skill complete" in caplog.text
assert "Exception when running skill" not in caplog.text
@pytest.mark.asyncio
async def test_mr_merged_event(opsdroid, connector, mock_api, caplog):
caplog.set_level(logging.INFO)
@match_event(gitlab_events.MRMerged)
async def test_skill(opsdroid, config, event):
url = "https://gitlab.com/FabioRosado/test-project/-/merge_requests/1"
assert event.connector.name == "gitlab"
assert event.url == url
assert event.project == "test-project"
assert event.user == "FabioRosado"
assert event.title == "Test MR"
assert event.description == ""
assert event.labels == ["blah"]
logging.getLogger(__name__).info("Test skill complete")
opsdroid.register_skill(test_skill, config={"name": "test"})
async with running_opsdroid(opsdroid):
resp = await call_endpoint(
opsdroid,
"/connector/gitlab",
"POST",
headers={
"X-Gitlab-Token": "<PASSWORD>!",
"Content-Type": "application/json",
},
data=get_webhook_payload("mr_merged.json"),
)
assert resp.status == 200
assert "Test skill complete" in caplog.text
from ..dojo_test_case import DojoTestCase, get_unit_tests_path
from dojo.models import Test, Engagement, Product
from dojo.tools.sonarqube.parser import SonarQubeParser
class TestSonarQubeParser(DojoTestCase):
# comment out to get full diff with big reports
# maxDiff = None
def init(self, reportFilename):
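        """Open the report file and wire up a minimal Product -> Engagement -> Test chain for the parser."""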
my_file_handle = open(reportFilename)
product = Product()
engagement = Engagement()
test = Test()
engagement.product = product
test.engagement = engagement
return my_file_handle, product, engagement, test
# SonarQube Scan - no finding
def test_file_name_aggregated_parse_file_with_no_vulnerabilities_has_no_findings(
self,
):
my_file_handle, product, engagement, test = self.init(
get_unit_tests_path() + "/scans/sonarqube/sonar-no-finding.html"
)
parser = SonarQubeParser()
findings = parser.get_findings(my_file_handle, test)
self.assertEqual(0, len(findings))
# SonarQube Scan detailed - no finding
def test_detailed_parse_file_with_no_vulnerabilities_has_no_findings(self):
my_file_handle, product, engagement, test = self.init(
get_unit_tests_path() + "/scans/sonarqube/sonar-no-finding.html"
)
parser = SonarQubeParser()
parser.set_mode('detailed')
findings = parser.get_findings(my_file_handle, test)
self.assertEqual(0, len(findings))
# SonarQube Scan - report with one vuln
def test_file_name_aggregated_parse_file_with_single_vulnerability_has_single_finding(
self,
):
my_file_handle, product, engagement, test = self.init(
get_unit_tests_path() + "/scans/sonarqube/sonar-single-finding.html"
)
parser = SonarQubeParser()
findings = parser.get_findings(my_file_handle, test)
# common verifications
self.assertEqual(1, len(findings))
# specific verifications
item = findings[0]
self.assertEqual(str, type(item.description))
self.assertMultiLineEqual(
"Because it is easy to extract strings from a compiled application, credentials should never be hard-coded. Do so, and they're almost guaranteed to\n"
"end up in the hands of an attacker. This is particularly true for applications that are distributed.\n"
"Credentials should be stored outside of the code in a strongly-protected encrypted configuration file or database.\n"
'It\'s recommended to customize the configuration of this rule with additional credential words such as "oauthToken", "secret", ...\n'
"**Noncompliant Code Example**\n"
"\n"
"Connection conn = null;\n"
"try {\n"
' conn = DriverManager.getConnection("jdbc:mysql://localhost/test?" +\n'
' "user=steve&password=<PASSWORD>"); // Noncompliant\n'
' String uname = "steve";\n'
' String password = "<PASSWORD>";\n'
' conn = DriverManager.getConnection("jdbc:mysql://localhost/test?" +\n'
' "user=" + uname + "&password=" + password); // Noncompliant\n'
"\n"
' java.net.PasswordAuthentication pa = new java.net.PasswordAuthentication("userName", "<PASSWORD>".toCharArray()); // Noncompliant\n'
"\n"
"**Compliant Solution**\n"
"\n"
"Connection conn = null;\n"
"try {\n"
" String uname = getEncryptedUser();\n"
" String password = getEncryptedPass();\n"
' conn = DriverManager.getConnection("jdbc:mysql://localhost/test?" +\n'
' "user=" + uname + "&password=" + password);\n'
"\n"
"-----\n"
"Occurences:\n"
"Line: 66",
item.description,
)
self.assertIsNone(item.line)
self.assertIsNone(item.unique_id_from_tool)
self.assertEqual(int, type(item.nb_occurences))
self.assertEqual(1, item.nb_occurences)
def test_detailed_parse_file_with_single_vulnerability_has_single_finding(self):
my_file_handle, product, engagement, test = self.init(
get_unit_tests_path() + "/scans/sonarqube/sonar-single-finding.html"
)
parser = SonarQubeParser()
parser.set_mode('detailed')
findings = parser.get_findings(my_file_handle, test)
# common verifications
self.assertEqual(1, len(findings))
# specific verifications
item = findings[0]
self.assertEqual(str, type(item.description))
self.assertMultiLineEqual(
"Because it is easy to extract strings from a compiled application, credentials should never be hard-coded. Do so, and they're almost guaranteed to\n"
"end up in the hands of an attacker. This is particularly true for applications that are distributed.\n"
"Credentials should be stored outside of the code in a strongly-protected encrypted configuration file or database.\n"
'It\'s recommended to customize the configuration of this rule with additional credential words such as "oauthToken", "secret", ...\n'
"**Noncompliant Code Example**\n"
"\n"
"Connection conn = null;\n"
"try {\n"
' conn = DriverManager.getConnection("jdbc:mysql://localhost/test?" +\n'
' "user=steve&password=<PASSWORD>"); // Noncompliant\n'
' String uname = "steve";\n'
' String password = "<PASSWORD>";\n'
' conn = DriverManager.getConnection("jdbc:mysql://localhost/test?" +\n'
' "user=" + uname + "&password=" + password); // Noncompliant\n'
"\n"
' java.net.PasswordAuthentication pa = new java.net.PasswordAuthentication("userName", "1234".toCharArray()); // Noncompliant\n'
"\n"
"**Compliant Solution**\n"
"\n"
"Connection conn = null;\n"
"try {\n"
" String uname = getEncryptedUser();\n"
" String password = get<PASSWORD>();\n"
' conn = DriverManager.getConnection("jdbc:mysql://localhost/test?" +\n'
' "user=" + uname + "&password=" + password);',
item.description,
)
self.assertEqual(str, type(item.line))
self.assertEqual("66", item.line)
self.assertEqual(str, type(item.unique_id_from_tool))
self.assertEqual("AWK40IMu-pl6AHs22MnV", item.unique_id_from_tool)
    def check_parse_file_with_single_vulnerability_has_single_finding(self, findings):
self.assertEqual(1, len(findings))
# check content
item = findings[0]
self.assertEqual(str, type(findings[0].title))
self.assertEqual("Credentials should not be hard-coded", item.title)
self.assertEqual(int, type(item.cwe))
# This is only the first CWE in the list!
self.assertEqual(798, item.cwe)
self.assertEqual(bool, type(item.active))
self.assertEqual(False, item.active)
self.assertEqual(bool, type(item.verified))
self.assertEqual(False, item.verified)
self.assertEqual(str, type(item.severity))
self.assertEqual("Critical", item.severity)
self.assertEqual(str, type(item.mitigation))
self.assertEqual(
"'PASSWORD' detected in this expression, review this potentially hardcoded credential.",
item.mitigation,
)
self.assertEqual(str, type(item.references))
self.assertMultiLineEqual(
"squid:S2068\n"
"OWASP Top 10 2017 Category A2\n"
"MITRE, CWE-798\n"
"MITRE, CWE-259\n"
"CERT, MSC03-J.\n"
"SANS Top 25\n"
"Hard Coded Password",
item.references,
)
self.assertEqual(str, type(item.file_path))
self.assertEqual(
"modules/jdbc-pool/src/main/java/org/apache/tomcat/jdbc/pool/DataSourceFactory.java",
item.file_path,
)
self.assertEqual(bool, type(item.static_finding))
self.assertEqual(True, item.static_finding)
self.assertEqual(bool, type(item.dynamic_finding))
self.assertEqual(False, item.dynamic_finding)
def test_detailed_parse_file_with_multiple_vulnerabilities_has_multiple_findings(
self,
):
my_file_handle, product, engagement, test = self.init(
get_unit_tests_path() + "/scans/sonarqube/sonar-6-findings.html"
)
parser = SonarQubeParser()
parser.set_mode('detailed')
findings = parser.get_findings(my_file_handle, test)
# common verifications
self.assertEqual(6, len(findings))
def test_file_name_aggregated_parse_file_with_multiple_vulnerabilities_has_multiple_findings(
self,
):
my_file_handle, product, engagement, test = self.init(
get_unit_tests_path() + "/scans/sonarqube/sonar-6-findings.html"
)
parser = SonarQubeParser()
parser.set_mode('detailed')
findings = parser.get_findings(my_file_handle, test)
# common verifications
# (there is no aggregation to be done here)
self.assertEqual(6, len(findings))
def test_detailed_parse_file_with_table_in_table(self):
"""Test parsing when the vulnerability details include a table, with tr and td that should be ignored when looking for list of rules"""
my_file_handle, product, engagement, test = self.init(
get_unit_tests_path() + "/scans/sonarqube/sonar-table-in-table.html"
)
parser = SonarQubeParser()
parser.set_mode('detailed')
findings = parser.get_findings(my_file_handle, test)
self.assertEqual(1, len(findings))
# check content
item = findings[0]
self.assertEqual(str, type(findings[0].title))
self.assertEqual('"clone" should not be overridden', item.title)
self.assertEqual(int, type(item.cwe))
self.assertEqual(0, item.cwe)
self.assertEqual(bool, type(item.active))
self.assertEqual(True, item.active)
self.assertEqual(bool, type(item.verified))
self.assertEqual(True, item.verified)
self.assertEqual(str, type(item.description))
self.assertMultiLineEqual(
"Many consider clone and Cloneable broken in Java, largely because the rules for overriding clone are tricky\n"
"and difficult to get right, according to <NAME>:\n"
"\n"
" Object's clone method is very tricky. It's based on field copies, and it's \"extra-linguistic.\" It creates an object without calling a constructor.\n"
" There are no guarantees that it preserves the invariants established by the constructors. There have been lots of bugs over the years, both in and\n"
" outside Sun, stemming from the fact that if you just call super.clone repeatedly up the chain until you have cloned an object, you have a shallow\n"
" copy of the object. The clone generally shares state with the object being cloned. If that state is mutable, you don't have two independent objects.\n"
" If you modify one, the other changes as well. And all of a sudden, you get random behavior.\n"
"\n"
"A copy constructor or copy factory should be used instead.\n"
"This rule raises an issue when clone is overridden, whether or not Cloneable is implemented.\n"
"**Noncompliant Code Example**\n"
"\n"
"public class MyClass {\n"
" // ...\n"
"\n"
" public Object clone() { // Noncompliant\n"
" //...\n"
" }\n"
"}\n"
"\n"
"**Compliant Solution**\n"
"\n"
"public class MyClass {\n"
" // ...\n"
"\n"
" MyClass (MyClass source) {\n"
" //...\n"
" }\n"
"}",
item.description,
)
self.assertEqual(str, type(item.severity))
self.assertEqual("Critical", item.severity)
self.assertEqual(str, type(item.mitigation))
self.assertEqual(
'Remove this "clone" implementation; use a copy constructor or copy factory instead.',
item.mitigation,
)
self.assertEqual(str, type(item.references))
self.assertMultiLineEqual(
"squid:S2975\n" "Copy Constructor versus Cloning\n" "S2157\n" "S1182",
item.references,
)
self.assertEqual(str, type(item.file_path))
self.assertEqual(
"java/org/apache/catalina/util/URLEncoder.java", item.file_path
)
self.assertEqual(str, type(item.line))
self.assertEqual("190", item.line)
self.assertEqual(str, type(item.unique_id_from_tool))
self.assertEqual("AWK40IMu-pl6AHs22MnV", item.unique_id_from_tool)
self.assertEqual(bool, type(item.static_finding))
self.assertEqual(True, item.static_finding)
self.assertEqual(bool, type(item.dynamic_finding))
self.assertEqual(False, item.dynamic_finding)
def test_detailed_parse_file_with_rule_undefined(self):
"""the vulnerability's rule is not in the list of rules"""
my_file_handle, product, engagement, test = self.init(
get_unit_tests_path() + "/scans/sonarqube/sonar-rule-undefined.html"
)
parser = SonarQubeParser()
parser.set_mode('detailed')
findings = parser.get_findings(my_file_handle, test)
self.assertEqual(1, len(findings))
# check content
item = findings[0]
self.assertEqual(str, type(findings[0].title))
self.assertEqual('"clone" should not be overridden', item.title)
self.assertEqual(int, type(item.cwe))
# no rule found -> 0
self.assertEqual(0, item.cwe)
self.assertEqual(bool, type(item.active))
self.assertEqual(True, item.active)
self.assertEqual(bool, type(item.verified))
self.assertEqual(True, item.verified)
self.assertEqual(str, type(item.description))
self.assertEqual("No description provided", item.description)
self.assertEqual(str, type(item.severity))
self.assertEqual("Critical", item.severity)
self.assertEqual(str, type(item.mitigation))
self.assertEqual(
'Remove this "clone" implementation; use a copy constructor or copy factory instead.',
item.mitigation,
)
self.assertEqual(str, type(item.references))
self.assertEqual("", item.references)
self.assertEqual(str, type(item.file_path))
self.assertEqual(
"java/org/apache/catalina/util/URLEncoder.java", item.file_path
)
self.assertEqual(str, type(item.line))
self.assertEqual("190", item.line)
self.assertEqual(str, type(item.unique_id_from_tool))
self.assertEqual("AWK40IMu-pl6AHs22MnV", item.unique_id_from_tool)
self.assertEqual(bool, type(item.static_finding))
self.assertEqual(True, item.static_finding)
self.assertEqual(bool, type(item.dynamic_finding))
self.assertEqual(False, item.dynamic_finding)
# SonarQube Scan - report with aggregations to be made
def test_file_name_aggregated_parse_file_with_vuln_on_same_filename(self):
my_file_handle, product, engagement, test = self.init(
get_unit_tests_path() + "/scans/sonarqube/sonar-4-findings-3-to-aggregate.html"
)
parser = SonarQubeParser()
findings = parser.get_findings(my_file_handle, test)
# specific verifications
self.assertEqual(2, len(findings))
# checking both items because they aren't always in the same order
item1 = findings[0]
item2 = findings[1]
if item1.nb_occurences == 3:
aggregatedItem = item1
# there is nothing to aggregate on the other finding
self.assertEqual(int, type(item2.nb_occurences))
self.assertEqual(1, item2.nb_occurences)
elif item2.nb_occurences == 3:
aggregatedItem = item2
# there is nothing to aggregate on the other finding
self.assertEqual(int, type(item1.nb_occurences))
self.assertEqual(1, item1.nb_occurences)
else:
self.fail("cannot find aggregated item")
self.assertEqual(str, type(aggregatedItem.description))
self.assertMultiLineEqual(
"Because it is easy to extract strings from a compiled application, credentials should never be hard-coded. Do | |
# -*- test-case-name: twistedcaldav.directory.test.test_calendar -*-
##
# Copyright (c) 2005-2015 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
CalDAV scheduling resources.
"""
__all__ = [
"ScheduleInboxResource",
"ScheduleOutboxResource",
"deliverSchedulePrivilegeSet",
]
from twistedcaldav.config import config
# _schedulePrivilegeSet implicitly depends on config being initialized. The
# import below is a stopgap: _schedulePrivilegeSet does not read the config
# file itself, it picks up whatever defaults stdconfig installed when it was
# imported, so we force that import to happen here for now.
__import__("twistedcaldav.stdconfig") # FIXME
from txweb2 import responsecode
from txweb2.dav.http import ErrorResponse, MultiStatusResponse
from txweb2.dav.resource import davPrivilegeSet
from txweb2.dav.util import joinURL, normalizeURL
from txweb2.http import HTTPError
from twisted.internet.defer import inlineCallbacks, returnValue, succeed
from twisted.python.failure import Failure
from twistedcaldav import caldavxml, customxml
from twistedcaldav.caldavxml import caldav_namespace, CalendarFreeBusySet
from twistedcaldav.customxml import calendarserver_namespace
from twistedcaldav.ical import Component, allowedSchedulingComponents
from twistedcaldav.resource import CalDAVResource
from twistedcaldav.resource import isCalendarCollectionResource
from txdav.caldav.datastore.scheduling.caldav.scheduler import CalDAVScheduler
from txdav.caldav.icalendarstore import InvalidDefaultCalendar
from txdav.xml import element as davxml
from txdav.xml.rfc2518 import HRef
def _schedulePrivilegeSet(deliver):
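    """Extend the standard DAV privilege set by nesting schedule-deliver (or schedule-send) under the DAV:all privilege."""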
edited = False
top_supported_privileges = []
for supported_privilege in davPrivilegeSet.childrenOfType(davxml.SupportedPrivilege):
all_privilege = supported_privilege.childOfType(davxml.Privilege)
if isinstance(all_privilege.children[0], davxml.All):
all_description = supported_privilege.childOfType(davxml.Description)
all_supported_privileges = list(supported_privilege.childrenOfType(davxml.SupportedPrivilege))
all_supported_privileges.append(
davxml.SupportedPrivilege(
davxml.Privilege(caldavxml.ScheduleDeliver() if deliver else caldavxml.ScheduleSend()),
davxml.Description("schedule privileges for current principal", **{"xml:lang": "en"}),
),
)
if config.Scheduling.CalDAV.OldDraftCompatibility:
all_supported_privileges.append(
davxml.SupportedPrivilege(
davxml.Privilege(caldavxml.Schedule()),
davxml.Description("old-style schedule privileges for current principal", **{"xml:lang": "en"}),
),
)
top_supported_privileges.append(
davxml.SupportedPrivilege(all_privilege, all_description, *all_supported_privileges)
)
edited = True
else:
top_supported_privileges.append(supported_privilege)
assert edited, "Structure of davPrivilegeSet changed in a way that I don't know how to extend for schedulePrivilegeSet"
return davxml.SupportedPrivilegeSet(*top_supported_privileges)
deliverSchedulePrivilegeSet = _schedulePrivilegeSet(True)
sendSchedulePrivilegeSet = _schedulePrivilegeSet(False)
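# Illustrative sketch (not in the original source) of what _schedulePrivilegeSet
# produces, assuming the stock davPrivilegeSet: the DAV:all supported-privilege
# gains one extra child, e.g. for the inbox
#
#   <supported-privilege>
#     <privilege><all/></privilege>
#     ... existing children ...
#     <supported-privilege>
#       <privilege><schedule-deliver/></privilege>
#       <description xml:lang="en">schedule privileges for current principal</description>
#     </supported-privilege>
#   </supported-privilege>
#
# (the outbox variant substitutes schedule-send, and an old-style schedule
# privilege is appended when OldDraftCompatibility is enabled).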
class CalendarSchedulingCollectionResource (CalDAVResource):
"""
CalDAV scheduling collection resource.
Extends L{DAVResource} to provide CalDAV scheduling collection
functionality.
"""
def __init__(self, parent):
"""
@param parent: the parent resource of this one.
"""
assert parent is not None
super(CalendarSchedulingCollectionResource, self).__init__(principalCollections=parent.principalCollections())
self.parent = parent
def isCollection(self):
return True
def isCalendarCollection(self):
return False
def isPseudoCalendarCollection(self):
return True
def supportedReports(self):
result = super(CalDAVResource, self).supportedReports()
result.append(davxml.Report(caldavxml.CalendarQuery(),))
result.append(davxml.Report(caldavxml.CalendarMultiGet(),))
# free-busy report not allowed
if config.EnableSyncReport:
# Only allowed on calendar/inbox/addressbook collections
result.append(davxml.Report(davxml.SyncCollection(),))
return result
class ScheduleInboxResource (CalendarSchedulingCollectionResource):
"""
CalDAV schedule Inbox resource.
Extends L{DAVResource} to provide CalDAV functionality.
"""
def liveProperties(self):
return super(ScheduleInboxResource, self).liveProperties() + (
caldavxml.CalendarFreeBusySet.qname(),
caldavxml.ScheduleDefaultCalendarURL.qname(),
customxml.ScheduleDefaultTasksURL.qname(),
)
def dynamicProperties(self):
return super(ScheduleInboxResource, self).dynamicProperties() + (
customxml.CalendarAvailability.qname(),
)
def resourceType(self):
return davxml.ResourceType.scheduleInbox
def hasProperty(self, property, request):
"""
Need to special case calendar-free-busy-set for backwards compatibility.
"""
if type(property) is tuple:
qname = property
else:
qname = property.qname()
# Force calendar collections to always appear to have the property
if qname == caldavxml.CalendarFreeBusySet.qname():
return succeed(True)
elif qname == customxml.CalendarAvailability.qname():
return succeed(self.parent._newStoreHome.getAvailability() is not None)
else:
return super(ScheduleInboxResource, self).hasProperty(property, request)
@inlineCallbacks
def readProperty(self, property, request):
if type(property) is tuple:
qname = property
else:
qname = property.qname()
if qname == caldavxml.CalendarFreeBusySet.qname():
# Synthesize value for calendar transparency state
top = self.parent.url()
values = []
for cal in (yield self.parent._newStoreHome.calendars()):
if cal.isUsedForFreeBusy():
values.append(HRef(joinURL(top, cal.name()) + "/"))
returnValue(CalendarFreeBusySet(*values))
elif qname == customxml.CalendarAvailability.qname():
availability = self.parent._newStoreHome.getAvailability()
returnValue(customxml.CalendarAvailability.fromString(str(availability)) if availability else None)
elif qname in (caldavxml.ScheduleDefaultCalendarURL.qname(), customxml.ScheduleDefaultTasksURL.qname()):
result = (yield self.readDefaultCalendarProperty(request, qname))
returnValue(result)
result = (yield super(ScheduleInboxResource, self).readProperty(property, request))
returnValue(result)
@inlineCallbacks
def writeProperty(self, property, request):
assert isinstance(property, davxml.WebDAVElement)
# Strictly speaking CS:calendar-availability is a live property in the sense that
# the server enforces what can be stored, however it need not actually exist, so
# we cannot list it in liveProperties on this resource, since its presence there
# would mean that hasProperty always returns True for it.
if property.qname() == customxml.CalendarAvailability.qname():
if not property.valid():
raise HTTPError(ErrorResponse(
responsecode.CONFLICT,
(caldav_namespace, "valid-calendar-data"),
description="Invalid property"
))
yield self.parent._newStoreHome.setAvailability(property.calendar())
returnValue(None)
elif property.qname() == caldavxml.CalendarFreeBusySet.qname():
# Verify that the calendars added in the PROPPATCH are valid. We do not check
# whether existing items in the property are still valid - only new ones.
property.children = [davxml.HRef(normalizeURL(str(href))) for href in property.children]
new_calendars = set([str(href) for href in property.children])
old_calendars = set()
for cal in (yield self.parent._newStoreHome.calendars()):
if cal.isUsedForFreeBusy():
old_calendars.add(HRef(joinURL(self.parent.url(), cal.name())))
added_calendars = new_calendars.difference(old_calendars)
for href in added_calendars:
cal = (yield request.locateResource(str(href)))
if cal is None or not cal.exists() or not isCalendarCollectionResource(cal):
# Validate that hrefs point to a valid calendar.
raise HTTPError(ErrorResponse(
responsecode.CONFLICT,
(caldav_namespace, "valid-calendar-url"),
"Invalid URI",
))
# Remove old ones
for href in old_calendars.difference(new_calendars):
cal = (yield request.locateResource(str(href)))
if cal is not None and cal.exists() and isCalendarCollectionResource(cal) and cal._newStoreObject.isUsedForFreeBusy():
yield cal._newStoreObject.setUsedForFreeBusy(False)
# Add new ones
for href in new_calendars:
cal = (yield request.locateResource(str(href)))
if cal is not None and cal.exists() and isCalendarCollectionResource(cal) and not cal._newStoreObject.isUsedForFreeBusy():
yield cal._newStoreObject.setUsedForFreeBusy(True)
returnValue(None)
elif property.qname() in (caldavxml.ScheduleDefaultCalendarURL.qname(), customxml.ScheduleDefaultTasksURL.qname()):
yield self.writeDefaultCalendarProperty(request, property)
returnValue(None)
yield super(ScheduleInboxResource, self).writeProperty(property, request)
@inlineCallbacks
def removeProperty(self, property, request):
if type(property) is tuple:
qname = property
else:
qname = property.qname()
if qname == customxml.CalendarAvailability.qname():
yield self.parent._newStoreHome.setAvailability(None)
returnValue(None)
result = (yield super(ScheduleInboxResource, self).removeProperty(property, request))
returnValue(result)
@inlineCallbacks
def readDefaultCalendarProperty(self, request, qname):
"""
Read either the default VEVENT or VTODO calendar property. Try to pick one if not present.
"""
tasks = qname == customxml.ScheduleDefaultTasksURL.qname()
componentType = "VTODO" if tasks else "VEVENT"
prop_to_set = customxml.ScheduleDefaultTasksURL if tasks else caldavxml.ScheduleDefaultCalendarURL
# This property now comes direct from the calendar home new store object
default = (yield self.parent._newStoreHome.defaultCalendar(componentType, create=False))
if default is None:
returnValue(prop_to_set())
else:
defaultURL = joinURL(self.parent.url(), default.name())
returnValue(prop_to_set(davxml.HRef(defaultURL)))
@inlineCallbacks
def writeDefaultCalendarProperty(self, request, property):
"""
Write either the default VEVENT or VTODO calendar property, validating and canonicalizing the value
"""
if property.qname() == caldavxml.ScheduleDefaultCalendarURL.qname():
ctype = "VEVENT"
error_element = (caldav_namespace, "valid-schedule-default-calendar-URL")
elif property.qname() == customxml.ScheduleDefaultTasksURL.qname():
ctype = "VTODO"
error_element = (calendarserver_namespace, "valid-schedule-default-tasks-URL")
else:
returnValue(None)
# Verify that the calendar added in the PROPPATCH is valid.
property.children = [davxml.HRef(normalizeURL(str(href))) for href in property.children]
new_calendar = [str(href) for href in property.children]
cal = None
if len(new_calendar) == 1:
cal = (yield request.locateResource(str(new_calendar[0])))
else:
raise HTTPError(ErrorResponse(
responsecode.BAD_REQUEST,
error_element,
"Invalid HRef in property",
))
if cal is None or not cal.exists():
raise HTTPError(ErrorResponse(
responsecode.BAD_REQUEST,
error_element,
"HRef is not a valid calendar",
))
try:
# Now set it on the new store object
yield self.parent._newStoreHome.setDefaultCalendar(cal._newStoreObject, ctype)
except InvalidDefaultCalendar as e:
raise HTTPError(ErrorResponse(
responsecode.CONFLICT,
error_element,
str(e),
))
@inlineCallbacks
def defaultCalendar(self, request, componentType):
"""
Find the default calendar for the supplied iCalendar component type. If one does
not exist, automatically provision it.
"""
# This property now comes direct from the calendar home new store object
default = (yield self.parent._newStoreHome.defaultCalendar(componentType, create=False))
# Need L{DAVResource} object to return not new store object
if default is not None:
default = (yield request.locateResource(joinURL(self.parent.url(), default.name())))
returnValue(default)
##
# ACL
##
def supportedPrivileges(self, request):
return succeed(deliverSchedulePrivilegeSet)
def defaultAccessControlList(self):
privs = (
davxml.Privilege(caldavxml.ScheduleDeliver()),
)
if config.Scheduling.CalDAV.OldDraftCompatibility:
privs += (davxml.Privilege(caldavxml.Schedule()),)
return succeed(
davxml.ACL(
# CalDAV:schedule-deliver for any authenticated user
davxml.ACE(
davxml.Principal(davxml.Authenticated()),
davxml.Grant(*privs),
),
)
)
class ScheduleOutboxResource (CalendarSchedulingCollectionResource):
"""
CalDAV schedule Outbox resource.
Extends L{DAVResource} to provide CalDAV functionality.
"""
def resourceType(self):
return davxml.ResourceType.scheduleOutbox
def getSupportedComponentSet(self):
return caldavxml.SupportedCalendarComponentSet(
*[caldavxml.CalendarComponent(name=item) for item in allowedSchedulingComponents]
)
@inlineCallbacks
def http_POST(self, request):
"""
The CalDAV POST method.
This uses a generator function yielding either L{waitForDeferred} objects or L{Response} objects.
This allows for code that follows a 'linear' execution pattern rather than having to use nested
L{Deferred} callbacks. The logic is easier to follow this way plus we don't run into deep nesting
issues which the other approach would have with large numbers of recipients.
"""
# Check authentication and access controls
yield self.authorize(request, (caldavxml.ScheduleSend(),))
calendar, format = (yield self.loadCalendarFromRequest(request))
originator = (yield self.loadOriginatorFromRequestDetails(request))
recipients = self.loadRecipientsFromCalendarData(calendar)
# Log extended item
if not hasattr(request, "extendedLogItems"):
request.extendedLogItems = {}
# This is a local CalDAV scheduling operation.
scheduler = CalDAVScheduler(self._associatedTransaction, self.parent._newStoreHome.uid(), logItems=request.extendedLogItems)
# Do the POST processing.
result = (yield scheduler.doSchedulingViaPOST(originator, recipients, calendar))
returnValue(result.response(format=format))
def determineType(self, content_type):
"""
Determine if the supplied content-type is valid for storing and return the matching PyCalendar type.
"""
format = None
if | |
# repo: bhuztez/sqlite3ct
from functools import wraps
from inspect import signature, Signature
from ctypes import (
PyDLL, pythonapi, CFUNCTYPE, POINTER, Structure, cast, byref,
c_ubyte, c_int, c_uint, c_double,
c_char_p, c_void_p, string_at)
import sys
libsqlite = None
if sys.platform == 'win32':
libsqlite = PyDLL("C:/ProgramData/chocolatey/lib/SQLite/tools/sqlite3.dll")
else:
import _sqlite3
libsqlite = PyDLL(_sqlite3.__file__)
def annotate(func):
sig = signature(func)
cfunc = getattr(libsqlite, func.__name__)
argtypes = [p.annotation for p in sig.parameters.values()]
cfunc.argtypes = argtypes
if sig.return_annotation is Signature.empty:
cfunc.restype = None
else:
cfunc.restype = sig.return_annotation
return cfunc
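# Illustrative note (not part of the original module): @annotate looks up the C
# symbol with the same name as the decorated stub, copies the stub's parameter
# annotations into cfunc.argtypes and its return annotation into cfunc.restype
# (None when no return annotation is given), and returns the configured ctypes
# function in place of the stub. For example, a hypothetical extra binding could
# be declared as
#
#   @annotate
#   def sqlite3_changes(db: c_void_p) -> c_int:
#       pass
#
# after which sqlite3_changes(db) calls the C function directly.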
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
# Run-Time Library Version Numbers
@annotate
def sqlite3_libversion() -> c_char_p:
pass
@annotate
def sqlite3_sourceid() -> c_char_p:
pass
@annotate
def sqlite3_libversion_number() -> c_int:
pass
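# Quick sanity check (illustrative, not in the original source): the three
# functions above take no arguments and can be called as soon as the module is
# imported, e.g.
#   sqlite3_libversion()         # -> version string as bytes, e.g. b'3.31.1'
#   sqlite3_libversion_number()  # -> integer major*1000000 + minor*1000 + patch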
# Database Connection Handle
sqlite3_p = c_void_p
# Run-Time Library Compilation Options Diagnostics
@annotate
def sqlite3_compileoption_used(zOptName: c_char_p) -> c_int:
pass
@annotate
def sqlite3_compileoption_get(n: c_int) -> c_char_p:
pass
# Test To See If The Library Is Threadsafe
@annotate
def sqlite3_threadsafe() -> c_int:
pass
# 64-Bit Integer Types
from ctypes import c_int64 as sqlite_int64, c_uint64 as sqlite_uint64
sqlite3_int64 = sqlite_int64
sqlite3_uint64 = sqlite_uint64
# Closing A Database Connection
@annotate
def sqlite3_close(db: sqlite3_p) -> c_int:
pass
@annotate
def sqlite3_close_v2(db: sqlite3_p) -> c_int:
pass
# Result Codes
SQLITE_OK = 0 # Successful result
# beginning-of-error-codes
SQLITE_ERROR = 1 # Generic error
SQLITE_INTERNAL = 2 # Internal logic error in SQLite
SQLITE_PERM = 3 # Access permission denied
SQLITE_ABORT = 4 # Callback routine requested an abort
SQLITE_BUSY = 5 # The database file is locked
SQLITE_LOCKED = 6 # A table in the database is locked
SQLITE_NOMEM = 7 # A malloc() failed
SQLITE_READONLY = 8 # Attempt to write a readonly database
SQLITE_INTERRUPT = 9 # Operation terminated by sqlite3_interrupt()
SQLITE_IOERR = 10 # Some kind of disk I/O error occurred
SQLITE_CORRUPT = 11 # The database disk image is malformed
SQLITE_NOTFOUND = 12 # Unknown opcode in sqlite3_file_control()
SQLITE_FULL = 13 # Insertion failed because database is full
SQLITE_CANTOPEN = 14 # Unable to open the database file
SQLITE_PROTOCOL = 15 # Database lock protocol error
SQLITE_EMPTY = 16 # Internal use only
SQLITE_SCHEMA = 17 # The database schema changed
SQLITE_TOOBIG = 18 # String or BLOB exceeds size limit
SQLITE_CONSTRAINT = 19 # Abort due to constraint violation
SQLITE_MISMATCH = 20 # Data type mismatch
SQLITE_MISUSE = 21 # Library used incorrectly
SQLITE_NOLFS = 22 # Uses OS features not supported on host
SQLITE_AUTH = 23 # Authorization denied
SQLITE_FORMAT = 24 # Not used
SQLITE_RANGE = 25 # 2nd parameter to sqlite3_bind out of range
SQLITE_NOTADB = 26 # File opened that is not a database file
SQLITE_NOTICE = 27 # Notifications from sqlite3_log()
SQLITE_WARNING = 28 # Warnings from sqlite3_log()
SQLITE_ROW = 100 # sqlite3_step() has another row ready
SQLITE_DONE = 101 # sqlite3_step() has finished executing
# end-of-error-codes
# Extended Result Codes
SQLITE_ERROR_MISSING_COLLSEQ = (SQLITE_ERROR | (1<<8))
SQLITE_ERROR_RETRY = (SQLITE_ERROR | (2<<8))
SQLITE_ERROR_SNAPSHOT = (SQLITE_ERROR | (3<<8))
SQLITE_IOERR_READ = (SQLITE_IOERR | (1<<8))
SQLITE_IOERR_SHORT_READ = (SQLITE_IOERR | (2<<8))
SQLITE_IOERR_WRITE = (SQLITE_IOERR | (3<<8))
SQLITE_IOERR_FSYNC = (SQLITE_IOERR | (4<<8))
SQLITE_IOERR_DIR_FSYNC = (SQLITE_IOERR | (5<<8))
SQLITE_IOERR_TRUNCATE = (SQLITE_IOERR | (6<<8))
SQLITE_IOERR_FSTAT = (SQLITE_IOERR | (7<<8))
SQLITE_IOERR_UNLOCK = (SQLITE_IOERR | (8<<8))
SQLITE_IOERR_RDLOCK = (SQLITE_IOERR | (9<<8))
SQLITE_IOERR_DELETE = (SQLITE_IOERR | (10<<8))
SQLITE_IOERR_BLOCKED = (SQLITE_IOERR | (11<<8))
SQLITE_IOERR_NOMEM = (SQLITE_IOERR | (12<<8))
SQLITE_IOERR_ACCESS = (SQLITE_IOERR | (13<<8))
SQLITE_IOERR_CHECKRESERVEDLOCK = (SQLITE_IOERR | (14<<8))
SQLITE_IOERR_LOCK = (SQLITE_IOERR | (15<<8))
SQLITE_IOERR_CLOSE = (SQLITE_IOERR | (16<<8))
SQLITE_IOERR_DIR_CLOSE = (SQLITE_IOERR | (17<<8))
SQLITE_IOERR_SHMOPEN = (SQLITE_IOERR | (18<<8))
SQLITE_IOERR_SHMSIZE = (SQLITE_IOERR | (19<<8))
SQLITE_IOERR_SHMLOCK = (SQLITE_IOERR | (20<<8))
SQLITE_IOERR_SHMMAP = (SQLITE_IOERR | (21<<8))
SQLITE_IOERR_SEEK = (SQLITE_IOERR | (22<<8))
SQLITE_IOERR_DELETE_NOENT = (SQLITE_IOERR | (23<<8))
SQLITE_IOERR_MMAP = (SQLITE_IOERR | (24<<8))
SQLITE_IOERR_GETTEMPPATH = (SQLITE_IOERR | (25<<8))
SQLITE_IOERR_CONVPATH = (SQLITE_IOERR | (26<<8))
SQLITE_IOERR_VNODE = (SQLITE_IOERR | (27<<8))
SQLITE_IOERR_AUTH = (SQLITE_IOERR | (28<<8))
SQLITE_IOERR_BEGIN_ATOMIC = (SQLITE_IOERR | (29<<8))
SQLITE_IOERR_COMMIT_ATOMIC = (SQLITE_IOERR | (30<<8))
SQLITE_IOERR_ROLLBACK_ATOMIC = (SQLITE_IOERR | (31<<8))
SQLITE_LOCKED_SHAREDCACHE = (SQLITE_LOCKED | (1<<8))
SQLITE_LOCKED_VTAB = (SQLITE_LOCKED | (2<<8))
SQLITE_BUSY_RECOVERY = (SQLITE_BUSY | (1<<8))
SQLITE_BUSY_SNAPSHOT = (SQLITE_BUSY | (2<<8))
SQLITE_CANTOPEN_NOTEMPDIR = (SQLITE_CANTOPEN | (1<<8))
SQLITE_CANTOPEN_ISDIR = (SQLITE_CANTOPEN | (2<<8))
SQLITE_CANTOPEN_FULLPATH = (SQLITE_CANTOPEN | (3<<8))
SQLITE_CANTOPEN_CONVPATH = (SQLITE_CANTOPEN | (4<<8))
SQLITE_CANTOPEN_DIRTYWAL = (SQLITE_CANTOPEN | (5<<8)) # Not Used
SQLITE_CORRUPT_VTAB = (SQLITE_CORRUPT | (1<<8))
SQLITE_CORRUPT_SEQUENCE = (SQLITE_CORRUPT | (2<<8))
SQLITE_READONLY_RECOVERY = (SQLITE_READONLY | (1<<8))
SQLITE_READONLY_CANTLOCK = (SQLITE_READONLY | (2<<8))
SQLITE_READONLY_ROLLBACK = (SQLITE_READONLY | (3<<8))
SQLITE_READONLY_DBMOVED = (SQLITE_READONLY | (4<<8))
SQLITE_READONLY_CANTINIT = (SQLITE_READONLY | (5<<8))
SQLITE_READONLY_DIRECTORY = (SQLITE_READONLY | (6<<8))
SQLITE_ABORT_ROLLBACK = (SQLITE_ABORT | (2<<8))
SQLITE_CONSTRAINT_CHECK = (SQLITE_CONSTRAINT | (1<<8))
SQLITE_CONSTRAINT_COMMITHOOK = (SQLITE_CONSTRAINT | (2<<8))
SQLITE_CONSTRAINT_FOREIGNKEY = (SQLITE_CONSTRAINT | (3<<8))
SQLITE_CONSTRAINT_FUNCTION = (SQLITE_CONSTRAINT | (4<<8))
SQLITE_CONSTRAINT_NOTNULL = (SQLITE_CONSTRAINT | (5<<8))
SQLITE_CONSTRAINT_PRIMARYKEY = (SQLITE_CONSTRAINT | (6<<8))
SQLITE_CONSTRAINT_TRIGGER = (SQLITE_CONSTRAINT | (7<<8))
SQLITE_CONSTRAINT_UNIQUE = (SQLITE_CONSTRAINT | (8<<8))
SQLITE_CONSTRAINT_VTAB = (SQLITE_CONSTRAINT | (9<<8))
SQLITE_CONSTRAINT_ROWID = (SQLITE_CONSTRAINT | (10<<8))
SQLITE_NOTICE_RECOVER_WAL = (SQLITE_NOTICE | (1<<8))
SQLITE_NOTICE_RECOVER_ROLLBACK = (SQLITE_NOTICE | (2<<8))
SQLITE_WARNING_AUTOINDEX = (SQLITE_WARNING | (1<<8))
SQLITE_AUTH_USER = (SQLITE_AUTH | (1<<8))
SQLITE_OK_LOAD_PERMANENTLY = (SQLITE_OK | (1<<8))
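# Illustrative helper (not part of the SQLite API or the original module): every
# extended result code above is built as primary_code | (n << 8), so the primary
# code can be recovered by masking off all but the low byte, e.g.
#   SQLITE_IOERR_FSYNC & 0xFF == SQLITE_IOERR
#   SQLITE_READONLY_DBMOVED & 0xFF == SQLITE_READONLY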
# Flags For File Open Operations
SQLITE_OPEN_READONLY = 0x00000001 # Ok for sqlite3_open_v2()
SQLITE_OPEN_READWRITE = 0x00000002 # Ok for sqlite3_open_v2()
SQLITE_OPEN_CREATE = 0x00000004 # Ok for sqlite3_open_v2()
SQLITE_OPEN_DELETEONCLOSE = 0x00000008 # VFS only
SQLITE_OPEN_EXCLUSIVE = 0x00000010 # VFS only
SQLITE_OPEN_AUTOPROXY = 0x00000020 # VFS only
SQLITE_OPEN_URI = 0x00000040 # Ok for sqlite3_open_v2()
SQLITE_OPEN_MEMORY = 0x00000080 # Ok for sqlite3_open_v2()
SQLITE_OPEN_MAIN_DB = 0x00000100 # VFS only
SQLITE_OPEN_TEMP_DB = 0x00000200 # VFS only
SQLITE_OPEN_TRANSIENT_DB = 0x00000400 # VFS only
SQLITE_OPEN_MAIN_JOURNAL = 0x00000800 # VFS only
SQLITE_OPEN_TEMP_JOURNAL = 0x00001000 # VFS only
SQLITE_OPEN_SUBJOURNAL = 0x00002000 # VFS only
SQLITE_OPEN_MASTER_JOURNAL = 0x00004000 # VFS only
SQLITE_OPEN_NOMUTEX = 0x00008000 # Ok for sqlite3_open_v2()
SQLITE_OPEN_FULLMUTEX = 0x00010000 # Ok for sqlite3_open_v2()
SQLITE_OPEN_SHAREDCACHE = 0x00020000 # Ok for sqlite3_open_v2()
SQLITE_OPEN_PRIVATECACHE = 0x00040000 # Ok for sqlite3_open_v2()
SQLITE_OPEN_WAL = 0x00080000 # VFS only
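# Illustrative combination (not in the original source): the flags above are
# bitwise-OR'ed together when calling sqlite3_open_v2(); a typical read/write
# connection that may create the file would pass
#   SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE
# optionally OR'ed with SQLITE_OPEN_URI to enable "file:" URI filenames.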
# Device Characteristics
SQLITE_IOCAP_ATOMIC = 0x00000001
SQLITE_IOCAP_ATOMIC512 = 0x00000002
SQLITE_IOCAP_ATOMIC1K = 0x00000004
SQLITE_IOCAP_ATOMIC2K = 0x00000008
SQLITE_IOCAP_ATOMIC4K = 0x00000010
SQLITE_IOCAP_ATOMIC8K = 0x00000020
SQLITE_IOCAP_ATOMIC16K = 0x00000040
SQLITE_IOCAP_ATOMIC32K = 0x00000080
SQLITE_IOCAP_ATOMIC64K = 0x00000100
SQLITE_IOCAP_SAFE_APPEND = 0x00000200
SQLITE_IOCAP_SEQUENTIAL = 0x00000400
SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN = 0x00000800
SQLITE_IOCAP_POWERSAFE_OVERWRITE = 0x00001000
SQLITE_IOCAP_IMMUTABLE = 0x00002000
SQLITE_IOCAP_BATCH_ATOMIC = 0x00004000
# File Locking Levels
SQLITE_LOCK_NONE = 0
SQLITE_LOCK_SHARED = 1
SQLITE_LOCK_RESERVED = 2
SQLITE_LOCK_PENDING = 3
SQLITE_LOCK_EXCLUSIVE = 4
# Synchronization Type Flags
SQLITE_SYNC_NORMAL = 0x00002
SQLITE_SYNC_FULL = 0x00003
SQLITE_SYNC_DATAONLY = 0x00010
# OS Interface Open File Handle
class sqlite3_file(Structure):
pass
class sqlite3_io_methods(Structure):
pass
sqlite3_file._fields_ = [
("pMethods", POINTER(sqlite3_io_methods)), # Methods for an open file
]
sqlite3_io_methods._fields_ = [
("iVersion", c_int),
("xClose", c_void_p),
("xRead", c_void_p),
("xWrite", c_void_p),
("xTruncate", c_void_p),
("xSync", c_void_p),
("xFileSize", c_void_p),
("xLock", c_void_p),
("xUnlock", c_void_p),
("xCheckReservedLock", c_void_p),
("xFileControl", c_void_p),
("xSectorSize", c_void_p),
("xDeviceCharacteristics", c_void_p),
# Methods above are valid for version 1
("xShmMap", c_void_p),
("xShmLock", c_void_p),
("xShmBarrier", c_void_p),
("xShmUnmap", c_void_p),
# Methods above are valid for version 2
("xFetch", c_void_p),
("xUnfetch", c_void_p),
# Methods above are valid for version 3
# Additional methods may be added in future releases
]
# Standard File Control Opcodes
SQLITE_FCNTL_LOCKSTATE = 1
SQLITE_FCNTL_GET_LOCKPROXYFILE = 2
SQLITE_FCNTL_SET_LOCKPROXYFILE = 3
SQLITE_FCNTL_LAST_ERRNO = 4
SQLITE_FCNTL_SIZE_HINT = 5
SQLITE_FCNTL_CHUNK_SIZE = 6
SQLITE_FCNTL_FILE_POINTER = 7
SQLITE_FCNTL_SYNC_OMITTED = 8
SQLITE_FCNTL_WIN32_AV_RETRY = 9
SQLITE_FCNTL_PERSIST_WAL = 10
SQLITE_FCNTL_OVERWRITE = 11
SQLITE_FCNTL_VFSNAME = 12
SQLITE_FCNTL_POWERSAFE_OVERWRITE = 13
SQLITE_FCNTL_PRAGMA = 14
SQLITE_FCNTL_BUSYHANDLER = 15
SQLITE_FCNTL_TEMPFILENAME = 16
SQLITE_FCNTL_MMAP_SIZE = 18
SQLITE_FCNTL_TRACE = 19
SQLITE_FCNTL_HAS_MOVED = 20
SQLITE_FCNTL_SYNC = 21
SQLITE_FCNTL_COMMIT_PHASETWO = 22
SQLITE_FCNTL_WIN32_SET_HANDLE = 23
SQLITE_FCNTL_WAL_BLOCK = 24
SQLITE_FCNTL_ZIPVFS = 25
SQLITE_FCNTL_RBU = 26
SQLITE_FCNTL_VFS_POINTER = 27
SQLITE_FCNTL_JOURNAL_POINTER = 28
SQLITE_FCNTL_WIN32_GET_HANDLE = 29
SQLITE_FCNTL_PDB = 30
SQLITE_FCNTL_BEGIN_ATOMIC_WRITE = 31
SQLITE_FCNTL_COMMIT_ATOMIC_WRITE = 32
SQLITE_FCNTL_ROLLBACK_ATOMIC_WRITE = 33
SQLITE_FCNTL_LOCK_TIMEOUT = 34
SQLITE_FCNTL_DATA_VERSION = 35
SQLITE_FCNTL_SIZE_LIMIT = 36
# deprecated names
SQLITE_GET_LOCKPROXYFILE = SQLITE_FCNTL_GET_LOCKPROXYFILE
SQLITE_SET_LOCKPROXYFILE = SQLITE_FCNTL_SET_LOCKPROXYFILE
SQLITE_LAST_ERRNO = SQLITE_FCNTL_LAST_ERRNO
# Mutex Handle
sqlite3_mutex_p = c_void_p
# OS Interface Object
class sqlite3_vfs(Structure):
pass
sqlite3_syscall_ptr = CFUNCTYPE(None)
sqlite3_vfs._fields_ = [
("iVersion", c_int), # Structure version number (currently 3)
("szOsFile", c_int), # Size of subclassed sqlite3_file
("mxPathname", c_int), # Maximum file pathname length
("pNext", POINTER(sqlite3_vfs)), # Next registered VFS
("zName", c_char_p), # Name of this virtual file system
("pAppData", c_void_p), # Pointer to application-specific data
("xOpen", c_void_p),
("xDelete", c_void_p),
("xAccess", c_void_p),
("xFullPathname", c_void_p),
("xDlOpen", c_void_p),
("xDlError", c_void_p),
("xDlSym", c_void_p),
("xDlClose", c_void_p),
("xRandomness", c_void_p),
("xSleep", c_void_p),
("xCurrentTime", c_void_p),
("xGetLastError", c_void_p),
# The methods above are in version 1 of the sqlite_vfs object
# definition. Those that follow are added in version 2 or later
("xCurrentTimeInt64", c_void_p),
# The methods above are in versions 1 and 2 of the sqlite_vfs object.
# Those below are for version 3 and greater.
("xSetSystemCall", c_void_p),
("xGetSystemCall", c_void_p),
("xNextSystemCall", c_void_p),
# The methods above are in versions 1 through 3 of the sqlite_vfs object.
# New fields may be appended in future versions. The iVersion
# value will increment whenever this happens.
]
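# Illustrative sketch (not part of the original bindings): the structure above can
# be used to inspect a registered VFS via the standard sqlite3_vfs_find() API.
# Types are configured by hand here because @annotate is only applied to the
# stubs declared in this module.
#
#   _vfs_find = libsqlite.sqlite3_vfs_find
#   _vfs_find.argtypes = [c_char_p]
#   _vfs_find.restype = POINTER(sqlite3_vfs)
#   default_vfs = _vfs_find(None)            # None -> the default VFS
#   print(default_vfs.contents.zName)        # e.g. b'unix' or b'win32'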
# Flags for the xAccess VFS method
SQLITE_ACCESS_EXISTS = 0
SQLITE_ACCESS_READWRITE = 1 # Used by PRAGMA temp_store_directory
SQLITE_ACCESS_READ = 2 # Unused
# Flags for the xShmLock VFS method
SQLITE_SHM_UNLOCK = 1
SQLITE_SHM_LOCK = 2
SQLITE_SHM_SHARED = 4
SQLITE_SHM_EXCLUSIVE = 8
# Maximum xShmLock index
SQLITE_SHM_NLOCK = 8
# Configuring The SQLite Library
sqlite3_config = libsqlite.sqlite3_config
# Configure database connections
sqlite3_db_config = libsqlite.sqlite3_db_config
# Memory Allocation Routines
class sqlite3_mem_methods(Structure):
_fields_ = [
("xMalloc", c_void_p), # Memory allocation function
("xFree", c_void_p), # Free a prior allocation
| |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import paddle
import paddle.static as static
import unittest
from fleet_meta_optimizer_base import TestFleetMetaOptimizer
from paddle.distributed.fleet.meta_optimizers.common import is_loss_grad_op
paddle.enable_static()
class TestFleetHybridOptimizer(TestFleetMetaOptimizer):
def setUp(self):
os.environ["PADDLE_TRAINER_ID"] = "3"
os.environ["PADDLE_TRAINER_ENDPOINTS"] = \
"127.0.0.1:36001,127.0.0.1:36002,127.0.0.1:36003,127.0.0.1:36004"
# pre-assigned ring id
self.mp_ring_id = 0
self.sharding_ring_id = 1
self.dp_ring_id = 2
self.global_ring_id = 3
self.pp_pair_ring_id = 20
self._debug = False
def test_opt_sharding_with_pp(self):
train_prog, startup_prog = static.Program(), static.Program()
avg_cost, strategy = self.pp_net(train_prog, startup_prog)
self.set_strategy(strategy, 'pipeline')
strategy.sharding = True
strategy.sharding_configs = {
"sharding_degree": 1,
"pp_degree": 2,
"dp_degree": 2,
"_dp_as_optimizer_sharding": True,
}
strategy.fuse_all_reduce_ops = False
self.optimizer(avg_cost, strategy, train_prog, startup_prog)
train_prog = train_prog._pipeline_opt['section_program']
startup_prog = startup_prog._pipeline_opt['startup_program']
self.debug_program(train_prog, startup_prog)
startup_prog_ops = startup_prog.global_block().ops
main_prog_ops = train_prog.global_block().ops
# check program
startup_prog_op_types = [op.type for op in startup_prog_ops]
main_prog_op_types = [op.type for op in main_prog_ops]
# global, sharding, pp_send, pp_recv
self.assertEqual(startup_prog_op_types, [
'uniform_random', 'fill_constant', 'uniform_random',
'fill_constant', 'uniform_random', 'fill_constant',
'uniform_random', 'fill_constant', 'fill_constant', 'fill_constant',
'fill_constant', 'fill_constant', 'fill_constant', 'fill_constant',
'c_gen_nccl_id', 'c_comm_init', 'c_gen_nccl_id', 'c_comm_init',
'c_gen_nccl_id', 'c_comm_init', 'c_gen_nccl_id', 'c_comm_init',
'c_broadcast', 'c_broadcast', 'c_broadcast', 'c_broadcast',
'c_broadcast', 'c_broadcast', 'c_broadcast', 'c_broadcast',
'c_sync_comm_stream'
])
self.assertEqual(main_prog_op_types, [
'recv_v2', 'mul', 'elementwise_add', 'tanh', 'mul',
'elementwise_add', 'tanh', 'mul', 'elementwise_add', 'tanh', 'mul',
'elementwise_add', 'softmax', 'cross_entropy2', 'mean',
'fill_constant', 'mean_grad', 'cross_entropy_grad2', 'softmax_grad',
'elementwise_add_grad', 'mul_grad', 'tanh_grad',
'elementwise_add_grad', 'mul_grad', 'tanh_grad',
'elementwise_add_grad', 'mul_grad', 'tanh_grad',
'elementwise_add_grad', 'mul_grad', 'c_sync_calc_stream', 'send_v2',
'fill_constant', 'sum', 'fill_constant', 'sum', 'fill_constant',
'sum', 'fill_constant', 'sum', 'fill_constant', 'sum',
'fill_constant', 'sum', 'fill_constant', 'sum', 'fill_constant',
'sum', 'c_reduce_sum', 'c_reduce_sum', 'c_reduce_sum',
'c_reduce_sum', 'c_reduce_sum', 'c_reduce_sum', 'c_reduce_sum',
'c_reduce_sum', 'c_sync_comm_stream', 'momentum', 'momentum',
'momentum', 'momentum', 'momentum', 'c_broadcast', 'c_broadcast',
'c_broadcast', 'c_broadcast', 'c_broadcast', 'c_broadcast',
'c_broadcast', 'c_broadcast'
])
# should has ring id for pp
created_ring_ids = [
op.desc.attr("ring_id") for op in startup_prog_ops
if op.type == "c_comm_init"
]
self.assertIn(self.dp_ring_id, created_ring_ids)
self.assertIn(self.pp_pair_ring_id, created_ring_ids)
# check correctness of pp group
pp_group_waiting_ports = None
for op in startup_prog_ops:
if op.type == "c_gen_nccl_id" and \
op.desc.output_arg_names()[0] == "comm_id_0":
pp_group_waiting_ports = op.desc.attr("other_endpoints")
self.assertEqual(pp_group_waiting_ports, ['127.0.0.1:36003'])
# check correctness of sharding group
dp_group_waiting_ports = None
for op in startup_prog_ops:
if op.type == "c_gen_nccl_id" \
and op.desc.output_arg_names()[0] == "comm_id_3":
dp_group_waiting_ports = op.desc.attr("other_endpoints")
self.assertEqual(dp_group_waiting_ports, ['127.0.0.1:36002'])
def test_opt_sharding_with_pp_with_allreduce_fuse(self):
train_prog, startup_prog = static.Program(), static.Program()
avg_cost, strategy = self.pp_net(train_prog, startup_prog)
self.set_strategy(strategy, 'pipeline')
strategy.sharding = True
strategy.sharding_configs = {
"sharding_degree": 1,
"pp_degree": 2,
"dp_degree": 2,
"_dp_as_optimizer_sharding": True,
}
strategy.fuse_all_reduce_ops = True
strategy.fuse_grad_size_in_MB = 32
self.optimizer(avg_cost, strategy, train_prog, startup_prog)
train_prog = train_prog._pipeline_opt['section_program']
startup_prog = startup_prog._pipeline_opt['startup_program']
startup_prog_ops = startup_prog.global_block().ops
main_prog_ops = train_prog.global_block().ops
# check program
startup_prog_op_types = [op.type for op in startup_prog_ops]
main_prog_op_types = [op.type for op in main_prog_ops]
# global, sharding, pp_send, pp_recv
self.assertEqual(startup_prog_op_types, [
'uniform_random', 'fill_constant', 'uniform_random',
'fill_constant', 'uniform_random', 'fill_constant',
'uniform_random', 'fill_constant', 'fill_constant', 'fill_constant',
'fill_constant', 'fill_constant', 'fill_constant', 'fill_constant',
'c_gen_nccl_id', 'c_comm_init', 'c_gen_nccl_id', 'c_comm_init',
'c_gen_nccl_id', 'c_comm_init', 'c_gen_nccl_id', 'c_comm_init',
'c_broadcast', 'c_broadcast', 'c_broadcast', 'c_broadcast',
'c_broadcast', 'c_broadcast', 'c_broadcast', 'c_broadcast',
'c_sync_comm_stream'
])
self.assertEqual(main_prog_op_types, [
'recv_v2', 'mul', 'elementwise_add', 'tanh', 'mul',
'elementwise_add', 'tanh', 'mul', 'elementwise_add', 'tanh', 'mul',
'elementwise_add', 'softmax', 'cross_entropy2', 'mean',
'fill_constant', 'mean_grad', 'cross_entropy_grad2', 'softmax_grad',
'elementwise_add_grad', 'mul_grad', 'tanh_grad',
'elementwise_add_grad', 'mul_grad', 'tanh_grad',
'elementwise_add_grad', 'mul_grad', 'tanh_grad',
'elementwise_add_grad', 'mul_grad', 'c_sync_calc_stream', 'send_v2',
'fill_constant', 'sum', 'fill_constant', 'sum', 'fill_constant',
'sum', 'fill_constant', 'sum', 'fill_constant', 'sum',
'fill_constant', 'sum', 'fill_constant', 'sum', 'fill_constant',
'sum', 'coalesce_tensor', 'c_reduce_sum', 'coalesce_tensor',
'c_reduce_sum', 'c_sync_comm_stream', 'momentum', 'momentum',
'momentum', 'momentum', 'momentum', 'coalesce_tensor',
'c_broadcast', 'coalesce_tensor', 'c_broadcast'
])
def test_opt_sharding_with_pp_amp_gclip(self):
train_prog, startup_prog = static.Program(), static.Program()
avg_cost, strategy = self.pp_net(train_prog, startup_prog)
self.set_strategy(strategy, 'amp')
self.set_strategy(strategy, 'pipeline')
strategy.sharding = True
strategy.sharding_configs = {
"sharding_degree": 1,
"pp_degree": 2,
"dp_degree": 2,
"_dp_as_optimizer_sharding": True,
}
strategy.fuse_all_reduce_ops = True
strategy.fuse_grad_size_in_MB = 32
clip = paddle.fluid.clip.GradientClipByGlobalNorm(1.0)
self.optimizer(
avg_cost, strategy, train_prog, startup_prog, grad_clip=clip)
train_prog = train_prog._pipeline_opt['section_program']
startup_prog = startup_prog._pipeline_opt['startup_program']
self.debug_program(train_prog, startup_prog)
startup_prog_ops = startup_prog.global_block().ops
main_prog_ops = train_prog.global_block().ops
# check program
startup_prog_op_types = [op.type for op in startup_prog_ops]
main_prog_op_types = [op.type for op in main_prog_ops]
# global, sharding, pp_send, pp_recv
self.assertEqual(startup_prog_op_types, [
'uniform_random', 'fill_constant', 'uniform_random',
'fill_constant', 'uniform_random', 'fill_constant',
'uniform_random', 'fill_constant', 'fill_constant', 'fill_constant',
'fill_constant', 'fill_constant', 'fill_constant', 'fill_constant',
'fill_constant', 'fill_constant', 'fill_constant', 'c_gen_nccl_id',
'c_comm_init', 'c_gen_nccl_id', 'c_comm_init', 'c_gen_nccl_id',
'c_comm_init', 'c_gen_nccl_id', 'c_comm_init', 'c_broadcast',
'c_broadcast', 'c_broadcast', 'c_broadcast', 'c_broadcast',
'c_broadcast', 'c_broadcast', 'c_broadcast', 'c_sync_comm_stream'
])
self.assertEqual(main_prog_op_types, [
'recv_v2', 'cast', 'cast', 'mul', 'cast', 'elementwise_add', 'cast',
'tanh', 'cast', 'cast', 'mul', 'cast', 'elementwise_add', 'cast',
'tanh', 'cast', 'cast', 'mul', 'cast', 'elementwise_add', 'cast',
'tanh', 'cast', 'cast', 'mul', 'cast', 'elementwise_add', 'softmax',
'cast', 'cross_entropy2', 'mean', 'elementwise_mul',
'fill_constant', 'elementwise_mul_grad', 'mean_grad',
'cross_entropy_grad2', 'cast', 'softmax_grad',
'elementwise_add_grad', 'mul_grad', 'cast', 'tanh_grad', 'cast',
'elementwise_add_grad', 'mul_grad', 'cast', 'tanh_grad', 'cast',
'elementwise_add_grad', 'mul_grad', 'cast', 'tanh_grad', 'cast',
'elementwise_add_grad', 'mul_grad', 'cast', 'c_sync_calc_stream',
'send_v2', 'fill_constant', 'cast', 'sum', 'fill_constant', 'cast',
'sum', 'fill_constant', 'cast', 'sum', 'fill_constant', 'cast',
'sum', 'fill_constant', 'cast', 'sum', 'fill_constant', 'cast',
'sum', 'fill_constant', 'cast', 'sum', 'fill_constant', 'cast',
'sum', 'coalesce_tensor', 'c_reduce_sum', 'coalesce_tensor',
'c_reduce_sum', 'c_sync_comm_stream', 'check_finite_and_unscale',
'cast', 'c_allreduce_max', 'c_allreduce_max', 'cast',
'update_loss_scaling', 'squared_l2_norm', 'squared_l2_norm',
'squared_l2_norm', 'squared_l2_norm', 'squared_l2_norm', 'sum',
'c_allreduce_sum', 'c_allreduce_sum', 'sqrt', 'fill_constant',
'elementwise_max', 'elementwise_div', 'elementwise_mul',
'elementwise_mul', 'elementwise_mul', 'elementwise_mul',
'elementwise_mul', 'momentum', 'momentum', 'momentum', 'momentum',
'momentum', 'coalesce_tensor', 'c_broadcast', 'coalesce_tensor',
'c_broadcast'
])
def test_opt_sharding_with_pp_amp_gclip_fuse_gm(self):
train_prog, startup_prog = static.Program(), static.Program()
avg_cost, strategy = self.pp_net(train_prog, startup_prog)
self.set_strategy(strategy, 'amp')
self.set_strategy(strategy, 'pipeline')
strategy.sharding = True
strategy.sharding_configs = {
"sharding_degree": 1,
"pp_degree": 2,
"dp_degree": 2,
"_dp_as_optimizer_sharding": True,
}
strategy.fuse_all_reduce_ops = True
strategy.fuse_grad_size_in_MB = 32
strategy.fuse_grad_merge = True
clip = paddle.fluid.clip.GradientClipByGlobalNorm(1.0)
self.optimizer(
avg_cost, strategy, train_prog, startup_prog, grad_clip=clip)
train_prog = train_prog._pipeline_opt['section_program']
startup_prog = startup_prog._pipeline_opt['startup_program']
self.debug_program(train_prog, startup_prog)
startup_prog_ops = startup_prog.global_block().ops
main_prog_ops = train_prog.global_block().ops
# check program
startup_prog_op_types = [op.type for op in startup_prog_ops]
main_prog_op_types = [op.type for op in main_prog_ops]
# global, sharding, pp_send, pp_recv
self.assertEqual(startup_prog_op_types, [
'uniform_random', 'fill_constant', 'uniform_random',
'fill_constant', 'uniform_random', 'fill_constant',
'uniform_random', 'fill_constant', 'fill_constant', 'fill_constant',
'fill_constant', 'fill_constant', 'fill_constant', 'fill_constant',
'fill_constant', 'fill_constant', 'fill_constant', 'c_gen_nccl_id',
'c_comm_init', 'c_gen_nccl_id', 'c_comm_init', 'c_gen_nccl_id',
'c_comm_init', 'c_gen_nccl_id', 'c_comm_init', 'c_broadcast',
'c_broadcast', 'c_broadcast', 'c_broadcast', 'c_broadcast',
'c_broadcast', 'c_broadcast', 'c_broadcast', 'c_sync_comm_stream'
])
self.assertEqual(main_prog_op_types, [
'recv_v2', 'cast', 'cast', 'mul', 'cast', 'elementwise_add', 'cast',
'tanh', 'cast', 'cast', 'mul', 'cast', 'elementwise_add', 'cast',
'tanh', 'cast', 'cast', 'mul', 'cast', 'elementwise_add', 'cast',
'tanh', 'cast', 'cast', 'mul', 'cast', 'elementwise_add', 'softmax',
'cast', 'cross_entropy2', 'mean', 'elementwise_mul',
'coalesce_tensor', 'coalesce_tensor', 'coalesce_tensor',
'coalesce_tensor', 'fill_constant', 'elementwise_mul_grad',
'mean_grad', 'cross_entropy_grad2', 'cast', 'softmax_grad',
'elementwise_add_grad', 'mul_grad', 'cast', 'tanh_grad', 'cast',
'elementwise_add_grad', 'mul_grad', 'cast', 'tanh_grad', 'cast',
'elementwise_add_grad', 'mul_grad', 'cast', 'tanh_grad', 'cast',
'elementwise_add_grad', 'mul_grad', 'cast', 'c_sync_calc_stream',
'send_v2', 'cast', 'sum', 'cast', 'sum', 'c_reduce_sum',
'c_reduce_sum', 'c_sync_comm_stream', 'check_finite_and_unscale',
'cast', 'c_allreduce_max', 'c_allreduce_max', 'cast',
'update_loss_scaling', 'squared_l2_norm', 'squared_l2_norm',
'squared_l2_norm', 'squared_l2_norm', 'squared_l2_norm', 'sum',
'c_allreduce_sum', 'c_allreduce_sum', 'sqrt', 'fill_constant',
'elementwise_max', 'elementwise_div', 'elementwise_mul',
'elementwise_mul', 'elementwise_mul', 'elementwise_mul',
'elementwise_mul', 'momentum', 'momentum', 'momentum', 'momentum',
'momentum', 'coalesce_tensor', 'c_broadcast', 'coalesce_tensor',
'c_broadcast'
])
class TestFleetHybridOptimizerBoundary(TestFleetMetaOptimizer):
def setUp(self):
os.environ["PADDLE_TRAINER_ID"] = "3"
os.environ["PADDLE_TRAINER_ENDPOINTS"] = \
"127.0.0.1:36001,127.0.0.1:36002,127.0.0.1:36003,127.0.0.1:36004"
# pre-assigned ring id
self.mp_ring_id = 0
self.sharding_ring_id = 1
self.dp_ring_id = 2
self.global_ring_id = 3
self.pp_pair_ring_id = 20
self._debug = False
def test_opt_sharding_with_pp_amp_gclip_boundary(self):
"""
test optimizer sharding without parameter
test loss grad scale value
"""
train_prog, startup_prog = static.Program(), static.Program()
avg_cost, strategy = self.boundary_net(train_prog, startup_prog)
self.set_strategy(strategy, 'amp')
self.set_strategy(strategy, 'pipeline')
strategy.sharding = True
strategy.sharding_configs = {
"sharding_degree": 1,
"pp_degree": 2,
"dp_degree": 2,
"_dp_as_optimizer_sharding": True,
}
strategy.fuse_all_reduce_ops = True
strategy.fuse_grad_size_in_MB = 32
clip = paddle.fluid.clip.GradientClipByGlobalNorm(1.0)
self.optimizer(
avg_cost, strategy, train_prog, startup_prog, grad_clip=clip)
train_prog = train_prog._pipeline_opt['section_program']
startup_prog = startup_prog._pipeline_opt['startup_program']
self.debug_program(train_prog, startup_prog)
startup_prog_ops = startup_prog.global_block().ops
main_prog_ops = train_prog.global_block().ops
# check program
startup_prog_op_types = [op.type for op in startup_prog_ops]
main_prog_op_types = [op.type for op in main_prog_ops]
# check loss scale for hybrid
for op in main_prog_ops:
if is_loss_grad_op(op):
self.assertEqual(op.type, 'fill_constant')
self.assertTrue(op.has_attr('value'))
scale = strategy.pipeline_configs[
'accumulate_steps'] * strategy.sharding_configs['dp_degree']
loss_scale = 1.0 / scale
self.assertAlmostEqual(float(op.attr('value')), loss_scale)
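# Illustrative arithmetic (actual values depend on the strategy configured by
# the base TestFleetMetaOptimizer): with dp_degree = 2 and, say,
# accumulate_steps = 4, the expected fill_constant value would be
# 1.0 / (4 * 2) = 0.125.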
# global, sharding, pp_send, pp_recv
self.assertEqual(startup_prog_op_types, [
'uniform_random', 'fill_constant', 'fill_constant', 'fill_constant',
'fill_constant', 'c_gen_nccl_id', 'c_comm_init', 'c_gen_nccl_id',
'c_comm_init', 'c_gen_nccl_id', 'c_comm_init', 'c_gen_nccl_id',
'c_comm_init', 'c_broadcast', 'c_sync_comm_stream'
])
self.assertEqual(main_prog_op_types, [
'recv_v2', 'cast', 'matmul', 'cast', 'reduce_mean',
'elementwise_mul', 'fill_constant', 'elementwise_mul_grad',
'reduce_mean_grad', 'cast', 'matmul_grad', 'c_sync_calc_stream',
'send_v2', 'fill_constant', 'cast', 'sum', 'c_reduce_sum',
'c_sync_comm_stream', 'check_finite_and_unscale', 'cast',
'c_allreduce_max', 'c_allreduce_max', 'cast', 'update_loss_scaling',
'fill_constant', 'c_allreduce_sum', 'c_allreduce_sum', 'sqrt',
'fill_constant', 'elementwise_max', 'elementwise_div', 'c_broadcast'
])
def test_opt_sharding_with_pp_amp_gclip_boundary_card1(self):
""" test optimizer sharding without parameter in card0 """
os.environ["PADDLE_TRAINER_ID"] = "1"
train_prog, startup_prog = static.Program(), static.Program()
avg_cost, strategy = self.boundary_net(train_prog, startup_prog)
self.set_strategy(strategy, 'amp')
self.set_strategy(strategy, 'pipeline')
strategy.sharding = True
strategy.sharding_configs = {
"sharding_degree": 1,
"pp_degree": 2,
"dp_degree": 2,
"_dp_as_optimizer_sharding": True,
}
strategy.fuse_all_reduce_ops = True
| |
# repo: ChristopheVdE/Bacterial-WGS-pipeline
############################################################################################################
# NAME: hybrid assembly.py
# AUTHOR: <NAME>
# FUNCTION: creates some text files containing location variables that are used by the snakefile as input
# USAGE LINUX/MAC: python3 hybrid_assembly.py
# USAGE WINDOWS: python.exe hybrid_assembly.py
############################################################################################################
# IMPORT PACKAGES===========================================================================================
import os
import platform
import subprocess
import string
from datetime import date, datetime
from pathlib import Path
import shutil
import sys
import importlib
# ==========================================================================================================
# CLASSES ==================================================================================================
# System Info ----------------------------------------------------------------------------------------------
class SystemInfo:
def __init__(self):
# SystemType: What System is the host running: Windows, MacOS, Unix (Darwin = MacOS)
# SystemThreads: total amount of threads available to the system
# DockerThreads: total amount of threads available to Docker (dependent on the Docker settings or the settings of the VM running Docker)
if "Windows" in platform.system():
self.SystemType = platform.system()
self.SystemThreads = subprocess.Popen('WMIC CPU Get NumberOfLogicalProcessors', shell=True, stdout=subprocess.PIPE)
elif "Darwin" in platform.system():
self.SystemType = platform.system()
self.SystemThreads = subprocess.Popen('sysctl -n hw.ncpu', shell=True, stdout=subprocess.PIPE)
else:
self.SystemType = "Unix"
self.SystemThreads = subprocess.Popen('nproc --all', shell=True, stdout=subprocess.PIPE)
self.DockerThreads = subprocess.Popen('docker run -it --rm --name ubuntu_bash christophevde/ubuntu_bash:v2.0_stable nproc --all', shell=True, stdout=subprocess.PIPE)
def TranslateSubprocesOutput(self):
# Translates the output of the "Threads"-related subprocesses to something useable
for key, value in self.__dict__.items():
if key in ["SystemThreads", "DockerThreads"]:
for line in value.stdout:
if any(char.isdigit() for char in line.decode("UTF-8")):
self.__dict__[key] = int(line.decode("UTF-8"))
def GetThreadsToUse(self):
# Tip to increase the maximum amount of threads available to Docker
if self.DockerThreads < self.SystemThreads:
print("\nYou might still be able to increase the amount of threads available to Docker. Check your Docker or Virtual-Machine Settings\n")
# Ask user for the amount of threads to use for the analysis
self.UseThreads = str(input("How many threads do you want to use for the analysis (min = 1, max = {}): ".format(self.DockerThreads)))
while int(self.UseThreads) not in range(1, int(self.DockerThreads) + 1):
self.UseThreads = str(input("[ERROR] Chosen amount of threads is outside the possible range (min = 1, max = {}): ".format(self.DockerThreads)))
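# Illustrative usage of SystemInfo (mirrors the calls made further down in this script):
#   system = SystemInfo()
#   system.TranslateSubprocesOutput()   # turns the subprocess handles into integer thread counts
#   system.GetThreadsToUse()            # asks the user, bounded by DockerThreads
#   system.UseThreads                   # -> chosen thread count as a string, passed to the containers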
# User settings --------------------------------------------------------------------------------------------
class Settings:
def __init__(self):
self.Illumina = input("\nLocation of Illumina samples:\n")
self.MinIon = input("Location of Minion samples:\n ")
self.Results = input("Where do you want to save the results:\n ")
self.CorrespondingSamples = input("Input location of text file containing info on which Illumina sample corresponds with which MinIon sample:\n ")
self.Adaptors = input("Location of the adaptorfile for trimming:\n ")
self.BarcodeKit = input("Which barcode-kit was used for the Minion samples: ")
self.Run = date.today().strftime("%Y%m%d")
self.Scripts = os.path.dirname(os.path.realpath(__file__))
def CheckLocations(self):
for key in self.__dict__.keys():
# locations that should be a directory
if key in ["Illumina", "MinIon", "Scripts", "Results"]:
while not os.path.isdir(self.__dict__[key]):
self.__dict__[key] = input("[ERROR] Directory not found, please provide correct location of {}: ".format(key))
# locations that should be a file
elif key in ["Adaptors"]:
while not os.path.isfile(self.__dict__[key]):
self.__dict__[key] = input("[ERROR] File not found, please provide correct location of {}: ".format(key))
def CreateFoldersIfNotExist(self):
folders = [self.Results+"/Hybrid/"+self.Run,]
for i in folders:
os.makedirs(i, exist_ok=True)
def CreateSettingsFile(self):
file = open(os.path.dirname(os.path.dirname(os.path.dirname((os.path.realpath(__file__))))) + "\\Modules\\Settings\\Hybrid\\UserSettings" + self.Run + ".py", "w")
for key, value in self.__dict__.items():
if key in ["Illumina", "MinIon", "Adaptors", "Results", "Scripts", "StartGenes", "CorrespondingSamples"]:
file.write("{} = r'{}'\n".format(key, value))
else:
file.write("{} = r'{}'\n".format(key, value))
file.close()
# Organism specific settings -------------------------------------------------------------------------------
class OrganismData:
def __init__(self):
self.Kingdom = input("\nKingdom of sample-organism: ")
self.Genus = input("Genus of sample-organism: ")
self.Species = input("Species of sample-organism: ")
self.StartGenes = input("Location of multifasta containing start-genes for annotation:\n")
def CheckLocations(self):
for key in self.__dict__.keys():
# locations that should be a file
if key == "StartGenes":
while not os.path.isfile(self.__dict__[key]):
self.__dict__[key] = input("[ERROR] File not found, please provide correct location of {}: ".format(key))
def CreateOrganismFile(self):
file = open(os.path.dirname(os.path.dirname(os.path.dirname((os.path.realpath(__file__))))) + "\\Modules\\OrganismData\\OrganismInfo\\" + self.Genus + "_" + self.Species + ".py", "w")
for key, value in self.__dict__.items():
if key in ["Illumina", "MinIon", "Adaptors", "Results", "Scripts", "StartGenes", "CorrespondingSamples"]:
file.write("{} = r'{}'\n".format(key, value))
else:
file.write("{} = '{}'\n".format(key, value))
file.close()
# Converst Windows folder paths for mountig in Docker ------------------------------------------------------
class PathConverter:
def __init__(self, SystemType, class1, class2):
if SystemType == 'Windows':
for data in [class1, class2]:
for key, value in data.__dict__.items():
if key in ["Illumina", "MinIon", "Adaptors", "Results", "Scripts", "StartGenes", "CorrespondingSamples"]:
for letter in list(string.ascii_lowercase+string.ascii_uppercase):
if value.startswith(letter+":/"):
self.__dict__[key] = value.replace(letter+":/","/"+letter.lower()+"//").replace('\\','/')
elif value.startswith(letter+":\\"):
self.__dict__[key] = value.replace(letter+":\\","/"+letter.lower()+"//").replace('\\','/')
else:
for key, value in data.__dict__.items():
if key in ["Illumina", "MinIon", "Adaptors", "Results", "Scripts", "StartGenes", "CorrespondingSamples"]:
self.__dict__[key] = value
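# Illustrative example of the conversion performed above (hypothetical path):
#   'E:\\WGS\\run1\\illumina'  ->  '/e//WGS/run1/illumina'
# i.e. the drive letter is lower-cased and rewritten so the path can be passed
# to 'docker run -v' on Windows; on MacOS/Unix paths are copied through unchanged.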
# Timer ----------------------------------------------------------------------------------------------------
class Timer:
def __init__(self):
self.AnalysisTime = datetime.now()
def NewTimer(self, step):
self.__dict__[step] = datetime.now()
def StopTimer(self, step):
self.__dict__[step] = datetime.now() - self.__dict__[step]
self.__dict__[step] = str(self.__dict__[step]).split(":")
# self.__dict__[step] = "{}H, {}MM, {}SS".format(self.__dict__[step][0], self.__dict__[step][1], self.__dict__[step][2].split(".")[0])
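# Illustrative usage of the Timer class (with a hypothetical "Trimming" step):
#   timer = Timer()
#   timer.NewTimer("Trimming")
#   # ... run the trimming step ...
#   timer.StopTimer("Trimming")
#   timer.Trimming   # -> elapsed time split on ':', e.g. ['0', '12', '34.567890']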
# FUNCTIONS==================================================================================================
# List modules ----------------------------------------------------------------------------------------------
def ListModules(path):
# List available modules --------------------------------------------------------------------------------
modules = []
for module in os.listdir(path):
if ".py" in module and module != "__init__.py":
modules.append(module.replace(".py", ""))
print(modules)
# Ask for module to import ------------------------------------------------------------------------------
toImport = input("module to import: ")
while not toImport in modules:
toImport = input("module to import: ")
return toImport
# SHORT READ SAMPLE LIST CREATION----------------------------------------------------------------------------
def sample_list(Illumina):
global ids
ids = []
for sample in os.listdir(Illumina):
if ".fastq.gz" in sample:
ids.append(sample.replace('_L001_R1_001.fastq.gz','').replace('_L001_R2_001.fastq.gz',''))
ids = sorted(set(ids))
return ids
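# Illustrative example of the id derivation above (hypothetical file names):
#   'isolate01_S1_L001_R1_001.fastq.gz' and 'isolate01_S1_L001_R2_001.fastq.gz'
#   both reduce to the single id 'isolate01_S1'.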
# ===========================================================================================================
# ASSEMBLY PREPARATION: USER INPUT===========================================================================
# Ask For "Settings" & "Organism"-file ----------------------------------------------------------------------
settingsfile = input("Do you have a premade Settings-file that you want to use? (y/n): ").lower()
organismfile = input("Do you have a premade file containing info about the organism of your samples? (y/n): ").lower()
# Get sytem info --------------------------------------------------------------------------------------------
system = SystemInfo()
system.TranslateSubprocesOutput()
system.GetThreadsToUse()
# import settings-file if exists -----------------------------------------------------------------------------
if settingsfile == 'y':
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname((os.path.realpath(__file__))))) + '\Modules\Settings\Hybrid')
UserSettings = importlib.import_module(ListModules(os.path.dirname(os.path.dirname(os.path.dirname((os.path.realpath(__file__))))) + '\Modules\Settings\Hybrid'))
# Get general settings --------------------------------------------------------------------------------------
else:
UserSettings = Settings()
UserSettings.CheckLocations()
UserSettings.CreateFoldersIfNotExist()
UserSettings.CreateSettingsFile()
# import organism-file if exists -----------------------------------------------------------------------------
if organismfile == 'y':
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname((os.path.realpath(__file__))))) + '\Modules\OrganismData\OrganismInfo')
Organism = importlib.import_module(ListModules(os.path.dirname(os.path.dirname(os.path.dirname((os.path.realpath(__file__))))) + '\Modules\OrganismData\OrganismInfo'))
# Get organism specific info --------------------------------------------------------------------------------
else:
Organism = OrganismData()
Organism.CheckLocations()
Organism.CreateOrganismFile()
# Convert folderpaths for mounting in docker-container when using Windows -----------------------------------
ConvertedPaths = PathConverter(system.SystemType, UserSettings, Organism)
# Activate Timer: Full analysis -----------------------------------------------------------------------------
timer = Timer()
# Enable error collection -----------------------------------------------------------------------------------
errors = []
error_count = 0
# ===========================================================================================================
# ASSEMBLY PREPARATION: GENERAL PREPARATION =================================================================
# Move and rename required "info"-files to "Results"-folder -------------------------------------------------
shutil.copy(UserSettings.CorrespondingSamples, UserSettings.Results+"/Hybrid/"+UserSettings.Run+"/corresponding_samples.txt")
if not Organism.StartGenes == '':
shutil.copy(Organism.StartGenes, UserSettings.Results + "/Hybrid/" + UserSettings.Run + "/start_genes.fasta")
# List content of "Illumina"-folder and save it in a file ---------------------------------------------------
file = open(UserSettings.Results+"/Hybrid/"+UserSettings.Run+"/sampleList.txt",mode="w")
for i in sample_list(UserSettings.Illumina):
file.write(i+"\n")
file.close()
# COPY ILLUMINA SAMPLES TO RESULTS---------------------------------------------------------------------------
print("\n[HYBRID][SHORT READS] Copying rawdata")
copy = 'docker run -it --rm \
--name copy_rawdata \
-v "'+ConvertedPaths.Illumina+':/home/rawdata/" \
-v "'+ConvertedPaths.Results+':/home/Pipeline/" \
-v "'+ConvertedPaths.Scripts+':/home/Scripts/" \
christophevde/ubuntu_bash:v2.2_stable \
/bin/bash -c "dos2unix -q /home/Scripts/Short_read/01_copy_rawdata.sh \
&& sh /home/Scripts/Short_read/01_copy_rawdata.sh '+UserSettings.Run+'"'
os.system(copy)
print("Done")
# ============================================================================================================
while error_count == 0:
# [HYBRID ASSEMBLY] SHORT READS ==============================================================================
# SHORT READS: FASTQC RAWDATA (DOCKER)-------------------------------------------------------------------------
print("\n[HYBRID][SHORT READS] FastQC rawdata")
for sample in ids:
my_file1 = Path(UserSettings.Results+"/Hybrid/"+UserSettings.Run+"/01_Short_reads/"+sample+"/01_QC-Rawdata/QC_FastQC/"+sample+"_L001_R1_001_fastqc.html")
my_file2 = Path(UserSettings.Results+"/Hybrid/"+UserSettings.Run+"/01_Short_reads/"+sample+"/01_QC-Rawdata/QC_FastQC/"+sample+"_L001_R2_001_fastqc.html")
if not my_file1.is_file() and not my_file2.is_file():
os.system('docker run -it --rm \
--name fastqc_raw \
-v "'+ConvertedPaths.Scripts+':/home/Scripts/" \
-v "'+ConvertedPaths.Results+':/home/Pipeline/" \
christophevde/fastqc:v2.2_stable \
/bin/bash -c "dos2unix -q /home/Scripts/Short_read/QC01_FastQC_Raw.sh \
&& /home/Scripts/Short_read/QC01_FastQC_Raw.sh '+sample+' '+UserSettings.Run+' '+system.UseThreads+'"')
if not my_file1.is_file() or not my_file2.is_file():
errors.append("[ERROR] STEP 1: FastQC; quality control rawdata (short reads)")
error_count +=1
else:
print(" - FastQC results for the rawdata of sample "+sample+" already exists")
print("Done")
#SHORT READS: MULTIQC RAWDATA (DOCKER)-----------------------------------------------------------------------
print("\n[HYBRID][SHORT READS] MultiQC rawdata")
#FULL RUN------------------------------------------------------------------------------------------------
my_file = Path(UserSettings.Results+"/Hybrid/"+UserSettings.Run+"/01_Short_reads/QC_MultiQC/QC-Rawdata/multiqc_report.html")
if not my_file.is_file():
#CREATE TEMP FOLDER----------------------------------------------------------------------------------
os.makedirs(UserSettings.Results+"/Hybrid/"+UserSettings.Run+"/temp/", exist_ok=True)
#COPY FASTQC RESULTS---------------------------------------------------------------------------------
print("creating temporary directory to copy all fastqc results of rawdata")
for sample in ids:
os.system('docker run -it --rm\
--name copy_fastqc\
-v "'+UserSettings.Results+'/Hybrid/'+UserSettings.Run+'/01_Short_reads/'+sample+'/01_QC-Rawdata/QC_FastQC/:/home/fastqc" \
-v "'+UserSettings.Results+'/Hybrid/'+UserSettings.Run+'/temp/:/home/multiqc" \
christophevde/ubuntu_bash:v2.2_stable \
/bin/bash -c "cp -rn /home/fastqc/* /home/multiqc"')
#EXECUTE MULTIQC-------------------------------------------------------------------------------------
os.system('docker run -it --rm \
--name multiqc_raw \
-v "'+ConvertedPaths.Scripts+':/home/Scripts/" \
-v "'+ConvertedPaths.Results+':/home/Pipeline/" \
christophevde/multiqc:v2.2_stable \
/bin/bash -c "dos2unix -q /home/Scripts/Short_read/QC01_MultiQC_Raw_FullRun.sh \
&& /home/Scripts/Short_read/QC01_MultiQC_Raw_FullRun.sh '+UserSettings.Run+'"')
if not my_file.is_file():
errors.append("[ERROR] STEP 2: MultiQC; quality control rawdata (short reads)")
error_count +=1
else:
print(" - MultiQC results for the full run (rawdata) already exists")
#REMOVE TEMP FOLDER----------------------------------------------------------------------------------
os.system('docker run -it --rm\
--name delete_temp_folder\
-v "'+UserSettings.Results+'/Hybrid/'+UserSettings.Run+':/home/multiqc" \
christophevde/ubuntu_bash:v2.2_stable \
/bin/bash -c "rm -R /home/multiqc/temp"')
#EACH SAMPLE SEPARALTY-----------------------------------------------------------------------------------
for sample in ids:
my_file = Path(UserSettings.Results+"/Hybrid/"+UserSettings.Run+"/01_Short_reads/"+sample+"/01_QC-Rawdata/QC_MultiQC/multiqc_report.html")
if not my_file.is_file():
os.system('docker run -it --rm \
--name multiqc_raw \
-v "'+ConvertedPaths.Scripts+':/home/Scripts/" \
-v "'+ConvertedPaths.Results+':/home/Pipeline/" \
christophevde/multiqc:v2.2_stable \
/bin/bash -c "dos2unix -q /home/Scripts/Short_read/QC01_MultiQC_Raw_oneSample.sh \
&& /home/Scripts/Short_read/QC01_MultiQC_Raw_oneSample.sh '+sample+' '+UserSettings.Run+'"')
if not my_file.is_file():
errors.append("[ERROR] STEP 2: MultiQC; quality control rawdata (short reads)")
error_count +=1
else:
print(" - MultiQC results for the rawdata of sample "+sample+" already exists")
print("Done")
#SHORT READS: TRIMMING (DOCKER)------------------------------------------------------------------------------
print("\n[HYBRID][SHORT READS] Trimming")
for sample in ids:
my_file1 = Path(UserSettings.Results+"/Hybrid/"+UserSettings.Run+"/01_Short_reads/"+sample+"/02_Trimmomatic/"+sample+"_L001_R1_001_P.fastq.gz")
my_file2 = Path(UserSettings.Results+"/Hybrid/"+UserSettings.Run+"/01_Short_reads/"+sample+"/02_Trimmomatic/"+sample+"_L001_R2_001_P.fastq.gz")
if not my_file1.is_file() | |
<filename>tests/composition/test_composition.py
import functools
import logging
from timeit import timeit
import numpy as np
import pytest
from psyneulink.components.functions.function import Linear, SimpleIntegrator
from psyneulink.components.mechanisms.processing import integratormechanism
from psyneulink.components.mechanisms.processing.transfermechanism import TransferMechanism
from psyneulink.components.projections.pathway.mappingprojection import MappingProjection
from psyneulink.composition import Composition, CompositionError, MechanismRole
from psyneulink.scheduling.condition import EveryNCalls
from psyneulink.scheduling.scheduler import Scheduler
from psyneulink.scheduling.time import TimeScale
logger = logging.getLogger(__name__)
# All tests are set to run. If you need to skip certain tests,
# see http://doc.pytest.org/en/latest/skipping.html
# Unit tests for each function of the Composition class #######################
# Unit tests for Composition.Composition()
@pytest.mark.skip
class TestConstructor:
def test_no_args(self):
comp = Composition()
assert isinstance(comp, Composition)
def test_two_calls_no_args(self):
comp = Composition()
assert isinstance(comp, Composition)
comp_2 = Composition()
assert isinstance(comp_2, Composition)
@pytest.mark.stress
@pytest.mark.parametrize(
'count', [
10000,
]
)
def test_timing_no_args(self, count):
t = timeit('comp = Composition()', setup='from psyneulink.composition import Composition', number=count)
print()
logger.info('completed {0} creation{2} of Composition() in {1:.8f}s'.format(count, t, 's' if count != 1 else ''))
# Unit tests for Composition.add_mechanism
@pytest.mark.skip
class TestAddMechanism:
def test_add_once(self):
comp = Composition()
comp.add_mechanism(TransferMechanism())
def test_add_twice(self):
comp = Composition()
comp.add_mechanism(TransferMechanism())
comp.add_mechanism(TransferMechanism())
def test_add_same_twice(self):
comp = Composition()
mech = TransferMechanism()
comp.add_mechanism(mech)
comp.add_mechanism(mech)
@pytest.mark.stress
@pytest.mark.parametrize(
'count', [
100,
]
)
def test_timing_stress(self, count):
t = timeit(
'comp.add_mechanism(TransferMechanism())',
setup='''
from psyneulink.components.mechanisms.processing.transfermechanism import TransferMechanism
from psyneulink.composition import Composition
comp = Composition()
''',
number=count
)
print()
logger.info('completed {0} addition{2} of a Mechanism to a Composition in {1:.8f}s'.
format(count, t, 's' if count != 1 else ''))
# Unit tests for Composition.add_projection
@pytest.mark.skip
class TestAddProjection:
def test_add_once(self):
comp = Composition()
A = TransferMechanism(name='A')
B = TransferMechanism(name='B')
comp.add_mechanism(A)
comp.add_mechanism(B)
comp.add_projection(A, MappingProjection(), B)
def test_add_twice(self):
comp = Composition()
A = TransferMechanism(name='A')
B = TransferMechanism(name='B')
comp.add_mechanism(A)
comp.add_mechanism(B)
comp.add_projection(A, MappingProjection(), B)
comp.add_projection(A, MappingProjection(), B)
def test_add_same_twice(self):
comp = Composition()
A = TransferMechanism(name='A')
B = TransferMechanism(name='B')
comp.add_mechanism(A)
comp.add_mechanism(B)
proj = MappingProjection()
comp.add_projection(A, proj, B)
comp.add_projection(A, proj, B)
@pytest.mark.stress
@pytest.mark.parametrize(
'count', [
1000,
]
)
def test_timing_stress(self, count):
t = timeit('comp.add_projection(A, MappingProjection(), B)',
setup='''
from psyneulink.components.mechanisms.processing.transfermechanism import TransferMechanism
from psyneulink.components.projections.pathway.mappingprojection import MappingProjection
from psyneulink.composition import Composition
comp = Composition()
A = TransferMechanism(name='A')
B = TransferMechanism(name='B')
comp.add_mechanism(A)
comp.add_mechanism(B)
''',
number=count
)
print()
logger.info('completed {0} addition{2} of a projection to a composition in {1:.8f}s'.format(count, t, 's' if count != 1 else ''))
@pytest.mark.skip
class TestAnalyzeGraph:
def test_empty_call(self):
comp = Composition()
comp._analyze_graph()
def test_singleton(self):
comp = Composition()
A = TransferMechanism(name='A')
comp.add_mechanism(A)
comp._analyze_graph()
assert A in comp.get_mechanisms_by_role(MechanismRole.ORIGIN)
assert A in comp.get_mechanisms_by_role(MechanismRole.TERMINAL)
def test_two_independent(self):
comp = Composition()
A = TransferMechanism(name='A')
B = TransferMechanism(name='B')
comp.add_mechanism(A)
comp.add_mechanism(B)
comp._analyze_graph()
assert A in comp.get_mechanisms_by_role(MechanismRole.ORIGIN)
assert B in comp.get_mechanisms_by_role(MechanismRole.ORIGIN)
assert A in comp.get_mechanisms_by_role(MechanismRole.TERMINAL)
assert B in comp.get_mechanisms_by_role(MechanismRole.TERMINAL)
def test_two_in_a_row(self):
comp = Composition()
A = TransferMechanism(name='A')
B = TransferMechanism(name='B')
comp.add_mechanism(A)
comp.add_mechanism(B)
comp.add_projection(A, MappingProjection(), B)
comp._analyze_graph()
assert A in comp.get_mechanisms_by_role(MechanismRole.ORIGIN)
assert B not in comp.get_mechanisms_by_role(MechanismRole.ORIGIN)
assert A not in comp.get_mechanisms_by_role(MechanismRole.TERMINAL)
assert B in comp.get_mechanisms_by_role(MechanismRole.TERMINAL)
# (A)<->(B)
def test_two_recursive(self):
comp = Composition()
A = TransferMechanism(name='A')
B = TransferMechanism(name='B')
comp.add_mechanism(A)
comp.add_mechanism(B)
comp.add_projection(A, MappingProjection(), B)
comp.add_projection(B, MappingProjection(), A)
comp._analyze_graph()
assert A not in comp.get_mechanisms_by_role(MechanismRole.ORIGIN)
assert B not in comp.get_mechanisms_by_role(MechanismRole.ORIGIN)
assert A not in comp.get_mechanisms_by_role(MechanismRole.TERMINAL)
assert B not in comp.get_mechanisms_by_role(MechanismRole.TERMINAL)
assert A in comp.get_mechanisms_by_role(MechanismRole.CYCLE)
assert B in comp.get_mechanisms_by_role(MechanismRole.RECURRENT_INIT)
# (A)->(B)<->(C)<-(D)
def test_two_origins_pointing_to_recursive_pair(self):
comp = Composition()
A = TransferMechanism(name='A')
B = TransferMechanism(name='B')
C = TransferMechanism(name='C')
D = TransferMechanism(name='D')
comp.add_mechanism(A)
comp.add_mechanism(B)
comp.add_mechanism(C)
comp.add_mechanism(D)
comp.add_projection(A, MappingProjection(), B)
comp.add_projection(C, MappingProjection(), B)
comp.add_projection(B, MappingProjection(), C)
comp.add_projection(D, MappingProjection(), C)
comp._analyze_graph()
assert A in comp.get_mechanisms_by_role(MechanismRole.ORIGIN)
assert D in comp.get_mechanisms_by_role(MechanismRole.ORIGIN)
assert B in comp.get_mechanisms_by_role(MechanismRole.CYCLE)
assert C in comp.get_mechanisms_by_role(MechanismRole.RECURRENT_INIT)
@pytest.mark.skip
class TestValidateFeedDict:
def test_empty_feed_dicts(self):
comp = Composition()
A = TransferMechanism(name='A')
B = TransferMechanism(name='B')
comp.add_mechanism(A)
comp.add_mechanism(B)
comp.add_projection(A, MappingProjection(), B)
comp._analyze_graph()
feed_dict_origin = {}
feed_dict_terminal = {}
comp._validate_feed_dict(feed_dict_origin, comp.get_mechanisms_by_role(MechanismRole.ORIGIN), "origin")
comp._validate_feed_dict(feed_dict_terminal, comp.get_mechanisms_by_role(MechanismRole.TERMINAL), "terminal")
def test_origin_and_terminal_with_mapping(self):
comp = Composition()
A = TransferMechanism(name='A')
B = TransferMechanism(name='B')
comp.add_mechanism(A)
comp.add_mechanism(B)
comp.add_projection(A, MappingProjection(), B)
comp._analyze_graph()
feed_dict_origin = {A: [[0]]}
feed_dict_terminal = {B: [[0]]}
comp._validate_feed_dict(feed_dict_origin, comp.get_mechanisms_by_role(MechanismRole.ORIGIN), "origin")
comp._validate_feed_dict(feed_dict_terminal, comp.get_mechanisms_by_role(MechanismRole.TERMINAL), "terminal")
def test_origin_and_terminal_with_swapped_feed_dicts_1(self):
comp = Composition()
A = TransferMechanism(name='A')
B = TransferMechanism(name='B')
comp.add_mechanism(A)
comp.add_mechanism(B)
comp.add_projection(A, MappingProjection(), B)
comp._analyze_graph()
feed_dict_origin = {B: [[0]]}
feed_dict_terminal = {A: [[0]]}
with pytest.raises(ValueError):
comp._validate_feed_dict(feed_dict_origin, comp.get_mechanisms_by_role(MechanismRole.ORIGIN), "origin")
def test_origin_and_terminal_with_swapped_feed_dicts_2(self):
comp = Composition()
A = TransferMechanism(name='A')
B = TransferMechanism(name='B')
comp.add_mechanism(A)
comp.add_mechanism(B)
comp.add_projection(A, MappingProjection(), B)
comp._analyze_graph()
feed_dict_origin = {B: [[0]]}
feed_dict_terminal = {A: [[0]]}
with pytest.raises(ValueError):
comp._validate_feed_dict(feed_dict_terminal, comp.get_mechanisms_by_role(MechanismRole.TERMINAL), "terminal")
def test_multiple_origin_mechs(self):
comp = Composition()
A = TransferMechanism(name='A')
B = TransferMechanism(name='B')
C = TransferMechanism(name='C')
comp.add_mechanism(A)
comp.add_mechanism(B)
comp.add_mechanism(C)
comp.add_projection(A, MappingProjection(), C)
comp.add_projection(B, MappingProjection(), C)
comp._analyze_graph()
feed_dict_origin = {A: [[0]], B: [[0]]}
feed_dict_terminal = {C: [[0]]}
comp._validate_feed_dict(feed_dict_origin, comp.get_mechanisms_by_role(MechanismRole.ORIGIN), "origin")
comp._validate_feed_dict(feed_dict_terminal, comp.get_mechanisms_by_role(MechanismRole.TERMINAL), "terminal")
def test_multiple_origin_mechs_only_one_in_feed_dict(self):
comp = Composition()
A = TransferMechanism(name='A')
B = TransferMechanism(name='B')
C = TransferMechanism(name='C')
comp.add_mechanism(A)
comp.add_mechanism(B)
comp.add_mechanism(C)
comp.add_projection(A, MappingProjection(), C)
comp.add_projection(B, MappingProjection(), C)
comp._analyze_graph()
feed_dict_origin = {B: [[0]]}
feed_dict_terminal = {C: [[0]]}
comp._validate_feed_dict(feed_dict_origin, comp.get_mechanisms_by_role(MechanismRole.ORIGIN), "origin")
comp._validate_feed_dict(feed_dict_terminal, comp.get_mechanisms_by_role(MechanismRole.TERMINAL), "terminal")
def test_input_state_len_3(self):
comp = Composition()
A = TransferMechanism(default_variable=[0, 1, 2], name='A')
B = TransferMechanism(default_variable=[0, 1, 2], name='B')
comp.add_mechanism(A)
comp.add_mechanism(B)
comp.add_projection(A, MappingProjection(), B)
comp._analyze_graph()
feed_dict_origin = {A: [[0, 1, 2]]}
feed_dict_terminal = {B: [[0, 1, 2]]}
comp._validate_feed_dict(feed_dict_origin, comp.get_mechanisms_by_role(MechanismRole.ORIGIN), "origin")
comp._validate_feed_dict(feed_dict_terminal, comp.get_mechanisms_by_role(MechanismRole.TERMINAL), "terminal")
def test_input_state_len_3_feed_dict_len_2(self):
comp = Composition()
A = TransferMechanism(default_variable=[0, 1, 2], name='A')
B = TransferMechanism(default_variable=[0, 1, 2], name='B')
comp.add_mechanism(A)
comp.add_mechanism(B)
comp.add_projection(A, MappingProjection(), B)
comp._analyze_graph()
feed_dict_origin = {A: [[0, 1]]}
feed_dict_terminal = {B: [[0]]}
with pytest.raises(ValueError):
comp._validate_feed_dict(feed_dict_origin, comp.get_mechanisms_by_role(MechanismRole.ORIGIN), "origin")
def test_input_state_len_2_feed_dict_len_3(self):
comp = Composition()
A = TransferMechanism(default_variable=[0, 1], name='A')
B = TransferMechanism(default_variable=[0, 1], name='B')
comp.add_mechanism(A)
comp.add_mechanism(B)
comp.add_projection(A, MappingProjection(), B)
comp._analyze_graph()
feed_dict_origin = {A: [[0, 1, 2]]}
feed_dict_terminal = {B: [[0]]}
with pytest.raises(ValueError):
comp._validate_feed_dict(feed_dict_origin, comp.get_mechanisms_by_role(MechanismRole.ORIGIN), "origin")
def test_feed_dict_includes_mechs_of_correct_and_incorrect_types(self):
comp = Composition()
A = TransferMechanism(default_variable=[0], name='A')
B = TransferMechanism(default_variable=[0], name='B')
comp.add_mechanism(A)
comp.add_mechanism(B)
comp.add_projection(A, MappingProjection(), B)
comp._analyze_graph()
feed_dict_origin = {A: [[0]], B: [[0]]}
with pytest.raises(ValueError):
comp._validate_feed_dict(feed_dict_origin, comp.get_mechanisms_by_role(MechanismRole.ORIGIN), "origin")
def test_input_state_len_3_brackets_extra_1(self):
comp = Composition()
A = TransferMechanism(default_variable=[0, 1, 2], name='A')
B = TransferMechanism(default_variable=[0, 1, 2], name='B')
comp.add_mechanism(A)
comp.add_mechanism(B)
comp.add_projection(A, MappingProjection(), B)
comp._analyze_graph()
feed_dict_origin = {A: [[[0, 1, 2]]]}
feed_dict_terminal = {B: [[[0, 1, 2]]]}
comp._validate_feed_dict(feed_dict_origin, comp.get_mechanisms_by_role(MechanismRole.ORIGIN), "origin")
comp._validate_feed_dict(feed_dict_terminal, comp.get_mechanisms_by_role(MechanismRole.TERMINAL), "terminal")
def test_input_state_len_3_brackets_missing_1(self):
comp = Composition()
A = TransferMechanism(default_variable=[0, 1, 2], name='A')
B = TransferMechanism(default_variable=[0, 1, 2], name='B')
comp.add_mechanism(A)
comp.add_mechanism(B)
comp.add_projection(A, MappingProjection(), B)
comp._analyze_graph()
feed_dict_origin = {A: [0, 1, 2]}
feed_dict_terminal = {B: [[0]]}
with pytest.raises(TypeError):
comp._validate_feed_dict(feed_dict_origin, comp.get_mechanisms_by_role(MechanismRole.ORIGIN), "origin")
def test_empty_feed_dict_for_empty_type(self):
comp = Composition()
A = TransferMechanism(default_variable=[0], name='A')
B = TransferMechanism(default_variable=[0], name='B')
comp.add_mechanism(A)
comp.add_mechanism(B)
comp.add_projection(A, MappingProjection(), B)
comp._analyze_graph()
feed_dict_origin = {A: [[0]]}
feed_dict_monitored = {}
comp._validate_feed_dict(feed_dict_monitored, comp.get_mechanisms_by_role(MechanismRole.MONITORED), "monitored")
def test_mech_in_feed_dict_for_empty_type(self):
comp = Composition()
A = TransferMechanism(default_variable=[0])
B = TransferMechanism(name='B')
comp.add_mechanism(A)
comp.add_mechanism(B)
comp.add_projection(A, MappingProjection(), B)
comp._analyze_graph()
feed_dict_origin = {A: [[0]]}
feed_dict_monitored = {B: [[0]]}
with pytest.raises(ValueError):
comp._validate_feed_dict(feed_dict_monitored, comp.get_mechanisms_by_role(MechanismRole.MONITORED), "monitored")
def test_one_mech_1(self):
comp = Composition()
A = TransferMechanism(default_variable=[0])
comp.add_mechanism(A)
comp._analyze_graph()
feed_dict_origin = {A: [[0]]}
feed_dict_terminal = {A: [[0]]}
comp._validate_feed_dict(feed_dict_origin, comp.get_mechanisms_by_role(MechanismRole.ORIGIN), "origin")
def test_one_mech_2(self):
comp = Composition()
A = TransferMechanism(default_variable=[0])
comp.add_mechanism(A)
comp._analyze_graph()
feed_dict_origin = {A: [[0]]}
feed_dict_terminal = {A: [[0]]}
comp._validate_feed_dict(feed_dict_terminal, comp.get_mechanisms_by_role(MechanismRole.TERMINAL), "terminal")
def test_multiple_time_steps_1(self):
comp = Composition()
A = TransferMechanism(default_variable=[[0, 1, 2]], name='A')
B = TransferMechanism(default_variable=[[0, 1, 2]], name='B')
comp.add_mechanism(A)
comp.add_mechanism(B)
comp.add_projection(A, MappingProjection(), B)
comp._analyze_graph()
feed_dict_origin = {A: [[0, 1, 2], [0, 1, 2]]}
feed_dict_terminal = {B: [[0, 1, 2]]}
comp._validate_feed_dict(feed_dict_origin, comp.get_mechanisms_by_role(MechanismRole.ORIGIN), "origin")
comp._validate_feed_dict(feed_dict_terminal, comp.get_mechanisms_by_role(MechanismRole.TERMINAL), "terminal")
def test_multiple_time_steps_2(self):
comp = Composition()
A = TransferMechanism(default_variable=[[0, 1, 2]], name='A')
B = TransferMechanism(default_variable=[[0, 1, 2]], name='B')
comp.add_mechanism(A)
comp.add_mechanism(B)
comp.add_projection(A, MappingProjection(), B)
comp._analyze_graph()
feed_dict_origin = {A: [[[0, 1, 2]], [[0, 1, 2]]]}
feed_dict_terminal = {B: [[0, 1, 2]]}
comp._validate_feed_dict(feed_dict_origin, comp.get_mechanisms_by_role(MechanismRole.ORIGIN), "origin")
comp._validate_feed_dict(feed_dict_terminal, comp.get_mechanisms_by_role(MechanismRole.TERMINAL), "terminal")
@pytest.mark.skip
class TestGetMechanismsByRole:
def test_multiple_roles(self):
comp = Composition()
mechs = [TransferMechanism() for x in range(4)]
for mech in mechs:
comp.add_mechanism(mech)
comp._add_mechanism_role(mechs[0], MechanismRole.ORIGIN)
comp._add_mechanism_role(mechs[1], MechanismRole.INTERNAL)
comp._add_mechanism_role(mechs[2], MechanismRole.INTERNAL)
comp._add_mechanism_role(mechs[3], MechanismRole.CYCLE)
for role in list(MechanismRole):
if role is MechanismRole.ORIGIN:
assert comp.get_mechanisms_by_role(role) == {mechs[0]}
elif role is MechanismRole.INTERNAL:
assert comp.get_mechanisms_by_role(role) == set([mechs[1], mechs[2]])
elif role is MechanismRole.CYCLE:
assert comp.get_mechanisms_by_role(role) == {mechs[3]}
else:
assert comp.get_mechanisms_by_role(role) == set()
def test_nonexistent_role(self):
comp = Composition()
with pytest.raises(CompositionError):
comp.get_mechanisms_by_role(None)
@pytest.mark.skip
class TestGraph:
@pytest.mark.skip
class TestProcessingGraph:
def test_all_mechanisms(self):
comp = Composition()
A = TransferMechanism(function=Linear(slope=5.0, intercept=2.0), name='A')
B = TransferMechanism(function=Linear(intercept=4.0), name='B')
C = TransferMechanism(function=Linear(intercept=1.5), name='C')
mechs = [A, B, C]
for m in mechs:
comp.add_mechanism(m)
assert len(comp.graph_processing.vertices) == 3
assert len(comp.graph_processing.comp_to_vertex) == 3
for m in mechs:
assert m in comp.graph_processing.comp_to_vertex
assert comp.graph_processing.get_parents_from_component(A) == []
assert comp.graph_processing.get_parents_from_component(B) == []
assert comp.graph_processing.get_parents_from_component(C) == []
assert comp.graph_processing.get_children_from_component(A) == []
assert comp.graph_processing.get_children_from_component(B) == []
assert comp.graph_processing.get_children_from_component(C) == []
def test_triangle(self):
| |
= self.RECURSIVE
self.NONSYSTEMATIC = RM_Field_FRC_CONVGENERATOR_NONSYSTEMATIC(self)
self.zz_fdict['NONSYSTEMATIC'] = self.NONSYSTEMATIC
self.__dict__['zz_frozen'] = True
class RM_Register_FRC_PUNCTCTRL(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_FRC_PUNCTCTRL, self).__init__(rmio, label,
0xa8004000, 0x060,
'PUNCTCTRL', 'FRC.PUNCTCTRL', 'read-write',
u"",
0x00000101, 0x00007F7F,
0x00001000, 0x00002000,
0x00003000)
self.PUNCT0 = RM_Field_FRC_PUNCTCTRL_PUNCT0(self)
self.zz_fdict['PUNCT0'] = self.PUNCT0
self.PUNCT1 = RM_Field_FRC_PUNCTCTRL_PUNCT1(self)
self.zz_fdict['PUNCT1'] = self.PUNCT1
self.__dict__['zz_frozen'] = True
class RM_Register_FRC_PAUSECTRL(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_FRC_PAUSECTRL, self).__init__(rmio, label,
0xa8004000, 0x064,
'PAUSECTRL', 'FRC.PAUSECTRL', 'read-write',
u"",
0x00000000, 0x001FFFFF,
0x00001000, 0x00002000,
0x00003000)
self.FRAMEDETPAUSEEN = RM_Field_FRC_PAUSECTRL_FRAMEDETPAUSEEN(self)
self.zz_fdict['FRAMEDETPAUSEEN'] = self.FRAMEDETPAUSEEN
self.TXINTERLEAVEWRITEPAUSEEN = RM_Field_FRC_PAUSECTRL_TXINTERLEAVEWRITEPAUSEEN(self)
self.zz_fdict['TXINTERLEAVEWRITEPAUSEEN'] = self.TXINTERLEAVEWRITEPAUSEEN
self.RXINTERLEAVEWRITEPAUSEEN = RM_Field_FRC_PAUSECTRL_RXINTERLEAVEWRITEPAUSEEN(self)
self.zz_fdict['RXINTERLEAVEWRITEPAUSEEN'] = self.RXINTERLEAVEWRITEPAUSEEN
self.INTERLEAVEREADPAUSEEN = RM_Field_FRC_PAUSECTRL_INTERLEAVEREADPAUSEEN(self)
self.zz_fdict['INTERLEAVEREADPAUSEEN'] = self.INTERLEAVEREADPAUSEEN
self.TXSUBFRAMEPAUSEEN = RM_Field_FRC_PAUSECTRL_TXSUBFRAMEPAUSEEN(self)
self.zz_fdict['TXSUBFRAMEPAUSEEN'] = self.TXSUBFRAMEPAUSEEN
self.CONVPAUSECNT = RM_Field_FRC_PAUSECTRL_CONVPAUSECNT(self)
self.zz_fdict['CONVPAUSECNT'] = self.CONVPAUSECNT
self.INTERLEAVEWRITEPAUSECNT = RM_Field_FRC_PAUSECTRL_INTERLEAVEWRITEPAUSECNT(self)
self.zz_fdict['INTERLEAVEWRITEPAUSECNT'] = self.INTERLEAVEWRITEPAUSECNT
self.INTERLEAVEREADPAUSECNT = RM_Field_FRC_PAUSECTRL_INTERLEAVEREADPAUSECNT(self)
self.zz_fdict['INTERLEAVEREADPAUSECNT'] = self.INTERLEAVEREADPAUSECNT
self.__dict__['zz_frozen'] = True
class RM_Register_FRC_IF(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_FRC_IF, self).__init__(rmio, label,
0xa8004000, 0x068,
'IF', 'FRC.IF', 'read-write',
u"",
0x00000000, 0x7F7FFFFF,
0x00001000, 0x00002000,
0x00003000)
self.TXDONE = RM_Field_FRC_IF_TXDONE(self)
self.zz_fdict['TXDONE'] = self.TXDONE
self.TXAFTERFRAMEDONE = RM_Field_FRC_IF_TXAFTERFRAMEDONE(self)
self.zz_fdict['TXAFTERFRAMEDONE'] = self.TXAFTERFRAMEDONE
self.TXABORTED = RM_Field_FRC_IF_TXABORTED(self)
self.zz_fdict['TXABORTED'] = self.TXABORTED
self.TXUF = RM_Field_FRC_IF_TXUF(self)
self.zz_fdict['TXUF'] = self.TXUF
self.RXDONE = RM_Field_FRC_IF_RXDONE(self)
self.zz_fdict['RXDONE'] = self.RXDONE
self.RXABORTED = RM_Field_FRC_IF_RXABORTED(self)
self.zz_fdict['RXABORTED'] = self.RXABORTED
self.FRAMEERROR = RM_Field_FRC_IF_FRAMEERROR(self)
self.zz_fdict['FRAMEERROR'] = self.FRAMEERROR
self.BLOCKERROR = RM_Field_FRC_IF_BLOCKERROR(self)
self.zz_fdict['BLOCKERROR'] = self.BLOCKERROR
self.RXOF = RM_Field_FRC_IF_RXOF(self)
self.zz_fdict['RXOF'] = self.RXOF
self.WCNTCMP0 = RM_Field_FRC_IF_WCNTCMP0(self)
self.zz_fdict['WCNTCMP0'] = self.WCNTCMP0
self.WCNTCMP1 = RM_Field_FRC_IF_WCNTCMP1(self)
self.zz_fdict['WCNTCMP1'] = self.WCNTCMP1
self.WCNTCMP2 = RM_Field_FRC_IF_WCNTCMP2(self)
self.zz_fdict['WCNTCMP2'] = self.WCNTCMP2
self.ADDRERROR = RM_Field_FRC_IF_ADDRERROR(self)
self.zz_fdict['ADDRERROR'] = self.ADDRERROR
self.BUSERROR = RM_Field_FRC_IF_BUSERROR(self)
self.zz_fdict['BUSERROR'] = self.BUSERROR
self.RXRAWEVENT = RM_Field_FRC_IF_RXRAWEVENT(self)
self.zz_fdict['RXRAWEVENT'] = self.RXRAWEVENT
self.TXRAWEVENT = RM_Field_FRC_IF_TXRAWEVENT(self)
self.zz_fdict['TXRAWEVENT'] = self.TXRAWEVENT
self.SNIFFOF = RM_Field_FRC_IF_SNIFFOF(self)
self.zz_fdict['SNIFFOF'] = self.SNIFFOF
self.WCNTCMP3 = RM_Field_FRC_IF_WCNTCMP3(self)
self.zz_fdict['WCNTCMP3'] = self.WCNTCMP3
self.WCNTCMP4 = RM_Field_FRC_IF_WCNTCMP4(self)
self.zz_fdict['WCNTCMP4'] = self.WCNTCMP4
self.BOISET = RM_Field_FRC_IF_BOISET(self)
self.zz_fdict['BOISET'] = self.BOISET
self.PKTBUFSTART = RM_Field_FRC_IF_PKTBUFSTART(self)
self.zz_fdict['PKTBUFSTART'] = self.PKTBUFSTART
self.PKTBUFTHRESHOLD = RM_Field_FRC_IF_PKTBUFTHRESHOLD(self)
self.zz_fdict['PKTBUFTHRESHOLD'] = self.PKTBUFTHRESHOLD
self.RXRAWOF = RM_Field_FRC_IF_RXRAWOF(self)
self.zz_fdict['RXRAWOF'] = self.RXRAWOF
self.FRAMEDETPAUSED = RM_Field_FRC_IF_FRAMEDETPAUSED(self)
self.zz_fdict['FRAMEDETPAUSED'] = self.FRAMEDETPAUSED
self.INTERLEAVEWRITEPAUSED = RM_Field_FRC_IF_INTERLEAVEWRITEPAUSED(self)
self.zz_fdict['INTERLEAVEWRITEPAUSED'] = self.INTERLEAVEWRITEPAUSED
self.INTERLEAVEREADPAUSED = RM_Field_FRC_IF_INTERLEAVEREADPAUSED(self)
self.zz_fdict['INTERLEAVEREADPAUSED'] = self.INTERLEAVEREADPAUSED
self.TXSUBFRAMEPAUSED = RM_Field_FRC_IF_TXSUBFRAMEPAUSED(self)
self.zz_fdict['TXSUBFRAMEPAUSED'] = self.TXSUBFRAMEPAUSED
self.CONVPAUSED = RM_Field_FRC_IF_CONVPAUSED(self)
self.zz_fdict['CONVPAUSED'] = self.CONVPAUSED
self.RXWORD = RM_Field_FRC_IF_RXWORD(self)
self.zz_fdict['RXWORD'] = self.RXWORD
self.TXWORD = RM_Field_FRC_IF_TXWORD(self)
self.zz_fdict['TXWORD'] = self.TXWORD
self.__dict__['zz_frozen'] = True
class RM_Register_FRC_IEN(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_FRC_IEN, self).__init__(rmio, label,
0xa8004000, 0x06C,
'IEN', 'FRC.IEN', 'read-write',
u"",
0x00000000, 0x7F7FFFFF,
0x00001000, 0x00002000,
0x00003000)
self.TXDONE = RM_Field_FRC_IEN_TXDONE(self)
self.zz_fdict['TXDONE'] = self.TXDONE
self.TXAFTERFRAMEDONE = RM_Field_FRC_IEN_TXAFTERFRAMEDONE(self)
self.zz_fdict['TXAFTERFRAMEDONE'] = self.TXAFTERFRAMEDONE
self.TXABORTED = RM_Field_FRC_IEN_TXABORTED(self)
self.zz_fdict['TXABORTED'] = self.TXABORTED
self.TXUF = RM_Field_FRC_IEN_TXUF(self)
self.zz_fdict['TXUF'] = self.TXUF
self.RXDONE = RM_Field_FRC_IEN_RXDONE(self)
self.zz_fdict['RXDONE'] = self.RXDONE
self.RXABORTED = RM_Field_FRC_IEN_RXABORTED(self)
self.zz_fdict['RXABORTED'] = self.RXABORTED
self.FRAMEERROR = RM_Field_FRC_IEN_FRAMEERROR(self)
self.zz_fdict['FRAMEERROR'] = self.FRAMEERROR
self.BLOCKERROR = RM_Field_FRC_IEN_BLOCKERROR(self)
self.zz_fdict['BLOCKERROR'] = self.BLOCKERROR
self.RXOF = RM_Field_FRC_IEN_RXOF(self)
self.zz_fdict['RXOF'] = self.RXOF
self.WCNTCMP0 = RM_Field_FRC_IEN_WCNTCMP0(self)
self.zz_fdict['WCNTCMP0'] = self.WCNTCMP0
self.WCNTCMP1 = RM_Field_FRC_IEN_WCNTCMP1(self)
self.zz_fdict['WCNTCMP1'] = self.WCNTCMP1
self.WCNTCMP2 = RM_Field_FRC_IEN_WCNTCMP2(self)
self.zz_fdict['WCNTCMP2'] = self.WCNTCMP2
self.ADDRERROR = RM_Field_FRC_IEN_ADDRERROR(self)
self.zz_fdict['ADDRERROR'] = self.ADDRERROR
self.BUSERROR = RM_Field_FRC_IEN_BUSERROR(self)
self.zz_fdict['BUSERROR'] = self.BUSERROR
self.RXRAWEVENT = RM_Field_FRC_IEN_RXRAWEVENT(self)
self.zz_fdict['RXRAWEVENT'] = self.RXRAWEVENT
self.TXRAWEVENT = RM_Field_FRC_IEN_TXRAWEVENT(self)
self.zz_fdict['TXRAWEVENT'] = self.TXRAWEVENT
self.SNIFFOF = RM_Field_FRC_IEN_SNIFFOF(self)
self.zz_fdict['SNIFFOF'] = self.SNIFFOF
self.WCNTCMP3 = RM_Field_FRC_IEN_WCNTCMP3(self)
self.zz_fdict['WCNTCMP3'] = self.WCNTCMP3
self.WCNTCMP4 = RM_Field_FRC_IEN_WCNTCMP4(self)
self.zz_fdict['WCNTCMP4'] = self.WCNTCMP4
self.BOISET = RM_Field_FRC_IEN_BOISET(self)
self.zz_fdict['BOISET'] = self.BOISET
self.PKTBUFSTART = RM_Field_FRC_IEN_PKTBUFSTART(self)
self.zz_fdict['PKTBUFSTART'] = self.PKTBUFSTART
self.PKTBUFTHRESHOLD = RM_Field_FRC_IEN_PKTBUFTHRESHOLD(self)
self.zz_fdict['PKTBUFTHRESHOLD'] = self.PKTBUFTHRESHOLD
self.RXRAWOF = RM_Field_FRC_IEN_RXRAWOF(self)
self.zz_fdict['RXRAWOF'] = self.RXRAWOF
self.FRAMEDETPAUSED = RM_Field_FRC_IEN_FRAMEDETPAUSED(self)
self.zz_fdict['FRAMEDETPAUSED'] = self.FRAMEDETPAUSED
self.INTERLEAVEWRITEPAUSED = RM_Field_FRC_IEN_INTERLEAVEWRITEPAUSED(self)
self.zz_fdict['INTERLEAVEWRITEPAUSED'] = self.INTERLEAVEWRITEPAUSED
self.INTERLEAVEREADPAUSED = RM_Field_FRC_IEN_INTERLEAVEREADPAUSED(self)
self.zz_fdict['INTERLEAVEREADPAUSED'] = self.INTERLEAVEREADPAUSED
self.TXSUBFRAMEPAUSED = RM_Field_FRC_IEN_TXSUBFRAMEPAUSED(self)
self.zz_fdict['TXSUBFRAMEPAUSED'] = self.TXSUBFRAMEPAUSED
self.CONVPAUSED = RM_Field_FRC_IEN_CONVPAUSED(self)
self.zz_fdict['CONVPAUSED'] = self.CONVPAUSED
self.RXWORD = RM_Field_FRC_IEN_RXWORD(self)
self.zz_fdict['RXWORD'] = self.RXWORD
self.TXWORD = RM_Field_FRC_IEN_TXWORD(self)
self.zz_fdict['TXWORD'] = self.TXWORD
self.__dict__['zz_frozen'] = True
class RM_Register_FRC_OTACNT(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_FRC_OTACNT, self).__init__(rmio, label,
0xa8004000, 0x070,
'OTACNT', 'FRC.OTACNT', 'read-only',
u"",
0x00000000, 0xFFFFFFFF,
0x00001000, 0x00002000,
0x00003000)
self.OTARXCNT = RM_Field_FRC_OTACNT_OTARXCNT(self)
self.zz_fdict['OTARXCNT'] = self.OTARXCNT
self.OTATXCNT = RM_Field_FRC_OTACNT_OTATXCNT(self)
self.zz_fdict['OTATXCNT'] = self.OTATXCNT
self.__dict__['zz_frozen'] = True
class RM_Register_FRC_BUFFERMODE(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_FRC_BUFFERMODE, self).__init__(rmio, label,
0xa8004000, 0x078,
'BUFFERMODE', 'FRC.BUFFERMODE', 'read-write',
u"",
0x00000000, 0x0000000F,
0x00001000, 0x00002000,
0x00003000)
self.TXBUFFERMODE = RM_Field_FRC_BUFFERMODE_TXBUFFERMODE(self)
self.zz_fdict['TXBUFFERMODE'] = self.TXBUFFERMODE
self.RXBUFFERMODE = RM_Field_FRC_BUFFERMODE_RXBUFFERMODE(self)
self.zz_fdict['RXBUFFERMODE'] = self.RXBUFFERMODE
self.RXFRCBUFMUX = RM_Field_FRC_BUFFERMODE_RXFRCBUFMUX(self)
self.zz_fdict['RXFRCBUFMUX'] = self.RXFRCBUFMUX
self.__dict__['zz_frozen'] = True
class RM_Register_FRC_SNIFFCTRL(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_FRC_SNIFFCTRL, self).__init__(rmio, label,
0xa8004000, 0x084,
'SNIFFCTRL', 'FRC.SNIFFCTRL', 'read-write',
u"",
0x000007FC, 0x0003FFFF,
0x00001000, 0x00002000,
0x00003000)
self.SNIFFMODE = RM_Field_FRC_SNIFFCTRL_SNIFFMODE(self)
self.zz_fdict['SNIFFMODE'] = self.SNIFFMODE
self.SNIFFBITS = RM_Field_FRC_SNIFFCTRL_SNIFFBITS(self)
self.zz_fdict['SNIFFBITS'] = self.SNIFFBITS
self.SNIFFRXDATA = RM_Field_FRC_SNIFFCTRL_SNIFFRXDATA(self)
self.zz_fdict['SNIFFRXDATA'] = self.SNIFFRXDATA
self.SNIFFTXDATA = RM_Field_FRC_SNIFFCTRL_SNIFFTXDATA(self)
self.zz_fdict['SNIFFTXDATA'] = self.SNIFFTXDATA
self.SNIFFRSSI = RM_Field_FRC_SNIFFCTRL_SNIFFRSSI(self)
self.zz_fdict['SNIFFRSSI'] = self.SNIFFRSSI
self.SNIFFSTATE = RM_Field_FRC_SNIFFCTRL_SNIFFSTATE(self)
self.zz_fdict['SNIFFSTATE'] = self.SNIFFSTATE
self.SNIFFAUXDATA = RM_Field_FRC_SNIFFCTRL_SNIFFAUXDATA(self)
self.zz_fdict['SNIFFAUXDATA'] = self.SNIFFAUXDATA
self.SNIFFBR = RM_Field_FRC_SNIFFCTRL_SNIFFBR(self)
self.zz_fdict['SNIFFBR'] = self.SNIFFBR
self.SNIFFSLEEPCTRL = RM_Field_FRC_SNIFFCTRL_SNIFFSLEEPCTRL(self)
self.zz_fdict['SNIFFSLEEPCTRL'] = self.SNIFFSLEEPCTRL
self.SNIFFSYNCWORD = RM_Field_FRC_SNIFFCTRL_SNIFFSYNCWORD(self)
self.zz_fdict['SNIFFSYNCWORD'] = self.SNIFFSYNCWORD
self.__dict__['zz_frozen'] = True
class RM_Register_FRC_AUXDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_FRC_AUXDATA, self).__init__(rmio, label,
0xa8004000, 0x088,
'AUXDATA', 'FRC.AUXDATA', 'write-only',
u"",
0x00000000, 0x000001FF,
0x00001000, 0x00002000,
0x00003000)
self.AUXDATA = RM_Field_FRC_AUXDATA_AUXDATA(self)
self.zz_fdict['AUXDATA'] = self.AUXDATA
self.__dict__['zz_frozen'] = True
class RM_Register_FRC_RAWCTRL(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_FRC_RAWCTRL, self).__init__(rmio, label,
0xa8004000, 0x08C,
'RAWCTRL', 'FRC.RAWCTRL', 'read-write',
u"",
0x00000000, 0x000021BF,
0x00001000, 0x00002000,
0x00003000)
self.TXRAWMODE = RM_Field_FRC_RAWCTRL_TXRAWMODE(self)
self.zz_fdict['TXRAWMODE'] = self.TXRAWMODE
self.RXRAWMODE = RM_Field_FRC_RAWCTRL_RXRAWMODE(self)
self.zz_fdict['RXRAWMODE'] = self.RXRAWMODE
self.RXRAWRANDOM = RM_Field_FRC_RAWCTRL_RXRAWRANDOM(self)
self.zz_fdict['RXRAWRANDOM'] = self.RXRAWRANDOM
self.RXRAWTRIGGER = RM_Field_FRC_RAWCTRL_RXRAWTRIGGER(self)
self.zz_fdict['RXRAWTRIGGER'] = self.RXRAWTRIGGER
self.DEMODRAWDATAMUX = RM_Field_FRC_RAWCTRL_DEMODRAWDATAMUX(self)
self.zz_fdict['DEMODRAWDATAMUX'] = self.DEMODRAWDATAMUX
self.__dict__['zz_frozen'] = True
class RM_Register_FRC_RXRAWDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_FRC_RXRAWDATA, self).__init__(rmio, label,
0xa8004000, 0x090,
'RXRAWDATA', 'FRC.RXRAWDATA', 'read-only',
u"",
0x00000000, 0xFFFFFFFF,
0x00001000, 0x00002000,
0x00003000)
self.RXRAWDATA = RM_Field_FRC_RXRAWDATA_RXRAWDATA(self)
self.zz_fdict['RXRAWDATA'] = self.RXRAWDATA
self.__dict__['zz_frozen'] = True
class RM_Register_FRC_PAUSEDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_FRC_PAUSEDATA, self).__init__(rmio, label,
0xa8004000, 0x094,
'PAUSEDATA', 'FRC.PAUSEDATA', 'read-only',
u"",
0x00000000, 0xFFFFFFFF,
0x00001000, 0x00002000,
0x00003000)
self.PAUSEDATA = RM_Field_FRC_PAUSEDATA_PAUSEDATA(self)
self.zz_fdict['PAUSEDATA'] = self.PAUSEDATA
self.__dict__['zz_frozen'] = True
class RM_Register_FRC_LIKELYCONVSTATE(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_FRC_LIKELYCONVSTATE, self).__init__(rmio, label,
0xa8004000, 0x098,
'LIKELYCONVSTATE', 'FRC.LIKELYCONVSTATE', 'read-only',
u"",
0x00000000, 0x0000003F,
0x00001000, 0x00002000,
0x00003000)
self.LIKELYCONVSTATE = RM_Field_FRC_LIKELYCONVSTATE_LIKELYCONVSTATE(self)
self.zz_fdict['LIKELYCONVSTATE'] = self.LIKELYCONVSTATE
self.__dict__['zz_frozen'] = True
class RM_Register_FRC_INTELEMENTNEXT(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_FRC_INTELEMENTNEXT, self).__init__(rmio, label,
0xa8004000, 0x09C,
'INTELEMENTNEXT', 'FRC.INTELEMENTNEXT', 'read-only',
u"",
0x00000000, 0x000000FF,
0x00001000, 0x00002000,
0x00003000)
self.INTELEMENTNEXT = RM_Field_FRC_INTELEMENTNEXT_INTELEMENTNEXT(self)
self.zz_fdict['INTELEMENTNEXT'] = self.INTELEMENTNEXT
self.__dict__['zz_frozen'] = True
class RM_Register_FRC_INTWRITEPOINT(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_FRC_INTWRITEPOINT, self).__init__(rmio, label,
0xa8004000, 0x0A0,
'INTWRITEPOINT', 'FRC.INTWRITEPOINT', 'read-write',
u"",
0x00000000, 0x0000001F,
0x00001000, 0x00002000,
0x00003000)
self.INTWRITEPOINT = RM_Field_FRC_INTWRITEPOINT_INTWRITEPOINT(self)
self.zz_fdict['INTWRITEPOINT'] = self.INTWRITEPOINT
self.__dict__['zz_frozen'] = True
class RM_Register_FRC_INTREADPOINT(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_FRC_INTREADPOINT, self).__init__(rmio, label,
0xa8004000, 0x0A4,
'INTREADPOINT', 'FRC.INTREADPOINT', 'read-write',
u"",
0x00000000, 0x0000001F,
0x00001000, 0x00002000,
0x00003000)
self.INTREADPOINT = RM_Field_FRC_INTREADPOINT_INTREADPOINT(self)
self.zz_fdict['INTREADPOINT'] = self.INTREADPOINT
self.__dict__['zz_frozen'] = True
class RM_Register_FRC_AUTOCG(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_FRC_AUTOCG, self).__init__(rmio, label,
0xa8004000, 0x0A8,
'AUTOCG', 'FRC.AUTOCG', 'read-write',
u"",
0x00000000, 0x0000FFFF,
0x00001000, 0x00002000,
0x00003000)
self.AUTOCGEN = RM_Field_FRC_AUTOCG_AUTOCGEN(self)
self.zz_fdict['AUTOCGEN'] = self.AUTOCGEN
self.__dict__['zz_frozen'] = True
class RM_Register_FRC_CGCLKSTOP(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_FRC_CGCLKSTOP, self).__init__(rmio, label,
0xa8004000, 0x0AC,
'CGCLKSTOP', 'FRC.CGCLKSTOP', 'read-write',
u"",
0x00000000, 0x0000FFFF,
0x00001000, 0x00002000,
0x00003000)
self.FORCEOFF = RM_Field_FRC_CGCLKSTOP_FORCEOFF(self)
self.zz_fdict['FORCEOFF'] = self.FORCEOFF
self.__dict__['zz_frozen'] = True
class RM_Register_FRC_SEQIF(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_FRC_SEQIF, self).__init__(rmio, label,
0xa8004000, 0x0B4,
'SEQIF', 'FRC.SEQIF', 'read-write',
u"",
0x00000000, 0x7F7FFFFF,
0x00001000, 0x00002000,
0x00003000)
self.TXDONE = RM_Field_FRC_SEQIF_TXDONE(self)
self.zz_fdict['TXDONE'] = self.TXDONE
self.TXAFTERFRAMEDONE = RM_Field_FRC_SEQIF_TXAFTERFRAMEDONE(self)
self.zz_fdict['TXAFTERFRAMEDONE'] = self.TXAFTERFRAMEDONE
self.TXABORTED = RM_Field_FRC_SEQIF_TXABORTED(self)
self.zz_fdict['TXABORTED'] = self.TXABORTED
self.TXUF = RM_Field_FRC_SEQIF_TXUF(self)
self.zz_fdict['TXUF'] = self.TXUF
self.RXDONE = RM_Field_FRC_SEQIF_RXDONE(self)
self.zz_fdict['RXDONE'] = self.RXDONE
self.RXABORTED = RM_Field_FRC_SEQIF_RXABORTED(self)
self.zz_fdict['RXABORTED'] = self.RXABORTED
self.FRAMEERROR = RM_Field_FRC_SEQIF_FRAMEERROR(self)
self.zz_fdict['FRAMEERROR'] = self.FRAMEERROR
self.BLOCKERROR = RM_Field_FRC_SEQIF_BLOCKERROR(self)
self.zz_fdict['BLOCKERROR'] = self.BLOCKERROR
self.RXOF = RM_Field_FRC_SEQIF_RXOF(self)
self.zz_fdict['RXOF'] = self.RXOF
self.WCNTCMP0 = RM_Field_FRC_SEQIF_WCNTCMP0(self)
self.zz_fdict['WCNTCMP0'] = self.WCNTCMP0
self.WCNTCMP1 = RM_Field_FRC_SEQIF_WCNTCMP1(self)
self.zz_fdict['WCNTCMP1'] = self.WCNTCMP1
self.WCNTCMP2 = RM_Field_FRC_SEQIF_WCNTCMP2(self)
self.zz_fdict['WCNTCMP2'] = self.WCNTCMP2
self.ADDRERROR = RM_Field_FRC_SEQIF_ADDRERROR(self)
self.zz_fdict['ADDRERROR'] = self.ADDRERROR
self.BUSERROR = RM_Field_FRC_SEQIF_BUSERROR(self)
self.zz_fdict['BUSERROR'] = self.BUSERROR
self.RXRAWEVENT = RM_Field_FRC_SEQIF_RXRAWEVENT(self)
self.zz_fdict['RXRAWEVENT'] = self.RXRAWEVENT
self.TXRAWEVENT = RM_Field_FRC_SEQIF_TXRAWEVENT(self)
self.zz_fdict['TXRAWEVENT'] = self.TXRAWEVENT
self.SNIFFOF = RM_Field_FRC_SEQIF_SNIFFOF(self)
self.zz_fdict['SNIFFOF'] = self.SNIFFOF
self.WCNTCMP3 = RM_Field_FRC_SEQIF_WCNTCMP3(self)
self.zz_fdict['WCNTCMP3'] = self.WCNTCMP3
self.WCNTCMP4 = RM_Field_FRC_SEQIF_WCNTCMP4(self)
self.zz_fdict['WCNTCMP4'] = self.WCNTCMP4
self.BOISET = RM_Field_FRC_SEQIF_BOISET(self)
self.zz_fdict['BOISET'] = self.BOISET
self.PKTBUFSTART = RM_Field_FRC_SEQIF_PKTBUFSTART(self)
self.zz_fdict['PKTBUFSTART'] = self.PKTBUFSTART
self.PKTBUFTHRESHOLD = RM_Field_FRC_SEQIF_PKTBUFTHRESHOLD(self)
self.zz_fdict['PKTBUFTHRESHOLD'] = self.PKTBUFTHRESHOLD
self.RXRAWOF = RM_Field_FRC_SEQIF_RXRAWOF(self)
self.zz_fdict['RXRAWOF'] = self.RXRAWOF
self.FRAMEDETPAUSED = RM_Field_FRC_SEQIF_FRAMEDETPAUSED(self)
self.zz_fdict['FRAMEDETPAUSED'] = self.FRAMEDETPAUSED
self.INTERLEAVEWRITEPAUSED = RM_Field_FRC_SEQIF_INTERLEAVEWRITEPAUSED(self)
self.zz_fdict['INTERLEAVEWRITEPAUSED'] = self.INTERLEAVEWRITEPAUSED
self.INTERLEAVEREADPAUSED = RM_Field_FRC_SEQIF_INTERLEAVEREADPAUSED(self)
self.zz_fdict['INTERLEAVEREADPAUSED'] = self.INTERLEAVEREADPAUSED
self.TXSUBFRAMEPAUSED = RM_Field_FRC_SEQIF_TXSUBFRAMEPAUSED(self)
self.zz_fdict['TXSUBFRAMEPAUSED'] = self.TXSUBFRAMEPAUSED
self.CONVPAUSED = RM_Field_FRC_SEQIF_CONVPAUSED(self)
self.zz_fdict['CONVPAUSED'] = self.CONVPAUSED
self.RXWORD = RM_Field_FRC_SEQIF_RXWORD(self)
self.zz_fdict['RXWORD'] = self.RXWORD
self.TXWORD = RM_Field_FRC_SEQIF_TXWORD(self)
self.zz_fdict['TXWORD'] = self.TXWORD
self.__dict__['zz_frozen'] = True
class RM_Register_FRC_SEQIEN(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_FRC_SEQIEN, self).__init__(rmio, label,
0xa8004000, 0x0B8,
'SEQIEN', 'FRC.SEQIEN', 'read-write',
u"",
0x00000000, 0x7F7FFFFF,
0x00001000, 0x00002000,
0x00003000)
self.TXDONE = RM_Field_FRC_SEQIEN_TXDONE(self)
self.zz_fdict['TXDONE'] = self.TXDONE
self.TXAFTERFRAMEDONE = RM_Field_FRC_SEQIEN_TXAFTERFRAMEDONE(self)
self.zz_fdict['TXAFTERFRAMEDONE'] = self.TXAFTERFRAMEDONE
self.TXABORTED = RM_Field_FRC_SEQIEN_TXABORTED(self)
self.zz_fdict['TXABORTED'] = self.TXABORTED
self.TXUF = RM_Field_FRC_SEQIEN_TXUF(self)
self.zz_fdict['TXUF'] = | |
#!/usr/local/bin/python
# <NAME> | 05/29/2018
#|__This script requires Python 3.4 and modules - numpy & scipy
#|__Extracts the quality string and determines the length and average quality score of each read
#|__Converts the raw values for each read set into descriptive statistics
#|__Provides descriptive stats for Read Lengths and Read Qualities, number and percentage of reads below Q30 and Ambiguous base counts
#|__Outputs separate tables for different read length buckets (150bp,250bp and 300bp)
# Usage: ./read_length_quality_and_stats_fastq.py
import numpy as np
from scipy.stats import skew,mstats
import glob
import os
import re
# ------------------------------------------ DECLARATIONS AND INITIALIZATIONS ------------------------------------------------#
quality_scores_R1 = []
quality_scores_R2 = []
average_quality = 0
read1_length = []
read2_length = []
inserts = []
insert_sizes = []
countN1 = 0
countN2 = 0
Q1_lt_30 = 0
Q2_lt_30 = 0
R1 = []
R2 = []
Q1 = []
Q2 = []
file1 = []
file2 = []
files_149 = [] #Stores paired read files
files_249 = [] #Stores paired read files
files_299 = [] #Stores paired read files
# Following lists are to store all results for 149bp bucket
N_mean_149 = ["Mean:"]
SD_149 = ["Std_Deviation:"]
Variance_149 = ["Variance"]
median_149 = ["Median"]
Q1_149 = ["1st_Quartile:"]
Q3_149 = ["3rd_Quartile:"]
lwhisker_149 = ["Lower_whisker:"]
hwhisker_149 = ["Upper_Whisker:"]
Skew_149 = ["Skewness:"]
G_mean_149 = ["Geometric_Mean:"]
qual_N_mean_149 = ["Mean:"]
qual_SD_149 = ["Std_Deviation:"]
qual_Variance_149 = ["Variance:"]
qual_median_149 = ["Median:"]
qual_Q1_149 = ["1st_Quartile:"]
qual_Q3_149 = ["3rd_Quartile:"]
qual_lwhisker_149 = ["Lower_whisker:"]
qual_hwhisker_149 = ["Upper_Whisker:"]
qual_skew_149 = ["Skewness:"]
qual_G_mean_149 = ["Geometric_Mean:"]
# Following lists are to store all results for 249bp bucket
N_mean_249 = ["Mean:"]
SD_249 = ["Std_Deviation:"]
Variance_249 = ["Variance"]
median_249 = ["Median"]
Q1_249 = ["1st_Quartile:"]
Q3_249 = ["3rd_Quartile:"]
lwhisker_249 = ["Lower_whisker:"]
hwhisker_249 = ["Upper_Whisker:"]
Skew_249 = ["Skewness:"]
G_mean_249 = ["Geometric_Mean:"]
qual_N_mean_249 = ["Mean:"]
qual_SD_249 = ["Std_Deviation:"]
qual_Variance_249 = ["Variance:"]
qual_median_249 = ["Median:"]
qual_Q1_249 = ["1st_Quartile:"]
qual_Q3_249 = ["3rd_Quartile:"]
qual_lwhisker_249 = ["Lower_whisker:"]
qual_hwhisker_249 = ["Upper_Whisker:"]
qual_skew_249 = ["Skewness:"]
qual_G_mean_249 = ["Geometric_Mean:"]
# Following lists are to store all results for 299bp bucket
N_mean_299 = ["Mean:"]
SD_299 = ["Std_Deviation:"]
Variance_299 = ["Variance"]
median_299 = ["Median"]
Q1_299 = ["1st_Quartile:"]
Q3_299 = ["3rd_Quartile:"]
lwhisker_299 = ["Lower_whisker:"]
hwhisker_299 = ["Upper_Whisker:"]
Skew_299 = ["Skewness:"]
G_mean_299 = ["Geometric_Mean:"]
qual_N_mean_299 = ["Mean:"]
qual_SD_299 = ["Std_Deviation:"]
qual_Variance_299 = ["Variance:"]
qual_median_299 = ["Median:"]
qual_Q1_299 = ["1st_Quartile:"]
qual_Q3_299 = ["3rd_Quartile:"]
qual_lwhisker_299 = ["Lower_Whisker:"]
qual_hwhisker_299 = ["Upper_Whisker:"]
qual_skew_299 = ["Skewness:"]
qual_G_mean_299 = ["Geometric_Mean:"]
total_no_reads_149 = ["Read_count:"]
total_no_reads_249 = ["Read_count:"]
total_no_reads_299 = ["Read_count:"]
qual_lt_30_149 = ["Reads_<_Q30:"]
qual_lt_30_249 = ["Reads_<_Q30:"]
qual_lt_30_299 = ["Reads_<_Q30:"]
perc_qual_lt_30_149 = ["Percentage_reads_<_Q30"]
perc_qual_lt_30_249 = ["Percentage_reads_<_Q30"]
perc_qual_lt_30_299 = ["Percentage_reads_<_Q30"]
ambi_calls_149 = ["Ambiguous_base_calls:"]
ambi_calls_249 = ["Ambiguous_base_calls:"]
ambi_calls_299 = ["Ambiguous_base_calls:"]
R_lt_149 = ["Reads_<_149:"]
R_ge_149 = ["Reads_>=_149:"]
R_lt_249 = ["Reads_<_249:"]
R_ge_249 = ["Reads_>=_249:"]
R_lt_299 = ["Reads_<_299:"]
R_ge_299 = ["Reads_>=_299:"]
r_median = 0
i_median = 0
final_perc_R1_lt_149 = ["%_Reads_<_149:"]
final_perc_R1_ge_149 = ["%_Reads_>=_149:"]
final_perc_R1_lt_249 = ["%_Reads_<_249:"]
final_perc_R1_gt_249 = ["%_Reads_>=_249:"]
final_perc_R1_lt_299 = ["%_Reads_<_299:"]
final_perc_R1_gt_299 = ["%_Reads_>=_299:"]
final_avg_quality_lt_149 = ["Average_Quality_<_149:"]
final_avg_quality_ge_149 = ["Average_Quality_>=_149:"]
final_avg_length_lt_149 = ["Average_Length_<_149"]
final_avg_length_ge_149 = ["Average_Length_>=_149"]
final_avg_quality_lt_249 = ["Average_Quality_<_249:"]
final_avg_quality_ge_249 = ["Average_Quality_>=_249:"]
final_avg_length_lt_249 = ["Average_Length_<_249"]
final_avg_length_ge_249 = ["Average_Length_>=_249"]
final_avg_quality_lt_299 = ["Average_Quality_<_299:"]
final_avg_quality_ge_299 = ["Average_Quality_>=_299:"]
final_avg_length_lt_299 = ["Average_Length_<_299"]
final_avg_length_ge_299 = ["Average_Length_>=_299"]
# ------------------------------------------ FUNCTIONS ------------------------------------------------#
# To parse fastq file
def parseFastq(fastq_infile):
sequences = []
qualities = []
with open(fastq_infile,"r", encoding="utf8", errors='ignore') as f:
while True:
f.readline()
seq = f.readline().rstrip() # gets sequence line
f.readline()
qual = f.readline().rstrip() # gets quality line
if len(seq) == 0: # if seq length is 0; reached end of file so break out of the loop
break
sequences.append(seq) # append seq to sequences list
qualities.append(qual) # append qual to sequences list
return sequences,qualities
# To convert ASCII to quality scores
def phred33toQ(qual):
return ord(qual) - 33 # ord converts char to ASCII values and returns
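# Example: phred33toQ('I') == 40, since ord('I') is 73 and 73 - 33 == 40 (Phred+33 encoding).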
# To calculate descriptive stats
def stats(in_array):
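# Returns (mean, std_dev, variance, Q1, median, Q3, skewness, geometric_mean, low_whisker, high_whisker), all rounded to whole numbers.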
a = np.array(in_array)
mean = a.mean()
mean = round(mean) # rounding off
std_dev = a.std()
std_dev = round(std_dev) # rounding off
variance = np.var(a)
variance = round(variance) # rounding off
Q1 = np.percentile(a,25)
Q1 = round(Q1) # rounding off
median = np.percentile(a,50)
median = round(median) # rounding off
Q3 = np.percentile(a,75)
Q3 = round(Q3) # rounding off
skewness = skew(a)
skewness = round(skewness) # rounding off
geometric_mean = mstats.gmean(a)
geometric_mean = round(geometric_mean) # rounding off
high = []
low = []
IQR = Q3 - Q1
lower = Q1 - (1.5*IQR)
upper = Q3 + (1.5*IQR)
if(min(in_array) < lower):
    low_whisker = lower    # clamp the lower whisker to the Tukey fence
else:
    low_whisker = min(in_array)
if(max(in_array) > upper):
    high_whisker = upper   # clamp the upper whisker to the Tukey fence
else:
high_whisker = max(in_array)
low_whisker = round(low_whisker) # rounding off
high_whisker = round(high_whisker) # rounding off
return mean,std_dev,variance,Q1,median,Q3,skewness,geometric_mean,low_whisker,high_whisker
# Ambiguous base counts
def countN(seq):
count = 0
for s in seq:
count += s.count("N")
return count
# quality thresholds
def Q30(qual_list):
count_lt_30 = 0
for x in qual_list:
if(x >= 0 and x < 30):
#print(x,"<","30") # Sanity check!
count_lt_30 += 1
else:
continue
return count_lt_30
# To get average quality scores for each read1
def qual_score(qual):
quality_scores = []
read_len = []
for Q in qual:
score = 0
read_len.append(len(Q))
for val in Q:
score += phred33toQ(val)
average_quality = (score/len(Q))
quality_scores.append(average_quality)
return read_len,quality_scores
def print_150bp():
print("\n\n-----Stats_for_149_bucket---------")
print('\t','\t'.join(files_149))
print("Read_Length_Stats:")
print(*lwhisker_149, sep='\t')
print(*Q1_149, sep='\t')
print(*median_149, sep='\t')
print(*N_mean_149, sep='\t')
print(*G_mean_149, sep='\t')
print(*Q3_149, sep='\t')
print(*hwhisker_149, sep='\t')
print(*SD_149, sep='\t')
print(*Variance_149, sep='\t')
print(*Skew_149, sep='\t')
print(*total_no_reads_149, sep='\t')
print(*R_lt_149, sep='\t')
print(*R_ge_149, sep='\t')
print(*final_perc_R1_lt_149, sep='\t')
print(*final_perc_R1_ge_149, sep='\t')
print(*final_avg_quality_lt_149, sep='\t')
print(*final_avg_quality_ge_149, sep='\t')
print(*final_avg_length_lt_149, sep='\t')
print(*final_avg_length_ge_149, sep='\t')
print("\nRead_Quality_Stats:")
print(*qual_lwhisker_149, sep='\t')
print(*qual_Q1_149, sep='\t')
print(*qual_median_149, sep='\t')
print(*qual_N_mean_149, sep='\t')
print(*qual_G_mean_149, sep='\t')
print(*qual_Q3_149, sep='\t')
print(*qual_hwhisker_149, sep='\t')
print(*qual_SD_149, sep='\t')
print(*qual_Variance_149, sep='\t')
print(*qual_skew_149, sep='\t')
print(*qual_lt_30_149, sep='\t')
print(*perc_qual_lt_30_149, sep='\t')
print(*ambi_calls_149, sep='\t')
def print_250bp():
print("\n\n-----Stats_for_249_bucket---------")
print('\t','\t'.join(files_249))
print("Read_Length_Stats:")
print(*lwhisker_249, sep='\t')
print(*Q1_249, sep='\t')
print(*median_249, sep='\t')
print(*N_mean_249, sep='\t')
print(*G_mean_249, sep='\t')
print(*Q3_249, sep='\t')
print(*hwhisker_249, sep='\t')
print(*SD_249, sep='\t')
print(*Variance_249, sep='\t')
print(*Skew_249, sep='\t')
print(*total_no_reads_249, sep='\t')
print(*R_lt_249, sep='\t')
print(*R_ge_249, sep='\t')
print(*final_perc_R1_lt_249, sep='\t')
print(*final_perc_R1_gt_249, sep='\t')
print(*final_avg_quality_lt_249, sep='\t')
print(*final_avg_quality_ge_249, sep='\t')
print(*final_avg_length_lt_249, sep='\t')
print(*final_avg_length_ge_249, sep='\t')
print("\nRead_Quality_Stats:")
print(*qual_lwhisker_249, sep='\t')
print(*qual_Q1_249, sep='\t')
print(*qual_median_249, sep='\t')
print(*qual_N_mean_249, sep='\t')
print(*qual_G_mean_249, sep='\t')
print(*qual_Q3_249, sep='\t')
print(*qual_hwhisker_249, sep='\t')
print(*qual_SD_249, sep='\t')
print(*qual_Variance_249, sep='\t')
print(*qual_skew_249, sep='\t')
print(*qual_lt_30_249, sep='\t')
print(*perc_qual_lt_30_249, sep='\t')
print(*ambi_calls_249, sep='\t')
def print_300bp():
print("\n\n-----Stats_for_299_bucket---------")
print('\t','\t'.join(files_299))
print("Read_Length_Stats:")
print(*lwhisker_299, sep='\t')
print(*Q1_299, sep='\t')
print(*median_299, sep='\t')
print(*N_mean_299, sep='\t')
print(*G_mean_299, sep='\t')
print(*Q3_299, sep='\t')
print(*hwhisker_299, sep='\t')
print(*SD_299, sep='\t')
print(*Variance_299, sep='\t')
print(*Skew_299, sep='\t')
print(*total_no_reads_299, sep='\t')
print(*R_lt_299, sep='\t')
print(*R_ge_299, sep='\t')
print(*final_perc_R1_lt_299, sep='\t')
print(*final_perc_R1_gt_299, sep='\t')
print(*final_avg_quality_lt_299, sep='\t')
print(*final_avg_quality_ge_299, sep='\t')
print(*final_avg_length_lt_299, sep='\t')
print(*final_avg_length_ge_299, sep='\t')
print("\nRead_Quality_Stats:")
print(*qual_lwhisker_299, sep='\t')
print(*qual_Q1_299, sep='\t')
print(*qual_median_299, sep='\t')
print(*qual_N_mean_299, sep='\t')
print(*qual_G_mean_299, sep='\t')
print(*qual_Q3_299, sep='\t')
print(*qual_hwhisker_299, sep='\t')
print(*qual_SD_299, sep='\t')
print(*qual_Variance_299, sep='\t')
print(*qual_skew_299, sep='\t')
print(*qual_lt_30_299, sep='\t')
print(*perc_qual_lt_30_299, sep='\t')
print(*ambi_calls_299, sep='\t')
# ---------------------------------------------------- MAIN ----------------------------------------------------------------- #
for x in os.listdir('.'):
if re.match('.*_R1.*.fastq$|.*_1.fastq$', x):
file1.append(x)
for x in os.listdir('.'):
if re.match('.*_R2.*.*fastq$|.*_2.fastq$', x):
file2.append(x)
# sorting lists for pairs to be in the same order
file1 = sorted(file1)
file2 = sorted(file2)
for f1,f2 in zip(file1,file2):
# command line arguments
fastq1 = f1
fastq2 = f2
# Parsing fastq: function call
seqs1,quals1 = parseFastq(fastq1) # takes in fastq file as an input from command line and passes it as an argument to parseFastq function. Returns sequences and qualities and stores in seqs & quals
seqs2,quals2 = parseFastq(fastq2)
# total number of reads
read_count1 = len(seqs1)
read_count2 = len(seqs2)
# average quality scores for each read: function call
read1_length,quality_scores_R1 = qual_score(quals1)
read2_length,quality_scores_R2 = qual_score(quals2)
# Descriptive stats for read1 length: function call (getting the median for both R1 and R2)
mean1,stdDev1,var1,Q1_1,r_median,Q3_1,skew1,gmean1,lwhisker1,hwhisker1 = stats(read1_length)
mean2,stdDev2,var2,Q1_2,i_median,Q3_2,skew2,gmean2,lwhisker2,hwhisker2 = stats(read2_length)
# Result lists
if(hwhisker1 >= 149 and hwhisker1 <= 152 and hwhisker2 >= 149 and hwhisker2 <= 152):
files_149.extend((f1,f2))
# command line arguments
fastq1 = f1
fastq2 = f2
# Parsing fastq: function call
seqs1,quals1 = parseFastq(fastq1) # takes in fastq file as an input from command line and passes it as an argument to parseFastq function. Returns sequences and qualities and stores in seqs & quals
seqs2,quals2 = parseFastq(fastq2)
# total number of reads
read_count1 = len(seqs1)
read_count2 = len(seqs2)
total_no_reads_149.extend((read_count1,read_count2)) # read count
# average quality scores for each read: function call
read1_length,quality_scores_R1 = qual_score(quals1)
read2_length,quality_scores_R2 = qual_score(quals2)
R1_lt_149 = 0
R1_ge_149 = 0
R2_lt_149 = 0
R2_ge_149 = 0
tot_len1_ge_149 = 0
tot_len1_lt_149 = 0
tot_len2_lt_149 = 0
tot_len2_ge_149 = 0
for x in read1_length:
if(x < 149):
R1_lt_149 += 1
tot_len1_lt_149 += x
elif(x >= 149):
R1_ge_149 += 1
tot_len1_ge_149 += x
for x in read2_length:
if(x < 149):
R2_lt_149 += 1
tot_len2_lt_149 += x
elif(x >= 149):
R2_ge_149 += 1
tot_len2_ge_149 += x
R_lt_149.extend((R1_lt_149,R2_lt_149))
R_ge_149.extend((R1_ge_149,R2_ge_149))
# quality threshold function call: function call
Q1_lt_30 = Q30(quality_scores_R1)
Q2_lt_30 = Q30(quality_scores_R2)
qual_lt_30_149.extend((Q1_lt_30,Q2_lt_30))
percent_reads_lt_30_R1 = Q1_lt_30/read_count1 * 100
percent_reads_lt_30_R2 = Q2_lt_30/read_count2 * 100
# rounding off
percent_reads_lt_30_R1 = round(percent_reads_lt_30_R1)
percent_reads_lt_30_R2 = round(percent_reads_lt_30_R2)
perc_qual_lt_30_149.extend((percent_reads_lt_30_R1,percent_reads_lt_30_R2))
# Ambiguous base function call: function call
countN1 = countN(seqs1)
countN2 = countN(seqs2)
ambi_calls_149.extend((countN1,countN2))
# Descriptive stats for read1 length: function call
r_mean,r_stdDev,r_var,r_Q1,r_median,r_Q3,r_skew,r_gmean,r_lwhisker,r_hwhisker = stats(read1_length)
i_mean,i_stdDev,i_var,i_Q1,i_median,i_Q3,i_skew,i_gmean,i_lwhisker,i_hwhisker = stats(read2_length)
N_mean_149.extend((r_mean,i_mean))
SD_149.extend((r_stdDev,i_stdDev))
Variance_149.extend((r_var,i_var))
median_149.extend((r_median,i_median))
Q1_149.extend((r_Q1,i_Q1))
Q3_149.extend((r_Q3,i_Q3))
lwhisker_149.extend((r_lwhisker,i_lwhisker))
hwhisker_149.extend((r_hwhisker,i_hwhisker))
Skew_149.extend((r_skew,i_skew))
G_mean_149.extend((r_gmean,i_gmean))
# Descriptive stats for Q1 quality: function call
q_mean,q_stdDev,q_var,q_Q1,q_median,q_Q3,q_skew,q_gmean,q_lwhisker,q_hwhisker = stats(quality_scores_R1)
s_mean,s_stdDev,s_var,s_Q1,s_median,s_Q3,s_skew,s_gmean,s_lwhisker,s_hwhisker = stats(quality_scores_R2)
qual_N_mean_149.extend((q_mean,s_mean))
qual_SD_149.extend((q_stdDev,s_stdDev))
qual_Variance_149.extend((q_var,s_var))
qual_median_149.extend((q_median,s_median))
qual_Q1_149.extend((q_Q1,s_Q1))
qual_Q3_149.extend((q_Q3,s_Q3))
qual_lwhisker_149.extend((q_lwhisker,s_lwhisker))
qual_hwhisker_149.extend((q_hwhisker,s_hwhisker))
qual_skew_149.extend((q_skew,s_skew))
qual_G_mean_149.extend((q_gmean,s_gmean))
# Calculating percent reads above and below 149
perc_R1_lt_149 = (R1_lt_149/read_count1) * 100
perc_R1_ge_149 = (R1_ge_149/read_count1) * 100
perc_R2_lt_149 = (R2_lt_149/read_count2) * 100
perc_R2_ge_149 = (R2_ge_149/read_count2) * 100
# rounding off
perc_R1_lt_149 = round(perc_R1_lt_149)
perc_R1_ge_149 = round(perc_R1_ge_149)
perc_R2_lt_149 = round(perc_R2_lt_149)
perc_R2_ge_149 = round(perc_R2_ge_149)
final_perc_R1_lt_149.extend((perc_R1_lt_149,perc_R2_lt_149))
final_perc_R1_ge_149.extend((perc_R1_ge_149,perc_R2_ge_149))
# Average Quality score calculation
avg_quality_1_le_149 = 0
avg_quality_1_gt_149 = 0
avg_quality_2_le_149 = 0
avg_quality_2_gt_149 = 0
avg_length_1_le_149 = 0
avg_length_1_gt_149 = 0
avg_length_2_le_149 = 0
avg_length_2_gt_149 = 0
tot_qual1_lt_149 = 0
tot_qual1_ge_149 = 0
tot_qual2_lt_149 = 0
tot_qual2_ge_149 = 0
for l,q in zip(read1_length,quality_scores_R1):
if(l < 149): # for lengths le 149
tot_qual1_lt_149 += q
elif(l >= 149):
tot_qual1_ge_149 += q
for l,q in zip(read2_length,quality_scores_R2):
if(l < 149): # for lengths le 149
tot_qual2_lt_149 += q
elif(l >= 149):
tot_qual2_ge_149 += q
if(R1_lt_149 == 0 and R2_lt_149 == 0):
avg_quality_1_le_149 = 0
avg_quality_2_le_149 = 0
avg_quality_1_gt_149 = tot_qual1_ge_149 / R1_ge_149
avg_quality_2_gt_149 = tot_qual2_ge_149 / R2_ge_149
elif(R1_ge_149 == 0 and R2_ge_149 == 0):
avg_quality_1_le_149 = tot_qual1_lt_149 / R1_lt_149
avg_quality_2_le_149 = tot_qual2_lt_149 / R2_lt_149
avg_quality_1_gt_149 = 0
avg_quality_2_gt_149 = 0
else:
avg_quality_1_le_149 = tot_qual1_lt_149 / R1_lt_149
avg_quality_2_le_149 = tot_qual2_lt_149 / R2_lt_149
avg_quality_1_gt_149 = tot_qual1_ge_149 / R1_ge_149
avg_quality_2_gt_149 = tot_qual2_ge_149 / R2_ge_149
# rounding off
avg_quality_1_le_149 = round(avg_quality_1_le_149)
avg_quality_1_gt_149 = round(avg_quality_1_gt_149)
avg_quality_2_le_149 = round(avg_quality_2_le_149)
avg_quality_2_gt_149 = round(avg_quality_2_gt_149)
final_avg_quality_lt_149.extend((avg_quality_1_le_149,avg_quality_2_le_149))
final_avg_quality_ge_149.extend((avg_quality_1_gt_149,avg_quality_2_gt_149))
# Calculating average length of reads above and below 149
if(R1_lt_149 == 0 and R2_lt_149 == 0):
avg_length_1_le_149 = 0
avg_length_1_gt_149 = tot_len1_ge_149/R1_ge_149
avg_length_2_le_149 = 0
avg_length_2_gt_149 = tot_len2_ge_149/R2_ge_149
elif(R1_ge_149 == 0 and R2_ge_149 == 0):
avg_length_1_le_149 = tot_len1_lt_149/R1_lt_149
avg_length_1_gt_149 = 0
avg_length_2_le_149 = tot_len2_lt_149/R2_lt_149
avg_length_2_gt_149 = 0
else:
avg_length_1_le_149 = tot_len1_lt_149/R1_lt_149
avg_length_1_gt_149 = tot_len1_ge_149/R1_ge_149
avg_length_2_le_149 = tot_len2_lt_149/R2_lt_149
avg_length_2_gt_149 = tot_len2_ge_149/R2_ge_149
# rounding off
avg_length_1_le_149 = round(avg_length_1_le_149)
avg_length_1_gt_149 = round(avg_length_1_gt_149)
avg_length_2_le_149 = round(avg_length_2_le_149)
avg_length_2_gt_149 = round(avg_length_2_gt_149)
final_avg_length_lt_149.extend((avg_length_1_le_149,avg_length_2_le_149))
final_avg_length_ge_149.extend((avg_length_1_gt_149,avg_length_2_gt_149))
elif(hwhisker1 >= 249 and hwhisker1 <= 252 and hwhisker2 >= 249 and hwhisker2 <= 252 ):
files_249.extend((f1,f2))
# command line arguments
fastq1 = f1
fastq2 = f2
# Parsing fastq: function call
seqs1,quals1 = parseFastq(fastq1) # takes in fastq file as an input from command line and passes it as an argument to parseFastq function. Returns sequences and qualities and stores in seqs & quals
seqs2,quals2 = parseFastq(fastq2)
# total number of reads
read_count1 = len(seqs1)
read_count2 = len(seqs2)
total_no_reads_249.extend((read_count1,read_count2))
# average quality scores for each read: function call
"HS256",
# "kid": "018c0ae5-4d9b-471b-bfd6-eef314bc7037"
# },
# "signature": "xuLifqLGiblpv9zBpuZczWhNj1gARaLV3UxvxhJxZuk"
# }
# Figure 55: Flattened JWS JSON Serialization
# 4.8. Multiple Signatures
# This example illustrates multiple signatures applied to the same
# payload. Since this example contains more than one signature, only
# the JSON General Serialization is possible.
# Note that whitespace is added for readability as described in
# Section 1.1.
# 4.8.1. Input Factors
# The following are supplied before beginning the signing operation:
# o Payload content; this example uses the content from Figure 7,
# encoded using base64url [RFC4648] to produce Figure 8.
# o Signing keys; this example uses the following:
# * RSA private key from Figure 4 for the first signature
# * EC private key from Figure 2 for the second signature
# * AES symmetric key from Figure 5 for the third signature
# o Signing algorithms; this example uses the following:
# * "RS256" for the first signature
# * "ES512" for the second signature
# * "HS256" for the third signature
# 4.8.2. First Signing Operation
# The following are generated before completing the first signing
# operation:
# o JWS Protected Header; this example uses the header from Figure 56,
# encoded using base64url [RFC4648] to produce Figure 57.
# o JWS Unprotected Header; this example uses the header from
# Figure 58.
# {
# "alg": "RS256"
# }
# Figure 56: Signature #1 JWS Protected Header JSON
# eyJhbGciOiJSUzI1NiJ9
# Figure 57: Signature #1 JWS Protected Header, base64url-encoded
# {
# "kid": "<EMAIL>"
# }
# Figure 58: Signature #1 JWS Unprotected Header JSON
# The JWS Protected Header (Figure 57) and JWS Payload (Figure 8) are
# combined as described in [JWS] to produce the JWS Signing Input
# (Figure 59).
# eyJhbGciOiJSUzI1NiJ9
# .
# SXTigJlzIGEgZGFuZ2Vyb3VzIGJ1c2luZXNzLCBGcm9kbywgZ29pbmcgb3V0IH
# lvdXIgZG9vci4gWW91IHN0ZXAgb250byB0aGUgcm9hZCwgYW5kIGlmIHlvdSBk
# b24ndCBrZWVwIHlvdXIgZmVldCwgdGhlcmXigJlzIG5vIGtub3dpbmcgd2hlcm
# UgeW91IG1pZ2h0IGJlIHN3ZXB0IG9mZiB0by4
# Figure 59: JWS Signing Input
# Performing the signature operation over the JWS Signing Input
# (Figure 59) produces the JWS Signature (Figure 60).
# MIsjqtVlOpa71KE-Mss8_Nq2YH4FGhiocsqrgi5NvyG53uoimic1tcMdSg-qpt
# rzZc7CG6Svw2Y13TDIqHzTUrL_lR2ZFcryNFiHkSw129EghGpwkpxaTn_THJTC
# glNbADko1MZBCdwzJxwqZc-1RlpO2HibUYyXSwO97BSe0_evZKdjvvKSgsIqjy
# tKSeAMbhMBdMma622_BG5t4sdbuCHtFjp9iJmkio47AIwqkZV1aIZsv33uPUqB
# BCXbYoQJwt7mxPftHmNlGoOSMxR_3thmXTCm4US-xiNOyhbm8afKK64jU6_TPt
# QHiJeQJxz9G3Tx-083B745_AfYOnlC9w
# Figure 60: JWS Signature #1, base64url-encoded
# The following is the assembled first signature serialized as JSON:
# {
# "protected": "<KEY>",
# "header": {
# "kid": "<EMAIL>"
# },
# "signature": "MIsjqtVlOpa71KE-Mss8_Nq2YH4FGhiocsqrgi5NvyG53u
# oimic1tcMdSg-qptrzZc7CG6Svw2Y13TDIqHzTUrL_lR2ZFcryNFiHkS
# w129EghGpwkpxaTn_THJTCglNbADko1MZBCdwzJxwqZc-1RlpO2HibUY
# yXSwO97BSe0_evZKdjvvKSgsIqjytKSeAMbhMBdMma622_BG5t4sdbuC
# HtFjp9iJmkio47AIwqkZV1aIZsv33uPUqBBCXbYoQJwt7mxPftHmNlGo
# OSMxR_3thmXTCm4US-xiNOyhbm8afKK64jU6_TPtQHiJeQJxz9G3Tx-0
# 83B745_AfYOnlC9w"
# }
# Figure 61: Signature #1 JSON
# 4.8.3. Second Signing Operation
# The following is generated before completing the second signing
# operation:
# o JWS Unprotected Header; this example uses the header from
# Figure 62.
# {
# "alg": "ES512",
# "kid": "<EMAIL>"
# }
# Figure 62: Signature #2 JWS Unprotected Header JSON
# The empty string (as there is no JWS Protected Header) and JWS
# Payload (Figure 8) are combined as described in [JWS] to produce the
# JWS Signing Input (Figure 63).
# .
# SXTigJlzIGEgZGFuZ2Vyb3VzIGJ1c2luZXNzLCBGcm9kbywgZ29pbmcgb3V0IH
# lvdXIgZG9vci4gWW91IHN0ZXAgb250byB0aGUgcm9hZCwgYW5kIGlmIHlvdSBk
# b24ndCBrZWVwIHlvdXIgZmVldCwgdGhlcmXigJlzIG5vIGtub3dpbmcgd2hlcm
# UgeW91IG1pZ2h0IGJlIHN3ZXB0IG9mZiB0by4
# Figure 63: JWS Signing Input
# Performing the signature operation over the JWS Signing Input
# (Figure 63) produces the JWS Signature (Figure 64).
# ARcVLnaJJaUWG8fG-8t5BREVAuTY8n8YHjwDO1muhcdCoFZFFjfISu0Cdkn9Yb
# dlmi54ho0x924DUz8sK7ZXkhc7AFM8ObLfTvNCrqcI3Jkl2U5IX3utNhODH6v7
# xgy1Qahsn0fyb4zSAkje8bAWz4vIfj5pCMYxxm4fgV3q7ZYhm5eD
# Figure 64: JWS Signature #2, base64url-encoded
# The following is the assembled second signature serialized as JSON:
# {
# "header": {
# "alg": "ES512",
# "kid": "<EMAIL>"
# },
# "signature": "ARcVLnaJJaUWG8fG-8t5BREVAuTY8n8YHjwDO1muhcdCoF
# ZFFjfISu0Cdkn9Ybdlmi54ho0x924DUz8sK7ZXkhc7AFM8ObLfTvNCrq
# cI3Jkl2U5IX3utNhODH6v7xgy1Qahsn0fyb4zSAkje8bAWz4vIfj5pCM
# Yxxm4fgV3q7ZYhm5eD"
# }
# Figure 65: Signature #2 JSON
# 4.8.4. Third Signing Operation
# The following is generated before completing the third signing
# operation:
# o JWS Protected Header; this example uses the header from Figure 66,
# encoded using base64url [RFC4648] to produce Figure 67.
# {
# "alg": "HS256",
# "kid": "018c0ae5-4d9b-471b-bfd6-eef314bc7037"
# }
# Figure 66: Signature #3 JWS Protected Header JSON
# <KEY>
# VlZjMxNGJjNzAzNyJ9
# Figure 67: Signature #3 JWS Protected Header, base64url-encoded
# The JWS Protected Header (Figure 67) and JWS Payload (Figure 8) are
# combined as described in [JWS] to produce the JWS Signing Input
# (Figure 68).
# <KEY>
# VlZjMxNGJjNzAzNyJ9
# .
# S<KEY>
# lvdXIgZG9vci4gWW91IHN0ZXAgb250byB0aGUgcm9hZCwgYW5kIGlmIHlvdSBk
# b24ndCBrZWVwIHlvdXIgZmVldCwgdGhlcmXigJlzIG5vIGtub3dpbmcgd2hlcm
# U<KEY>
# Figure 68: JWS Signing Input
# Performing the signature operation over the JWS Signing Input
# (Figure 68) produces the JWS Signature (Figure 69).
# s0h6KThzkfBBBkLspW1h84VsJZFTsPPqMDA7g1Md7p0
# Figure 69: JWS Signature #3, base64url-encoded
# The following is the assembled third signature serialized as JSON:
# {
# "protected": "<KEY>
# <KEY>",
# "signature": "s0h6KThzkfBBBkLspW1h84VsJZFTsPPqMDA7g1Md7p0"
# }
# Figure 70: Signature #3 JSON
# 4.8.5. Output Results
# The following compose the resulting JWS object:
# o Payload content (Figure 8)
# o Signature #1 JSON (Figure 61)
# o Signature #2 JSON (Figure 65)
# o Signature #3 JSON (Figure 70)
# The JWS Compact Serialization is not presented because it does not
# support this use case; the flattened JWS JSON Serialization is not
# presented because there is more than one signature.
# The resulting JWS object using the general JWS JSON Serialization:
# {
# "payload": "SXTigJlzIGEgZGFuZ2Vyb3VzIGJ1c2luZXNzLCBGcm9kbywg
# Z29pbmcgb3V0IHlvdXIgZG9vci4gWW91IHN0ZXAgb250byB0aGUgcm9h
# ZCwgYW5kIGlmIHlvdSBkb24ndCBrZWVwIHlvdXIgZmVldCwgdGhlcmXi
# gJlzIG5vIGtub3dpbmcgd2hlcmUgeW91IG1pZ2h0IGJlIHN3ZXB0IG9m
# ZiB0by4",
# "signatures": [
# {
# "protected": "eyJhbGciOiJSUzI1NiJ9",
# "header": {
# "kid": "<EMAIL>"
# },
# "signature": "MIsjqtVlOpa71KE-Mss8_Nq2YH4FGhiocsqrgi5Nvy
# G53uoimic1tcMdSg-qptrzZc7CG6Svw2Y13TDIqHzTUrL_lR2ZFc
# ryNFiHkSw129EghGpwkpxaTn_THJTCglNbADko1MZBCdwzJxwqZc
# -1RlpO2HibUYyXSwO97BSe0_evZKdjvvKSgsIqjytKSeAMbhMBdM
# ma622_BG5t4sdbuCHtFjp9iJmkio47AIwqkZV1aIZsv33uPUqBBC
# XbYoQJwt7mxPftHmNlGoOSMxR_3thmXTCm4US-xiNOyhbm8afKK6
# 4jU6_TPtQHiJeQJxz9G3Tx-083B745_AfYOnlC9w"
# },
# {
# "header": {
# "alg": "ES512",
# "kid": "<EMAIL>"
# },
# "signature": "ARcVLnaJJaUWG8fG-8t5BREVAuTY8n8YHjwDO1muhc
# dCoFZFFjfISu0Cdkn9Ybdlmi54ho0x924DUz8sK7ZXkhc7AFM8Ob
# LfTvNCrqcI3Jkl2U5IX3utNhODH6v7xgy1Qahsn0fyb4zSAkje8b
# AWz4vIfj5pCMYxxm4fgV3q7ZYhm5eD"
# },
# {
# "protected": "<KEY>
# RkOWItNDcxYi1iZmQ2LWVlZjMxNGJjNzAzNyJ9",
# "signature": "s0h6KThzkfBBBkLspW1h84VsJZFTsPPqMDA7g1Md7p
# 0"
# }
# ]
# }
# Figure 71: General JWS JSON Serialization
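# A minimal, illustrative sketch of the HS256 signing step above (Section 4.8.4) using
# only the Python standard library. The names `protected_b64`, `payload_b64`, and
# `hmac_key_bytes` are placeholders for the base64url values of Figures 67 and 8 and the
# raw octets of the symmetric key from Figure 5, which are not reproduced here.
import base64
import hashlib
import hmac
import json


def b64url(data: bytes) -> str:
    """base64url-encode without padding, as JWS requires."""
    return base64.urlsafe_b64encode(data).rstrip(b"=").decode("ascii")


def sign_hs256(protected_b64: str, payload_b64: str, hmac_key_bytes: bytes) -> dict:
    """Build one entry of the 'signatures' array for the general JWS JSON serialization."""
    signing_input = "{}.{}".format(protected_b64, payload_b64).encode("ascii")
    signature = hmac.new(hmac_key_bytes, signing_input, hashlib.sha256).digest()
    return {"protected": protected_b64, "signature": b64url(signature)}


def general_serialization(payload_b64: str, signatures: list) -> str:
    """Assemble an object shaped like Figure 71 (whitespace added for readability)."""
    return json.dumps({"payload": payload_b64, "signatures": signatures}, indent=2)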
# 5. JSON Web Encryption Examples
# The following sections demonstrate how to generate various JWE
# objects.
# All of the encryption examples (unless otherwise noted) use the
# following Plaintext content (an abridged quote from "The Fellowship
# of the Ring" [LOTR-FELLOWSHIP]), serialized as UTF-8. The Plaintext
# is presented here as a series of quoted strings that are concatenated
# to produce the JWE Plaintext. The sequence "\xe2\x80\x93" is
# substituted for (U+2013 EN DASH), and quotation marks (U+0022
# QUOTATION MARK) are added for readability but are not present in the
# JWE Plaintext.
# "You can trust us to stick with you through thick and "
# "thin\xe2\x80\x93to the bitter end. And you can trust us to "
# "keep any secret of yours\xe2\x80\x93closer than you keep it "
# "yourself. But you cannot trust us to let you face trouble "
# "alone, and go off without a word. We are your friends, Frodo."
# Figure 72: Plaintext Content
# 5.1. Key Encryption Using RSA v1.5 and AES-HMAC-SHA2
# This example illustrates encrypting content using the "RSA1_5"
# (RSAES-PKCS1-v1_5) key encryption algorithm and the "A128CBC-HS256"
# (AES-128-CBC-HMAC-SHA-256) content encryption algorithm.
# Note that RSAES-PKCS1-v1_5 uses random data to generate the
# ciphertext; it might not be possible to exactly replicate the results
# in this section.
# Note that only the RSA public key is necessary to perform the
# encryption. However, the example includes the RSA private key to
# allow readers to validate the output.
# Note that whitespace is added for readability as described in
# Section 1.1.
# 5.1.1. Input Factors
# The following are supplied before beginning the encryption process:
# o Plaintext content; this example uses the content from Figure 72.
# o RSA public key; this example uses the key from Figure 73.
# o "alg" parameter of "RSA1_5".
# o "enc" parameter of "A128CBC-HS256".
# {
# "kty": "RSA",
# "kid": "<EMAIL>",
# "use": "enc",
# "n": "maxhbsmBtdQ3CNrKvprUE6n9lYcregDMLYNeTAWcLj8NnPU9XIYegT
# HVHQjxKDSHP2l-F5jS7sppG1wgdAqZyhnWvXhYNvcM7RfgKxqNx_xAHx
# 6f3yy7s-M9PSNCwPC2lh6UAkR4I00EhV9lrypM9Pi4lBUop9t5fS9W5U
# NwaAllhrd-osQGPjIeI1deHTwx-ZTHu3C60Pu_LJIl6hKn9wbwaUmA4c
# R5Bd2pgbaY7ASgsjCUbtYJaNIHSoHXprUdJZKUMAzV0WOKPfA6OPI4oy
# pBadjvMZ4ZAj3BnXaSYsEZhaueTXvZB4eZOAjIyh2e_VOIKVMsnDrJYA
# VotGlvMQ",
# "e": "AQAB",
# "d": "<KEY>
# bQ1PLAHmpIbNTztfrheoAniRV1NCIqXaW_qS461xiDTp4ntEPnqcKsyO
# 5jMAji7-CL8vhpYYowNFvIesgMoVaPRYMYT9TW63hNM0aWs7USZ_hLg6
# O<KEY>
# 1GFULimrRdndm-P8q8kvN3KHlNAtEgrQAgTTgz80S-3VD0FgWfgnb1PN
# miuPUxO8OpI9KDIfu_acc6fg14nsNaJqXe6RESvhGPH2afjHqSy_Fd2v
# pzj85bQQ",
# "p": "<KEY>
# <KEY>
# 7HHTklLpYAzOOx1ZgVljoxAdWNn3hiEFrjZLZGS7lOH-a3QQlDDQoJOJ
# 2VFmU",
# "q": "te8LY4-W7IyaqH1ExujjMqkTAlTeRbv0VLQnfLY2xINnrWdwiQ93_V
# F099aP1ESeLja2nw-6iKIe-qT7mtCPozKfVtUYfz5HrJ_XY2kfexJINb
# 9lhZHMv5p1skZpeIS-GPHCC6gRlKo1q-idn_qxyusfWv7WAxlSVfQfk8
# d6Et0",
# "dp": "UfYKcL_or492vVc0PzwLSplbg4L3-Z5wL48mwiswbpzOyIgd2xHTH
# QmjJpFAIZ8q-zf9RmgJXkDrFs9rkdxPtAsL1WYdeCT5c125Fkdg317JV
# RDo1inX7x2Kdh8ERCreW8_4zXItuTl_KiXZNU5lvMQjWbIw2eTx1lpsf
# lo0rYU",
# "dq": "iEgcO-QfpepdH8FWd7mUFyrXdnOkXJBCogChY6YKuIHGc_p8Le9Mb
# pFKESzEaLlN1Ehf3B6oGBl5Iz_ayUlZj2IoQZ82znoUrpa9fVYNot87A
# CfzIG7q9Mv7RiPAderZi03tkVXAdaBau_9vs5rS-7HMtxkVrxSUvJY14
# TkXlHE",
# "qi": "<KEY>
# lXYx6RtE1n_AagjqajlkjieGlxTTThHD8Iga6foGBMaAr5uR1hGQpSc7
# Gl7CF1DZkBJMTQN6EshYzZfxW08mIO8M6Rzuh0beL6fG9mkDcIyPrBXx
# 2bQ_mM"
# }
# Figure 73: RSA 2048-Bit Key, in JWK Format
# (NOTE: While the key includes the private parameters, only the public
# parameters "e" and "n" are necessary for the encryption operation.)
# 5.1.2. Generated Factors
# The following are generated before encrypting:
# o AES symmetric key as the Content Encryption Key (CEK); this
# example uses the key from Figure 74.
# o Initialization Vector; this example uses the Initialization Vector
# from Figure 75.
# 3qyTVhIWt5juqZUCpfRqpvauwB956MEJL2Rt-8qXKSo
# Figure 74: Content Encryption Key, base64url-encoded
# bbd5sTkYwhAIqfHsx8DayA
# Figure 75: Initialization Vector, base64url-encoded
# 5.1.3. Encrypting the Key
# Performing the key encryption operation over the CEK (Figure 74) with
# the RSA key (Figure 73) results in the following Encrypted Key:
# <KEY>
# vG2K-pvSlWc9BRIazDrn50RcRai__3TDON395H3c62tIouJJ4XaRvYHFjZTZ2G
# Xfz8YAImcc91Tfk0WXC2F5Xbb71ClQ1DDH151tlpH77f2ff7xiSxh9oSewYrcG
# TSLUeeCt36r1Kt3OSj7EyBQXoZlN7IxbyhMAfgIe7Mv1rOTOI5I8NQqeXXW8Vl
# zNmoxaGMny3YnGir5Wf6Qt2nBq4qDaPdnaAuuGUGEecelIO1wx1BpyIfgvfjOh
# MBs9M8XL223Fg47xlGsMXdfuY-4jaqVw
# Figure 76: Encrypted Key, base64url-encoded
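# Hedged sketch of the RSAES-PKCS1-v1_5 key-encryption step above. It assumes a recent
# version of the third-party `cryptography` package (not mandated by the RFC); because
# PKCS#1 v1.5 padding is randomized, the output will not match Figure 76 byte for byte.
import base64

from cryptography.hazmat.primitives.asymmetric import padding, rsa


def b64url_to_int(value: str) -> int:
    """Decode a base64url big-endian integer such as the JWK 'n' and 'e' fields."""
    value += "=" * (-len(value) % 4)  # restore stripped padding
    return int.from_bytes(base64.urlsafe_b64decode(value), "big")


def encrypt_cek_rsa1_5(jwk: dict, cek: bytes) -> bytes:
    """Encrypt the Content Encryption Key (Figure 74, decoded) with the public key of Figure 73."""
    public_key = rsa.RSAPublicNumbers(
        e=b64url_to_int(jwk["e"]), n=b64url_to_int(jwk["n"])
    ).public_key()
    return public_key.encrypt(cek, padding.PKCS1v15())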
"""
Conversion functions between corresponding data structures.
"""
import json
import logging
from collections import Hashable, OrderedDict # pylint: disable=E0611,no-name-in-module # moved to .abc in Python 3
from copy import deepcopy
from tempfile import TemporaryDirectory
from typing import TYPE_CHECKING
from urllib.parse import urlparse
from owslib.wps import (
ComplexData,
Input as OWS_Input_Type,
Metadata as OWS_Metadata,
Output as OWS_Output_Type,
is_reference
)
from pywps import Process as ProcessWPS
from pywps.app.Common import Metadata as WPS_Metadata
from pywps.inout import BoundingBoxInput, BoundingBoxOutput, ComplexInput, ComplexOutput, LiteralInput, LiteralOutput
from pywps.inout.basic import BasicIO
from pywps.inout.formats import Format
from pywps.inout.literaltypes import ALLOWEDVALUETYPE, RANGECLOSURETYPE, AllowedValue, AnyValue
from pywps.validator.mode import MODE
from weaver import xml_util
from weaver.exceptions import PackageTypeError
from weaver.execute import (
EXECUTE_MODE_ASYNC,
EXECUTE_RESPONSE_DOCUMENT,
EXECUTE_TRANSMISSION_MODE_REFERENCE,
EXECUTE_TRANSMISSION_MODE_VALUE
)
from weaver.formats import (
CONTENT_TYPE_ANY,
CONTENT_TYPE_APP_JSON,
CONTENT_TYPE_TEXT_PLAIN,
get_cwl_file_format,
get_extension,
get_format
)
from weaver.processes.constants import (
CWL_REQUIREMENT_APP_WPS1,
PACKAGE_ARRAY_BASE,
PACKAGE_ARRAY_ITEMS,
PACKAGE_ARRAY_MAX_SIZE,
PACKAGE_ARRAY_TYPES,
PACKAGE_CUSTOM_TYPES,
PACKAGE_ENUM_BASE,
PACKAGE_LITERAL_TYPES,
PROCESS_SCHEMA_OGC,
PROCESS_SCHEMA_OLD,
WPS_BOUNDINGBOX,
WPS_COMPLEX,
WPS_COMPLEX_DATA,
WPS_INPUT,
WPS_LITERAL,
WPS_LITERAL_DATA_TYPE_NAMES,
WPS_OUTPUT,
WPS_REFERENCE
)
from weaver.utils import (
bytes2str,
fetch_file,
get_any_id,
get_sane_name,
get_url_without_query,
null,
str2bytes,
transform_json
)
from weaver.wps.utils import get_wps_client
if TYPE_CHECKING:
from typing import Any, Dict, List, Optional, Tuple, Type, Union
from urllib.parse import ParseResult
from pywps.app import WPSRequest
from owslib.wps import Process as ProcessOWS
from requests.models import Response
from weaver.typedefs import (
AnySettingsContainer,
AnyValueType,
CWL,
CWL_IO_EnumSymbols,
CWL_IO_Value,
CWL_Input_Type,
CWL_Output_Type,
JSON
)
# typing shortcuts
# pylint: disable=C0103,invalid-name
WPS_Input_Type = Union[LiteralInput, ComplexInput, BoundingBoxInput]
WPS_Output_Type = Union[LiteralOutput, ComplexOutput, BoundingBoxOutput]
WPS_IO_Type = Union[WPS_Input_Type, WPS_Output_Type]
OWS_IO_Type = Union[OWS_Input_Type, OWS_Output_Type]
JSON_IO_Type = JSON
JSON_IO_ListOrMap = Union[List[JSON], Dict[str, Union[JSON, str]]]
CWL_IO_Type = Union[CWL_Input_Type, CWL_Output_Type]
PKG_IO_Type = Union[JSON_IO_Type, WPS_IO_Type]
ANY_IO_Type = Union[CWL_IO_Type, JSON_IO_Type, WPS_IO_Type, OWS_IO_Type]
ANY_Format_Type = Union[Dict[str, Optional[str]], Format]
ANY_Metadata_Type = Union[OWS_Metadata, WPS_Metadata, Dict[str, str]]
# WPS object attribute -> all possible *other* naming variations (no need to repeat key name)
WPS_FIELD_MAPPING = {
"identifier": ["id", "ID", "Id", "Identifier"],
"title": ["Title", "Label", "label"],
"abstract": ["description", "Description", "Abstract"],
"version": ["processVersion", "Version"],
"metadata": ["Metadata"],
"keywords": ["Keywords"],
"allowed_values": ["AllowedValues", "allowedValues", "allowedvalues", "Allowed_Values", "Allowedvalues"],
"allowed_collections": ["AllowedCollections", "allowedCollections", "allowedcollections", "Allowed_Collections",
"Allowedcollections"],
"any_value": ["anyvalue", "anyValue", "AnyValue"],
"literal_data_domains": ["literalDataDomains"],
"default": ["default_value", "defaultValue", "DefaultValue", "Default", "data_format", "data"],
"supported_values": ["SupportedValues", "supportedValues", "supportedvalues", "Supported_Values"],
"supported_formats": ["SupportedFormats", "supportedFormats", "supportedformats", "Supported_Formats", "formats"],
"additional_parameters": ["AdditionalParameters", "additionalParameters", "additionalparameters",
"Additional_Parameters"],
"type": ["Type", "data_type", "dataType", "DataType", "Data_Type"],
"min_occurs": ["minOccurs", "MinOccurs", "Min_Occurs", "minoccurs"],
"max_occurs": ["maxOccurs", "MaxOccurs", "Max_Occurs", "maxoccurs"],
"max_megabytes": ["maximumMegabytes", "max_size"],
"mime_type": ["mimeType", "MimeType", "mime-type", "Mime-Type", "mimetype",
"mediaType", "MediaType", "media-type", "Media-Type", "mediatype"],
"range_minimum": ["minval", "minimum", "minimumValue"],
"range_maximum": ["maxval", "maximum", "maximumValue"],
"range_spacing": ["spacing"],
"range_closure": ["closure", "rangeClosure"],
"encoding": ["Encoding"],
"href": ["url", "link", "reference"],
}
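# Minimal illustrative sketch (not part of Weaver's API): 'resolve_field' is a hypothetical
# helper showing how the variation table above can be used to read a field from a raw
# dictionary regardless of which naming convention the remote service happened to use.
def resolve_field(mapping_key, container):
    # type: (str, Dict[str, Any]) -> Any
    """Return the first value found among the known name variations, or the 'null' sentinel."""
    for name in [mapping_key] + WPS_FIELD_MAPPING.get(mapping_key, []):
        if name in container:
            return container[name]
    return null


# e.g. resolve_field("mime_type", {"mediaType": "application/json"}) == "application/json"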
# WPS fields that contain a structure corresponding to `Format` object
# - keys must match `WPS_FIELD_MAPPING` keys
# - fields are placed in order of relevance (prefer explicit format, then supported, and defaults as last resort)
WPS_FIELD_FORMAT = ["formats", "supported_formats", "supported_values", "default"]
# WPS 'type' string variations employed to indicate a Complex (file) I/O by different libraries
# for literal types, see 'any2cwl_literal_datatype' and 'any2wps_literal_datatype' functions
WPS_COMPLEX_TYPES = [WPS_COMPLEX, WPS_COMPLEX_DATA, WPS_REFERENCE]
# WPS 'type' string of all combinations (type of data / library implementation)
WPS_ALL_TYPES = [WPS_LITERAL, WPS_BOUNDINGBOX] + WPS_COMPLEX_TYPES
# default format if missing (minimal requirement of one)
DEFAULT_FORMAT = Format(mime_type=CONTENT_TYPE_TEXT_PLAIN)
DEFAULT_FORMAT_MISSING = "__DEFAULT_FORMAT_MISSING__"
setattr(DEFAULT_FORMAT, DEFAULT_FORMAT_MISSING, True)
INPUT_VALUE_TYPE_MAPPING = {
"bool": bool,
"boolean": bool,
"file": str,
"File": str,
"float": float,
"int": int,
"integer": int,
"str": str,
"string": str,
}
LOGGER = logging.getLogger(__name__)
def complex2json(data):
# type: (Union[ComplexData, Any]) -> Union[JSON, Any]
"""
Obtains the JSON representation of a :class:`ComplexData` or simply returns the unmatched type.
"""
if not isinstance(data, ComplexData):
return data
# backward compat based on OWSLib version, field did not always exist
max_mb = getattr(data, "maximumMegabytes", None)
if isinstance(max_mb, str) and max_mb.isnumeric():
max_mb = int(max_mb)
return {
"mimeType": data.mimeType,
"encoding": data.encoding,
"schema": data.schema,
"maximumMegabytes": max_mb,
"default": False, # always assume it is a supported format/value, caller should override
}
def metadata2json(meta, force=False):
# type: (Union[ANY_Metadata_Type, Any], bool) -> Union[JSON, Any]
"""
Retrieve metadata information and generate its JSON representation.
Obtains the JSON representation of a :class:`OWS_Metadata` or :class:`pywps.app.Common.Metadata`.
Otherwise, simply return the unmatched type.
If requested, can enforce parsing a dictionary for the corresponding keys.
"""
if not force and not isinstance(meta, (OWS_Metadata, WPS_Metadata)):
return meta
title = get_field(meta, "title", search_variations=True, default=None)
href = get_field(meta, "href", search_variations=True, default=None)
role = get_field(meta, "role", search_variations=True, default=None)
rel = get_field(meta, "rel", search_variations=True, default=None)
# many remote servers do not provide the 'rel', but instead provide 'title' or 'role'
# build one by default to avoid failing schemas that expect 'rel' to exist
if not rel:
href_rel = urlparse(href).hostname
rel = str(title or role or href_rel).lower() # fallback to first available
rel = get_sane_name(rel, replace_character="-", assert_invalid=False)
return {"href": href, "title": title, "role": role, "rel": rel}
def ows2json_field(ows_field):
# type: (Union[ComplexData, OWS_Metadata, AnyValueType]) -> Union[JSON, AnyValueType]
"""
Obtains the JSON or raw value from an :mod:`owslib.wps` I/O field.
"""
if isinstance(ows_field, ComplexData):
return complex2json(ows_field)
if isinstance(ows_field, OWS_Metadata):
return metadata2json(ows_field)
return ows_field
def ows2json_io(ows_io):
# type: (OWS_IO_Type) -> JSON_IO_Type
"""
Converts I/O definition from :mod:`owslib.wps` to JSON.
"""
json_io = dict()
for field in WPS_FIELD_MAPPING:
value = get_field(ows_io, field, search_variations=True)
# preserve numeric values (ex: "minOccurs"=0) as actual parameters
# ignore undefined values represented by `null`, empty list, or empty string
if value or value in [0, 0.0]:
if isinstance(value, list):
# complex data is converted as is
# metadata converted and preserved if it results in a minimally valid definition (otherwise dropped)
json_io[field] = [
complex2json(v) if isinstance(v, ComplexData) else
metadata2json(v) if isinstance(v, OWS_Metadata) else v
for v in value if not isinstance(v, OWS_Metadata) or v.url is not None
]
elif isinstance(value, ComplexData):
json_io[field] = complex2json(value)
elif isinstance(value, OWS_Metadata):
json_io[field] = metadata2json(value)
else:
json_io[field] = value
json_io["id"] = get_field(json_io, "identifier", search_variations=True, pop_found=True)
io_type = json_io.get("type")
# add 'format' if missing, derived from other variants
if io_type == WPS_COMPLEX_DATA:
fmt_default = False
if "default" in json_io and isinstance(json_io["default"], dict):
json_io["default"]["default"] = True # provide for workflow extension (internal), schema drops it (API)
fmt_default = True
# retrieve alternate format definitions
if "formats" not in json_io:
# complex data 'formats' from OWSLib can end up stored in 'supported_values' by the initial fields loop above
fmt_val = get_field(json_io, "supported_values", pop_found=True)
if fmt_val:
json_io["formats"] = fmt_val
else:
# search for format fields directly specified in I/O body
for field in WPS_FIELD_FORMAT:
fmt = get_field(json_io, field, search_variations=True)
if not fmt:
continue
if isinstance(fmt, dict):
fmt = [fmt]
fmt = filter(lambda f: isinstance(f, dict), fmt)
if not isinstance(json_io.get("formats"), list):
json_io["formats"] = []
for var_fmt in fmt:
# add it only if not exclusively provided by a previous variant
json_fmt_items = [j_fmt.items() for j_fmt in json_io["formats"]]
if any(all(var_item in items for var_item in var_fmt.items()) for items in json_fmt_items):
continue
json_io["formats"].append(var_fmt)
json_io.setdefault("formats", [])
# apply the default flag
for fmt in json_io["formats"]:
fmt["default"] = fmt_default and is_equal_formats(json_io["default"], fmt)
if fmt["default"]:
break
# NOTE:
# Don't apply 'minOccurs=0' as in below literal case because default 'format' does not imply that unspecified
# input is valid, but rather that given an input without explicit 'format' specified, that 'default' is used.
return json_io
# add value constraints in specifications
elif io_type in WPS_LITERAL_DATA_TYPE_NAMES:
domains = any2json_literal_data_domains(ows_io)
if domains:
json_io["literalDataDomains"] = domains
# fix inconsistencies of some process descriptions
# WPS are allowed to report 'minOccurs=1' although 'defaultValue' can also be provided
# (see https://github.com/geopython/pywps/issues/625)
if "defaultValue" in domains[0]:
json_io["min_occurs"] = 0
return json_io
# FIXME: add option to control auto-fetch, disable during workflow by default to avoid double downloads?
# (https://github.com/crim-ca/weaver/issues/183)
def ows2json_output_data(output, process_description, container=None):
# type: (OWS_Output_Type, ProcessOWS, Optional[AnySettingsContainer]) -> JSON
"""
Utility method to convert an :mod:`owslib.wps` process execution output data (result) to `JSON`.
In the case that a ``reference`` output of `JSON` content-type is specified and that it refers to a file that
contains an array list of URL references to simulate a multiple-output, this specific output gets expanded to
contain both the original URL ``reference`` field and the loaded URL list under ``data`` field for easier access
from the response body.
Referenced file(s) are fetched in order to store them locally if executed on a remote process, such that they can
become accessible as local job result for following reporting or use by other processes in a workflow chain.
If the ``dataType`` details is missing from the data
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Postprocessing for anchor-based detection."""
from typing import List, Tuple
from absl import logging
import tensorflow as tf
import utils
from keras import anchors
T = tf.Tensor # a shortcut for typing check.
CLASS_OFFSET = 1
def to_list(inputs):
if isinstance(inputs, dict):
return [inputs[k] for k in sorted(inputs.keys())]
if isinstance(inputs, list):
return inputs
raise ValueError('Unrecognized inputs : {}'.format(inputs))
def batch_map_fn(map_fn, inputs, *args, **kwargs):
"""Apply map_fn at batch dimension."""
if isinstance(inputs[0], (list, tuple)):
batch_size = len(inputs[0])
else:
batch_size = inputs[0].shape.as_list()[0]
if not batch_size:
# use tf.map_fn to handle dynamic batch_size.
return tf.map_fn(map_fn, inputs, *args, **kwargs)
outputs = []
for i in range(batch_size):
outputs.append(map_fn([x[i] for x in inputs]))
return [tf.stack(y) for y in zip(*outputs)]
def clip_boxes(boxes: T, image_size: int) -> T:
"""Clip boxes to fit the image size."""
image_size = utils.parse_image_size(image_size) * 2
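  # parse_image_size returns the image size as a 2-tuple; repeating it gives a
  # 4-element upper bound matching the [y_min, x_min, y_max, x_max] box layout.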
return tf.clip_by_value(boxes, [0], image_size)
def merge_class_box_level_outputs(params, cls_outputs: List[T],
box_outputs: List[T]) -> Tuple[T, T]:
"""Concatenates class and box of all levels into one tensor."""
cls_outputs_all, box_outputs_all = [], []
batch_size = tf.shape(cls_outputs[0])[0]
for level in range(0, params['max_level'] - params['min_level'] + 1):
if params['data_format'] == 'channels_first':
cls_outputs[level] = tf.transpose(cls_outputs[level], [0, 2, 3, 1])
box_outputs[level] = tf.transpose(box_outputs[level], [0, 2, 3, 1])
cls_outputs_all.append(
tf.reshape(cls_outputs[level], [batch_size, -1, params['num_classes']]))
box_outputs_all.append(tf.reshape(box_outputs[level], [batch_size, -1, 4]))
return tf.concat(cls_outputs_all, 1), tf.concat(box_outputs_all, 1)
def topk_class_boxes(params, cls_outputs: T,
box_outputs: T) -> Tuple[T, T, T, T]:
"""Pick the topk class and box outputs."""
batch_size = tf.shape(cls_outputs)[0]
num_classes = params['num_classes']
max_nms_inputs = params['nms_configs'].get('max_nms_inputs', 0)
if max_nms_inputs > 0:
# Prune anchors and detections to only keep max_nms_inputs.
# Due to some issues, top_k is currently slow in graph mode.
logging.info('use max_nms_inputs for pre-nms topk.')
cls_outputs_reshape = tf.reshape(cls_outputs, [batch_size, -1])
_, cls_topk_indices = tf.math.top_k(
cls_outputs_reshape, k=max_nms_inputs, sorted=False)
indices = cls_topk_indices // num_classes
classes = cls_topk_indices % num_classes
cls_indices = tf.stack([indices, classes], axis=2)
cls_outputs_topk = tf.gather_nd(cls_outputs, cls_indices, batch_dims=1)
box_outputs_topk = tf.gather_nd(
box_outputs, tf.expand_dims(indices, 2), batch_dims=1)
else:
logging.info('use max_reduce for pre-nms topk.')
# Keep all anchors, but for each anchor, keep only the class with the
# maximum probability.
cls_outputs_idx = tf.math.argmax(cls_outputs, axis=-1, output_type=tf.int32)
num_anchors = tf.shape(cls_outputs)[1]
classes = cls_outputs_idx
indices = tf.tile(
tf.expand_dims(tf.range(num_anchors), axis=0), [batch_size, 1])
cls_outputs_topk = tf.reduce_max(cls_outputs, -1)
box_outputs_topk = box_outputs
return cls_outputs_topk, box_outputs_topk, classes, indices
def pre_nms(params, cls_outputs, box_outputs, topk=True):
"""Detection post processing before nms.
It takes the multi-level class and box predictions from network, merge them
into unified tensors, and compute boxes, scores, and classes.
Args:
params: a dict of parameters.
cls_outputs: a list of tensors for classes, each tensor denotes a level of
logits with shape [N, H, W, num_class * num_anchors].
box_outputs: a list of tensors for boxes, each tensor denotes a level of
boxes with shape [N, H, W, 4 * num_anchors].
topk: if True, select topk before nms (mainly to speed up nms).
Returns:
A tuple of (boxes, scores, classes).
"""
# get boxes by apply bounding box regression to anchors.
eval_anchors = anchors.Anchors(params['min_level'], params['max_level'],
params['num_scales'], params['aspect_ratios'],
params['anchor_scale'], params['image_size'])
cls_outputs, box_outputs = merge_class_box_level_outputs(
params, cls_outputs, box_outputs)
if topk:
# select topK purely based on scores before NMS, in order to speed up nms.
cls_outputs, box_outputs, classes, indices = topk_class_boxes(
params, cls_outputs, box_outputs)
anchor_boxes = tf.gather(eval_anchors.boxes, indices)
else:
anchor_boxes = eval_anchors.boxes
classes = None
boxes = anchors.decode_box_outputs(box_outputs, anchor_boxes)
# convert logits to scores.
scores = tf.math.sigmoid(cls_outputs)
return boxes, scores, classes
def nms(params, boxes: T, scores: T, classes: T,
padded: bool) -> Tuple[T, T, T, T]:
"""Non-maximum suppression.
Args:
params: a dict of parameters.
boxes: a tensor with shape [N, 4], where N is the number of boxes. Box
format is [y_min, x_min, y_max, x_max].
scores: a tensor with shape [N].
classes: a tensor with shape [N].
padded: a bool value indicating whether the results are padded.
Returns:
A tuple (boxes, scores, classes, valid_lens), where valid_lens is a scalar
denoting the valid length of boxes/scores/classes outputs.
"""
nms_configs = params['nms_configs']
method = nms_configs['method']
max_output_size = nms_configs['max_output_size']
if method == 'hard' or not method:
# hard nms.
sigma = 0.0
iou_thresh = nms_configs['iou_thresh'] or 0.5
score_thresh = nms_configs['score_thresh'] or float('-inf')
elif method == 'gaussian':
sigma = nms_configs['sigma'] or 0.5
iou_thresh = nms_configs['iou_thresh'] or 0.3
score_thresh = nms_configs['score_thresh'] or 0.001
else:
raise ValueError('Inference has invalid nms method {}'.format(method))
# TF API's sigma is twice as the paper's value, so here we divide it by 2:
# https://github.com/tensorflow/tensorflow/issues/40253.
nms_top_idx, nms_scores, nms_valid_lens = tf.raw_ops.NonMaxSuppressionV5(
boxes=boxes,
scores=scores,
max_output_size=max_output_size,
iou_threshold=iou_thresh,
score_threshold=score_thresh,
soft_nms_sigma=(sigma / 2),
pad_to_max_output_size=padded)
nms_boxes = tf.gather(boxes, nms_top_idx)
nms_classes = tf.cast(
tf.gather(classes, nms_top_idx) + CLASS_OFFSET, tf.float32)
return nms_boxes, nms_scores, nms_classes, nms_valid_lens
def postprocess_combined(params, cls_outputs, box_outputs, image_scales=None):
"""Post processing with combined NMS.
Leverage the tf combined NMS. It is fast on TensorRT, but slow on CPU/GPU.
Args:
params: a dict of parameters.
cls_outputs: a list of tensors for classes, each tensor denotes a level of
logits with shape [N, H, W, num_class * num_anchors].
box_outputs: a list of tensors for boxes, each tensor denotes a level of
boxes with shape [N, H, W, 4 * num_anchors]. Each box format is [y_min,
x_min, y_max, x_max].
image_scales: scaling factor for the final image and bounding boxes.
Returns:
A tuple of batch level (boxes, scores, classes, valid_len) after nms.
"""
cls_outputs = to_list(cls_outputs)
box_outputs = to_list(box_outputs)
# Don't filter any outputs because combine_nms need the raw information.
boxes, scores, _ = pre_nms(params, cls_outputs, box_outputs, topk=False)
max_output_size = params['nms_configs']['max_output_size']
score_thresh = params['nms_configs']['score_thresh'] or float('-inf')
nms_boxes, nms_scores, nms_classes, nms_valid_len = (
tf.image.combined_non_max_suppression(
tf.expand_dims(boxes, axis=2),
scores,
max_output_size,
max_output_size,
score_threshold=score_thresh,
clip_boxes=False))
nms_classes += CLASS_OFFSET
nms_boxes = clip_boxes(nms_boxes, params['image_size'])
if image_scales is not None:
scales = tf.expand_dims(tf.expand_dims(image_scales, -1), -1)
nms_boxes = nms_boxes * tf.cast(scales, nms_boxes.dtype)
return nms_boxes, nms_scores, nms_classes, nms_valid_len
def postprocess_global(params, cls_outputs, box_outputs, image_scales=None):
"""Post processing with global NMS.
A fast but less accurate version of NMS. The idea is to treat the scores for
different classes in a unified way, and perform NMS globally for all classes.
Args:
params: a dict of parameters.
cls_outputs: a list of tensors for classes, each tensor denotes a level of
logits with shape [N, H, W, num_class * num_anchors].
box_outputs: a list of tensors for boxes, each tensor denotes a level of
boxes with shape [N, H, W, 4 * num_anchors]. Each box format is [y_min,
x_min, y_max, x_max].
image_scales: scaling factor for the final image and bounding boxes.
Returns:
A tuple of batch level (boxes, scores, classes, valid_len) after nms.
"""
cls_outputs = to_list(cls_outputs)
box_outputs = to_list(box_outputs)
boxes, scores, classes = pre_nms(params, cls_outputs, box_outputs)
def single_batch_fn(element):
return nms(params, element[0], element[1], element[2], True)
dtype = scores.dtype
nms_boxes, nms_scores, nms_classes, nms_valid_len = batch_map_fn(
single_batch_fn,
[boxes, scores, classes],
dtype=(dtype, dtype, dtype, tf.int32))
nms_boxes = clip_boxes(nms_boxes, params['image_size'])
if image_scales is not None:
scales = tf.expand_dims(tf.expand_dims(image_scales, -1), -1)
nms_boxes = nms_boxes * tf.cast(scales, nms_boxes.dtype)
return nms_boxes, nms_scores, nms_classes, nms_valid_len
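# A hedged usage sketch for the function above. Everything in it is illustrative: the
# parameter values are invented and only the dictionary keys are taken from the code in
# this module; the aspect_ratios format is an assumption about anchors.Anchors.
def _example_postprocess_global():
  """Illustrative only: shows the config keys read above and the expected tensor shapes."""
  params = {
      'num_classes': 3,
      'min_level': 3,
      'max_level': 4,
      'num_scales': 3,
      'aspect_ratios': [1.0, 2.0, 0.5],  # assumption, see lead-in comment
      'anchor_scale': 4.0,
      'image_size': 128,
      'data_format': 'channels_last',
      'nms_configs': {
          'method': 'gaussian',
          'iou_thresh': None,       # None -> per-method defaults in nms()
          'score_thresh': None,
          'sigma': None,
          'max_nms_inputs': 0,
          'max_output_size': 100,
      },
  }
  num_anchors = params['num_scales'] * len(params['aspect_ratios'])
  batch = 2
  # One feature map per level: 128 / 2**3 = 16 and 128 / 2**4 = 8.
  cls_outputs = [
      tf.zeros([batch, size, size, params['num_classes'] * num_anchors])
      for size in (16, 8)
  ]
  box_outputs = [tf.zeros([batch, size, size, 4 * num_anchors]) for size in (16, 8)]
  return postprocess_global(
      params, cls_outputs, box_outputs, image_scales=tf.ones([batch]))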
def per_class_nms(params, boxes, scores, classes, image_scales=None):
"""Per-class nms, a utility for postprocess_per_class.
Args:
params: a dict of parameters.
boxes: A tensor with shape [N, K, 4], where N is batch_size, K is num_boxes.
Box format is [y_min, x_min, y_max, x_max].
scores: A tensor with shape [N, K].
classes: A tensor with shape [N, K].
image_scales: scaling factor for the final image and bounding boxes.
Returns:
A tuple of batch level (boxes, scores, classes, valid_len) after nms.
"""
def single_batch_fn(element):
"""A mapping function for a single batch."""
boxes_i, scores_i, classes_i = element[0], element[1], element[2]
nms_boxes_cls, nms_scores_cls, nms_classes_cls = [], [], []
nms_valid_len_cls = []
for cid in range(params['num_classes']):
indices = tf.where(tf.equal(classes_i, cid))
if indices.shape[0] == 0:
continue
classes_cls = tf.gather_nd(classes_i, indices)
boxes_cls = tf.gather_nd(boxes_i, indices)
) from None
else:
try:
a.pop(b)
except TypeError:
raise TypeError(
"{} object cannot be interpreted as an integer".format(
type(b).__name__
)
) from None
return a
def removeByIndex_(self, b):
""" Removes the value at specified index or indices in-place. """
if isinstance(b, Iterable):
try:
c = 0
for i in b:
self.pop(i - c)
c += 1
except TypeError:
raise TypeError(
"{} object cannot be interpreted as an integer".format(
type(i).__name__
)
) from None
else:
try:
self.pop(b)
except TypeError:
raise TypeError(
"{} object cannot be interpreted as an integer".format(
type(b).__name__
)
) from None
return self
def get(self, key, default=None):
"""
Safe method for getting elements in the Array.
Returns default if the index does not exist.
"""
try:
return self[key]
except IndexError:
return default
def map(self, l):
"""
Returns an Array by applying provided function to all elements of this Array.
"""
return Array(map(l, self))
def starmap(self, l):
"""
Returns an Array by applying provided function
to this Array with itertools.starmap
"""
return Array(itertools.starmap(l, self))
def parmap(self, fun, processes=None):
"""
Returns an Array by applying a function to
all elements of this Array in parallel.
"""
with multiprocessing.Pool(processes=processes) as pool:
return Array(pool.map(fun, self))
def parstarmap(self, fun, processes=None):
""" Parallel starmap """
with multiprocessing.Pool(processes=processes) as pool:
return Array(pool.starmap(fun, self))
def asyncmap(self, fun, max_workers=None):
"""
Executes map asynchronously.
Returns a Future object.
"""
executor = ThreadPoolExecutor(max_workers=max_workers)
try:
return executor.submit(self.map, fun)
finally:
executor.shutdown(wait=False)
def asyncstarmap(self, fun, max_workers=None):
"""
Executes starmap asynchronously.
Returns a Future object.
"""
executor = ThreadPoolExecutor(max_workers=max_workers)
try:
return executor.submit(self.starmap, fun)
finally:
executor.shutdown(wait=False)
def filter(self, l):
""" Selects elements of this Array which satisfy the predicate. """
return Array(filter(l, self))
def forall(self, l):
"""
Returns whether the specified predicate
holds for all elements of this Array.
"""
return all(map(l, self))
def forany(self, l):
"""
Returns whether the specified predicate
holds for any element of this Array.
"""
return any(map(l, self))
def reduce(self, l, init=None):
""" Reduces the elements of this Array using the specified operator. """
if init is not None:
return reduce(l, self, init)
return reduce(l, self)
def contains(self, e):
""" Tests wheter element exists in this Array. """
return e in self
def indexWhere(self, l):
""" Finds the index of the first element satisfying a predicate. """
for i, v in enumerate(self):
if l(v):
return i
raise ValueError("No matches")
def indicesWhere(self, l):
""" Finds all the indices of the elements satisfying a predicate. """
return Array(i for i, v in enumerate(self) if l(v))
def indices(self, e):
""" Returns all the indices of provided value in this Array. """
r = []
o = -1
while True:
try:
o = self.index(e, o + 1)
except ValueError:
return Array(r)
r.append(o)
def split(self, c):
"""
Splits an Array into subarrays using provided argument as the delimiter element.
"""
try:
i = self.index(c)
v = self[:i].unsqueeze if i != 0 else Array()
for e in self[i + 1 :].split(c):
if len(e) != 0:
v.append(e)
return v
except ValueError:
return self.unsqueeze
def splitTo(self, n):
""" Splits this Array to n equal length subarrays """
if self.size % n != 0:
raise ValueError("Split does not result in an equal division")
d = self.size // n
return Array(self[d * i : d * (i + 1)] for i in range(n))
def splitAt(self, n):
""" Splits this Array into subarrays at specified index or indices. """
if isinstance(n, int):
return Array(self[:n], self[n:])
n = Array(0, *n, self.size)
return Array(self[n[i] : n[i + 1]] for i in range(n.size - 1))
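# Illustrative examples (using the varargs construction shown in the unzip docstring below):
#   Array(1, 2, 3, 4, 5, 6).splitTo(2)   -> Array(Array(1, 2, 3), Array(4, 5, 6))
#   Array(1, 2, 3, 4, 5).splitAt(2)      -> Array(Array(1, 2), Array(3, 4, 5))
#   Array(1, 2, 3, 4, 5).splitAt((1, 3)) -> Array(Array(1), Array(2, 3), Array(4, 5))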
def takeWhile(self, l):
""" Takes the longest prefix of elements that satisfy the given predicate. """
return Array(itertools.takewhile(l, self))
def dropWhile(self, l):
""" Drops the longest prefix of elements that satisfy the given predicate. """
return Array(itertools.dropwhile(l, self))
def groupBy(self, l):
"""
Groups this Array into an Array of Array-tuples according
to given discriminator function.
"""
m = {}
for v in self:
k = l(v)
if k in m:
m[k].append(v)
else:
m[k] = Array([v])
return Array(m.items())
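# Illustrative example: grouping integers by parity. Keys keep first-appearance order,
# so Array(1, 2, 3, 4).groupBy(lambda v: v % 2) yields the pairs
# (1, Array(1, 3)) and (0, Array(2, 4)).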
def maxBy(self, l):
""" Finds the maximum value measured by a function. """
return max(self, key=l)
def minBy(self, l):
""" Finds the minimum value measured by a function. """
return min(self, key=l)
def sortBy(self, l, reverse=False):
"""
Sorts this Array according to a function
defining the sorting criteria.
"""
return Array(sorted(self, key=l, reverse=reverse))
def sortBy_(self, l, reverse=False):
"""
Sorts this Array in place according to a function
defining the sorting criteria.
"""
super().sort(key=l, reverse=reverse)
return self
def argsortBy(self, l, reverse=False):
"""
Returns the indices that would sort this Array according to
provided sorting criteria.
"""
return self.enumerate.sortBy(lambda e: l(e[1]), reverse=reverse)[:, 0]
def sort(self, **kwargs):
""" Sorts this Array. """
return Array(sorted(self, **kwargs))
def sort_(self, **kwargs):
""" Sorts this Array in place. """
super().sort(**kwargs)
return self
def argsort(self, reverse=False):
""" Returns the indices that would sort this Array """
return self.enumerate.sortBy(lambda e: e[1], reverse=reverse)[:, 0]
def reverse(self):
""" Reverses this Array. """
return Array(reversed(self))
def reverse_(self):
""" Reverses this Array in-place. """
super().reverse()
return self
def copy(self):
return Array(super().copy())
def asType(self, t):
"""
Converts the elements in this Array to given type.
"""
return Array(map(t, self))
def join(self, delimiter=" "):
"""
Creates a string representation of this Array
with elements separated with `delimiter`
"""
return delimiter.join(str(v) for v in self)
def append(self, e):
"""
Appends an element to the end of this Array.
"""
super().append(self.__convert(e))
return self
def prepend(self, e):
"""
Prepends an element to the beginning of this Array.
"""
super().insert(0, self.__convert(e))
return self
def extend(self, e):
""" Extend this Array by appending elements from the iterable. """
super().extend(Array(e))
return self
def extendLeft(self, e):
""" Extends this Array by prepending elements from the iterable. """
self[0:0] = Array(e)
return self
def insert(self, i, e):
""" Inserts element(s) (in place) before given index/indices. """
if isinstance(e, Array.__baseIterables) and isinstance(
i, Array.__baseIterables
):
if len(e) != len(i):
raise ValueError(
"The lengths of the sequences must match, got {} and {}".format(
len(i), len(e)
)
)
for ii, ei in zip(i, e):
self.insert(ii, ei)
else:
super().insert(i, self.__convert(e))
return self
def fill(self, e):
""" Replaces all elements of this Array with given object. """
return Array([e] * self.size)
def fill_(self, e):
""" Replaces (in place) all elements of this Array with given object. """
self[:] = e
return self
def pad(self, n, value=0):
""" Pads this Array with value. """
try:
return self + [value] * n
except TypeError:
raise TypeError(
"{} object cannot be interpreted as an integer".format(type(n).__name__)
) from None
def padLeft(self, n, value=0):
""" Pads this Array with value. """
try:
return [value] * n + self
except TypeError:
raise TypeError(
"{} object cannot be interpreted as an integer".format(type(n).__name__)
) from None
def padTo(self, n, value=0):
"""
Pads this Array with value until length of n is reached.
"""
try:
return self + [value] * (n - self.size)
except TypeError:
raise TypeError(
"{} object cannot be interpreted as an integer".format(type(n).__name__)
) from None
def padLeftTo(self, n, value=0):
""" Pads this Array with value until length of n is reached. """
try:
return [value] * (n - self.size) + self
except TypeError:
raise TypeError(
"{} object cannot be interpreted as an integer".format(type(n).__name__)
) from None
def zip(self, *args):
return Array(zip(self, *args))
def unzip(self):
"""
'Unzips' nested Arrays by unpacking its elements into a zip.
>>> Array((1, "a"), (2, "b")).unzip()
Array(Array(1, 2), Array('a', 'b'))
"""
if not all(map(lambda e: isinstance(e, Iterable), self)):
raise TypeError("Array elements must support iteration")
return Array(zip(*self))
def zipAll(self, *args, default=None):
"""
Zips the sequences. If the iterables are
of uneven length, missing values are filled with default value.
"""
return Array(itertools.zip_longest(self, *args, fillvalue=default))
@property
def all(self):
""" Returns true if bool(e) is True for all elements in this Array. """
return all(self)
@property
def any(self):
""" Returns true if bool(e) is True for any element e in this Array. """
return any(self)
from __future__ import annotations
import atexit
import copy
import dataclasses
import enum
import json
import logging
import os
import sys
import time
import tkinter
from pathlib import Path
from tkinter import messagebox, ttk
from typing import Any, Callable, Iterator, List, Type, TypeVar, overload
import dacite
from pygments import styles, token
import porcupine
from porcupine import dirs, images, utils
_log = logging.getLogger(__name__)
class LineEnding(enum.Enum):
r"""
This :mod:`enum` has these members representing different ways to write
newline characters to files:
.. data:: CR
``\r``, aka "Mac line endings".
.. data:: LF
``\n``, aka "Linux/Unix line endings".
.. data:: CRLF
``\r\n``, aka "Windows line endings".
Python's :func:`open` function translates all of these to the string
``'\n'`` when reading files and uses a platform-specific default when
writing files.
There are 3 ways to represent line endings in Porcupine, and
different things want the line ending represented in different ways:
* The strings ``'\r'``, ``'\n'`` and ``'\r\n'``. For example,
:func:`open` line endings are specified like this.
* The strings ``'CR'``, ``'LF'`` and ``'CRLF'``. Line endings are
typically defined this way in configuration files, such as
`editorconfig <https://editorconfig.org/>`_ files.
* This enum. I recommend using this to avoid typos.
For example, ``LineEnding[some_string_from_user]`` (see below)
raises an error if the string is invalid.
Convert between this enum and the different kinds of strings like this:
* Enum to backslashy string: ``LineEnding.CRLF.value == '\r\n'``
* Enum to human readable string: ``LineEnding.CRLF.name == 'CRLF'``
* Backslashy string to enum: ``LineEnding('\r\n') == LineEnding.CRLF``
* Human readable string to enum: ``LineEnding['CRLF'] == LineEnding.CRLF``
Use ``LineEnding(os.linesep)`` to get the platform-specific default.
"""
CR = "\r"
LF = "\n"
CRLF = "\r\n"
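# A quick sketch of the conversions listed in the docstring above; this is plain
# :mod:`enum` behaviour, nothing Porcupine-specific:
#
#   LineEnding.CRLF.value == "\r\n"          # enum -> backslashy string
#   LineEnding.CRLF.name == "CRLF"           # enum -> human-readable string
#   LineEnding("\r\n") is LineEnding.CRLF    # backslashy string -> enum
#   LineEnding["CRLF"] is LineEnding.CRLF    # human-readable string -> enum
#   LineEnding(os.linesep)                   # platform-specific default
#   LineEnding["crlf"]                       # raises KeyError (name lookup is case-sensitive)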
def _type_check(type_: object, obj: object) -> object:
# dacite tricks needed for validating e.g. objects of type Optional[Path]
@dataclasses.dataclass
class ValueContainer:
__annotations__ = {"value": type_}
parsed = dacite.from_dict(ValueContainer, {"value": obj})
return parsed.value # type: ignore
class _Option:
def __init__(
self, name: str, default: object, type_: Any, converter: Callable[[Any], Any]
) -> None:
default = _type_check(type_, default)
self.name = name
self.value = default
self.default = default
self.type = type_
self.converter = converter
@dataclasses.dataclass
class _UnknownOption:
value: Any
call_converter: bool
def _default_converter(value: Any) -> Any:
return value
# includes the parent
def _get_children_recursively(parent: tkinter.Misc) -> Iterator[tkinter.Misc]:
yield parent
for child in parent.winfo_children():
yield from _get_children_recursively(child)
class Settings:
def __init__(self, change_event_widget: tkinter.Misc | None, change_event_format: str):
# '<<Foo:{}>>'
assert "{}" in change_event_format
assert change_event_format.startswith("<<")
assert change_event_format.endswith(">>")
self._options: dict[str, _Option] = {}
self._unknown_options: dict[str, _UnknownOption] = {}
self._change_event_widget = change_event_widget # None to notify all widgets
self._change_event_format = change_event_format
def add_option(
self,
option_name: str,
default: Any,
type_: Any | None = None,
*,
converter: Callable[[Any], Any] = _default_converter,
exist_ok: bool = False,
) -> None:
"""Add a custom option.
The type of *default* determines how :func:`set_` and :func:`get` behave.
For example, if *default* is a string, then
calling :func:`set_` with a value that isn't a string or
calling :func:`get` with the type set to something else than ``str``
is an error. You can also provide a custom type with the *type*
argument, e.g. ``add_option('foo', None, Optional[pathlib.Path])``.
If you are adding a global option (see :class:`Settings` for non-global
options), use only JSON-safe types. Let me know if this limitation is
too annoying.
If you are **not** adding a global option, you
can also specify a *converter* that takes the value in the
configuration file as an argument and returns an instance of *type*.
For example, ``pygments_lexer`` is set to a string like
"pygments.lexers.Foo" in the config file, even though it appears as a
class in the settings object. That's implemented similarly to this::
def import_lexer_class(name: str) -> something:
...
filetab.settings.add_option(
'pygments_lexer',
pygments.lexers.TextLexer,
...
converter=import_lexer_class)
By default, the converter returns its argument unchanged.
Do not use a lambda function as the converter,
because the settings must be picklable.
If an option with the same name exists already, an error is raised by
default, but if ``exist_ok=True`` is given, then adding the same
option again is allowed. When this happens, an error is raised if
*default*, *type* or *converter* doesn't match what was passed in when
the option was added for the first time.
"""
if type_ is None:
type_ = type(default)
assert type_ is not None
if option_name in self._options:
if not exist_ok:
raise RuntimeError(f"there's already an option named {option_name!r}")
old_option = self._options[option_name]
assert default == old_option.default
assert type_ == old_option.type
assert converter == old_option.converter
return
option = _Option(option_name, default, type_, converter)
self._options[option_name] = option
try:
unknown = self._unknown_options.pop(option_name)
except KeyError:
pass # nothing relevant in config file, use default
else:
# Error handling here because it's not possible to fail early when
# an option goes to _unknown_options, and bad data in a config file
# shouldn't cause add_option() and the rest of a plugin's setup()
# to fail.
try:
if unknown.call_converter:
self.set(option_name, converter(unknown.value))
else:
self.set(option_name, unknown.value)
except Exception:
# can be an error from converter
_log.exception(f"setting {option_name!r} to {unknown.value!r} failed")
def set(
self,
option_name: str,
value: object,
*,
from_config: bool = False,
call_converter: bool | None = None,
) -> None:
"""Set the value of an opiton.
Set ``from_config=True`` if the value comes from a configuration
file (see :func:`add_option`). That does two things:
* The converter given to :func:`add_option` will be used.
* If the option hasn't been added with :func:`add_option` yet, then
the value won't be set immediately, but instead it gets set
later when the option is added.
You can specify ``call_converter`` to force the converter to be or
to not be called.
This function is not named ``set`` to avoid conflicting with the
built-in :class:`set` class.
"""
# ...even though this method isn't named 'set_'. But the docstring is
# used in settings.rst to document a global "function".
if call_converter is None:
call_converter = from_config
if option_name not in self._options and from_config:
self._unknown_options[option_name] = _UnknownOption(value, call_converter)
return
option = self._options[option_name]
if call_converter:
value = option.converter(value)
value = _type_check(option.type, value)
# don't create change events when nothing changes (helps avoid infinite recursion)
if option.value == value:
return
option.value = value
event_name = self._change_event_format.format(option_name)
_log.debug(f"{option_name} was set to {value!r}, generating {event_name} events")
if self._change_event_widget is None:
try:
main_window = porcupine.get_main_window()
except RuntimeError as e:
# on porcupine startup, plugin disable list needs to be set before main window exists
if option_name != "disabled_plugins":
raise e
else:
for widget in _get_children_recursively(main_window):
widget.event_generate(event_name)
else:
self._change_event_widget.event_generate(event_name)
# I don't like how this requires overloads for every type
# https://stackoverflow.com/q/61471700
# fmt: off
@overload
def get(self, option_name: str, type_: Type[Path]) -> Path: ...
@overload
def get(self, option_name: str, type_: Type[LineEnding]) -> LineEnding: ...
@overload
def get(self, option_name: str, type_: Type[str]) -> str: ...
@overload
def get(self, option_name: str, type_: Type[bool]) -> bool: ...
@overload
def get(self, option_name: str, type_: Type[int]) -> int: ...
@overload
def get(self, option_name: str, type_: object) -> Any: ...
# fmt: on
def get(self, option_name: str, type_: Any) -> Any:
"""
Return the current value of an option.
*type_* should be e.g. ``str`` or ``int`` depending on what type the option is.
You can also specify ``object`` to allow any type.
This method works correctly for :class:`str` and :class:`int`,
but sometimes it returns Any because mypy sucks::
foo = settings.get('something', str)
reveal_type(foo) # str
from pathlib import Path
shitty_bar = settings.get('something', Optional[Path])
reveal_type(shitty_bar) # Any
Use a type annotation to work around this (and make sure to write the
same type two times)::
good_bar: Path | None = settings.get('something', Optional[Path])
reveal_type(good_bar) # Optional[Path]
Before Python 3.10, you can't use the new ``|`` syntax as an argument to ``settings.get()``,
even though it otherwise works with ``from __future__ import annotations``.
The same goes for built-in generics,
such as ``list[str]`` with lower-case ``list``.
Options of mutable types are returned as copies, so things like
``settings.get('something', List[str])`` always return a new list.
If you want to change a setting like that, you need to first get a copy
of the current value, then modify the copy, and finally :func:`set_` it
back. This is an easy | |
(T_max, )
# compute probs
probs = torch.exp(gaussians.log_prob(frames_idx)) # (B, L_max, T_max)
# apply mask to set probs out of sequence length to 0
probs = probs.masked_fill(mask.unsqueeze(2), 0) # (B, L_max, T_max)
# compute weights
weights = probs / (torch.sum(probs, dim=1, keepdim=True) + 1e-20) # (B, L_max, T_max)
# compute upsampled embedding
x_upsamp = torch.sum(x.unsqueeze(-1) * weights.unsqueeze(2), dim=1) # (B, input_dim, T_max)
x_upsamp = x_upsamp.permute(0, 2, 1) # (B, T_max, input_dim)
return x_upsamp, weights
class FrameDecoder(nn.Module):
''' Frame Decoder Module:
- Positional Encoding
- 4x FFT Blocks with FiLM conditioning
- Linear projection
'''
def __init__(self, hparams):
super(FrameDecoder, self).__init__()
nb_mels = hparams.n_mel_channels
embed_dim = hparams.phoneme_encoder['hidden_embed_dim']
hparams.frame_decoder['hidden_embed_dim'] = embed_dim
Tuple = namedtuple('Tuple', hparams.frame_decoder)
hparams = Tuple(**hparams.frame_decoder)
# positional encoding
self.pos_enc = PositionalEncoding(embed_dim)
# FFT blocks
blocks = []
for _ in range(hparams.nb_blocks):
blocks.append(FFTBlock(hparams))
self.blocks = nn.ModuleList(blocks)
# linear projection for mel-spec prediction
self.projection = LinearNorm(embed_dim, nb_mels, w_init_gain='linear')
def forward(self, x, film_params, output_lengths):
''' Forward function of the Frame Decoder:
x = (B, T_max, hidden_embed_dim)
film_params = (B, nb_blocks, nb_film_params)
output_lengths = (B, )
'''
# compute positional encoding
pos = self.pos_enc(output_lengths.unsqueeze(1)) # (B, T_max, hidden_embed_dim)
# create mask
mask = ~get_mask_from_lengths(output_lengths) # (B, T_max)
# add and mask
x = x + pos # (B, T_max, hidden_embed_dim)
x = x.masked_fill(mask.unsqueeze(2), 0) # (B, T_max, hidden_embed_dim)
# pass through FFT blocks
for idx, block in enumerate(self.blocks):
x = block(x, film_params[:, idx, :], mask) # (B, T_max, hidden_embed_dim)
# predict mel-spec frames and mask tensor
mel_specs = self.projection(x) # (B, T_max, nb_mels)
mel_specs = mel_specs.masked_fill(mask.unsqueeze(2), 0) # (B, T_max, nb_mels)
mel_specs = mel_specs.transpose(1, 2) # (B, nb_mels, T_max)
return mel_specs
class DaftExprt(nn.Module):
''' DaftExprt model from <NAME>, <NAME>, <NAME>, <NAME>
"DaftExprt: Robust Prosody Transfer Across Speakers for Expressive Speech Synthesis"
arXiv:2108.02271, 2021.
'''
def __init__(self, hparams):
super(DaftExprt, self).__init__()
self.prosody_encoder = ProsodyEncoder(hparams)
self.speaker_classifier = SpeakerClassifier(hparams)
self.phoneme_encoder = PhonemeEncoder(hparams)
self.prosody_predictor = LocalProsodyPredictor(hparams)
self.gaussian_upsampling = GaussianUpsamplingModule(hparams)
self.frame_decoder = FrameDecoder(hparams)
def parse_batch(self, gpu, batch):
''' Parse input batch
'''
# extract tensors
symbols, durations_float, durations_int, symbols_energy, symbols_pitch, input_lengths, \
frames_energy, frames_pitch, mel_specs, output_lengths, speaker_ids, feature_dirs, feature_files = batch
# transfer tensors to specified GPU
symbols = symbols.cuda(gpu, non_blocking=True).long() # (B, L_max)
durations_float = durations_float.cuda(gpu, non_blocking=True).float() # (B, L_max)
durations_int = durations_int.cuda(gpu, non_blocking=True).long() # (B, L_max)
symbols_energy = symbols_energy.cuda(gpu, non_blocking=True).float() # (B, L_max)
symbols_pitch = symbols_pitch.cuda(gpu, non_blocking=True).float() # (B, L_max)
input_lengths = input_lengths.cuda(gpu, non_blocking=True).long() # (B, )
frames_energy = frames_energy.cuda(gpu, non_blocking=True).float() # (B, T_max)
frames_pitch = frames_pitch.cuda(gpu, non_blocking=True).float() # (B, T_max)
mel_specs = mel_specs.cuda(gpu, non_blocking=True).float() # (B, n_mel_channels, T_max)
output_lengths = output_lengths.cuda(gpu, non_blocking=True).long() # (B, )
speaker_ids = speaker_ids.cuda(gpu, non_blocking=True).long() # (B, )
# create inputs and targets
inputs = (symbols, durations_float, durations_int, symbols_energy, symbols_pitch, input_lengths,
frames_energy, frames_pitch, mel_specs, output_lengths, speaker_ids)
targets = (durations_float, symbols_energy, symbols_pitch, mel_specs, speaker_ids)
file_ids = (feature_dirs, feature_files)
return inputs, targets, file_ids
def forward(self, inputs):
''' Forward function of DaftExprt
'''
# extract inputs
symbols, durations_float, durations_int, symbols_energy, symbols_pitch, input_lengths, \
frames_energy, frames_pitch, mel_specs, output_lengths, speaker_ids = inputs
input_lengths, output_lengths = input_lengths.detach(), output_lengths.detach()
# extract FiLM parameters from reference and speaker ID
# (B, nb_blocks, nb_film_params)
prosody_embed, encoder_film, prosody_pred_film, decoder_film = self.prosody_encoder(frames_energy, frames_pitch, mel_specs, speaker_ids, output_lengths)
# pass through speaker classifier
spk_preds = self.speaker_classifier(prosody_embed) # (B, nb_speakers)
# embed phoneme symbols, add positional encoding and encode input sequence
enc_outputs = self.phoneme_encoder(symbols, encoder_film, input_lengths) # (B, L_max, hidden_embed_dim)
# predict prosody parameters
duration_preds, energy_preds, pitch_preds = self.prosody_predictor(enc_outputs, prosody_pred_film, input_lengths) # (B, L_max)
# perform Gaussian upsampling on symbols sequence
# use prosody ground-truth values for training
# symbols_upsamp = (B, T_max, hidden_embed_dim)
# weights = (B, L_max, T_max)
symbols_upsamp, weights = self.gaussian_upsampling(enc_outputs, durations_float, durations_int, symbols_energy, symbols_pitch, input_lengths)
# decode output sequence and predict mel-specs
mel_spec_preds = self.frame_decoder(symbols_upsamp, decoder_film, output_lengths) # (B, nb_mels, T_max)
# parse outputs
speaker_preds = spk_preds
film_params = [self.prosody_encoder.post_multipliers, encoder_film, prosody_pred_film, decoder_film]
encoder_preds = [duration_preds, energy_preds, pitch_preds, input_lengths]
decoder_preds = [mel_spec_preds, output_lengths]
alignments = weights
return speaker_preds, film_params, encoder_preds, decoder_preds, alignments
def get_int_durations(self, duration_preds, hparams):
''' Convert float durations to integer frame durations
'''
# min float duration to have at least one mel-spec frame attributed to the symbol
fft_length = hparams.filter_length / hparams.sampling_rate
dur_min = fft_length / 2
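# e.g. (illustrative values, not from the original config): with filter_length=1024 and
# sampling_rate=22050, fft_length ~= 0.0464 s, so dur_min ~= 0.0232 s (half an FFT window)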
# set duration under min duration to 0.
duration_preds[duration_preds < dur_min] = 0. # (B, L_max)
# convert to int durations for each element in the batch
durations_int = torch.LongTensor(duration_preds.size(0), duration_preds.size(1)).zero_() # (B, L_max)
for line_idx in range(duration_preds.size(0)):
end_prev, symbols_idx, durations_float = 0., [], []
for symbol_id in range(duration_preds.size(1)):
symb_dur = duration_preds[line_idx, symbol_id].item()
if symb_dur != 0.: # ignore 0 durations
symbols_idx.append(symbol_id)
durations_float.append([end_prev, end_prev + symb_dur])
end_prev += symb_dur
int_durs = torch.LongTensor(duration_to_integer(durations_float, hparams)) # (L_max, )
durations_int[line_idx, symbols_idx] = int_durs
# put on GPU
durations_int = durations_int.cuda(duration_preds.device, non_blocking=True).long() # (B, L_max)
return duration_preds, durations_int
def pitch_shift(self, pitch_preds, pitch_factors, hparams, speaker_ids):
''' Pitch shift pitch predictions
Pitch factors are assumed to be in Hz
'''
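# In short (mirrors the steps below): with per-speaker log-F0 stats (mu, sigma),
# f0 = exp(sigma * p + mu); f0 += delta_hz; p' = (log(f0) - mu) / sigma.
# Unvoiced positions (p == 0) are restored to 0 afterwards.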
# keep track of unvoiced idx
zero_idxs = (pitch_preds == 0.).nonzero() # (N, 2)
# pitch factors are F0 shifts in Hz
# pitch_factors = [[+50, -20, ...], ..., [+30, -10, ...]]
for line_idx in range(pitch_preds.size(0)):
speaker_id = speaker_ids[line_idx].item()
pitch_mean = hparams.stats[f'spk {speaker_id}']['pitch']['mean']
pitch_std = hparams.stats[f'spk {speaker_id}']['pitch']['std']
pitch_preds[line_idx] = torch.exp(pitch_std * pitch_preds[line_idx] + pitch_mean) # (L_max)
# perform pitch shift in Hz domain
pitch_preds[line_idx] += pitch_factors[line_idx] # (L_max)
# go back to log and re-normalize using pitch training stats
pitch_preds[line_idx] = (torch.log(pitch_preds[line_idx]) - pitch_mean) / pitch_std # (L_max)
# set unvoiced idx to zero
pitch_preds[zero_idxs[:, 0], zero_idxs[:, 1]] = 0.
return pitch_preds
def pitch_multiply(self, pitch_preds, pitch_factors):
''' Apply multiply transform to pitch prediction with respect to the mean
Effects of factor values on the pitch:
]0, +inf[ amplify
0 no effect
]-1, 0[ de-amplify
-1 flatten
]-2, -1[ invert de-amplify
-2 invert
]-inf, -2[ invert amplify
'''
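# Worked example (illustrative): for a voiced value at mean + d, a factor of 1 yields mean + 2d
# (amplified), a factor of -1 yields mean (flattened), and a factor of -2 yields mean - d (inverted),
# since the update below computes mean + (1 + factor) * d.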
# multiply pitch for each element in the batch
for line_idx in range(pitch_preds.size(0)):
# keep track of voiced and unvoiced idx
non_zero_idxs = pitch_preds[line_idx].nonzero() # (M, )
zero_idxs = (pitch_preds[line_idx] == 0.).nonzero() # (N, )
# compute mean of voiced values
mean_pitch = torch.mean(pitch_preds[line_idx, non_zero_idxs])
# compute deviation to the mean for each pitch prediction
pitch_deviation = pitch_preds[line_idx] - mean_pitch # (L_max)
# multiply factors to pitch deviation
pitch_deviation *= pitch_factors[line_idx] # (L_max)
# add deviation to pitch predictions
pitch_preds[line_idx] += pitch_deviation # (L_max)
# reset unvoiced values to 0
pitch_preds[line_idx, zero_idxs] = 0.
return pitch_preds
def inference(self, inputs, pitch_transform, hparams):
''' Inference function of DaftExprt
'''
# symbols = (B, L_max)
# dur_factors = (B, L_max)
# energy_factors = (B, L_max)
# pitch_factors = (B, L_max)
# input_lengths = (B, )
# energy_refs = (B, T_max)
# pitch_refs = (B, T_max)
# mel_spec_refs = (B, n_mel_channels, T_max)
# ref_lengths = (B, )
# speaker_ids = (B, )
symbols, dur_factors, energy_factors, pitch_factors, input_lengths, \
energy_refs, pitch_refs, mel_spec_refs, ref_lengths, speaker_ids = inputs
# extract FiLM parameters from reference and speaker ID
# (B, nb_blocks, nb_film_params)
_, encoder_film, prosody_pred_film, decoder_film = self.prosody_encoder(energy_refs, pitch_refs, mel_spec_refs, speaker_ids, ref_lengths)
# embed phoneme symbols, add positional encoding and encode input sequence
enc_outputs = self.phoneme_encoder(symbols, encoder_film, input_lengths) # (B, L_max, hidden_embed_dim)
# predict prosody parameters
duration_preds, energy_preds, pitch_preds = self.prosody_predictor(enc_outputs, prosody_pred_film, input_lengths) # (B, L_max)
# multiply durations by duration factors and extract int durations
duration_preds *= dur_factors # (B, L_max)
duration_preds, durations_int = self.get_int_durations(duration_preds, hparams) # (B, L_max)
# add energy factors to energies
# set 0 energy for symbols with 0 duration
energy_preds *= energy_factors # (B, L_max)
energy_preds[durations_int == 0] = 0. # (B, L_max)
# set unvoiced pitch for symbols with 0 duration
# apply pitch factors using specified transformation
pitch_preds[durations_int == 0] = 0.
if pitch_transform == 'add':
pitch_preds = self.pitch_shift(pitch_preds, pitch_factors, hparams, speaker_ids) # (B, L_max)
elif pitch_transform == 'multiply':
pitch_preds = self.pitch_multiply(pitch_preds, pitch_factors) # (B, L_max)
"""client.py
The client script continuously measures write performance, submitting the results to a server, as well as \
logging to client_{pid}.log. The client will run the specified run time, or defaults to a random time \
that will write at least 2 files if not provided. Writing is performed in chunks to a temporary file. \
The chunk size can be specified, or will default to a random size between 10MB and 20MB. The file will be \
closed when the number of chunks written exceeds the maximum file size, and a new temporary file will be \
opened, repeating the process. The maximum file size will default to a random size between the chunk size \
and 100 times the chunk size if not specified.
Usage:
python client.py [option] host port
Options and arguments:
host : server host address
port : server port
-t : run time, in seconds
-c : size of each chunk to write, in MB
-m : maximum file size, approximately, in MB
-d : delete the files as they are closed
"""
from __future__ import print_function
import sys
import getopt
import os.path
import time
import socket
import threading
import tempfile
import random
import traceback
import subprocess
import multiprocessing
import Queue
import re
class EventThread(threading.Thread):
def __init__(self, target = None, name = None, args = (), kwargs = {}):
super(EventThread, self).__init__(target = target, name = name, args = args, kwargs = kwargs)
self.finished = threading.Event()
def cancel(self):
self.finished.set()
def run(self):
super(EventThread, self).run()
self.finished.set()
class RepeatingTimer(EventThread):
def __init__(self, interval, target, args = (), kwargs = {}):
super(RepeatingTimer, self).__init__()
self.target = target
self.args = args
self.kwargs = kwargs
self.interval = interval
def run(self):
try:
while True:
self.finished.wait(self.interval)
if (self.finished.is_set()): break
self.target(*self.args, **self.kwargs)
finally:
del self.target, self.args, self.kwargs
class Client(object):
"""Client(server_address, chunk, maxsize, minfiles = 0, delete = True, log = "client_{pid}.log")
Client writes to a temporary file in chunk increments. After the number of
chunks written exceeds the maxsize, the file is closed and the performance
data for the file write is logged and a DATA message sent to a server. A
new temporary file is created and the process repeats.
Client sends a START message before beginning the writing process, and sends
an ALIVE message every 5 seconds. STATUS messages with process information
about the data collection process are sent every 10 seconds.
Client implements the context manager interface and expects the __enter__()
method to be called to start the process. The __exit__() method is expected
to be called to shut down and clean up Client. Shut down will block until
minfiles have been successfully written or too many write errors in a row
occur.
The chunk data is expected to be a multiprocessing.Array().
"""
def __init__(self, server_address, chunk, maxsize, minfiles = 0, delete = True, log = None):
self.server_address = server_address
self.chunksize = len(chunk)
self.maxsize = maxsize
self.log = "client_{}.log".format(os.getpid()) if (log is None) else log
self.finish = multiprocessing.Event()
self.loggerq = multiprocessing.Queue() # items must be (cmds, message), cmds = '[SWQ]+'
self.loggerthread = threading.Thread(target = self.logger)
self.heartbeattimer = RepeatingTimer(5.0, self.heartbeat)
self.statustimer = EventThread(args = (10.0,), target = self.status)
self.workerthread = multiprocessing.Process(target = self.worker, args = (chunk, maxsize, minfiles, delete, self.finish, self.loggerq))
def logger_write(self, file, message):
message = ": ".join((time.strftime("%Y-%m-%d %H:%M:%S"), message))
try:
file.write(message)
file.write("\n")
except IOError:
print(message)
def logger_sendall(self, file, message):
message = ": ".join((str(os.getpid()), message))
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.connect(self.server_address)
try:
sock.sendall(message + '\n')
finally:
sock.shutdown(socket.SHUT_RDWR)
except socket.error as e:
self.logger_write(file, ": ".join(("ERROR", str(e))))
finally:
sock.close()
sock = None
def logger(self):
# logger runs in a thread, consuming messages from the loggerq
# S to send to server, W to write to log, Q to quit the thread
with open(self.log, "a") as file:
cmd = None
while (cmd != 'Q'):
cmds, message = self.loggerq.get()
for cmd in cmds:
if (cmd == 'S'):
self.logger_sendall(file, message)
elif (cmd == 'W'):
self.logger_write(file, message)
def heartbeat(self):
message = "ALIVE"
self.loggerq.put(('S', message))
# sys.platform may not be granular enough depending on environment expectations
# ubuntu
if (sys.platform == "linux2"):
def status_popen(self, interval):
# ubuntu top does not put a newline at the end of the last data line
# leaving stdout.readline() hanging until the next interval update
# by also monitoring top's own pid, we force the first data line to have a newline
# assume top's pid is higher than workerthread.pid and use -PID ordering
topcmd = ["top", "-p", str(self.workerthread.pid), "-b", "-d", str(interval), "-p", "0", "-o", "-PID"]
return subprocess.Popen(topcmd, stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
# process stdout lines until data is generated
def status_processor(self, line):
ex = re.compile(r"^top -\s")
while (not ex.match(line)): line = yield
ex = re.compile(r"^\s*PID\s+USER\b")
while (not ex.match(line)): line = yield
keys = line.rstrip().split()
line = yield
values = line.rstrip().split(None, len(keys) - 1) # split up to command
yield {k: v for k, v in zip(keys, values)}
# macOS
elif (sys.platform == "darwin"):
def status_popen(self, interval):
topcmd = ["top", "-pid", str(self.workerthread.pid), "-l", "0", "-s", str(int(round(interval))), "-i", "1"]
return subprocess.Popen(topcmd, stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
# process stdout lines until data is generated
def status_processor(self, line):
ex = re.compile(r"^Processes:\s")
while (not ex.match(line)): line = yield
ex = re.compile(r"^\s*PID\s+COMMAND\b")
while (not ex.match(line)): line = yield
keys = line.rstrip().split()
line = yield
values = line.rstrip().split(None, 1) # split up to command
values = [values[0]] + values[1].rsplit(None, len(keys) - 2) # right split back to command
yield {k: v for k, v in zip(keys, values)}
def status(self, interval):
thread = threading.current_thread()
# send empty STATUS messages every interval seconds until
# the workerthread starts and we can start top
while True:
thread.finished.wait(interval)
if (thread.finished.is_set()): return
if (hasattr(self, "status_popen") and (self.workerthread.is_alive())):
# start top, with data output every interval seconds
topp = self.status_popen(interval)
break
message = "STATUS"
self.loggerq.put(('S', message))
# start a separate thread that waits for stdout lines so we can check for the finished event
def readlines():
line = topp.stdout.readline()
while (line):
stdoutq.put(line)
line = topp.stdout.readline()
stdoutq = Queue.Queue()
readlinesthread = threading.Thread(target = readlines)
readlinesthread.start()
while (not thread.finished.is_set()):
try:
# pass stdout lines to the platform specific processor
# if stdout stops producing lines for too long then start over with a new processor
line = stdoutq.get(True, 0.5)
processor = self.status_processor(line)
data = processor.next()
while ((data is None) and (not thread.finished.is_set())):
line = stdoutq.get(True, 0.5)
data = processor.send(line)
except Queue.Empty:
pass
else:
# told to finish
if (data is None): break
# send the data to the server
message = ": ".join(("STATUS", repr(data)))
self.loggerq.put(('S', message))
topp.terminate()
topp.wait()
readlinesthread.join()
@staticmethod
def worker(chunk, maxsize, minfiles, delete, finish, loggerq):
"""worker(chunk, maxsize, minfiles, delete, finish, loggerq)
Continuously writes chunks of data to temporary files, measuring the
time to perform os.write()/os.fsync() calls. The combined time for all
chunk writes to a single file is logged and sent to a server as a DATA
message.
Arguments:
chunk : A multiprocessing.Array() of the chunk data.
maxsize : Integer. Rollover file when at least maxsize bytes are written.
minfiles : Integer. Keep working until at least minfiles are written.
delete : Boolean. Indicates whether to delete files when closed.
finish : A multiprocessing.Event() signaling the worker to end.
loggerq : A multiprocessing.Queue() to place log messages.
"""
global len
lap = time.time
write = os.write # use os.write()/os.fsync() to limit the layers getting involved
fsync = os.fsync
finish = finish
chunk = chunk
maxsize = maxsize
minfiles = minfiles
files = 0
errored = 0
# stop writing when finish event is set
# and we reached our minfiles or errored too many times in a row
while (((files < minfiles) and (errored < minfiles)) or (not finish.is_set())):
with chunk.get_lock():
data = chunk.get_obj()
datasize = len(data)
# keep track of time markers around os.write()/os.fsync()
laps = [None] * ((maxsize + (datasize - 1)) // datasize)
err = None
size = 0
i = 0
with tempfile.NamedTemporaryFile(delete = delete) as tmp:
fileno = tmp.fileno()
try:
while ((size < maxsize) and ((files < minfiles) or (not finish.is_set()))):
# if os.write() returns a size lower | |
= doc.createTextNode("%s" %filteredErrata)
paragraph.appendChild(pText)
#rowValidityCell.appendChild(paragraph)
errataCell.appendChild(paragraph)
tableRow.appendChild(errataCell)
table.appendChild(tableRow)
oTableContainer.appendChild(table)
oTableRow.appendChild(oTableContainer)
oTable.appendChild(oTableRow)
#Add some blank space before any tables
tableSpacer = doc.createElement("div")
tableSpacer.setAttribute("class", "vBlankSpace")
tableSpacer.appendChild(oTable)
masterTableDivL = doc.createElement("div")
masterTableDivL.setAttribute("class", "vAlignment")
masterTableDivL.appendChild(tableSpacer)
masterTableBodyRow.appendChild(masterTableDivL)
masterTableBody.appendChild(masterTableBodyRow)
masterTable.appendChild(masterTableHeader)
masterTable.appendChild(masterTableBody)
body.appendChild(masterTable)
html.appendChild(head)
html.appendChild(body)
doc.appendChild(html)
fileStream = doc.toprettyxml(indent = " ")
logRoot = expanduser("~")
logDir = os.path.join(logRoot, "Graphyne")
if not os.path.exists(logDir):
os.makedirs(logDir)
resultFileLoc = os.path.join(logDir, fileName)
fileObject = open(resultFileLoc, "w", encoding="utf-8")
fileObject.write(fileStream)
fileObject.close()
def smokeTestSet(persistence, lLevel, css, profileName, persistenceArg = None, persistenceType = None, createTestDatabase = False, repoLocations = [[]], validate = False, serverURL = None, unitTests = True, callbackTestServerURL = None):
'''
repoLocations = a list of all of the filesystem location that that compose the repository.
useDefaultSchema = If True, then load the 'default schema' of Graphyne
persistenceType = The type of database used by the persistence engine. This is used to determine which flavor of SQL syntax to use.
Enumeration of Possible values:
Default to None, which is no persistence
"sqlite" - Sqlite3
"mssql" - Miscrosoft SQL Server
"hana" - SAP Hana
persistenceArg = the Module/class supplied to host the entityRepository and LinkRepository. If default, then use the Graphyne.DatabaseDrivers.NonPersistent module.
Enumeration of possible values:
None - May only be used in conjunction with "sqlite" as persistenceType and will throw an InconsistentPersistenceArchitecture otherwise
"none" - no persistence. May only be used in conjunction with "sqlite" as persistenceType and will throw an InconsistentPersistenceArchitecture otherwise
"memory" - Use SQLite in in-memory mode (connection = ":memory:")
"<valid filename with .sqlite as extension>" - Use SQLite, with that file as the database
"<filename with .sqlite as extension, but no file>" - Use SQLite and create that file to use as the DB file
"<anything else>" - Presume that it is a pyodbc connection string and throw a InconsistentPersistenceArchitecture exception if the dbtype is "sqlite".
createTestDatabase = a flag for creating regression test data. This flag is only to be used for regression testing the graph and even then, only if the test
database does not already exist.
*If persistenceType is None (no persistence), then this is ignored and won't throw any InconsistentPersistenceArchitecture exceptions
'''
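# Example call (a sketch; the persistence module and repo path are assumptions):
#     smokeTestSet(persistence=Graph.DatabaseDrivers.NonPersistent, lLevel=Graph.logLevel.WARNING,
#                  css=css, profileName="sqlite-memory", persistenceArg="memory",
#                  persistenceType="sqlite", repoLocations=[["Graphyne", "TestRepository"]])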
global rmlEngine
print(("\nStarting Graphyne Smoke Test: %s") %(persistenceType))
print(("...%s: Engine Start") %(persistenceType))
#Graph.startLogger(lLevel, "utf-8", True, persistenceType)
#Smoketest specific Engine config
#Some broadcaster Configuration for testing purposes. ActionEngine and StimulusEngine are in Engine by default, but Smoketest needs RegressionTestBroadcaster
"""
rmlEngine.plugins['RegressionTestBroadcaster'] = {'name' : 'RegressionTestBroadcaster',
'pluginType' : 'EngineService',
'Module' : 'Broadcasters.RegressionTestBroadcaster',
'PluginParemeters': {'heatbeatTick' : 1, 'broadcasterID' : 'test', 'port' : 8081}
}
rmlEngine.broadcasters['test'] = {'memes' : [ 'TestPackageStimulusEngine.SimpleStimuli.Descriptor_Trailer',
'TestPackageStimulusEngine.SimpleStimuli.Descriptor_HelloPage',
'TestPackageStimulusEngine.SimpleStimuli.ADescriptor_HelloPage',
'TestPackageStimulusEngine.SimpleStimuli.Descriptor_HelloPage2',
'TestPackageStimulusEngine.SimpleStimuli.Descriptor_HelloPage3',
'TestPackageStimulusEngine.SimpleStimuli.Descriptor_MultiPage',
'TestPackageStimulusEngine.SimpleStimuli.Descriptor_AnotherPage',
'TestPackageStimulusEngine.SimpleStimuli.ADescriptor_AnotherPage']}
"""
#Set up the persistence of rmlEngine. It defaults to no persistence
rmlEngine.setPersistence(persistenceArg, persistenceType)
#Fill out rmlEngine.rtParams
rmlEngine.setRuntimeParam("consoleLogLevel", lLevel)
rmlEngine.setRuntimeParam("responseQueue", responseQueue)
for repo in repoLocations:
#installFilePath = os.path.dirname(__file__)
userRoot = expanduser("~")
repoLocationSnippet = os.path.join(*repo)
repoLocation = os.path.join(userRoot, repoLocationSnippet)
rmlEngine.addRepo(repoLocation)
#Start a graphyne engine in the current process, for the direct control tests
rmlEngine.validateOnLoad = validate
rmlEngine.start()
#start server instance in the
#subprocess.run('server.py')
time.sleep(300.0)
print("...Engine Started")
startTime = time.time()
resultSet = []
'''
if serverURL is not None:
print("Running Cold Start REST API tests")
resultSetAPI = runAPITests("API (Cold Start)", serverURL, persistenceArg, persistenceType, repoLocations, validate)
resultSet.extend(resultSetAPI)
print("Finished REST API tests")
'''
if unitTests == True:
print("Starting Unit Tests")
print("Starting Cold Start Unit Tests")
resultSetUitTests = runTests("Cold Start")
resultSet.extend(resultSetUitTests)
print("Finished Cold Start")
print("Starting Reatart Tests")
resultSetRestartTests = runRestartTests()
resultSet.extend(resultSetRestartTests)
print("Finished Reatart Tests")
#Is it really nescessary to restart the engine a third time, after the initial boot up?
#rmlEngine.shutdown()
#rmlEngine = Engine.Engine()
if serverURL is not None:
print("Running Warm Start REST API tests")
resultSetAPI = runAPITests("API (Warm Start)", serverURL, persistenceArg, persistenceType, repoLocations, validate, callbackTestServerURL)
resultSet.extend(resultSetAPI)
print("Finished REST API tests")
print("Starting Warm Start Unit Tests")
resultSetUitTestsW = runTests("Warm Start")
resultSet.extend(resultSetUitTestsW)
print("Finished Warm Start")
endTime = time.time()
validationTime = endTime - startTime
testReport = {"resultSet" : resultSet, "validationTime" : validationTime, "persistence" : persistence.__name__, "profileName" : profileName}
#publishResults(resultSet, validationTime, css)
print(("...%s: Test run finished. Waiting 30 seconds for log thread to catch up before starting shutdown") %(persistence.__name__))
time.sleep(30.0)
print(("...%s: Engine Stop") %(persistence.__name__))
#Graph.stopLogger()
rmlEngine.shutdown()
print(("...%s: Engine Stopped") %(persistence.__name__))
return testReport
if __name__ == "__main__":
print("\nStarting Intentsity Smoke Test")
print("...Engine Start")
css = Fileutils.defaultCSS()
parser = argparse.ArgumentParser(description="Intentsity Smoke Test")
parser.add_argument("-c", "--dbtcon", type=str, help="|String| The database connection string (if a relational DB) or filename (if SQLite).\n 'none' - no persistence. This is the default value\n 'memory' - Use SQLite in in-memory mode (connection = ':memory:') None persistence defaults to memory id SQlite is used\n '<valid filename>' - Use SQLite, with that file as the database\n <filename with .sqlite as extension, but no file> - Use SQLite and create that file to use as the DB file\n <anything else> - Presume that it is a pyodbc connection string")
parser.add_argument("-d", "--dbtype", type=str, help="|String| The database type to be used. If --dbtype is a relational database, it will also determine which flavor of SQL syntax to use.\n Possible options are 'none', 'sqlite', 'mssql' and 'hana'. \n Default is 'none'")
parser.add_argument("-i", "--library", type=str, help="|String| Run the unit tests or skip them. The full suite takes about 4 hours to run, so you may want to skip them if you are only testing the rest API.\n Options are (in increasing order of verbosity) 'warning', 'info' and 'debug'. \n Default is 'warning'")
parser.add_argument("-l", "--logl", type=str, help="|String| Graphyne's log level during the validation run. \n Options are (in increasing order of verbosity) 'warning', 'info' and 'debug'. \n Default is 'warning'")
parser.add_argument("-r", "--repo", nargs='*', type=str, help="|String| One or more repository folders to be tested. At least two required (Graphyne test repo and Intentsity Test Repo filesystem locations)")
parser.add_argument("-s", "--server", type=str, help="|String| Whether to test the server REST api, or skip it. 'y' or 'n'. 'y' == Yes, test the server. 'n' == No, skip it. If no, then the url parameter is ignored. defaults to y")
parser.add_argument("-u", "--url", type=str, help="|String| URL for exterlally launched server. If none is given, then a server will be started in a subprocess, wuth the url localhost:8080. Giving a specific url allows you to start the server in a seperate debug session and debug the server side seperately. If you are simply running unit tests, then you can save yourself the complexity and let smoketest start the serrver on its own.")
parser.add_argument("-t", "--callback", type=str, help="|String| URL callback test server. If none is given, then a server will be started in a subprocess, wuth the url localhost:8090. Giving a specific url allows you to start the server in a seperate debug session and debug the server side seperately. If you are simply running unit tests, then you can save yourself the complexity and let smoketest start the serrver on its own.")
parser.add_argument("-v", "--val", type=str, help="|String| Sets validation of the repo. 'y' or 'n', defaults to n")
parser.add_argument("-x", "--resetdb", type=str, help="|String| Reset the esisting persistence DB This defaults to true and is only ever relevant when Graphyne is using relational database persistence.")
args = parser.parse_args()
lLevel = Graph.logLevel.WARNING
if args.logl:
if args.logl == "info":
lLevel = Graph.logLevel.INFO
print("\n -- log level = 'info'")
elif args.logl == "debug":
lLevel = Graph.logLevel.DEBUG
print("\n -- log level = 'debug'")
elif args.logl == "warning":
pass
else:
print("Invalid log level %s! Permitted valies of --logl are 'warning', 'info' and 'debug'!" %args.logl)
sys.exit()
useServer = True
if args.server:
if (args.server is None) or (args.server == 'none'):
pass
elif (args.server == 'y') or (args.server == 'Y'):
useServer = True
print("\n -- Including REST API tests")
elif (args.server == 'n') or (args.server == 'N'):
useServer = False
print("\n -- Skipping REST API tests")
else:
print("Invalid REST API server choice %s! Permitted valies of --server | |
#!/usr/bin/env python
"""CRN class."""
from collections import defaultdict
import itertools
from libsbml import writeSBMLToFile, formulaToL3String, SBMLDocument
import logging
import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import connected_components
import sympy as sp
import warnings
from scipy import integrate
from .conslaw import ConsLaw
from .createmodel import model_from_reacts, model_from_sbml, replace_reacts
from .matrixfunctions import negative, sdiag, print_matrix, _pos_dependent, _pos_generators
from .crncomplex import Complex
from .parsereaction import parse_reaction_file, parse_complex, parse_reactions, ast_to_sympy_expr, flux_value
from .reaction import Reaction, _split_reaction_monom, merge_reactions, _same_denom
__author__ = "<NAME>"
__copyright__ = "Copyright (c) 2016, <NAME>"
__license__ = "BSD"
__version__ = "0.0.1"
class CRN(object):
"""A chemical reaction network.
Keyword argument:
* model: SBML model.
:Example:
>>> net = CRN(model_from_sbml("enzyme.xml"))
or equivalently
>>> net = from_sbml("enzyme.xml")
In alternative, a CRN can be created from a list of reactions or strings representing reactions:
>>> reacts = [Reaction("r1", Complex(a = 1), Complex(b = 1), "k1*a"), Reaction("r2", Complex(b = 2), Complex(), "k2*b**2")]
>>> net = from_reacts(reacts)
or equivalently
>>> net1 = from_react_strings(["r1: a ->(k1) b", "r2: 2b ->(k2) "])
One can also create an empty CRN and update the reactions:
>>> from crnpy.parsereaction import parse_reactions
>>> net2 = CRN()
>>> net2.species
()
>>> net2.complexes
()
>>> net2.reactions = parse_reactions(["r1: a ->(k1) b", "r2: 2b ->(k2) "])
>>> net2.species
('a', 'b')
>>> net2.complexes
(a, b, 2b, )
"""
def __init__(self, model = None):
self.logger = logging.getLogger("crnpy.crn")
self.logger.info("Creating an instance of crn.")
if model:
self._model, self._document = model
# species
self._species_from_sbml()
self._removed_species = []
# reactions
self._get_reactions()
self._populate()
else:
self._model, self._document = None, None
self._removed_species = []
self.reactions = []
@classmethod
def from_reacts(cls, reacts):
"""Create a reaction network from a list of reactions."""
crn = cls()
crn.reactions = reacts
return crn
@property
def model(self):
"""SBML model of the chemical reaction network.
If the reaction network is created using a list of reactions,
the SBML model is not created. It is created if save_sbml is called,
or if update_model() is called.
"""
return self._model
@property
def document(self):
"""SBML document.
If the reaction network is created using a list of reactions,
the SBML document is not created. It is created if save_sbml is called,
or if update_model() is called.
"""
return self._document
@property
def species(self):
"""Tuple of the network species.
:Example:
>>> from crnpy.crn import CRN, from_react_strings
>>> net = from_react_strings(["A1 ->(k1) A2 + A3", "A2 ->(k2) 2 A3"])
>>> net.species
('A1', 'A2', 'A3')
The tuple of species is read-only, and can change if the reactions are updated,
or if a reduction method is applied to eliminate some species.
:rtype: tuple of strings.
"""
return tuple(self._species)
@property
def removed_species(self):
"""Pairs (species, expression) for species that have been eliminated.
:rtype: list of pairs (string, sympy expression).
"""
return tuple(self._removed_species)
@property
def complexes(self):
"""Tuple of the network complexes.
:Example:
>>> from crnpy.crn import CRN, from_react_strings
>>> net = from_react_strings(["A1 ->(k1) A2 + A3", "A2 ->(k2) 2 A3"])
>>> net.complexes
(A1, A2 + A3, A2, 2A3)
The tuple of complexes is read-only, and can change if the reactions are updated,
or if a reduction method is applied to eliminate some species.
:rtype: tuple of Complex objects.
"""
return tuple(self._complexes)
@property
def reactions(self):
"""Tuple of the network reactions.
:setter: Sets the reactions, updating the CRN model and document if they exist.
:rtype: tuple of Reaction objects.
"""
return tuple(self._reactions)
@reactions.setter
def reactions(self, value):
self._species = sorted(list(set([s for r in value for s in r.reactant] +
[s for r in value for s in r.product])))
self._n_species = len(self._species)
self._reactions = value
self._populate()
self.update_model(if_exists = True)
@property
def n_species(self):
return self._n_species
@property
def n_complexes(self):
return self._n_complexes
@property
def n_reactions(self):
return self._n_reactions
@property
def reactionids(self):
return tuple(self._reactionids)
@property
def rates(self):
"""Rates of the reactions.
:rtype: matrix of sympy expressions.
"""
return self._rates
@property
def kinetic_params(self):
"""Kinetic parameters of the reactions.
:rtype: tuple of sympy expressions.
"""
return tuple(self._kinetic_params)
def _species_from_sbml(self):
"""Extract species from SBML model."""
# species
self._species = [self.model.getSpecies(s).getId() for s in range(self.model.getNumSpecies())]
self._species = sorted(self.species)
self._n_species = len(self.species)
def _get_reactions(self):
"""Extract reactions from SBML model."""
nr = self.model.getNumReactions()
reactions = []
for r in range(nr):
reaction = self.model.getReaction(r)
reactionid = reaction.getId()
reactant = Complex(dict((c.getSpecies(), 1 if np.isnan(c.getStoichiometry()) else int(c.getStoichiometry())) \
for c in reaction.getListOfReactants()))
product = Complex(dict((c.getSpecies(), 1 if np.isnan(c.getStoichiometry()) else int(c.getStoichiometry())) \
for c in reaction.getListOfProducts()))
# remove species with stoichiometric coefficient equal to 0
reactant = Complex(dict((s, sc) for s, sc in reactant.items() if sc != 0))
product = Complex(dict((s, sc) for s, sc in product.items() if sc != 0))
math = reaction.getKineticLaw().getMath()
if math:
# Special case for FLUX_VALUE
fv = flux_value(math)
if fv:
rate = sp.Symbol(fv) * reactant.ma()
if reaction.getReversible():
raterev = sp.Symbol(fv + "_rev") * product.ma()
else:
kineticlaw = ast_to_sympy_expr(math)
if reaction.getReversible():
# if reaction is reversible, we need to split the kineticLaw
# into forward and backward formulas.
# We expand the kineticLaw and assume that the components relating
# to the inverse reaction are those with a minus sign in front.
numer, denom = kineticlaw.as_numer_denom()
negative = numer.expand().coeff(-1)
rate = ((numer + negative) / denom).factor()
raterev = (negative / denom).factor()
else:
rate = kineticlaw
else:
param = "k_" + reaction.reactionid
rate = reactant.ma() * sp.Symbol(param)
if reaction.getReversible():
raterev = product.ma() * sp.Symbol(param + "_rev")
reactions.append(Reaction(reactionid, reactant, product, rate))
if reaction.getReversible():
revid = reactionid + "_rev"
if not raterev:
raterev = product.ma() * sp.Symbol("k_" + revid)
reactions.append(Reaction(revid, product, reactant, raterev))
self._reactions = reactions
def _populate(self):
"""Create crn attributes from reactions."""
self._n_reactions = len(self.reactions)
self._reactionids = [r.reactionid for r in self.reactions]
self._rates = sp.Matrix([r.rate for r in self.reactions])
self._kinetic_params = [r.kinetic_param for r in self.reactions]
self._n_species = len(self.species)
# complexes and matrices
self._complexes = []
nc = -1
incidence = {}
cmatrix = {}
for nr in range(self.n_reactions):
r = self.reactions[nr]
if r.reactant in self._complexes:
indc = self._complexes.index(r.reactant)
else:
nc = nc + 1
indc = nc
self._complexes.append(r.reactant)
for s in r.reactant: cmatrix[(self.species.index(s), nc)] = r.reactant[s]
incidence[(indc, nr)] = -1
if r.product in self._complexes:
indc = self._complexes.index(r.product)
else:
nc = nc + 1
indc = nc
self._complexes.append(r.product)
for s in r.product: cmatrix[(self.species.index(s), nc)] = r.product[s]
incidence[(indc, nr)] = 1
if r.reactant == r.product:
incidence[(indc, nr)] = 0
self._n_complexes = len(self._complexes)
# Create the matrix of complex coefficients. s x c
self._complex_matrix = sp.SparseMatrix(self.n_species, self.n_complexes, cmatrix)
# Create the incidence matrix. c x r
self._incidence_matrix = sp.SparseMatrix(self.n_complexes, self.n_reactions, incidence)
def update_model(self, if_exists = False):
"""Update the SBML model and document or create them if they do not exist.
If if_exists is set to True, update only if the model already exists."""
if (not if_exists) and (not self.model):
document = SBMLDocument(3, 1)
self._model, self._document = document.createModel(), document
if self.model:
self._model, self._document, self._species = \
replace_reacts(self.model, self.document, self.reactions)
def set_params(self, params_dict):
"""Replace the parameters used in the reaction rates
with the values in dictionary *params_dict*.
*params_dict* is a dictionary with keys the parameters to replace, and
values the sympy expressions or numeric values that replace them.
In the following example we set all the kinetic parameters to 0.001:
:Example:
>>> from crnpy.crn import CRN, from_react_strings
>>> net = from_react_strings(["A1 ->(k1) A2 + A3", "A2 ->(k2) 2 A3"])
>>> net.set_params(dict((k, 0.001) for k in net.kinetic_params))
>>> net.reactions
(r0: A1 ->(1.000e-3) A2 + A3, r1: A2 ->(1.000e-3) 2A3)
"""
self.reactions = [Reaction(r.reactionid,
r.reactant,
r.product,
r.rate.subs(params_dict)) for r in self.reactions]
self.update_model(if_exists = True)
### Matrices ####
@property
def complex_matrix(self):
"""Complex matrix (usually denoted with Y), i.e. the matrix with dimension
number of species times number of complexes, with element Yij given by
the stoichiometric coefficient of the i-th species in the j-th complex.
:rtype: sympy SparseMatrix.
:Example:
>>> from crnpy.crn import CRN, from_react_strings
>>> net = from_react_strings(["A1 ->(k1) A2 + A3", "A2 ->(k2) 2 A3"])
>>> net.complex_matrix
Matrix([
[1, 0, 0, 0],
[0, 1, 1, 0],
[0, 1, 0, 2]])
"""
# ns x nc matrix
return self._complex_matrix
@property
def incidence_matrix(self):
"""Incidence matrix.
Sometimes denoted as Ia, it is the matrix with dimension
number of complexes times number of reactions,
with element at | |
None or self.multiple_ori_ids[track_id] <= self.FRAME_THR \
or dets_ids[ious_inds[attack_ind]] not in self.multiple_ori2att \
or track_id not in self.multiple_ori2att:
continue
if ious[attack_ind, ious_inds[attack_ind]] > self.ATTACK_IOU_THR or (
track_id in self.low_iou_ids and ious[attack_ind, ious_inds[attack_ind]] > 0
):
attack_ids.append(track_id)
target_ids.append(dets_ids[ious_inds[attack_ind]])
attack_inds.append(attack_ind)
target_inds.append(ious_inds[attack_ind])
if hasattr(self, f'temp_i_{track_id}'):
self.__setattr__(f'temp_i_{track_id}', 0)
elif ious[attack_ind, ious_inds[attack_ind]] == 0 and track_id in self.low_iou_ids:
if hasattr(self, f'temp_i_{track_id}'):
self.__setattr__(f'temp_i_{track_id}', self.__getattribute__(f'temp_i_{track_id}') + 1)
else:
self.__setattr__(f'temp_i_{track_id}', 1)
if self.__getattribute__(f'temp_i_{track_id}') > 10:
self.low_iou_ids.remove(track_id)
elif dets_ids[dis_inds[attack_ind]] in self.multiple_ori2att:
attack_ids.append(track_id)
target_ids.append(dets_ids[dis_inds[attack_ind]])
attack_inds.append(attack_ind)
target_inds.append(dis_inds[attack_ind])
fit_index = self.CheckFit(dets, id_feature, attack_ids, attack_inds) if len(attack_ids) else []
if fit_index:
attack_ids = np.array(attack_ids)[fit_index]
target_ids = np.array(target_ids)[fit_index]
attack_inds = np.array(attack_inds)[fit_index]
target_inds = np.array(target_inds)[fit_index]
if self.opt.rand:
noise, attack_iter, suc = self.attack_mt_random(
im_blob,
img0,
id_features,
dets,
inds,
remain_inds,
last_info=self.ad_last_info,
outputs_ori=output,
attack_ids=attack_ids,
attack_inds=attack_inds,
target_ids=target_ids,
target_inds=target_inds
)
else:
noise, attack_iter, suc = self.attack_mt(
im_blob,
img0,
id_features,
dets,
inds,
remain_inds,
last_info=self.ad_last_info,
outputs_ori=output,
attack_ids=attack_ids,
attack_inds=attack_inds,
target_ids=target_ids,
target_inds=target_inds
)
self.low_iou_ids.update(set(attack_ids))
if suc:
self.attacked_ids.update(set(attack_ids))
print(
f'attack ids: {attack_ids}\tattack frame {self.frame_id_}: SUCCESS\tl2 distance: {(noise ** 2).sum().sqrt().item()}\titeration: {attack_iter}')
else:
print(f'attack ids: {attack_ids}\tattack frame {self.frame_id_}: FAIL\tl2 distance: {(noise ** 2).sum().sqrt().item() if noise is not None else None}\titeration: {attack_iter}')
if noise is not None:
l2_dis = (noise ** 2).sum().sqrt().item()
adImg = torch.clip(im_blob + noise, min=0, max=1)
noise = self.recoverNoise(noise, img0)
noise = (noise - np.min(noise)) / (np.max(noise) - np.min(noise))
noise = (noise * 255).astype(np.uint8)
else:
l2_dis = None
adImg = im_blob
output_stracks_att = self.update(adImg, img0)
adImg = self.recoverNoise(adImg.detach(), img0)
output_stracks_att_ind = []
for ind, track in enumerate(output_stracks_att):
if track.track_id not in self.multiple_att_ids:
self.multiple_att_ids[track.track_id] = 0
self.multiple_att_ids[track.track_id] += 1
if self.multiple_att_ids[track.track_id] <= self.FRAME_THR:
output_stracks_att_ind.append(ind)
if len(output_stracks_ori_ind) and len(output_stracks_att_ind):
ori_dets = [track.curr_tlbr for i, track in enumerate(output_stracks_ori) if i in output_stracks_ori_ind]
att_dets = [track.curr_tlbr for i, track in enumerate(output_stracks_att) if i in output_stracks_att_ind]
ori_dets = np.stack(ori_dets).astype(np.float64)
att_dets = np.stack(att_dets).astype(np.float64)
ious = bbox_ious(ori_dets, att_dets)
row_ind, col_ind = linear_sum_assignment(-ious)
for i in range(len(row_ind)):
if ious[row_ind[i], col_ind[i]] > 0.9:
ori_id = output_stracks_ori[output_stracks_ori_ind[row_ind[i]]].track_id
att_id = output_stracks_att[output_stracks_att_ind[col_ind[i]]].track_id
self.multiple_ori2att[ori_id] = att_id
return output_stracks_ori, output_stracks_att, adImg, noise, l2_dis
def update_attack_sg_feat(self, im_blob, img0, **kwargs):
self.frame_id_ += 1
attack_id = kwargs['attack_id']
self_track_id_ori = kwargs.get('track_id', {}).get('origin', None)
self_track_id_att = kwargs.get('track_id', {}).get('attack', None)
activated_starcks = []
refind_stracks = []
lost_stracks = []
removed_stracks = []
width = img0.shape[1]
height = img0.shape[0]
inp_height = im_blob.shape[2]
inp_width = im_blob.shape[3]
c = np.array([width / 2., height / 2.], dtype=np.float32)
s = max(float(inp_width) / float(inp_height) * height, width) * 1.0
meta = {'c': c, 's': s,
'out_height': inp_height // self.opt.down_ratio,
'out_width': inp_width // self.opt.down_ratio}
''' Step 1: Network forward, get detections & embeddings'''
# with torch.no_grad():
im_blob.requires_grad = True
self.model.zero_grad()
output = self.model(im_blob)[-1]
hm = output['hm'].sigmoid()
wh = output['wh']
id_feature = output['id']
id_feature = F.normalize(id_feature, dim=1)
reg = output['reg'] if self.opt.reg_offset else None
dets_raw, inds = mot_decode(hm, wh, reg=reg, cat_spec_wh=self.opt.cat_spec_wh, K=self.opt.K)
id_features = []
for i in range(3):
for j in range(3):
id_feature_exp = _tranpose_and_gather_feat_expand(id_feature, inds, bias=(i - 1, j - 1)).squeeze(0)
id_features.append(id_feature_exp)
id_feature = _tranpose_and_gather_feat_expand(id_feature, inds)
id_feature = id_feature.squeeze(0)
dets = self.post_process(dets_raw.clone(), meta)
dets = self.merge_outputs([dets])[1]
remain_inds = dets[:, 4] > self.opt.conf_thres
dets = dets[remain_inds]
id_feature = id_feature[remain_inds]
for i in range(len(id_features)):
id_features[i] = id_features[i][remain_inds]
id_feature = id_feature.detach().cpu().numpy()
last_id_features = [None for _ in range(len(dets))]
last_ad_id_features = [None for _ in range(len(dets))]
dets_index = [i for i in range(len(dets))]
dets_ids = [None for _ in range(len(dets))]
tracks_ad = []
# import pdb; pdb.set_trace()
# vis
'''
for i in range(0, dets.shape[0]):
bbox = dets[i][0:4]
cv2.rectangle(img0, (bbox[0], bbox[1]),
(bbox[2], bbox[3]),
(0, 255, 0), 2)
cv2.imshow('dets', img0)
cv2.waitKey(0)
id0 = id0-1
'''
if len(dets) > 0:
'''Detections'''
detections = [STrack(STrack.tlbr_to_tlwh(tlbrs[:4]), tlbrs[4], f, 30) for
(tlbrs, f) in zip(dets[:, :5], id_feature)]
else:
detections = []
''' Add newly detected tracklets to tracked_stracks'''
unconfirmed = []
tracked_stracks = [] # type: list[STrack]
for track in self.tracked_stracks_:
if not track.is_activated:
unconfirmed.append(track)
else:
tracked_stracks.append(track)
''' Step 2: First association, with embedding'''
strack_pool = joint_stracks(tracked_stracks, self.lost_stracks_)
STrack.multi_predict(strack_pool)
dists = matching.embedding_distance(strack_pool, detections)
# dists = matching.gate_cost_matrix(self.kalman_filter, dists, strack_pool, detections)
dists = matching.fuse_motion(self.kalman_filter_, dists, strack_pool, detections)
matches, u_track, u_detection = matching.linear_assignment(dists, thresh=0.7)
# import pdb; pdb.set_trace()
for itracked, idet in matches:
track = strack_pool[itracked]
det = detections[idet]
assert last_id_features[dets_index[idet]] is None
assert last_ad_id_features[dets_index[idet]] is None
last_id_features[dets_index[idet]] = track.smooth_feat
last_ad_id_features[dets_index[idet]] = track.smooth_feat_ad
tracks_ad.append((track, dets_index[idet]))
if track.state == TrackState.Tracked:
track.update(detections[idet], self.frame_id_)
activated_starcks.append(track)
else:
track.re_activate_(det, self.frame_id_, new_id=False)
refind_stracks.append(track)
dets_ids[dets_index[idet]] = track.track_id
''' Step 3: Second association, with IOU'''
dets_index = [dets_index[i] for i in u_detection]
detections = [detections[i] for i in u_detection]
r_tracked_stracks = [strack_pool[i] for i in u_track if strack_pool[i].state == TrackState.Tracked]
dists = matching.iou_distance(r_tracked_stracks, detections)
matches, u_track, u_detection = matching.linear_assignment(dists, thresh=0.5)
for itracked, idet in matches:
track = r_tracked_stracks[itracked]
det = detections[idet]
assert last_id_features[dets_index[idet]] is None
assert last_ad_id_features[dets_index[idet]] is None
last_id_features[dets_index[idet]] = track.smooth_feat
last_ad_id_features[dets_index[idet]] = track.smooth_feat_ad
tracks_ad.append((track, dets_index[idet]))
if track.state == TrackState.Tracked:
track.update(det, self.frame_id_)
activated_starcks.append(track)
else:
track.re_activate_(det, self.frame_id_, new_id=False)
refind_stracks.append(track)
dets_ids[dets_index[idet]] = track.track_id
for it in u_track:
track = r_tracked_stracks[it]
if not track.state == TrackState.Lost:
track.mark_lost()
lost_stracks.append(track)
'''Deal with unconfirmed tracks, usually tracks with only one beginning frame'''
dets_index = [dets_index[i] for i in u_detection]
detections = [detections[i] for i in u_detection]
dists = matching.iou_distance(unconfirmed, detections)
matches, u_unconfirmed, u_detection = matching.linear_assignment(dists, thresh=0.7)
for itracked, idet in matches:
assert last_id_features[dets_index[idet]] is None
assert last_ad_id_features[dets_index[idet]] is None
last_id_features[dets_index[idet]] = unconfirmed[itracked].smooth_feat
last_ad_id_features[dets_index[idet]] = unconfirmed[itracked].smooth_feat_ad
tracks_ad.append((unconfirmed[itracked], dets_index[idet]))
unconfirmed[itracked].update(detections[idet], self.frame_id_)
activated_starcks.append(unconfirmed[itracked])
dets_ids[dets_index[idet]] = unconfirmed[itracked].track_id
for it in u_unconfirmed:
track = unconfirmed[it]
track.mark_removed()
removed_stracks.append(track)
""" Step 4: Init new stracks"""
for inew in u_detection:
track = detections[inew]
if track.score < self.det_thresh:
continue
track.activate_(self.kalman_filter_, self.frame_id_, track_id=self_track_id_ori)
activated_starcks.append(track)
dets_ids[dets_index[inew]] = track.track_id
""" Step 5: Update state"""
for track in self.lost_stracks_:
if self.frame_id_ - track.end_frame > self.max_time_lost:
track.mark_removed()
removed_stracks.append(track)
# print('Remained match {} s'.format(t4-t3))
self.tracked_stracks_ = [t for t in self.tracked_stracks_ if t.state == TrackState.Tracked]
self.tracked_stracks_ = joint_stracks(self.tracked_stracks_, activated_starcks)
self.tracked_stracks_ = joint_stracks(self.tracked_stracks_, refind_stracks)
self.lost_stracks_ = sub_stracks(self.lost_stracks_, self.tracked_stracks_)
self.lost_stracks_.extend(lost_stracks)
self.lost_stracks_ = sub_stracks(self.lost_stracks_, self.removed_stracks_)
self.removed_stracks_.extend(removed_stracks)
self.tracked_stracks_, self.lost_stracks_ = remove_duplicate_stracks(self.tracked_stracks_, self.lost_stracks_)
# get scores of lost tracks
output_stracks_ori = [track for track in self.tracked_stracks_ if track.is_activated]
logger.debug('===========Frame {}=========='.format(self.frame_id_))
logger.debug('Activated: {}'.format([track.track_id for track in activated_starcks]))
logger.debug('Refind: {}'.format([track.track_id for track in refind_stracks]))
logger.debug('Lost: {}'.format([track.track_id for track in lost_stracks]))
logger.debug('Removed: {}'.format([track.track_id for track in removed_stracks]))
noise = None
suc = 0
for attack_ind, track_id in enumerate(dets_ids):
if track_id == attack_id:
if self.opt.attack_id > 0:
if not hasattr(self, f'frames_{attack_id}'):
setattr(self, f'frames_{attack_id}', 0)
if getattr(self, f'frames_{attack_id}') < self.FRAME_THR:
setattr(self, f'frames_{attack_id}', getattr(self, f'frames_{attack_id}') + 1)
break
fit = self.CheckFit(dets, id_feature, [attack_id], [attack_ind])
ious = bbox_ious(np.ascontiguousarray(dets[:, :4], dtype=np.float64),
np.ascontiguousarray(dets[:, :4], dtype=np.float64))
ious[range(len(dets)), range(len(dets))] = 0
dis = bbox_dis(np.ascontiguousarray(dets[:, :4], dtype=np.float64),
np.ascontiguousarray(dets[:, :4], dtype=np.float64))
dis[range(len(dets)), range(len(dets))] = np.inf
target_ind = np.argmax(ious[attack_ind])
if ious[attack_ind][target_ind] >= self.attack_iou_thr:
if ious[attack_ind][target_ind] == 0:
target_ind = np.argmin(dis[attack_ind])
target_id = dets_ids[target_ind]
if fit:
noise, attack_iter, suc = self.attack_sg_feat(
im_blob,
img0,
id_features,
dets,
inds,
remain_inds,
last_info=self.ad_last_info,
outputs_ori=output,
attack_id=attack_id,
attack_ind=attack_ind,
target_id=target_id,
target_ind=target_ind
)
self.attack_iou_thr = 0
if suc:
suc = 1
print(
f'attack id: {attack_id}\tattack frame {self.frame_id_}: SUCCESS\tl2 distance: {(noise ** 2).sum().sqrt().item()}\titeration: {attack_iter}')
else:
suc = 2
print(
f'attack id: {attack_id}\tattack frame {self.frame_id_}: FAIL\tl2 distance: {(noise ** 2).sum().sqrt().item()}\titeration: {attack_iter}')
else:
suc = 3
if ious[attack_ind][target_ind] == 0:
self.temp_i += 1
if self.temp_i >= 10:
self.attack_iou_thr = self.ATTACK_IOU_THR
else:
self.temp_i = 0
else:
self.attack_iou_thr = self.ATTACK_IOU_THR
if fit:
suc = 2
if noise is not None:
l2_dis = (noise ** 2).sum().sqrt().item()
adImg = torch.clip(im_blob + noise, min=0, max=1)
noise = self.recoverNoise(noise, img0)
noise = (noise - np.min(noise)) / (np.max(noise) - np.min(noise))
noise = (noise * 255).astype(np.uint8)
else:
l2_dis = None
adImg = im_blob
output_stracks_att = self.update(adImg, img0, track_id=self_track_id_att)
adImg = self.recoverNoise(adImg.detach(), img0)
return output_stracks_ori, output_stracks_att, adImg, noise, l2_dis, suc
def update_attack_sg_cl(self, im_blob, img0, **kwargs):
self.frame_id_ += 1
attack_id = kwargs['attack_id']
self_track_id_ori = kwargs.get('track_id', {}).get('origin', None)
self_track_id_att = kwargs.get('track_id', {}).get('attack', None)
activated_starcks = []
refind_stracks = []
lost_stracks = []
removed_stracks = []
width = img0.shape[1]
height = img0.shape[0]
inp_height = im_blob.shape[2]
inp_width = im_blob.shape[3]
c = np.array([width / 2., height / 2.], dtype=np.float32)
s = max(float(inp_width) / float(inp_height) * height, width) * 1.0
meta = {'c': c, 's': s,
'out_height': inp_height | |
bn_momentum: float. Batch normalization momentum.
"""
self.unit1 = BnReluConv(input_channels, n_filters, filter_size,
dropout, bias, dilation,
bn_momentum=bn_momentum)
self.unit2 = BnReluConv(n_filters, n_filters, filter_size, dropout,
bias, 1, bn_momentum=bn_momentum)
if input_channels != n_filters:
self.equal_channels_conv = nn.Conv2d(input_channels, n_filters,
kernel_size=1, bias=bias)
self.input_channels = input_channels
self.n_filters = n_filters
# Initialize modules
for m in self.modules():
if isinstance(m, nn.Conv2d):
kaiming_uniform(m.weight)
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward(self, x):
processed = self.unit2(self.unit1(x))
if self.input_channels != self.n_filters:
x = self.equal_channels_conv(x)
out = x + processed
return out
class ResNet(nn.Module):
def __init__(self, input_channels, n_filters, dropout, resnet_layers,
filter_size=3, bias=True, bn_momentum=0.1):
super(ResNet, self).__init__()
"""
This class builds a ResNet with BasicBlocks.
Input:
- input_channels: int. Number of input feature maps.
- n_filters: int. Number of output feature maps.
- dropout: float. Dropout probability.
- resnet_layers: int. Number of BasicBlocks to build.
- filter_size: int. Filter size of the convolution.
- bias: bool. Bias of the convolution.
- bn_momentum: float. Batch normalization momentum.
"""
self.resnet = self._make_layer(input_channels, n_filters, filter_size,
dropout, resnet_layers, bias,
bn_momentum=bn_momentum)
def _make_layer(self, input_channels, n_filters, filter_size, dropout,
resnet_layers, bias, bn_momentum=0.1):
layers = []
for i in range(resnet_layers):
if i == 0:
resnet_in_channels = input_channels
else:
resnet_in_channels = n_filters
layers.append(
BasicBlock(resnet_in_channels, n_filters, filter_size,
dropout, 1, bias, bn_momentum=bn_momentum))
return nn.Sequential(*layers)
def forward(self, x):
out = self.resnet(x)
return out
class DenseResBlock(nn.Module):
def __init__(self, dilation_list, resnet_config, growth_rate,
dropout, n_filters_inout, bias=True,
mixing_conv=True,
stride=2, transformation=None,
preprocess_block=PreprocessBlockStandard, bn_momentum=0.1,
ini='random'):
super(DenseResBlock, self).__init__()
"""
This class builds the Dense Block between IDB and FUB.
It is composed of several ResNets, with transformations and
1x1 mixing convolutions.
The transformations can be: max pooling, strided/dilated/
traditional convolution, upsampling.
The code is made to work with 9 ResNets, with 8 transformations
in between.
Input:
- dilation_list: int list of length N-1. It indicates the dilation
rate to use in each transformation.
Each position indicates each different
transformation. Example: [2, 1] will use
dilation rate 2 for the first transfor-
mation if the first position of
the 'transformation' list is a dilated conv.
Otherwise, dilation rate does not affect
the transformation.
- resnet_config: int list of length N. It creates N ResNets
with M BasicBlocks, where M is the value of
the list at each position.
Example: [4, 4, 4] creates 3 ResNets with 4
basic blocks each.
- growth_rate: int list of length N. Specifies the number of
feature maps to output per each of the N
ResNets.
- dropout: float list of length N. Specifies the dropout rate
per each ResNet.
- n_filters_inout: int. Number of input feature maps to the
DenseBlock.
- bias: bool. Bias of the convolution.
- mixing_conv: bool. To use the 1x1 mixing convolution after
each concatenation.
- stride: int. Stride to use when using strided convolutions
as transformations.
- transformation: string list of length N-1. Specifies the
transformation to use between each ResNet.
- preprocess_block: Class. Which type of dilated convolution
block to use when using 'dilation' as
transformation.
- bn_momentum: float. Batch normalization momentum.
        - ini: string. Initialization for dilated convolutions. It
can be 'random' or 'identity'.
"""
# Names of paths to transform. All the paths are transformed
# separately. Example: in ResNet2, we will need to transform
# output of ResNet1 AND input to the DenseResBlock.
# The ones that can be reused for further ResNets and do not
# need additional transformations are just directly used when
# necessary.
path_names = [['input_p1', 'resnet1_p1'], # Input to ResNet2
['input_p2', 'resnet1_p2', 'resnet2_p1'], # Input to
# ResNet3
['input_p3', 'resnet1_p3', 'resnet2_p2', 'resnet3_p1'],
# Input to ResNet4
['input_p4', 'resnet1_p4', 'resnet2_p3', 'resnet3_p2',
'resnet4_p1'], # Input to ResNet5
['resnet5_u1'], # Input to ResNet6
['resnet4_u1', 'resnet5_u2', 'resnet6_u1'],
# Input to ResNet7
['resnet3_u1', 'resnet4_u2', 'resnet5_u3', 'resnet6_u2',
'resnet7_u1'], # Input to ResNet8
['resnet2_u1', 'resnet3_u2', 'resnet4_u3', 'resnet5_u4',
'resnet6_u3', 'resnet7_u2', 'resnet8_u1']
# Input to ResNet9
]
n_filters_conv = 0
for index in range(len(resnet_config)):
if index == 0:
resnet_ch_in = n_filters_inout
else:
resnet_ch_in = n_filters_conv
# ------------ Build ResNet ------------ #
resnet = ResNet(resnet_ch_in, growth_rate[index], dropout[index],
resnet_config[index],
bias=bias, bn_momentum=bn_momentum)
setattr(self, 'resnet' + str(index + 1), resnet)
# ------------ Transform all inputs ------------ #
if index < len(resnet_config) - 1:
# Do not append transformations for the last ResNet.
for k, p_name in enumerate(path_names[index]):
if 'input' in p_name:
num_ch = n_filters_inout
drop_index = 0
else:
num_ch = growth_rate[int(p_name[6]) - 1]
drop_index = int(p_name[6]) - 1
if transformation[index] == nn.MaxPool2d or \
transformation[index] == nn.AvgPool2d:
t_block = transformation[index](kernel_size=(2, 2),
stride=(
stride, stride),
ceil_mode=True)
elif transformation[index] == 'sconv':
t_block = BnReluConv(num_ch, num_ch, 3,
dropout[drop_index], bias=bias,
stride=(2, 2),
bn_momentum=bn_momentum)
elif transformation[index] == 'upsample':
t_block = Upsample_conv(num_ch, num_ch, filter_size=3,
bias=bias)
elif transformation[index] == 'dilation':
t_block = preprocess_block(num_ch, dropout[index],
dilation_list[index], bias,
ini=ini)
elif transformation[index] == 'dilation_mg':
t_block_pre = BnReluConv(num_ch, num_ch, 3,
dropout[drop_index],
bias=bias,
dilation=dilation_list[
index],
bn_momentum=bn_momentum,
ini=ini)
t_block = BnReluConv(num_ch, num_ch, 3,
dropout[drop_index], bias=bias,
dilation=2 * dilation_list[
index],
bn_momentum=bn_momentum,
ini=ini)
t_block_post = BnReluConv(num_ch, num_ch, 3,
dropout[drop_index],
bias=bias,
dilation=dilation_list[
index],
bn_momentum=bn_momentum,
ini=ini)
else:
                    raise ValueError(
                        'Transformation {} for ResNet{} not understood'.format(
                            transformation[index], index + 1))
if transformation[index] == 'dilation_mg':
setattr(self, p_name + '_pre', t_block_pre)
setattr(self, p_name, t_block)
setattr(self, p_name + '_post', t_block_post)
else:
setattr(self, p_name, t_block)
# ------------ Mixing convolution 1x1 ------------ #
n_filters_conv = n_filters_inout + sum(growth_rate[:index + 1])
if mixing_conv:
mixing = BnReluConv(n_filters_conv, n_filters_conv,
filter_size=1, dropout=dropout[index],
bias=bias, dilation=1,
bn_momentum=bn_momentum)
setattr(self, 'mixing_r' + str(index + 1), mixing)
self.transformation = transformation
self.mixing = mixing_conv
def forward(self, x):
res1 = self.resnet1(x)
in_p1 = self.input_p1(x)
res1_p1 = self.resnet1_p1(res1)
res2_in = torch.cat((in_p1, res1_p1), 1)
if self.mixing:
res2_in = self.mixing_r1(res2_in)
res2 = self.resnet2(res2_in)
in_p2 = self.input_p2(in_p1)
res1_p2 = self.resnet1_p2(res1_p1)
res2_p1 = self.resnet2_p1(res2)
res3_in = torch.cat((in_p2, res1_p2, res2_p1), 1)
if self.mixing:
res3_in = self.mixing_r2(res3_in)
res3 = self.resnet3(res3_in)
in_p3 = self.input_p3(in_p2)
res1_p3 = self.resnet1_p3(res1_p2)
res2_p2 = self.resnet2_p2(res2_p1)
res3_p1 = self.resnet3_p1(res3)
res4_in = torch.cat((in_p3, res1_p3, res2_p2, res3_p1), 1)
if self.mixing:
res4_in = self.mixing_r3(res4_in)
res4 = self.resnet4(res4_in)
in_p4 = self.input_p4(in_p3)
res1_p4 = self.resnet1_p4(res1_p3)
res2_p3 = self.resnet2_p3(res2_p2)
res3_p2 = self.resnet3_p2(res3_p1)
res4_p1 = self.resnet4_p1(res4)
res5_in = torch.cat((in_p4, res1_p4, res2_p3, res3_p2, res4_p1), 1)
if self.mixing:
res5_in = self.mixing_r4(res5_in)
res5 = self.resnet5(res5_in)
res5_u1 = self.resnet5_u1(res5, res4.size()[2:])
res6_in = torch.cat((in_p3, res1_p3, res2_p2, res3_p1, res4, res5_u1),
1)
if self.mixing:
res6_in = self.mixing_r5(res6_in)
res6 = self.resnet6(res6_in)
res4_u1 = self.resnet4_u1(res4, res3.size()[2:])
res5_u2 = self.resnet5_u2(res5_u1, res3.size()[2:])
res6_u1 = self.resnet6_u1(res6, res3.size()[2:])
res7_in = torch.cat(
(in_p2, res1_p2, res2_p1, res3, res4_u1, res5_u2, res6_u1), 1)
if self.mixing:
res7_in = self.mixing_r6(res7_in)
res7 = self.resnet7(res7_in)
res3_u1 = self.resnet3_u1(res3, res2.size()[2:])
res4_u2 = self.resnet4_u2(res4_u1, res2.size()[2:])
res5_u3 = self.resnet5_u3(res5_u2, res2.size()[2:])
res6_u2 = self.resnet6_u2(res6_u1, res2.size()[2:])
res7_u1 = self.resnet7_u1(res7, res2.size()[2:])
res8_in = torch.cat((in_p1, res1_p1, res2, res3_u1, res4_u2, res5_u3,
res6_u2, res7_u1), 1)
if self.mixing:
res8_in = self.mixing_r7(res8_in)
res8 = self.resnet8(res8_in)
res2_u1 = self.resnet2_u1(res2, res1.size()[2:])
res3_u2 = self.resnet3_u2(res3_u1, res1.size()[2:])
res4_u3 = self.resnet4_u3(res4_u2, res1.size()[2:])
res5_u4 = self.resnet5_u4(res5_u3, res1.size()[2:])
res6_u3 = self.resnet6_u3(res6_u2, res1.size()[2:])
res7_u2 = self.resnet7_u2(res7_u1, res1.size()[2:])
res8_u1 = self.resnet8_u1(res8, res1.size()[2:])
res9_in = torch.cat((x, res1, res2_u1, res3_u2, res4_u3, res5_u4,
res6_u3, res7_u2, res8_u1), 1)
if self.mixing:
res9_in = self.mixing_r8(res9_in)
res9 = self.resnet9(res9_in)
out = torch.cat((x, res1, res2_u1, res3_u2, res4_u3, res5_u4, res6_u3,
res7_u2, res8_u1, res9), 1)
if self.mixing:
out = self.mixing_r9(out)
return out
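# Note on DenseResBlock.forward above: the final concatenation stacks the block
# input with all nine ResNet outputs (upsampled back to the input resolution),
# so `out` carries n_filters_inout + sum(growth_rate) channels, optionally
# passed through the 1x1 mixing convolution mixing_r9.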
class DenseResNet(nn.Module):
def __init__(self, input_channels, n_init_conv, subsample_inout,
n_filters_inout, n_classes, dilation_list,
resnet_config, growth_rate,
filter_size_inout, stride=2, dropout=None, bias=True,
mixing_conv=False,
transformation=None,
preprocess_block=PreprocessBlockStandard, logsoftmax=True,
bn_momentum=0.1,
ini='random'):
super(DenseResNet, self).__init__()
"""
        Creates the FC-DRN architecture. It is composed of:
- an IDB (Initial downsampling block), called ini_conv.
- a Dense Block containing all densely connected ResNets.
        - an FUB (final upsampling block), called final_upsample
- Final classifier
Input:
- input_channels: Channels of input images. 3 if RGB
images, 1 if grayscale.
- n_init_conv: Number of Conv + Relu blocks in IDB.
- subsample_inout: Downsample factor used in IDB. Same
factor to upsample final feature maps in FUB.
- filter_size_inout: Filter size for IDB and FUB.
            - n_filters_inout: Number of channels after IDB and after
FUB.
- n_classes: Number of classes in the dataset to set
output_channels in classifier.
- dilation_list: A list with N-1 dilation factors, where
N is the number of ResNets we use in the model.
                             Example: [1,1,2,4,1,1,1] for
# Repository: jorgeMorfinezM/cargamos_api_test
# -*- coding: utf-8 -*-
"""
Requires Python 3.8 or later
"""
__author__ = "<NAME> (<EMAIL>)"
__copyright__ = "Copyright 2021, <NAME>"
__license__ = ""
__history__ = """ """
__version__ = "1.1.A19.1 ($Rev: 1 $)"
import json
import re
import threading
import time
import uuid
from flask import Flask, jsonify, render_template, json, request
from flask_jwt_extended import JWTManager
from auth_controller.api_authentication import *
from utilities.Utility import Utility as Util
from logger_controller.logger_control import *
from db_controller.database_backend import *
from model.StoreModel import StoreModel
from model.ProductModel import ProductModel
logger = configure_ws_logger()
app = Flask(__name__, static_url_path='/static')
app.config['JWT_SECRET_KEY'] = '<KEY>'
app.config['JWT_BLACKLIST_ENABLED'] = False
app.config['JWT_BLACKLIST_TOKEN_CHECKS'] = ['access', 'refresh']
app.config['JWT_ERROR_MESSAGE_KEY'] = 'message'
app.config['JWT_ACCESS_TOKEN_EXPIRES'] = 3600
app.config['PROPAGATE_EXCEPTIONS'] = True
jwt = JWTManager(app)
# The app is initialized with a background thread to avoid execution issues
# (validation is still missing for the case where a thread is already running)
@app.before_first_request
def activate_job():
def run_job():
while True:
time.sleep(2)
thread = threading.Thread(target=run_job)
thread.start()
# Serves the HTML page that documents the API, its methods, and its endpoints
# with the I/O data models
@app.route('/')
def main():
return render_template('api_manage_ecommerce.html')
def get_stock_all_stores_by_product(product_sku):
stock_list = []
stock_in_stores = select_all_stock_in_product(product_sku)
stock_list = json.loads(stock_in_stores)
if stock_list:
logger.info('List Stock in all Stores by SKU: {}: {}: '.format(product_sku, stock_list))
return stock_list
@app.route('/api/ecommerce/stock/total/', methods=['GET', 'OPTIONS'])
@jwt_required
def endpoint_list_stock_all_stores():
headers = request.headers
auth = headers.get('Authorization')
    if not auth or 'Bearer' not in auth:
return request_unauthorized()
else:
if request.method == 'OPTIONS':
headers = {
'Access-Control-Allow-Methods': 'POST, GET, OPTIONS',
'Access-Control-Max-Age': 1000,
'Access-Control-Allow-Headers': 'origin, x-csrftoken, content-type, accept',
}
return '', 200, headers
        elif request.method == 'GET':
            data = request.get_json(force=True)
            product_sku = data['product_sku']
            if not product_sku:
                return request_conflict()
            json_data = get_stock_all_stores_by_product(product_sku)
            return json.dumps(json_data)
else:
return not_found()
def get_stock_by_store_by_product(product_sku, store_code):
stock_list = []
stock_in_store = select_stock_in_product(store_code, product_sku)
stock_list = json.loads(stock_in_store)
if stock_list:
logger.info('List Stock in one Store: {} by SKU: {}: {}: '.format(store_code, product_sku, stock_list))
return stock_list
@app.route('/api/ecommerce/stock/detail/', methods=['GET', 'OPTIONS'])
@jwt_required
def endpoint_detailed_stock_by_sku():
headers = request.headers
auth = headers.get('Authorization')
    if not auth or 'Bearer' not in auth:
return request_unauthorized()
else:
if request.method == 'OPTIONS':
headers = {
'Access-Control-Allow-Methods': 'POST, GET, OPTIONS',
'Access-Control-Max-Age': 1000,
'Access-Control-Allow-Headers': 'origin, x-csrftoken, content-type, accept',
}
return '', 200, headers
        elif request.method == 'GET':
            data = request.get_json(force=True)
            product_sku = data['product_sku']
            store_code = data['store_code']
            if not product_sku:
                return request_conflict()
            json_data = get_stock_by_store_by_product(product_sku, store_code)
            return json.dumps(json_data)
else:
return not_found()
def add_stock_by_store_by_product(stock, product_sku, store_code):
stock_add = []
stock_in_product = update_product_store_stock(stock, product_sku, store_code)
stock_add = json.loads(stock_in_product)
if stock_add:
logger.info('Add Stock: {} in one Product: {} by Store: {}: {}: '.format(stock,
product_sku,
store_code,
stock_add))
return stock_add
@app.route('/api/ecommerce/stock/add/', methods=['POST', 'OPTIONS'])
@jwt_required
def endpoint_update_stock():
headers = request.headers
auth = headers.get('Authorization')
    if not auth or 'Bearer' not in auth:
return request_unauthorized()
else:
if request.method == 'OPTIONS':
headers = {
'Access-Control-Allow-Methods': 'POST, GET, OPTIONS',
'Access-Control-Max-Age': 1000,
'Access-Control-Allow-Headers': 'origin, x-csrftoken, content-type, accept',
}
return '', 200, headers
        elif request.method == 'POST':
            data = request.get_json(force=True)
            stock = data['stock']
            product_sku = data['product_sku']
            store_code = data['store_code']
            if not product_sku or not store_code or not stock:
                return request_conflict()
            json_data = add_stock_by_store_by_product(stock, product_sku, store_code)
            return json.dumps(json_data)
else:
return not_found()
def manage_store_requested_data(store_data):
store_data_manage = []
store_model_db = StoreModelDb()
try:
store_code = store_data.get("store_code")
store_name = store_data.get("store_name")
        store_street_address = store_data.get("street_address")
        store_external_number = store_data.get("external_number_address")
store_suburb_address = store_data.get("suburb_address")
store_city_address = store_data.get("city_address")
store_country_address = store_data.get("country_address")
store_zippostal_code = store_data.get("zip_postal_code_address")
store_min_inventory = store_data.get("minimum_inventory")
store_obj = StoreModel(store_code, store_name, store_external_number, store_street_address, store_suburb_address,
store_city_address, store_country_address, store_zippostal_code, store_min_inventory)
store_data = store_model_db.manage_store_data(store_obj)
store_data_manage = json.loads(store_data)
if len(store_data_manage) != 0:
logger.info('Response Store Data: %s', str(store_data_manage))
return store_data_manage
except SQLAlchemyError as error:
raise mvc_exc.ConnectionError(
'Can\'t connect to database, verify data connection to "{}".\nOriginal Exception raised: {}'.format(
store_model_db.__tablename__, error
)
)
def get_stores_by_code(store_code):
store_list_data = {}
store_get_list_data = select_by_store_code(store_code)
store_list_data = json.loads(store_get_list_data)
if store_list_data:
logger.info('List Stores data by code: {}: {}: '.format(store_code, store_list_data))
return store_list_data
def update_store_data_endpoint(store_dict_input):
store_updated = dict()
store_updated = update_store_data(store_dict_input)
return store_updated
@app.route('/api/ecommerce/manage/store/', methods=['POST', 'GET', 'PUT', 'DELETE', 'OPTIONS'])
@jwt_required
def endpoint_processing_store_data():
headers = request.headers
auth = headers.get('Authorization')
    if not auth or 'Bearer' not in auth:
return request_unauthorized()
else:
if request.method == 'OPTIONS':
headers = {
'Access-Control-Allow-Methods': 'POST, GET, PUT, DELETE, OPTIONS',
'Access-Control-Max-Age': 1000,
'Access-Control-Allow-Headers': 'origin, x-csrftoken, content-type, accept',
}
return '', 200, headers
elif request.method == 'POST':
data = request.get_json(force=True)
            if not data:
return request_conflict()
logger.info('Data Json Store to Manage on DB: %s', str(data))
json_store_response = manage_store_requested_data(data)
return json.dumps(json_store_response)
elif request.method == 'GET':
data = request.get_json(force=True)
store_code = data['store_code']
json_data = []
json_data = get_stores_by_code(store_code)
logger.info('Stores List data by Code: %s', str(json_data))
if not store_code:
return request_conflict()
return json.dumps(json_data)
elif request.method == 'PUT':
data_store = request.get_json(force=True)
store_code = data_store.get("store_code")
store_name = data_store.get("store_name")
if not data_store:
return request_conflict()
json_data = dict()
json_data = update_store_data_endpoint(data_store)
logger.info('Data to update Store: %s',
"Store code: {0}, Store name: {1}".format(store_code, store_name))
logger.info('Store updated Info: %s', str(json_data))
return json_data
elif request.method == 'DELETE':
data = request.get_json(force=True)
store_code = data['store_code']
logger.info('Store to Delete: %s', 'Store Code: {}'.format(store_code))
json_data = []
            if not store_code or not Util.validate_store_code_syntax(store_code):
return request_conflict()
json_data = delete_store_data(store_code)
logger.info('Store deleted: %s', json_data)
return json.dumps(json_data)
else:
return not_found()
def manage_product_requested_data(product_data):
product_data_manage = []
product_model_db = ProductModelDb()
try:
product_sku = product_data.get("product_sku")
product_unspc = product_data.get("product_unspc")
product_brand = product_data.get("product_brand")
category_id = product_data.get("category_id")
parent_category_id = product_data.get("parent_category_id")
product_uom = product_data.get("unit_of_measure")
product_stock = product_data.get("product_stock")
store_code = product_data.get("product_store_code")
product_name = product_data.get("product_name")
product_title = product_data.get("product_title")
product_long_description = product_data.get("product_long_description")
product_photo = product_data.get("product_photo")
product_price = product_data.get("product_price")
product_tax = product_data.get("product_tax")
product_currency = product_data.get("product_currency")
product_status = product_data.get("product_status")
product_published = product_data.get("product_published")
manage_stock = product_data.get("product_manage_stock")
product_length = product_data.get("product_length")
product_width = product_data.get("product_width")
product_height = product_data.get("product_height")
product_weight = product_data.get("product_weight")
product_obj = ProductModel(product_sku, product_unspc, product_brand, category_id, parent_category_id,
product_uom, product_stock, store_code, product_name, product_title,
product_long_description, product_photo, product_price, product_tax, product_currency,
product_status, product_published, manage_stock, product_length, product_width,
product_height, product_weight)
data_product = product_model_db.manage_product_data(product_obj)
product_data_manage = json.loads(data_product)
if len(product_data_manage) != 0:
logger.info('Response Product Data: %s', str(product_data_manage))
return product_data_manage
except SQLAlchemyError as error:
raise mvc_exc.ConnectionError(
'Can\'t connect to database, verify data connection to "{}".\nOriginal Exception raised: {}'.format(
product_model_db.__tablename__, error
)
)
def get_products_by_sku(product_sku):
product_list_data = {}
product_get_list_data = select_by_product_sku(product_sku)
product_list_data = json.loads(product_get_list_data)
if product_list_data:
logger.info('List Product data by SKU: {}: {}: '.format(product_sku, product_list_data))
return product_list_data
@app.route('/api/ecommerce/manage/product/', methods=['POST', 'GET', 'PUT', 'DELETE', 'OPTIONS'])
@jwt_required
def endpoint_processing_product_data():
headers = request.headers
auth = headers.get('Authorization')
    if not auth or 'Bearer' not in auth:
return request_unauthorized()
else:
if request.method == 'OPTIONS':
headers = {
'Access-Control-Allow-Methods': 'POST, GET, PUT, DELETE, OPTIONS',
'Access-Control-Max-Age': 1000,
'Access-Control-Allow-Headers': 'origin, x-csrftoken, content-type, accept',
}
return '', 200, headers
elif request.method == 'POST':
data = request.get_json(force=True)
            if not data:
return request_conflict()
logger.info('Data Json Store to Manage on DB: %s', str(data))
json_store_response = manage_product_requested_data(data)
return json.dumps(json_store_response)
elif request.method == 'GET':
data = request.get_json(force=True)
product_sku = data['product_sku']
json_data = []
json_data = get_products_by_sku(product_sku)
logger.info('Product List data by SKU: %s', str(json_data))
if not product_sku:
return request_conflict()
return json.dumps(json_data)
elif request.method == 'PUT':
data_store = request.get_json(force=True)
product_sku = data_store.get('product_sku')
product_stock = data_store.get('product_stock')
product_store_code = data_store.get('product_store_code')
product_name = data_store.get('product_name')
if not data_store:
return request_conflict()
json_data = dict()
json_data = update_product_data(data_store)
logger.info('Data to update Product: %s',
"Product SKU: {0}, "
"Product Name: {1}, "
"Product Store Code: {2}, "
"Product Stock: {3}".format(product_sku, product_name, product_store_code, product_stock))
logger.info('Product updated Info: %s', str(json_data))
return json_data
elif request.method == 'DELETE':
data = request.get_json(force=True)
store_code = data['store_code']
product_sku = data['product_sku']
            logger.info('Product to Delete: %s',
                        'Product SKU: {}, Store Code: {}'.format(product_sku, store_code))
            json_data = []
            if not store_code or not Util.validate_store_code_syntax(store_code):
return request_conflict()
json_data = delete_product_data(product_sku, store_code)
logger.info('Product deleted: %s', json_data)
return json.dumps(json_data)
else:
return not_found()
@app.route('/api/ecommerce/authorization/', methods=['POST', 'OPTIONS'])
def get_authentication():
json_token = {}
if request.method == 'OPTIONS':
headers = {
'Access-Control-Allow-Methods': 'POST, GET, OPTIONS',
'Access-Control-Max-Age': 1000,
'Access-Control-Allow-Headers': 'origin, x-csrftoken, content-type, accept',
}
return '', 200, headers
elif request.method == 'POST':
data = request.get_json(force=True)
user_name = data['username']
password = data['password']
rfc = data['rfc_client']
regex_email = r"^[(a-z0-9\_\-\.)]+@[(a-z0-9\_\-\.)]+\.[(a-z)]{2,15}$"
regex_passwd = r"^[(A-Za-z0-9\_\-\.\$\#\&\*)(A-Za-z0-9\_\-\.\$\#\&\*)]+"
regex_rfc = r"^([A-ZÑ&]{3,4})?(?:-?)?(\d{2}(?:0[1-9]|1[0-2])(?:0[1-9]|[12]\d|3[01]))?(?:-?)?([A-Z\d]{2})([A\d])$"
match_email = re.match(regex_email, user_name, re.M | re.I)
match_passwd = re.match(regex_passwd, password, re.M | re.I)
match_rfc = re.match(regex_rfc, rfc, re.M | re.I)
if match_email and match_rfc and match_passwd:
password = <PASSWORD>
json_token = user_registration(user_name, password)
json_token = json.dumps(json_token)
return json_token
else:
return request_conflict()
else:
return not_found()
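# Illustrative token request (hypothetical host/port and credential values; the
# JSON field names follow get_authentication() above):
#   curl -X POST http://localhost:5000/api/ecommerce/authorization/ \
#        -H "Content-Type: application/json" \
#        -d '{"username": "user@example.com", "password": "S3cret#1", "rfc_client": "XAXX010101000"}'
# On success the handler returns the JSON token produced by user_registration().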
@app.errorhandler(404)
def not_found(error=None):
message = {
'error_code': 404,
'error_message': 'Page Not Found: ' + request.url,
}
resp = jsonify(message)
resp.status_code = 404
return resp
@app.errorhandler(500)
def server_error(error=None):
message = {
'error_code': 500,
'error_message': 'Server Error: ' + request.url,
}
resp = jsonify(message)
resp.status_code = 500
return resp
@app.errorhandler(401)
def request_unauthorized(error=None):
message = {
'error_code': 401,
'error_message': 'Request Unauthorized: ' + request.url,
}
resp = jsonify(message)
    resp.status_code = 401
    return resp
enumerate with --experimental and --since 2.42.0'
'--summary',
['enumerate', '--experimental', '--since', '2.42.0', '--summary'],
{'stdout': ['3 CIMClass(s) returned'],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --experimental and --since 2.45.0.',
['enumerate', '--experimental', '--since', '2.45.0', '--names-only'],
{'stdout': [],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --experimental and --since 2.45.x.',
['enumerate', '--experimental', '--since', '2.45.x', '--names-only'],
{'stderr': ['--since option value invalid. ',
'Must contain 3 integer elements',
'2.45.x'],
'rc': 1,
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --indication and --since 2.45.',
['enumerate', '--experimental', '--since', '2.45', '--names-only'],
{'stderr': ['Version value must contain 3 integer elements (int.int.int)',
'2.45'],
'rc': 1,
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --schema "TST".',
['enumerate', '--schema', 'TST', '--names-only'],
{'stdout': ['TST_FamilyCollection', 'TST_Indication',
'TST_IndicationDeprecated', 'TST_IndicationExperimental',
'TST_Lineage', 'TST_MemberOfFamilyCollection',
'TST_MemberOfFamilyCollectionDep',
'TST_MemberOfFamilyCollectionExp', 'TST_Person', ],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --schema "BLA".',
['enumerate', '--schema', 'BLA', '--names-only'],
{'stdout': ['BLA_Person', ],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --schema "EXP".',
['enumerate', '--schema', 'EXP', '--names-only'],
{'stdout': ['EXP_TestExperimental1', 'EXP_TestExperimental2',
'EXP_TestExperimental3', 'EXP_TestExperimental4'],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --schema "EXP" --summary',
['enumerate', '--schema', 'EXP', '--summary'],
{'stdout': ['4 CIMClass(s) returned'],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --schema "EXP" and --experimental.',
['enumerate', '--schema', 'EXP', '--experimental', '--names-only'],
{'stdout': ['EXP_TestExperimental1', 'EXP_TestExperimental2',
'EXP_TestExperimental3', 'EXP_TestExperimental4'],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --schema "EXP" and --experimental.',
['enumerate', '--schema', 'EXP', '--experimental', '--summary'],
{'stdout': ['4 CIMClass(s) returned'],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --schema "EXP",--experimental, '
'--summary.',
['enumerate', '--schema', 'EXP', '--experimental', '--summary'],
{'stdout': ['4 CIMClass(s) returned'],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --schema "EXP" , --no-experimental.',
['enumerate', '--schema', 'EXP', '--no-experimental', '--names-only'],
{'stdout': [],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --schema "EXP" , --no-experimental '
'--summary',
['enumerate', '--schema', 'EXP', '--no-experimental', '--summary'],
{'stdout': ['0 objects returned'],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --schema "NOT_EXIST".',
['enumerate', '--schema', 'NOT_EXIST', '--names-only'],
{'stdout': [],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --subclass-of TST_Person.',
['enumerate', '--subclass-of', 'TST_Person', '--di', '--names-only'],
{'stdout': ['TST_PersonClsDep', 'TST_PersonDep',
'TST_PersonExp', 'TST_PersonExpProperty',
'TST_PersonPropDep', 'TST_PersonSub'],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --subclass-of TST_Person --summary.',
['enumerate', '--subclass-of', 'TST_Person', '--di', '--summary'],
{'stdout': ['6 CIMClass(s) returned'],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --subclass-of TST_Person '
'-- association--summary .',
['enumerate', '--association', '--subclass-of', 'TST_Person', '--di',
'--summary'],
{'stdout': ['0 objects returned'],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --subclass-of TST_PersonDep.',
['enumerate', '--subclass-of', 'TST_PersonDep', '--di', '--names-only'],
{'stdout': [],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --subclass-of TST_PersonDep '
'--summary.',
['enumerate', '--subclass-of', 'TST_PersonDep', '--di', '--summary'],
{'stdout': ['0 objects returned'],
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify class command enumerate with --subclass-of NOT_EXIST excepts.',
['enumerate', '--subclass-of', 'NOT_EXIST', '--names-only'],
{'stderr': ['Classname NOT_EXIST for "subclass-of" not found'],
'rc': 1,
'test': 'innows'},
QUALIFIER_FILTER_MODEL, OK],
['Verify instance command enumerate CIM_Foo_sub2, w --verbose rtns msg.',
{'args': ['enumerate', 'CIM_Foo_sub2'],
'general': ['--verbose']},
{'stdout': 'No objects returned',
'test': 'innows'},
SIMPLE_MOCK_FILE, OK],
#
# Enumerate errors
#
['Verify class command enumerate nonexistent class name',
['enumerate', 'CIM_FClassDoesNotExist'],
{'stderr': ['CIMError', 'CIM_ERR_INVALID_CLASS'],
'rc': 1,
'test': 'regex'},
SIMPLE_MOCK_FILE, OK],
['Verify class command enumerate table output fails).',
{'args': ['enumerate'],
'general': ['--output-format', 'table']},
{'stderr': ['Output format "table"', 'not allowed', 'Only CIM formats:'],
'rc': 1,
'test': 'innows'},
SIMPLE_MOCK_FILE, OK],
#
# Test class get
#
['Verify class command get --help response',
['get', '--help'],
{'stdout': CLASS_GET_HELP_LINES,
'test': 'innows'},
None, OK],
['Verify class command get -h response',
['get', '-h'],
{'stdout': CLASS_GET_HELP_LINES,
'test': 'innows'},
None, OK],
# command get local-only option
['Verify class command get not local-only. Tests for property names',
['get', 'CIM_Foo_sub2'],
{'stdout': ['string cimfoo_sub2;', 'InstanceID', 'IntegerProp', 'Fuzzy',
'Key ( true )', 'IN ( false )'],
'test': 'in'},
SIMPLE_MOCK_FILE, OK],
['Verify class command get local-only(--lo)).',
['get', 'CIM_Foo_sub2', '--lo'],
{'stdout': ['class CIM_Foo_sub2 : CIM_Foo {',
'',
' string cimfoo_sub2;',
'',
'};', ''],
'test': 'patterns'},
SIMPLE_MOCK_FILE, OK],
['Verify class command get local-only. Tests whole response',
['get', 'CIM_Foo_sub2', '--local-only'],
{'stdout': ['class CIM_Foo_sub2 : CIM_Foo {',
'',
' string cimfoo_sub2;',
'',
'};', ''],
'test': 'patterns'},
SIMPLE_MOCK_FILE, OK],
# includequalifiers. Test the flag that excludes qualifiers
['Verify class command get without qualifiers. Tests whole response',
['get', 'CIM_Foo_sub2', '--nq'],
{'stdout': ['class CIM_Foo_sub2 : CIM_Foo {',
'',
' string cimfoo_sub2;',
'',
' string InstanceID;',
'',
' uint32 IntegerProp;',
'',
' string cimfoo_emb3;',
'',
' uint32 Fuzzy(',
' string TestInOutParameter,',
' CIM_FooRef1 REF TestRef,',
' string OutputParam,',
' uint32 OutputRtnValue);',
'',
' uint32 FuzzyStatic(',
' string TestInOutParameter,',
' CIM_Foo REF TestRef,',
' string OutputParam,',
' uint32 OutputRtnValue,',
' string cimfoo_emb1);',
'',
' string DeleteNothing();',
'',
'};',
''],
'test': 'lines'},
SIMPLE_MOCK_FILE, OK],
['Verify class command get without qualifiers. Tests whole response',
['get', 'CIM_Foo_sub2', '--no-qualifiers'],
{'stdout': ['class CIM_Foo_sub2 : CIM_Foo {',
'',
' string cimfoo_sub2;',
'',
' string InstanceID;',
'',
' uint32 IntegerProp;',
'',
' string cimfoo_emb3;',
'',
' uint32 Fuzzy(',
' string TestInOutParameter,',
' CIM_FooRef1 REF TestRef,',
' string OutputParam,',
' uint32 OutputRtnValue);',
'',
' uint32 FuzzyStatic(',
' string TestInOutParameter,',
' CIM_Foo REF TestRef,',
' string OutputParam,',
' uint32 OutputRtnValue,',
' string cimfoo_emb1);',
'',
' string DeleteNothing();',
'',
'};',
''],
'test': 'lines'},
SIMPLE_MOCK_FILE, OK],
# pylint: disable=line-too-long
['Verify class command get with propertylist. Tests whole response',
['get', 'CIM_Foo_sub2', '--pl', 'InstanceID'],
{'stdout': ['class CIM_Foo_sub2 : CIM_Foo {',
'',
' [Key ( true ),',
' Description ( "This is key property." )]',
' string InstanceID;',
'',
' [Description ( "Method with in and out parameters" )]',
' uint32 Fuzzy(',
' [IN ( true ),',
' OUT ( true ),',
' Description ( "Define data to be returned in output parameter" )]', # noqa: E501
' string TestInOutParameter,',
' [IN ( true ),',
' OUT ( true ),',
' Description ( "Test of ref in/out parameter" )]',
' CIM_FooRef1 REF TestRef,',
' [IN ( false ),',
' OUT ( true ),',
' Description ( "Rtns method name if exists on input" )]', # noqa: E501
' string OutputParam,',
' [IN ( true ),',
' Description ( "Defines return value if provided." )]', # noqa: E501
' uint32 OutputRtnValue);',
'',
' [Description ( "Static method with in and out parameters" ),', # noqa: E501
' Static ( true )]',
' uint32 FuzzyStatic(',
' [IN ( true ),',
' OUT ( true ),',
' Description ( "Define data to be returned in output parameter" )]', # noqa: E501
' string TestInOutParameter,',
' [IN ( true ),',
' OUT ( true ),',
' Description ( "Test of ref in/out parameter" )]',
' CIM_Foo REF TestRef,',
' [IN ( false ),',
' OUT ( true ),',
' Description ( "Rtns method name if exists on input" )]', # noqa: E501
' string OutputParam,',
' [IN ( true ),',
' Description ( "Defines return value if provided." )]', # noqa: E501
' uint32 OutputRtnValue,',
' [IN ( true ),',
' Description ( "Embedded instance parameter" ),',
' EmbeddedInstance ( "CIM_FooEmb1" )]',
' string cimfoo_emb1);',
'',
' [Description ( "Method with no parameters but embedded instance return" ),', # noqa: E501
' EmbeddedInstance ( "CIM_FooEmb2" )]',
' string DeleteNothing();',
'',
'};',
''],
'test': 'lines'},
SIMPLE_MOCK_FILE, OK],
['Verify class command get with empty propertylist. Tests whole '
'response',
['get', 'CIM_Foo_sub2', '--pl', '""'],
{'stdout': ['class CIM_Foo_sub2 : CIM_Foo {',
'',
' [Description ( "Method with in and out parameters" )]',
' uint32 Fuzzy(',
' [IN ( true ),',
' OUT ( true ),',
' Description ( "Define data to be returned in output parameter" )]', # noqa: E501
' string TestInOutParameter,',
' [IN ( true ),',
' OUT ( true ),',
' Description ( "Test of ref in/out parameter" )]',
' CIM_FooRef1 REF TestRef,',
' [IN ( false ),',
' OUT ( true ),',
' Description ( "Rtns method name if exists on input" )]', # noqa: E501
' string OutputParam,',
' [IN ( true ),',
' Description ( "Defines return value if provided." )]', # noqa: E501
' uint32 OutputRtnValue);',
'',
' [Description ( "Static method with in and out | |
"""
The client's core Flexx engine, implemented in PScript.
"""
from pscript import this_is_js, RawJS
from pscript.stubs import window, undefined, time, console, JSON
# This module gets transpiled to JavaScript as a whole
__pscript__ = True
class Flexx:
""" JavaScript Flexx module. This provides the connection between
the Python and JS (via a websocket).
"""
def __init__(self):
if window.flexx.init:
raise RuntimeError('Should not create global Flexx object more than once.')
# Init (overloadable) variables. These can be set by creating
# a window.flexx object *before* instantiating this class, or by
# setting them on this object before the init() is called.
self.is_notebook = False
self.is_exported = False
# Copy attributes from temporary object (e.g. is_notebook, require, ...)
for key in window.flexx.keys():
self[key] = window.flexx[key]
# We need a global main widget (shared between sessions)
self.need_main_widget = True # Used/set in ui/_widget.py
# Keep track of sessions
self._session_count = 0
self.sessions = {}
# Note: flexx.init() is not auto-called when Flexx is embedded
window.addEventListener('load', self.init, False)
window.addEventListener('unload', self.exit, False) # not beforeunload
def init(self):
""" Called after document is loaded. """
# Create div to put dynamic CSS assets in
self.asset_node = window.document.createElement("div")
self.asset_node.id = 'Flexx asset container'
window.document.body.appendChild(self.asset_node)
if self.is_exported:
if self.is_notebook:
print('Flexx: I am in an exported notebook!')
else:
print('Flexx: I am in an exported app!')
self.run_exported_app()
else:
print('Flexx: Initializing')
if not self.is_notebook:
self._remove_querystring()
self.init_logging()
def _remove_querystring(self):
# remove querystring ?session=x
try:
window.history.replaceState(window.history.state, '',
window.location.pathname)
except Exception:
pass # e.g. firefox-app/nw
def exit(self):
""" Called when runtime is about to quit. """
for session in self.sessions.values():
session.exit()
def spin(self, n=1):
RawJS("""
var el = window.document.getElementById('flexx-spinner');
if (el) {
if (n === null) { // Hide the spinner overlay, now or in a bit
if (el.children[0].innerHTML.indexOf('limited') > 0) {
setTimeout(function() { el.style.display = 'none'; }, 2000);
} else {
el.style.display = 'none';
}
} else {
for (var i=0; i<n; i++) { el.children[1].innerHTML += '■'; }
}
}
""")
def init_logging(self):
""" Setup logging so that messages are proxied to Python.
"""
if window.console.ori_log:
return # already initialized the loggers
# Keep originals
window.console.ori_log = window.console.log
window.console.ori_info = window.console.info or window.console.log
window.console.ori_warn = window.console.warn or window.console.log
window.console.ori_error = window.console.error or window.console.log
def log(msg):
window.console.ori_log(msg)
for session in self.sessions.values():
session.send_command("PRINT", str(msg))
def info(msg):
window.console.ori_info(msg)
for session in self.sessions.values():
session.send_command("INFO", str(msg))
def warn(msg):
window.console.ori_warn(msg)
for session in self.sessions.values():
session.send_command("WARN", str(msg))
def error(msg):
evt = dict(message=str(msg), error=msg, preventDefault=lambda: None)
on_error(evt)
def on_error(evt):
self._handle_error(evt)
on_error = on_error.bind(self)
# Set new versions
window.console.log = log
window.console.info = info
window.console.warn = warn
window.console.error = error
# Create error handler, so that JS errors get into Python
window.addEventListener('error', on_error, False)
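        # Illustrative effect (assumed usage): after this setup a call such as
        #   window.console.warn('low disk space')
        # still prints in the browser console via the saved original, and is also
        # forwarded to every live session as a ("WARN", ...) command.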
def create_session(self, app_name, session_id, ws_url):
# The call to this method is embedded by get_page(),
# or injected by init_notebook().
# Can be called before init() is called.
if window.performance and window.performance.navigation.type == 2:
# Force reload when we got here with back-button, otherwise
# an old session-id is used, see issue #530
window.location.reload()
elif self._validate_browser_capabilities():
s = JsSession(app_name, session_id, ws_url)
self._session_count += 1
self['s' + self._session_count] = s
self.sessions[session_id] = s
def _validate_browser_capabilities(self):
# We test a handful of features here, and assume that if these work,
# all of Flexx works. It is not a hard guarantee, of course, because
# the user can use modern features in an application.
RawJS("""
var el = window.document.getElementById('flexx-spinner');
if ( window.WebSocket === undefined || // IE10+
Object.keys === undefined || // IE9+
false
) {
var msg = ('Flexx does not support this browser.<br>' +
'Try Firefox, Chrome, ' +
'or a more recent version of the current browser.');
if (el) { el.children[0].innerHTML = msg; }
else { window.alert(msg); }
return false;
} else if (''.startsWith === undefined) { // probably IE
var msg = ('Flexx support for this browser is limited.<br>' +
'Consider using Firefox, Chrome, or maybe Edge.');
if (el) { el.children[0].innerHTML = msg; }
return true;
} else {
return true;
}
""")
def _handle_error(self, evt):
msg = short_msg = evt.message
if not window.evt:
window.evt = evt
if evt.error and evt.error.stack: # evt.error can be None for syntax err
stack = evt.error.stack.splitlines()
# Some replacements
session_needle = '?session_id=' + self.id
for i in range(len(stack)):
stack[i] = stack[i].replace('@', ' @ ').replace(session_needle, '')
# Strip items from the start
for x in [evt.message, '_pyfunc_op_error']:
if x in stack[0]:
stack.pop(0)
# Truncate the stack
for i in range(len(stack)):
for x in ['_process_actions', '_process_reactions', '_process_calls']:
if ('Loop.' + x) in stack[i]:
stack = stack[:i]
break
# Pop items from in between
for i in reversed(range(len(stack))):
for x in ['flx_action ']:
if stack[i] and stack[i].count(x):
stack.pop(i)
# Combine and tweak the message some more
msg += '\n' + '\n'.join(stack)
elif evt.message and evt.lineno: # message, url, linenumber
msg += "\nIn %s:%i" % (evt.filename, evt.lineno)
# Handle error
evt.preventDefault() # Don't do the standard error
window.console.ori_error(msg)
for session in self.sessions.values():
session.send_command("ERROR", short_msg)
class JsSession:
def __init__(self, app_name, id, ws_url=None):
self.app = None # the root component (can be a PyComponent)
self.app_name = app_name
self.id = id
self.status = 1
self.ws_url = ws_url
self._component_counter = 0
self._disposed_ob = {'_disposed': True}
# Maybe this is JLab
if not self.id:
jconfig = window.document.getElementById('jupyter-config-data')
if jconfig:
try:
config = JSON.parse(jconfig.innerText)
self.id = config.flexx_session_id
self.app_name = config.flexx_app_name
except Exception as err:
print(err)
# Init internal variables
self._init_time = time()
self._pending_commands = [] # to pend raw commands during init
self._asset_count = 0
self._ws = None
self.last_msg = None
# self.classes = {}
self.instances = {}
self.instances_to_check_size = {}
if not window.flexx.is_exported:
self.init_socket()
# Initiate service to track resize
# Note that only toplevel widgets are tracked, and only once per sec
window.addEventListener('resize', self._check_size_of_objects, False)
window.setInterval(self._check_size_of_objects, 1000)
def exit(self):
if self._ws: # is not null or undefined
self._ws.close()
self._ws = None
self.status = 0
# flexx.instances.sessions.pop(self) might be good,
# but perhaps not that much need, and leaving is nice for debugging.
def send_command(self, *command):
if self._ws is not None:
try:
bb = serializer.encode(command)
except Exception as err:
print('Command that failed to encode:')
print(command)
raise err
self._ws.send(bb)
def instantiate_component(self, module, cname, id, args, kwargs, active_components):
# Maybe we still have the instance?
c = self.instances.get(id, None)
if c is not None and c._disposed is False:
return c
# Find the class
m = window.flexx.require(module)
Cls = m[cname] # noqa
# Instantiate. If given, replicate the active components by which the
# JsComponent was instantiated in Python.
kwargs['flx_session'] = self
kwargs['flx_id'] = id
active_components = active_components or []
for ac in active_components:
ac.__enter__()
try:
c = Cls(*args, **kwargs)
finally:
for ac in reversed(active_components):
ac.__exit__()
return c
def _register_component(self, c, id=None):
if self.app is None:
self.app = c # Set our root component; is the first to register
if id is None:
self._component_counter += 1
id = c.__name__ + '_' + str(self._component_counter) + 'js'
c._id = id
c._uid = self.id + '_' + id
self.instances[c._id] = c
def _unregister_component(self, c):
self.instances_to_check_size.pop(c.id, None)
pass # c gets popped from self.instances by DISPOSE_ACK command
def get_component_instance(self, id):
""" Get instance of a Component class, or None. Or the document body
if "body" is given.
"""
if id == 'body':
return window.document.body
else:
return self.instances.get(id, None)
def init_socket(self):
""" Make the connection to Python.
"""
# Check WebSocket support
WebSocket = window.WebSocket
if (WebSocket is undefined):
window.document.body.textContent = 'Browser does not support WebSockets'
raise RuntimeError("FAIL: need websocket")
# Construct ws url
if not self.ws_url:
proto = 'ws'
if window.location.protocol == 'https:':
proto = 'wss'
address = window.location.hostname
if window.location.port:
address += ':' + window.location.port
self.ws_url = '%s://%s/flexx/ws/%s' % (proto, address, self.app_name)
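            # Illustrative result (hypothetical host): an app named 'myapp'
            # served over https at example.org:8080 yields
            # 'wss://example.org:8080/flexx/ws/myapp'.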
# Resolve public hostname
self.ws_url = self.ws_url.replace('0.0.0.0', window.location.hostname)
# Open web socket in binary mode
self._ws = ws = WebSocket(self.ws_url)
ws.binaryType = "arraybuffer"
self.status = 2
def on_ws_open(evt):
window.console.info('Socket opened with session id ' + self.id)
self.send_command('HI_FLEXX', self.id)
def on_ws_message(evt):
msg = evt.data # bsdf-encoded command
if not msg:
                pass
HARKobject
from HARK.utilities import warnings # Because of "patch" to warnings modules
from HARK.interpolation import CubicInterp, LowerEnvelope, LinearInterp
from HARK.simulation import drawDiscrete, drawBernoulli, drawLognormal, drawUniform
from HARK.utilities import approxMeanOneLognormal, addDiscreteOutcomeConstantMean,\
combineIndepDstns, makeGridExpMult, CRRAutility, CRRAutilityP, \
CRRAutilityPP, CRRAutilityP_inv, CRRAutility_invP, CRRAutility_inv, \
CRRAutilityP_invP
utility = CRRAutility
utilityP = CRRAutilityP
utilityPP = CRRAutilityPP
utilityP_inv = CRRAutilityP_inv
utility_invP = CRRAutility_invP
utility_inv = CRRAutility_inv
utilityP_invP = CRRAutilityP_invP
# =====================================================================
# === Classes that help solve consumption-saving models ===
# =====================================================================
class ConsumerSolution(Solution):
'''
A class representing the solution of a single period of a consumption-saving
problem. The solution must include a consumption function and marginal
value function.
Here and elsewhere in the code, Nrm indicates that variables are normalized
by permanent income.
'''
distance_criteria = ['vPfunc']
def __init__(self, cFunc=None, vFunc=None,
vPfunc=None, vPPfunc=None,
mNrmMin=None, hNrm=None, MPCmin=None, MPCmax=None):
'''
The constructor for a new ConsumerSolution object.
Parameters
----------
cFunc : function
The consumption function for this period, defined over market
resources: c = cFunc(m).
vFunc : function
The beginning-of-period value function for this period, defined over
market resources: v = vFunc(m).
vPfunc : function
The beginning-of-period marginal value function for this period,
defined over market resources: vP = vPfunc(m).
vPPfunc : function
The beginning-of-period marginal marginal value function for this
period, defined over market resources: vPP = vPPfunc(m).
mNrmMin : float
The minimum allowable market resources for this period; the consump-
tion function (etc) are undefined for m < mNrmMin.
hNrm : float
Human wealth after receiving income this period: PDV of all future
income, ignoring mortality.
MPCmin : float
Infimum of the marginal propensity to consume this period.
MPC --> MPCmin as m --> infinity.
MPCmax : float
Supremum of the marginal propensity to consume this period.
MPC --> MPCmax as m --> mNrmMin.
Returns
-------
None
'''
# Change any missing function inputs to NullFunc
if cFunc is None:
cFunc = NullFunc()
if vFunc is None:
vFunc = NullFunc()
if vPfunc is None:
vPfunc = NullFunc()
if vPPfunc is None:
vPPfunc = NullFunc()
self.cFunc = cFunc
self.vFunc = vFunc
self.vPfunc = vPfunc
self.vPPfunc = vPPfunc
self.mNrmMin = mNrmMin
self.hNrm = hNrm
self.MPCmin = MPCmin
self.MPCmax = MPCmax
def appendSolution(self,new_solution):
'''
Appends one solution to another to create a ConsumerSolution whose
attributes are lists. Used in ConsMarkovModel, where we append solutions
*conditional* on a particular value of a Markov state to each other in
order to get the entire solution.
Parameters
----------
new_solution : ConsumerSolution
The solution to a consumption-saving problem; each attribute is a
list representing state-conditional values or functions.
Returns
-------
None
'''
if type(self.cFunc)!=list:
# Then we assume that self is an empty initialized solution instance.
# Begin by checking this is so.
assert NullFunc().distance(self.cFunc) == 0, 'appendSolution called incorrectly!'
# We will need the attributes of the solution instance to be lists. Do that here.
self.cFunc = [new_solution.cFunc]
self.vFunc = [new_solution.vFunc]
self.vPfunc = [new_solution.vPfunc]
self.vPPfunc = [new_solution.vPPfunc]
self.mNrmMin = [new_solution.mNrmMin]
else:
self.cFunc.append(new_solution.cFunc)
self.vFunc.append(new_solution.vFunc)
self.vPfunc.append(new_solution.vPfunc)
self.vPPfunc.append(new_solution.vPPfunc)
self.mNrmMin.append(new_solution.mNrmMin)
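# Illustrative use (hypothetical state-conditional solutions sol_A and sol_B):
#   sol = ConsumerSolution()
#   sol.appendSolution(sol_A)   # attributes become one-element lists
#   sol.appendSolution(sol_B)   # sol.cFunc == [sol_A.cFunc, sol_B.cFunc], etc.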
class ValueFunc(HARKobject):
'''
A class for representing a value function. The underlying interpolation is
in the space of (m,u_inv(v)); this class "re-curves" to the value function.
'''
distance_criteria = ['func','CRRA']
def __init__(self,vFuncNvrs,CRRA):
'''
Constructor for a new value function object.
Parameters
----------
vFuncNvrs : function
A real function representing the value function composed with the
inverse utility function, defined on market resources: u_inv(vFunc(m))
CRRA : float
Coefficient of relative risk aversion.
Returns
-------
None
'''
self.func = deepcopy(vFuncNvrs)
self.CRRA = CRRA
def __call__(self,m):
'''
Evaluate the value function at given levels of market resources m.
Parameters
----------
m : float or np.array
Market resources (normalized by permanent income) whose value is to
be found.
Returns
-------
v : float or np.array
Lifetime value of beginning this period with market resources m; has
same size as input m.
'''
return utility(self.func(m),gam=self.CRRA)
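# Illustrative use (hypothetical CRRA value rho): ValueFunc stores the
# "pseudo-inverse" value function vFuncNvrs = u_inv(vFunc) and re-curves it on
# evaluation, i.e.
#   v = ValueFunc(vFuncNvrs, rho)
#   v(m) == utility(vFuncNvrs(m), gam=rho) == vFuncNvrs(m)**(1.0 - rho) / (1.0 - rho)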
class MargValueFunc(HARKobject):
'''
A class for representing a marginal value function in models where the
standard envelope condition of v'(m) = u'(c(m)) holds (with CRRA utility).
'''
distance_criteria = ['cFunc','CRRA']
def __init__(self,cFunc,CRRA):
'''
Constructor for a new marginal value function object.
Parameters
----------
cFunc : function
A real function representing the marginal value function composed
with the inverse marginal utility function, defined on market
resources: uP_inv(vPfunc(m)). Called cFunc because when standard
envelope condition applies, uP_inv(vPfunc(m)) = cFunc(m).
CRRA : float
Coefficient of relative risk aversion.
Returns
-------
None
'''
self.cFunc = deepcopy(cFunc)
self.CRRA = CRRA
def __call__(self,m):
'''
Evaluate the marginal value function at given levels of market resources m.
Parameters
----------
m : float or np.array
Market resources (normalized by permanent income) whose marginal
value is to be found.
Returns
-------
vP : float or np.array
Marginal lifetime value of beginning this period with market
resources m; has same size as input m.
'''
return utilityP(self.cFunc(m),gam=self.CRRA)
def derivative(self,m):
'''
Evaluate the derivative of the marginal value function at given levels
of market resources m; this is the marginal marginal value function.
Parameters
----------
m : float or np.array
Market resources (normalized by permanent income) whose marginal
marginal value is to be found.
Returns
-------
vPP : float or np.array
Marginal marginal lifetime value of beginning this period with market
resources m; has same size as input m.
'''
c, MPC = self.cFunc.eval_with_derivative(m)
return MPC*utilityPP(c,gam=self.CRRA)
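# Sketch of the envelope condition exploited above (CRRA utility assumed):
#   v'(m)  = u'(c(m))            = c(m)**(-CRRA)
#   v''(m) = MPC(m) * u''(c(m))  = -CRRA * MPC(m) * c(m)**(-CRRA - 1.0)
# where MPC(m) = c'(m) comes from cFunc.eval_with_derivative(m).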
class MargMargValueFunc(HARKobject):
'''
A class for representing a marginal marginal value function in models where
the standard envelope condition of v'(m) = u'(c(m)) holds (with CRRA utility).
'''
distance_criteria = ['cFunc','CRRA']
def __init__(self,cFunc,CRRA):
'''
Constructor for a new marginal marginal value function object.
Parameters
----------
cFunc : function
A real function representing the marginal value function composed
with the inverse marginal utility function, defined on market
resources: uP_inv(vPfunc(m)). Called cFunc because when standard
envelope condition applies, uP_inv(vPfunc(m)) = cFunc(m).
CRRA : float
Coefficient of relative risk aversion.
Returns
-------
None
'''
self.cFunc = deepcopy(cFunc)
self.CRRA = CRRA
def __call__(self,m):
'''
Evaluate the marginal marginal value function at given levels of market
resources m.
Parameters
----------
m : float or np.array
Market resources (normalized by permanent income) whose marginal
marginal value is to be found.
Returns
-------
vPP : float or np.array
Marginal marginal lifetime value of beginning this period with market
resources m; has same size as input m.
'''
c, MPC = self.cFunc.eval_with_derivative(m)
return MPC*utilityPP(c,gam=self.CRRA)
# =====================================================================
# === Classes and functions that solve consumption-saving models ===
# =====================================================================
class ConsPerfForesightSolver(object):
'''
A class for solving a one period perfect foresight consumption-saving problem.
An instance of this class is created by the function solvePerfForesight in each period.
'''
def __init__(self,solution_next,DiscFac,LivPrb,CRRA,Rfree,PermGroFac):
'''
Constructor for a new ConsPerfForesightSolver.
Parameters
----------
solution_next : ConsumerSolution
The solution to next period's one-period problem.
DiscFac : float
Intertemporal discount factor for future utility.
LivPrb : float
Survival probability; likelihood of being alive at the beginning of
the next period.
CRRA : float
Coefficient of relative risk aversion.
Rfree : float
Risk free interest factor on end-of-period assets.
PermGroFac : float
Expected permanent income growth factor at the end of this period.
Returns:
----------
None
'''
# We ask that HARK users define single-letter variables they use in a dictionary
# attribute called notation.
# Do that first.
self.notation = {'a': 'assets after all actions',
'm': 'market resources at decision time',
'c': 'consumption'}
self.assignParameters(solution_next,DiscFac,LivPrb,CRRA,Rfree,PermGroFac)
def assignParameters(self,solution_next,DiscFac,LivPrb,CRRA,Rfree,PermGroFac):
'''
Saves necessary parameters as attributes of self for use by other methods.
Parameters
----------
solution_next : ConsumerSolution
The solution to next period's one period problem.
DiscFac : float
Intertemporal discount factor for future utility.
LivPrb : float
Survival probability; likelihood of being alive at the beginning of
the succeeding period.
CRRA : float
Coefficient of relative risk aversion.
Rfree : float
Risk free interest factor on end-of-period assets.
PermGroFac : float
Expected permanent income growth factor at the end of this period.
Returns
-------
none
'''
self.solution_next = solution_next
self.DiscFac = DiscFac
self.LivPrb = LivPrb
self.CRRA = CRRA
self.Rfree = Rfree
self.PermGroFac = PermGroFac
def defUtilityFuncs(self):
'''
        Defines CRRA utility function for this period (and its derivatives).
of this function (CosmoTransitions v1.0.2 and
earlier), the start and end points of the
path were effectively held fixed during the main deformation. This was
because the line ``phi_lin = phi[:1] + ...`` was calculated *before* the
line ``phi = phi+F*stepsize``. Since the spline basis functions are
zero at the start and end points (the spline is added on top of the
straight line between the end points), when the points were later taken
from the spline the end points wouldn't move. This was by design, since
for thin-walled bubbles the endpoints should stay fixed at the two
vacua. However, this caused problems for thick-walled bubbles where the
end points should move.
To get around this, prior versions added an extra block of code to move
the end points before the main deformation. However, this was
unnecessarily complicated and led to error-prone code. In this version,
the end points are always allowed to move if the force `F` is non-zero.
In the thin-walled case, the force should be almost exactly zero at
the end points anyways (there is zero potential gradient and `dphidr` is
zero), so they should stay fixed on their own.
"""
# Find out the direction of the deformation.
F,dV = self.forces()
F_max = np.max(np.sqrt(np.sum(F*F,-1)))
dV_max = np.max(np.sqrt(np.sum(dV*dV,-1)))
fRatio1 = F_max / dV_max
# Rescale the normal force so that it's relative to L:
F *= self._L / dV_max
# Now, see how big the stepsize should be
stepsize = lastStep
phi = self.phi
assert(maxstep > minstep)
step_reversed = False
if reverseCheck < 1 and self._F_prev is not None:
FdotFlast = np.sum(F*self._F_prev, axis=1)
if np.sum(FdotFlast < 0) > len(FdotFlast)*reverseCheck:
# we want to reverse the last step
if stepsize > minstep:
step_reversed = True
phi = self._phi_prev
F = self._F_prev
if verbose: print("step reversed")
stepsize = lastStep/stepDecrease
else:
""" No (large number of) indices reversed, just do a regular
step. Increase the stepsize a bit over the last one."""
stepsize = lastStep * stepIncrease
if stepsize > maxstep: stepsize = maxstep
if stepsize < minstep: stepsize = minstep
# Save the state before the step
self._phi_prev = phi
self._F_prev = F
if self.save_all_steps:
self.phi_list.append(phi)
self.F_list.append(F)
"""Now make the step. It's important to not use += so that this doesn't
change the value stored in self.phi_list."""
phi = phi+F*stepsize
# fit to the spline
phi_lin = phi[:1] + (phi[-1:]-phi[:1])*self._t
phi -= phi_lin
self._beta, residues, rank, s = np.linalg.lstsq(self._X, phi)
phi = np.sum(self._beta[np.newaxis,:,:]*self._X[:,:,np.newaxis], axis=1)
phi += phi_lin
self.phi = phi
Ffit = (phi-self._phi_prev)/stepsize
fRatio2 = np.max(np.sqrt(np.sum(Ffit*Ffit,-1)))/self._L
if verbose:
print("step: %i; stepsize: %0.2e; fRatio1 %0.2e; fRatio2: %0.2e"
% (self.num_steps, stepsize, fRatio1, fRatio2))
fRatio = fRatio2 if checkAfterFit else fRatio1
return self._step_rval(stepsize, step_reversed, fRatio)
def deformPath(self, startstep=2e-3,
fRatioConv=.02, converge_0=5., fRatioIncrease=5.,
maxiter=500, verbose=True, callback=None, step_params={}):
"""
        Deform the path in many individual steps, stopping either when the
        convergence criterion is reached, when the maximum number of iterations
is reached, or when the path appears to be running away from
convergence.
Parameters
----------
startstep : float, optional
Starting stepsize used in :func:`step`.
fRatioConv : float, optional
The routine will stop when the maximum normal force on the path
divided by the maximum potential gradient is less than this.
converge_0 : float, optional
On the first step, use a different convergence criterion. Check if
``fRatio < converge_0 * fRatioConv``.
fRatioIncrease : float, optional
The maximum fractional amount that fRatio can increase before
raising an error.
maxiter : int, optional
Maximum number of steps to take (ignoring reversed steps).
verbose : bool, optional
If True, print the ending condition.
callback : callable, optional
Called after each step. Should accept an instance of this class as a
parameter, and return False if deformation should stop.
step_params : dict, optional
Parameters to pass to :func:`step`.
Returns
-------
deformation_converged : bool
True if the routine stopped because it converged (as determined by
`fRatioConv`), False otherwise.
"""
minfRatio = np.inf
minfRatio_index = 0
minfRatio_beta = None
minfRatio_phi = None
stepsize = startstep
deformation_converged = False
while True:
self.num_steps += 1
stepsize, step_reversed, fRatio = self.step(stepsize, **step_params)
if callback is not None and not callback(self):
break
minfRatio = min(minfRatio, fRatio)
if fRatio < fRatioConv or (self.num_steps == 1
and fRatio < converge_0*fRatioConv):
if verbose:
print("Path deformation converged. " +
"%i steps. fRatio = %0.5e" % (self.num_steps,fRatio))
deformation_converged = True
break
if minfRatio == fRatio:
minfRatio_beta = self._beta
minfRatio_index = self.num_steps
minfRatio_phi = self.phi
if fRatio > fRatioIncrease*minfRatio and not step_reversed:
self._beta = minfRatio_beta
self.phi = minfRatio_phi
self.phi_list = self.phi_list[:minfRatio_index]
self.F_list = self.F_list[:minfRatio_index]
err_msg = ("Deformation doesn't appear to be converging."
"Stopping at the point of best convergence.")
if verbose: print(err_msg)
raise DeformationError(err_msg)
if self.num_steps >= maxiter:
if verbose:
print("Maximum number of deformation iterations reached.")
break
return deformation_converged
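# A minimal, hypothetical usage sketch (added; not from CosmoTransitions itself).
# It illustrates the `callback` contract documented in deformPath above: the
# callback receives the deformation instance after each step and returns False
# to stop early. `dfm` stands in for an already-constructed deformation object.
#
#     def stop_after_200_steps(dfm):
#         return dfm.num_steps < 200
#
#     converged = dfm.deformPath(callback=stop_after_200_steps)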
class Deformation_Points:
"""
Deform a path in the presence of a potential such that the normal forces
along the path are zero.
Unlike :class:`Deformation_Spline`, this class changes the points
themselves rather than fitting a spline to the points. It is a more
straightforward implementation, and when run with comparable inputs (i.e.,
the number of basis splines is about the same as the number of points), this
method tends to be somewhat faster. The individual stepsizes here change
with the total number of points, whereas in the spline implementation they
mostly depend on the number of basis functions. However, as long as the path
is fairly smooth, the total number of splines in that class can probably be
smaller than the total number of points in this class, so this class will
tend to be somewhat slower.
The two implementations should converge upon the same answer when the
number of points and basis functions get large.
Parameters
----------
phi : array_like
The list of points that constitutes the original path. Should have
shape ``(n_points, n_dimensions)``.
dphidr : array_like
The 'speed' along the path at the initial points. This does not change
as the path deforms. Should have shape ``(n_points,)``. Gets saved into
the attribute ``self.v2`` as ``v2 = dphidr[:,np.newaxis]**2``.
dV : callable
The potential gradient as a function of phi. The output shape should be
the same as the input shape, which will be ``(..., n_dimensions)``.
fix_start, fix_end : bool, optional
If True, the force on the first/last point along the path is set to
zero, so the point will not change in the deformation step.
save_all_steps : bool, optional
If True, each step gets saved into ``self.phi_list`` and
``self.F_list``.
Attributes
----------
phi : array_like
Set during initialization, and then rewritten at each step.
num_steps : int
Total number of steps taken.
"""
def __init__(self, phi, dphidr, dV,
fix_start=False, fix_end=False, save_all_steps=False):
self.phi = np.asanyarray(phi) # shape (n,N)
self.v2 = np.asanyarray(dphidr)[:,np.newaxis]**2 # shape (n,1)
self.dV = dV
self.F_list = []
self.phi_list = []
self.save_all_steps = save_all_steps
self.fix_start, self.fix_end = fix_start, fix_end
self.num_steps = 0
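# A minimal construction sketch (added; `phi0`, `dphidr0`, and `dV_func` are
# hypothetical inputs with the shapes documented in the class docstring):
#
#     dfm = Deformation_Points(phi0, dphidr0, dV_func, save_all_steps=True)
#     F_norm, dV_vals = dfm.forces()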
_forces_rval = namedtuple("forces_rval", "F_norm dV")
def forces(self, phi=None):
"""
Calculate the normal force and potential gradient on the path.
Returns
-------
F_norm, dV : array_like
"""
if phi is None: phi = self.phi
# Let `t` be some variable that parametrizes the points such that
# t_i = i. Calculate the derivs of phi w/ respect to t.
dphi = helper_functions.deriv14_const_dx(phi.T).T
d2phi = helper_functions.deriv23_const_dx(phi.T).T
# Let `x` be some variable that parametrizes the path such that
# |dphi/dx| = 1. Calculate the derivs.
dphi_abssq = np.sum(dphi*dphi, axis=-1)[:,np.newaxis]
dphi /= np.sqrt(dphi_abssq) # This is now dphi/dx
d2phi /= dphi_abssq # = d2phi/dx2 + (dphi/dx)(d2phi/dt2)/(dphi/dt)^2
d2phi -= np.sum(d2phi*dphi, axis=-1)[:,np.newaxis] * dphi # = d2phi/dx2
# Calculate the total force.
dV = self.dV(phi)
dV_perp = dV - np.sum(dV*dphi, axis=-1)[:,np.newaxis] * dphi
F_norm = d2phi*self.v2 - dV_perp
if (self.fix_start):
F_norm[0] = 0.0
if (self.fix_end):
F_norm[-1] = 0.0
return self._forces_rval(F_norm, dV)
_step_rval = namedtuple("step_rval", "stepsize fRatio")
def step(self, stepsize, minstep, diff_check=0.1, step_decrease=2.):
"""
Take two half-steps in the direction
# Method 'Session' returns object of type '_NameSpace'
"Session": (61451, 2, (9, 0), (), "Session", '{00063002-0000-0000-C000-000000000046}'),
"Size": (3592, 2, (3, 0), (), "Size", None),
"Subject": (55, 2, (8, 0), (), "Subject", None),
"UnRead": (61468, 2, (11, 0), (), "UnRead", None),
# Method 'UserProperties' returns object of type 'UserProperties'
"UserProperties": (63510, 2, (9, 0), (), "UserProperties", '{0006303D-0000-0000-C000-000000000046}'),
}
_prop_map_put_ = {
"BillingInformation": ((34101, LCID, 4, 0),()),
"Body": ((37120, LCID, 4, 0),()),
"Categories": ((36865, LCID, 4, 0),()),
"Companies": ((34107, LCID, 4, 0),()),
"DLName": ((32851, LCID, 4, 0),()),
"Importance": ((23, LCID, 4, 0),()),
"Members": ((32853, LCID, 4, 0),()),
"MessageClass": ((26, LCID, 4, 0),()),
"Mileage": ((34100, LCID, 4, 0),()),
"NoAging": ((34062, LCID, 4, 0),()),
"OneOffMembers": ((32852, LCID, 4, 0),()),
"Sensitivity": ((54, LCID, 4, 0),()),
"Subject": ((55, LCID, 4, 0),()),
"UnRead": ((61468, LCID, 4, 0),()),
}
class _DocumentItem(DispatchBaseClass):
CLSID = IID('{00063020-0000-0000-C000-000000000046}')
coclass_clsid = IID('{00061061-0000-0000-C000-000000000046}')
def Close(self, SaveMode=defaultNamedNotOptArg):
return self._oleobj_.InvokeTypes(61475, LCID, 1, (24, 0), ((3, 1),),SaveMode
)
def Copy(self):
ret = self._oleobj_.InvokeTypes(61490, LCID, 1, (9, 0), (),)
if ret is not None:
ret = Dispatch(ret, 'Copy', None)
return ret
def Delete(self):
return self._oleobj_.InvokeTypes(61514, LCID, 1, (24, 0), (),)
def Display(self, Modal=defaultNamedOptArg):
return self._oleobj_.InvokeTypes(61606, LCID, 1, (24, 0), ((12, 17),),Modal
)
def Move(self, DestFldr=defaultNamedNotOptArg):
ret = self._oleobj_.InvokeTypes(61492, LCID, 1, (9, 0), ((9, 1),),DestFldr
)
if ret is not None:
ret = Dispatch(ret, 'Move', None)
return ret
def PrintOut(self):
return self._oleobj_.InvokeTypes(61491, LCID, 1, (24, 0), (),)
def Save(self):
return self._oleobj_.InvokeTypes(61512, LCID, 1, (24, 0), (),)
def SaveAs(self, Path=defaultNamedNotOptArg, Type=defaultNamedOptArg):
return self._oleobj_.InvokeTypes(61521, LCID, 1, (24, 0), ((8, 1), (12, 17)),Path
, Type)
_prop_map_get_ = {
# Method 'Actions' returns object of type 'Actions'
"Actions": (63511, 2, (9, 0), (), "Actions", '{0006303E-0000-0000-C000-000000000046}'),
# Method 'Application' returns object of type '_Application'
"Application": (61440, 2, (9, 0), (), "Application", '{00063001-0000-0000-C000-000000000046}'),
# Method 'Attachments' returns object of type 'Attachments'
"Attachments": (63509, 2, (9, 0), (), "Attachments", '{0006303C-0000-0000-C000-000000000046}'),
"BillingInformation": (34101, 2, (8, 0), (), "BillingInformation", None),
"Body": (37120, 2, (8, 0), (), "Body", None),
"Categories": (36865, 2, (8, 0), (), "Categories", None),
"Class": (61450, 2, (3, 0), (), "Class", None),
"Companies": (34107, 2, (8, 0), (), "Companies", None),
"ConversationIndex": (113, 2, (8, 0), (), "ConversationIndex", None),
"ConversationTopic": (112, 2, (8, 0), (), "ConversationTopic", None),
"CreationTime": (12295, 2, (7, 0), (), "CreationTime", None),
"EntryID": (61470, 2, (8, 0), (), "EntryID", None),
# Method 'FormDescription' returns object of type 'FormDescription'
"FormDescription": (61589, 2, (9, 0), (), "FormDescription", '{00063046-0000-0000-C000-000000000046}'),
# Method 'GetInspector' returns object of type '_Inspector'
"GetInspector": (61502, 2, (9, 0), (), "GetInspector", '{00063005-0000-0000-C000-000000000046}'),
"Importance": (23, 2, (3, 0), (), "Importance", None),
"LastModificationTime": (12296, 2, (7, 0), (), "LastModificationTime", None),
# Method 'Links' returns object of type 'Links'
"Links": (62469, 2, (9, 0), (), "Links", '{0006308A-0000-0000-C000-000000000046}'),
"MAPIOBJECT": (61696, 2, (13, 0), (), "MAPIOBJECT", None),
"MessageClass": (26, 2, (8, 0), (), "MessageClass", None),
"Mileage": (34100, 2, (8, 0), (), "Mileage", None),
"NoAging": (34062, 2, (11, 0), (), "NoAging", None),
"OutlookInternalVersion": (34130, 2, (3, 0), (), "OutlookInternalVersion", None),
"OutlookVersion": (34132, 2, (8, 0), (), "OutlookVersion", None),
"Parent": (61441, 2, (9, 0), (), "Parent", None),
"Saved": (61603, 2, (11, 0), (), "Saved", None),
"Sensitivity": (54, 2, (3, 0), (), "Sensitivity", None),
# Method 'Session' returns object of type '_NameSpace'
"Session": (61451, 2, (9, 0), (), "Session", '{00063002-0000-0000-C000-000000000046}'),
"Size": (3592, 2, (3, 0), (), "Size", None),
"Subject": (55, 2, (8, 0), (), "Subject", None),
"UnRead": (61468, 2, (11, 0), (), "UnRead", None),
# Method 'UserProperties' returns object of type 'UserProperties'
"UserProperties": (63510, 2, (9, 0), (), "UserProperties", '{0006303D-0000-0000-C000-000000000046}'),
}
_prop_map_put_ = {
"BillingInformation": ((34101, LCID, 4, 0),()),
"Body": ((37120, LCID, 4, 0),()),
"Categories": ((36865, LCID, 4, 0),()),
"Companies": ((34107, LCID, 4, 0),()),
"Importance": ((23, LCID, 4, 0),()),
"MessageClass": ((26, LCID, 4, 0),()),
"Mileage": ((34100, LCID, 4, 0),()),
"NoAging": ((34062, LCID, 4, 0),()),
"Sensitivity": ((54, LCID, 4, 0),()),
"Subject": ((55, LCID, 4, 0),()),
"UnRead": ((61468, LCID, 4, 0),()),
}
class _Explorer(DispatchBaseClass):
CLSID = IID('{00063003-0000-0000-C000-000000000046}')
coclass_clsid = IID('{00063050-0000-0000-C000-000000000046}')
def Activate(self):
return self._oleobj_.InvokeTypes(8467, LCID, 1, (24, 0), (),)
def Close(self):
return self._oleobj_.InvokeTypes(8451, LCID, 1, (24, 0), (),)
def Display(self):
return self._oleobj_.InvokeTypes(8452, LCID, 1, (24, 0), (),)
def IsPaneVisible(self, Pane=defaultNamedNotOptArg):
return self._oleobj_.InvokeTypes(8707, LCID, 1, (11, 0), ((3, 1),),Pane
)
def ShowPane(self, Pane=defaultNamedNotOptArg, Visible=defaultNamedNotOptArg):
return self._oleobj_.InvokeTypes(8708, LCID, 1, (24, 0), ((3, 1), (11, 1)),Pane
, Visible)
_prop_map_get_ = {
# Method 'Application' returns object of type '_Application'
"Application": (61440, 2, (9, 0), (), "Application", '{00063001-0000-0000-C000-000000000046}'),
"Caption": (8465, 2, (8, 0), (), "Caption", None),
"Class": (61450, 2, (3, 0), (), "Class", None),
# Method 'CommandBars' returns object of type 'CommandBars'
"CommandBars": (8448, 2, (13, 0), (), "CommandBars", '{55F88893-7708-11D1-ACEB-006008961DA5}'),
# Method 'CurrentFolder' returns object of type 'MAPIFolder'
"CurrentFolder": (8449, 2, (9, 0), (), "CurrentFolder", '{00063006-0000-0000-C000-000000000046}'),
"CurrentView": (8704, 2, (12, 0), (), "CurrentView", None),
"Height": (8468, 2, (3, 0), (), "Height", None),
"Left": (8469, 2, (3, 0), (), "Left", None),
# Method 'Panes' returns object of type 'Panes'
"Panes": (8705, 2, (9, 0), (), "Panes", '{00063009-0000-0000-C000-000000000046}'),
"Parent": (61441, 2, (9, 0), (), "Parent", None),
# Method 'Selection' returns object of type 'Selection'
"Selection": (8706, 2, (9, 0), (), "Selection", '{00063087-0000-0000-C000-000000000046}'),
# Method 'Session' returns object of type '_NameSpace'
"Session": (61451, 2, (9, 0), (), "Session", '{00063002-0000-0000-C000-000000000046}'),
"Top": (8470, 2, (3, 0), (), "Top", None),
"Views": (12553, 2, (9, 0), (), "Views", None),
"Width": (8471, 2, (3, 0), (), "Width", None),
"WindowState": (8466, 2, (3, 0), (), "WindowState", None),
}
_prop_map_put_ = {
"CurrentFolder": ((8449, LCID, 8, 0),()),
"CurrentView": ((8704, LCID, 4, 0),()),
"Height": ((8468, LCID, 4, 0),()),
"Left": ((8469, LCID, 4, 0),()),
"Top": ((8470, LCID, 4, 0),()),
"Width": ((8471, LCID, 4, 0),()),
"WindowState": ((8466, LCID, 4, 0),()),
}
class _Explorers(DispatchBaseClass):
CLSID = IID('{0006300A-0000-0000-C000-000000000046}')
coclass_clsid = IID('{00063053-0000-0000-C000-000000000046}')
# Result is of type _Explorer
def Add(self, Folder=defaultNamedNotOptArg, DisplayMode=defaultNamedNotOptArg):
ret = self._oleobj_.InvokeTypes(95, LCID, 1, (9, 0), ((12, 1), (3, 17)),Folder
, DisplayMode)
if ret is not None:
ret = Dispatch(ret, 'Add', '{00063003-0000-0000-C000-000000000046}')
return ret
# Result is of type Explorer
def Item(self, Index=defaultNamedNotOptArg):
ret = self._oleobj_.InvokeTypes(81, LCID, 1, (13, 0), ((12, 1),),Index
)
if ret is not None:
# See if this IUnknown is really an IDispatch
try:
ret = ret.QueryInterface(pythoncom.IID_IDispatch)
except pythoncom.error:
return ret
ret = Dispatch(ret, 'Item', '{00063050-0000-0000-C000-000000000046}')
return ret
_prop_map_get_ = {
# Method 'Application' returns object of type '_Application'
"Application": (61440, 2, (9, 0), (), "Application", '{00063001-0000-0000-C000-000000000046}'),
"Class": (61450, 2, (3, 0), (), "Class", None),
"Count": (80, 2, (3, 0), (), "Count", None),
"Parent": (61441, 2, (9, 0), (), "Parent", None),
# Method 'Session' returns object of type '_NameSpace'
"Session": (61451, 2, (9, 0), (), "Session", '{00063002-0000-0000-C000-000000000046}'),
}
_prop_map_put_ = {
}
#This class has Item property/method which may take args - allow indexed access
def __getitem__(self, item):
return self._get_good_object_(self._oleobj_.Invoke(*(81, LCID, 1, 1, item)), "Item")
#This class has Count() property - allow len(ob) to provide this
def __len__(self):
return self._ApplyTypes_(*(80, 2, (3, 0), (), "Count", None))
#This class has a __len__ - this is needed so 'if object:' always returns TRUE.
def __nonzero__(self):
return True
class _Folders(DispatchBaseClass):
CLSID = IID('{00063040-0000-0000-C000-000000000046}')
coclass_clsid = IID('{00063051-0000-0000-C000-000000000046}')
# Result is of type MAPIFolder
def Add(self, Name=defaultNamedNotOptArg, Type=defaultNamedOptArg):
ret = self._oleobj_.InvokeTypes(95, LCID, 1, (9, 0), ((8, 1), (12, 17)),Name
, Type)
if ret is not None:
ret = Dispatch(ret, 'Add', '{00063006-0000-0000-C000-000000000046}')
return ret
# Result is of type MAPIFolder
def GetFirst(self):
ret = self._oleobj_.InvokeTypes(86, LCID, 1, (9, 0), (),)
if ret is not None:
ret = Dispatch(ret, 'GetFirst', '{00063006-0000-0000-C000-000000000046}')
return ret
# Result is of type MAPIFolder
def GetLast(self):
ret = self._oleobj_.InvokeTypes(88, LCID, 1, (9, 0), (),)
if ret is not None:
ret = Dispatch(ret, 'GetLast', '{00063006-0000-0000-C000-000000000046}')
return ret
# Result is of type MAPIFolder
def GetNext(self):
ret = self._oleobj_.InvokeTypes(87, LCID, 1, (9, 0), (),)
if ret is not None:
ret = Dispatch(ret, 'GetNext', '{00063006-0000-0000-C000-000000000046}')
return ret
# Result is of type MAPIFolder
def GetPrevious(self):
ret = self._oleobj_.InvokeTypes(89, LCID, 1, (9, 0), (),)
if ret is not None:
ret = Dispatch(ret, 'GetPrevious', '{00063006-0000-0000-C000-000000000046}')
return ret
# Result is of type MAPIFolder
def Item(self, Index=defaultNamedNotOptArg):
ret = self._oleobj_.InvokeTypes(81, LCID, 1, (9, 0), ((12, 1),),Index
)
if ret is not None:
ret = Dispatch(ret, 'Item', '{00063006-0000-0000-C000-000000000046}')
return ret
def Remove(self, Index=defaultNamedNotOptArg):
return self._oleobj_.InvokeTypes(84, LCID, 1, (24, 0), ((3, 1),),Index
)
_prop_map_get_ = {
# Method 'Application' returns object of type '_Application'
"Application": (61440, 2, (9, 0), (), "Application", '{00063001-0000-0000-C000-000000000046}'),
"Class": (61450, 2, (3, 0), (), "Class", None),
"Count": (80, 2, (3, 0), (), "Count", None),
"Parent": (61441, 2, (9, 0), (), "Parent", None),
"RawTable": (90, 2, (13, 0), (), "RawTable", None),
# Method 'Session' returns object of type '_NameSpace'
"Session": (61451, 2, (9, 0), (), "Session", '{00063002-0000-0000-C000-000000000046}'),
}
_prop_map_put_ = {
}
#This class has Item property/method which may take args - allow indexed access
def __getitem__(self, item):
return self._get_good_object_(self._oleobj_.Invoke(*(81, LCID, 1, 1, item)), "Item")
#This class has Count() property - allow len(ob) to provide this
def __len__(self):
return self._ApplyTypes_(*(80, 2, (3, 0), (), "Count", None))
#This class has a __len__ - this is needed so 'if object:' always returns TRUE.
def __nonzero__(self):
return True
class _IDocSiteControl(DispatchBaseClass):
CLSID = IID('{43507DD0-811D-11CE-B565-00AA00608FAA}')
coclass_clsid = None
_prop_map_get_ = {
"ReadOnly": (-2147356664, 2, (3, 0), ((16395, 10),), "ReadOnly", None),
}
_prop_map_put_ = {
"ReadOnly": ((-2147356664, LCID, 4, 0),()),
}
class _IRecipientControl(DispatchBaseClass):
CLSID = IID('{D87E7E16-6897-11CE-A6C0-00AA00608FAA}')
coclass_clsid = None
_prop_map_get_ = {
"BackColor": (-501, 2, (3, 0), ((16387, 10),), "BackColor", None),
"Enabled": (-514, 2, (3, 0), ((16395, 10),), "Enabled", None),
"Font": (-512, 2, (3, 0), ((16393, 10),), "Font", None),
"ForeColor": (-513, 2, (3, 0), ((16387, 10),), "ForeColor", None),
"ReadOnly": (-2147356664, 2, (3, 0), ((16395, 10),), "ReadOnly", None),
"SpecialEffect": (12, 2, (3, 0), ((16387, 10),), "SpecialEffect", None),
}
_prop_map_put_ = {
"BackColor": ((-501, LCID, 4, 0),()),
"Enabled": | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Definition of the NSDE algorithm"""
import numpy as np
try:
from openmdao.utils.concurrent import concurrent_eval
except ModuleNotFoundError:
import warnings
warnings.warn("OpenMDAO is not installed. Concurrent evaluation is not available.")
from . import sorting, hv
from .strategies import EvolutionStrategy
def mpi_fobj_wrapper(fobj):
"""
Wrapper for the objective function to keep track of individual indices when running under MPI.
Parameters
----------
fobj : callable
Original objective function
Returns
-------
callable
Wrapped function which, in addition to x, takes the individual's index and returns it along with f
"""
def wrapped(x, ii):
return fobj(x), ii
return wrapped
class NSDE:
"""
Non-dominated Sorting Differential Evolution (NSDE) Algorithm.
Attributes
----------
fobj : callable
Objective function.
Should have a single argument of type array_like which corresponds to the design vector.
Should have either a single float or 1D array output corresponding to the objective function value(s),
or two array_like outputs, the first of which corresponds to the objective function value(s) and the second
to the constraint violations.
Constraints are assumed to be satisfied if constraint violations <= constraint tolerance.
lb, ub : array_like
Lower and upper bounds
range : array_like
Distances between the lower and upper bounds
f, cr : float
Mutation rate and crossover probabilities
adaptivity : int
Method of self-adaptivity.
- 0: No self-adaptivity. Specified mutation rate and crossover probability are used.
- 1: Simple self-adaptability. Mutation rate and crossover probability are optimized Monte Carlo style.
- 2: Complex self-adaptability. Mutation rate and crossover probability are mutated with specified strategy.
max_gen : int
Maximum number of generations
tolx, tolf : float
Tolerances on the design vectors' and objective function values' spreads
tolc : float
Constraint violation tolerance.
n_dim : int
Number of dimensions of the problem
n_pop : int
Population size
rng : np.random.Generator
Random number generator
comm : MPI communicator or None
The MPI communicator that will be used for objective evaluation in each generation
model_mpi : None or tuple
If the model in fobj is also parallel, then this will contain a tuple with the
total number of population points to evaluate concurrently, and the color of the point
to evaluate on this rank
strategy : EvolutionStrategy
Evolution strategy to use for procreation
pop : np.array
List of the individuals' chromosomes making up the current population
fit : np.array
Fitness of the individuals in the population
con : np.array
Constraint violations of the individuals in the population
generation : int
Generation counter
"""
def __init__(
self,
strategy=None,
mut=0.85,
crossp=1.0,
adaptivity=0,
max_gen=1000,
tolx=1e-8,
tolf=1e-8,
tolc=1e-6,
n_pop=None,
seed=None,
comm=None,
model_mpi=None,
):
self.fobj = None
self.lb, self.ub = None, None
self.range = 0
self.f = mut
self.cr = crossp
self.max_gen = max_gen
self.tolx = tolx
self.tolf = tolf
self.tolc = tolc
self.n_dim = 0
self.n_obj = 0
self.n_con = 0
self.n_pop = n_pop
self.rng = np.random.default_rng(seed)
if adaptivity not in [0, 1, 2]:
raise ValueError("self_adaptivity must be one of (0, 1, 2).")
self.adaptivity = adaptivity
self.comm = comm
self.model_mpi = model_mpi
if strategy is None:
self.strategy = EvolutionStrategy("rand-to-best/1/bin/random")
elif isinstance(strategy, EvolutionStrategy):
self.strategy = strategy
elif isinstance(strategy, str):
self.strategy = EvolutionStrategy(strategy)
else:
raise ValueError(
"Argument `strategy` should be None, a str, or an instance of EvolutionStrategy."
)
self.pop = None
self.fit = None
self.con = None
self.fronts = None
self.dx, self.df, self.hv = np.inf, np.inf, np.inf
self.pareto_lb = +np.inf
self.pareto_ub = -np.inf
self.generation = 0
self._is_initialized = False
self._running_under_mpi = comm is not None and hasattr(comm, "bcast")
def init(self, fobj, bounds, pop=None):
"""
Initialize the algorithm.
Parameters
----------
fobj : callable
Objective function
bounds : list of 2-tuples
List of (lower, upper) bounds
pop : None or array_like, optional
Initial population. If None, it will be created at random.
"""
# Set default values for the mutation and crossover parameters
if self.f is None or not 0.0 <= self.f <= 1.0:
self.f = 0.85
if self.cr is None or not 0.0 <= self.cr <= 1.0:
self.cr = 1.0
# Prepare the objective function and compute the bounds and variable range
self.fobj = fobj if self.comm is None else mpi_fobj_wrapper(fobj)
self.lb, self.ub = np.asarray(bounds).T
self.range = self.ub - self.lb
# Compute the number of dimensions
self.n_dim = len(bounds)
def create_f_cr(adaptivity, f, cr, n, rng):
# Create random mutation/crossover parameters if self-adaptivity is used
if adaptivity == 0:
f = f * np.ones(n)
cr = cr * np.ones(n)
elif adaptivity == 1:
f = rng.uniform(size=n) * 0.9 + 0.1
cr = rng.uniform(size=n)
elif adaptivity == 2:
f = rng.uniform(size=n) * 0.15 + 0.5
cr = rng.uniform(size=n) * 0.15 + 0.5
return f, cr
adjust_pop = False
if pop is not None:
self.n_pop = pop.shape[0]
self.pop = pop
self.f, self.cr = create_f_cr(
self.adaptivity, self.f, self.cr, self.n_pop, self.rng
)
else:
if self.n_pop is None or self.n_pop <= 0:
self.pop = self.rng.uniform(self.lb, self.ub, size=(1, self.n_dim))
adjust_pop = True
self.n_pop = 1
else:
self.pop = self.rng.uniform(
self.lb, self.ub, size=(self.n_pop, self.n_dim)
)
self.f, self.cr = create_f_cr(
self.adaptivity, self.f, self.cr, self.n_pop, self.rng
)
# Ensure all processors have the same population and mutation/crossover parameters
if self._running_under_mpi:
self.pop, self.f, self.cr = self.comm.bcast(
(self.pop, self.f, self.cr), root=0
)
self.fit, self.con = self(self.pop)
self.n_obj = self.fit.shape[1]
if self.con is not None:
self.n_con = self.con.shape[1]
if adjust_pop:
self.n_pop = 5 * self.n_dim * self.n_obj
# If we are running under MPI, expand population to fully exploit all processors
if self._running_under_mpi:
self.n_pop = int(np.ceil(self.n_pop / self.comm.size) * self.comm.size)
self.pop = np.concatenate(
(
self.pop,
self.rng.uniform(
self.lb, self.ub, size=(self.n_pop - 1, self.n_dim)
),
)
)
self.f, self.cr = create_f_cr(
self.adaptivity, self.f, self.cr, self.n_pop, self.rng
)
if self._running_under_mpi:
self.pop, self.f, self.cr = self.comm.bcast(
(self.pop, self.f, self.cr), root=0
)
self.fit, self.con = self(self.pop)
self.update()
# Set generation counter to 0
self.generation = 0
# Mark class as initialized
self._is_initialized = True
@property
def is_initialized(self):
"""bool: True if the algorithm has been initialized, False if not."""
return self._is_initialized
def __iter__(self):
"""
This class is an iterator itself.
Raises
------
RuntimeError
If this class is being used as an iterator before it has been initialized.
"""
if not self._is_initialized:
raise RuntimeError("NSDE is not yet initialized.")
return self
def __next__(self):
"""
Main iteration.
Returns
-------
NSDE
The new state at the next generation.
"""
if (
self.generation < self.max_gen
and self.dx > self.tolx
and self.df > self.tolf
):
# Create a new population and mutation/crossover parameters
pop_new, f_new, cr_new = self.procreate()
# Ensure all processors have the same updated population and mutation/crossover parameters
if self._running_under_mpi:
pop_new, f_new, cr_new = self.comm.bcast(
(pop_new, f_new, cr_new), root=0
)
# Evaluate the fitness of the new population
fit_new, con_new = self(pop_new)
# Update the class with the new data
self.update(pop_new, fit_new, con_new, f_new, cr_new)
# Compute spreads and update generation counter
if self.n_obj == 1:
self.dx = np.linalg.norm(self.pop[0] - self.pop[-1])
self.df = np.abs(self.fit[0] - self.fit[-1])
else:
pareto = self.fit[self.fronts[0]]
self.pareto_lb = np.minimum(self.pareto_lb, np.min(pareto, axis=0, keepdims=True))
self.pareto_ub = np.maximum(self.pareto_ub, np.max(pareto, axis=0, keepdims=True))
pareto_norm = 1 + (pareto - self.pareto_lb) / (self.pareto_ub - self.pareto_lb)
self.hv = hv.hv(pareto_norm, 2.1 * np.ones(self.n_obj))
self.generation += 1
# Return the new state
return self
else:
raise StopIteration
def __call__(self, pop):
"""
Evaluate the fitness of the given population.
Parameters
----------
pop : array_like
List of chromosomes of the individuals in the population
Returns
-------
fit : np.array
Fitness of the individuals in the given population
con : np.array or None
Constraint violations of the individuals in the given population if present. None otherwise.
Notes
-----
If this class has an MPI communicator the individuals will be evaluated in parallel.
Otherwise function evaluation will be serial.
"""
if self.is_initialized:
fit = np.empty((self.n_pop, self.n_obj))
con = None if self.n_con is None else np.empty((self.n_pop, self.n_con))
else:
fit = pop.shape[0] * [None]
con = None
def handle_result(_v, _i, _fit, _con):
if isinstance(_v, tuple):
_fit[_i] = np.asarray(_v[0])
c = np.asarray(_v[1])
if _con is None:
_con = np.empty((pop.shape[0], c.size))
_con[_i] = c
else:
_fit[_i] = _v
return _fit, _con
#
those to avoid reading in the cmds table.
# For now just get things working reliably.
loads_table_path = paths.LOADS_TABLE_PATH(scenario)
loads_rows = []
# Probably too complicated, but this bit of code generates a list of dates
# that are guaranteed to sample all the months in the lookback period with
# two weeks of margin on the tail end.
dt = 21 * u.day
start = CxoTime(stop) - lookback * u.day
if stop is None:
stop = CxoTime.now() + dt
else:
stop = CxoTime(stop)
n_sample = int(np.ceil((stop - start) / dt))
dates = start + np.arange(n_sample + 1) * (stop - start) / n_sample
dirs_tried = set()
# Get the directory listing for each unique Year/Month and find loads.
# For each load not already in the table, download the backstop and add the
# load to the table.
for date in dates:
year, month = str(date.ymdhms.year), date.ymdhms.month
month_name = calendar.month_abbr[month].upper()
dir_year_month = APPROVED_LOADS_OCCWEB_DIR / year / month_name
if dir_year_month in dirs_tried:
continue
dirs_tried.add(dir_year_month)
# Get directory listing for Year/Month
try:
contents = occweb.get_occweb_dir(dir_year_month)
except requests.exceptions.HTTPError as exc:
if str(exc).startswith('404'):
logger.debug(f'No OCCweb directory for {dir_year_month}')
continue
else:
raise
# Find each valid load name in the directory listing and process:
# - Find and download the backstop file
# - Parse the backstop file and save to the loads/ archive as a gzipped
# pickle file
# - Add the load to the table
for content in contents:
if re.match(r'[A-Z]{3}\d{4}[A-Z]/', content['Name']):
load_name = content['Name'][:8] # chop the /
load_date = load_name_to_cxotime(load_name)
if load_date < RLTT_ERA_START:
logger.warning(f'Skipping {load_name} which is before '
f'{RLTT_ERA_START} start of RLTT era')
continue
if load_date >= start and load_date <= stop:
cmds = get_load_cmds_from_occweb_or_local(dir_year_month, load_name)
load = get_load_dict_from_cmds(load_name, cmds, cmd_events)
loads_rows.append(load)
if not loads_rows:
raise ValueError(f'No loads found in {lookback} days')
# Finally, save the table to file
loads_table = Table(loads_rows)
logger.info(f'Saving {len(loads_table)} loads to {loads_table_path}')
loads_table.sort('cmd_start')
loads_table.write(loads_table_path, format='csv', overwrite=True)
loads_table.write(loads_table_path.with_suffix('.dat'), format='ascii.fixed_width',
overwrite=True)
if conf.clean_loads_dir:
clean_loads_dir(loads_table)
return loads_table
def clean_loads_dir(loads):
"""Remove load-like files from loads directory if not in ``loads``"""
for file in Path(paths.LOADS_ARCHIVE_DIR()).glob('*.pkl.gz'):
if (re.match(r'[A-Z]{3}\d{4}[A-Z]\.pkl\.gz', file.name)
and file.name[:8] not in loads['name']):
logger.info(f'Removing load file {file}')
file.unlink()
def get_load_dict_from_cmds(load_name, cmds, cmd_events):
"""Update ``load`` dict in place from the backstop commands.
"""
vehicle_stop_events = ('NSM', 'Safe mode', 'Bright star hold')
observing_stop_events = vehicle_stop_events + ('SCS-107',)
load = {'name': load_name,
'cmd_start': cmds['date'][0],
'cmd_stop': cmds['date'][-1],
'observing_stop': '',
'vehicle_stop': ''}
load['rltt'] = cmds.get_rltt()
load['scheduled_stop_time'] = cmds.get_scheduled_stop_time()
# CHANGE THIS to use LOAD_EVENT entries in commands. Or NOT?? Probably
# provides good visibility into what's going on. But this hard-coding is
# annoying.
for cmd_event in cmd_events:
cmd_event_date = cmd_event['Date']
if (cmd_event_date >= load['cmd_start']
and cmd_event_date <= load['cmd_stop']
and cmd_event['Event'] in observing_stop_events):
logger.info(f'{cmd_event["Event"]} at {cmd_event_date} found for {load_name}')
load['observing_stop'] = cmd_event['Date']
if cmd_event['Event'] in vehicle_stop_events:
load['vehicle_stop'] = cmd_event['Date']
if cmd_event['Event'] == 'Load not run' and cmd_event['Params'] == load_name:
logger.info(f'{cmd_event["Event"]} at {cmd_event_date} found for {load_name}')
load['observing_stop'] = '1998:001'
load['vehicle_stop'] = '1998:001'
if cmd_event['Event'] == 'Observing not run' and cmd_event['Params'] == load_name:
logger.info(f'{cmd_event["Event"]} at {cmd_event_date} found for {load_name}')
load['observing_stop'] = '1998:001'
return load
def get_load_cmds_from_occweb_or_local(dir_year_month=None, load_name=None, use_ska_dir=False):
"""Get the load cmds (backstop) for ``load_name`` within ``dir_year_month``
If the backstop file is already available locally, use that. Otherwise, the
file is downloaded from OCCweb and is then parsed and saved as a gzipped
pickle file of the corresponding CommandTable object.
:param dir_year_month: Path
Path to the directory containing the ``load_name`` directory.
:param load_name: str
Load name in the usual format e.g. JAN0521A.
:returns: CommandTable
Backstop commands for the load.
"""
# Determine output file name and make directory if necessary.
loads_dir = paths.LOADS_ARCHIVE_DIR()
loads_dir.mkdir(parents=True, exist_ok=True)
cmds_filename = loads_dir / f'{load_name}.pkl.gz'
# If the output file already exists, read the commands and return them.
if cmds_filename.exists():
logger.info(f'Already have {cmds_filename}')
with gzip.open(cmds_filename, 'rb') as fh:
cmds = pickle.load(fh)
return cmds
if use_ska_dir:
ska_dir = ska_load_dir(load_name)
for filename in ska_dir.glob('CR????????.backstop'):
backstop_text = filename.read_text()
logger.info(f'Got backstop from {filename}')
cmds = parse_backstop_and_write(load_name, cmds_filename, backstop_text)
break
else:
raise ValueError(f'No backstop file found in {ska_dir}')
else: # use OCCweb
load_dir_contents = occweb.get_occweb_dir(dir_year_month / load_name)
for filename in load_dir_contents['Name']:
if re.match(r'CR\d{3}.\d{4}\.backstop', filename):
# Download the backstop file from OCCweb
backstop_text = occweb.get_occweb_page(
dir_year_month / load_name / filename,
cache=conf.cache_loads_in_astropy_cache)
cmds = parse_backstop_and_write(load_name, cmds_filename, backstop_text)
break
else:
raise ValueError(f'Could not find backstop file in {dir_year_month / load_name}')
return cmds
def parse_backstop_and_write(load_name, cmds_filename, backstop_text):
"""Parse ``backstop_text`` and write to ``cmds_filename`` (gzipped pickle).
This sets the ``source`` column to ``load_name`` and removes the
``timeline_id`` column.
"""
backstop_lines = backstop_text.splitlines()
cmds = get_cmds_from_backstop(backstop_lines)
# Fix up the commands to be in the right format
idx = cmds.colnames.index('timeline_id')
cmds.add_column(load_name, index=idx, name='source')
del cmds['timeline_id']
logger.info(f'Saving {cmds_filename}')
with gzip.open(cmds_filename, 'wb') as fh:
pickle.dump(cmds, fh)
return cmds
def update_cmds_archive(*, lookback=None, stop=None, log_level=logging.INFO,
scenario=None, data_root='.', match_prev_cmds=True):
"""Update cmds2.h5 and cmds2.pkl archive files.
This updates the archive through the ``stop`` date, where it is required that the
``stop`` date is within ``lookback`` days of existing data in the archive.
:param lookback: int, None
Number of days to look back to get recent load commands from OCCweb.
Default is ``conf.default_lookback`` (currently 30).
:param stop: CxoTime-like, None
Stop date to update the archive to. Default is NOW + 21 days.
:param log_level: int
Logging level. Default is ``logging.INFO``.
:param scenario: str, None
Scenario name for loads and command events
:param data_root: str, Path
Root directory where cmds2.h5 and cmds2.pkl are stored. Default is '.'.
:param match_prev_cmds: bool
One-time use flag set to True to update the cmds archive near the v1/v2
transition of APR1420B. See ``utils/migrate_cmds_to_cmds2.py`` for
details.
"""
# For testing allow override of default `stop` value
if stop is None:
stop = os.environ.get('KADI_COMMANDS_DEFAULT_STOP')
# Local context manager for log_level and data_root
kadi_logger = logging.getLogger('kadi')
log_level_orig = kadi_logger.level
with conf.set_temp('commands_dir', data_root):
try:
kadi_logger.setLevel(log_level)
_update_cmds_archive(lookback, stop, match_prev_cmds, scenario, data_root)
finally:
kadi_logger.setLevel(log_level_orig)
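# A minimal usage sketch (added; the stop date and data_root below are
# hypothetical example values, not taken from this module):
#
#     update_cmds_archive(lookback=30, stop='2022:001', data_root='/tmp/kadi_test')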
def _update_cmds_archive(lookback, stop, match_prev_cmds, scenario, data_root):
"""Do the real work of updating the cmds archive"""
idx_cmds_path = Path(data_root) / 'cmds2.h5'
pars_dict_path = Path(data_root) / 'cmds2.pkl'
if idx_cmds_path.exists():
cmds_arch = load_idx_cmds(version=2, file=idx_cmds_path)
pars_dict = load_pars_dict(version=2, file=pars_dict_path)
else:
# Make an empty cmds archive table and pars dict
cmds_arch = CommandTable(names=list(CommandTable.COL_TYPES),
dtype=list(CommandTable.COL_TYPES.values()))
del cmds_arch['timeline_id']
pars_dict = {}
match_prev_cmds = False # No matching of previous commands
cmds_recent = update_archive_and_get_cmds_recent(
scenario=scenario, stop=stop, lookback=lookback, cache=False,
pars_dict=pars_dict)
if match_prev_cmds:
idx0_arch, idx0_recent = get_matching_block_idx(cmds_arch, cmds_recent)
else:
idx0_arch = len(cmds_arch)
idx0_recent = 0
# Convert from `params` col of dicts to index into same params in pars_dict.
for cmd in cmds_recent:
cmd['idx'] = get_par_idx_update_pars_dict(pars_dict, cmd)
# If the length of the updated table will be the same as the existing table.
# For the command below the no-op logic should be clear:
# cmds_arch = vstack([cmds_arch[:idx0_arch], cmds_recent[idx0_recent:]])
if idx0_arch == len(cmds_arch) and idx0_recent == len(cmds_recent):
logger.info(f'No new commands found, skipping writing {idx_cmds_path}')
return
# Merge the recent commands with the existing archive.
logger.info(f'Appending {len(cmds_recent) - idx0_recent} new commands after '
f'removing {len(cmds_arch) - idx0_arch} from existing archive')
logger.info(f' starting with cmds_arch[:{idx0_arch}] and adding '
f'cmds_recent[{idx0_recent}:{len(cmds_recent)}]')
# Remove params column before stacking and saving
del cmds_recent['params']
del cmds_arch['params']
# Save the updated archive and pars_dict.
cmds_arch_new = vstack_exact([cmds_arch[:idx0_arch], cmds_recent[idx0_recent:]])
logger.info(f'Writing {len(cmds_arch_new)} commands to {idx_cmds_path}')
cmds_arch_new.write(str(idx_cmds_path), path='data', format='hdf5', overwrite=True)
logger.info(f'Writing updated pars_dict to {pars_dict_path}')
pickle.dump(pars_dict, open(pars_dict_path, 'wb'))
def get_matching_block_idx(cmds_arch, cmds_recent):
# Find place in archive where the recent commands start.
idx_arch_recent = cmds_arch.find_date(cmds_recent['date'][0])
logger.info('Selecting commands from cmds_arch[{}:]'.format(idx_arch_recent))
cmds_arch_recent = cmds_arch[idx_arch_recent:]
# Define the column names that specify a complete and unique row
key_names = ('date', 'type', 'tlmsid', 'scs', 'step', 'source', 'vcdu')
recent_vals = [tuple(
row[x].decode('ascii') if isinstance(row[x], bytes) else str(row[x])
for x in key_names)
for row in cmds_arch_recent]
arch_vals = [tuple(
row[x].decode('ascii') if isinstance(row[x], bytes) else str(row[x])
for x in key_names)
for row in cmds_recent]
diff = difflib.SequenceMatcher(a=recent_vals, b=arch_vals, autojunk=False)
matching_blocks = diff.get_matching_blocks()
logger.info('Matching blocks for (a) recent commands and (b) existing HDF5')
for block in matching_blocks:
logger.info(' {}'.format(block))
opcodes = diff.get_opcodes()
logger.info('Diffs between (a) recent commands and (b) existing HDF5')
for
0):
for yp in [-2, -1, 0, 1, 2]:
if (y + yp < last_img.shape[1]) and (y + yp >= 0):
i_xpyp = last_img[x + xp, y + yp]
if i_xpyp > 0:
new_val = min(i_xy, i_xpyp, cur_img[x, y])
if cur_img[x, y] != new_val:
cur_img[x, y] = new_val
img_list += [cur_img]
if (cur_img == last_img).all():
print("Done")
break
else:
print(
"Iteration",
iteration,
"Groups",
len(np.unique(cur_img[cur_img > 0].ravel())),
"Changes",
np.sum(cur_img != last_img),
)
last_img = cur_img
fig, c_ax = plt.subplots(1, 1, figsize=(5, 5), dpi=150)
def update_frame(i):
plt.cla()
sns.heatmap(
img_list[i],
annot=True,
fmt="d",
cmap="nipy_spectral",
ax=c_ax,
cbar=False,
vmin=img_list[0].min(),
vmax=img_list[0].max(),
)
c_ax.set_title(
"Iteration #{}, Groups {}".format(
i + 1, len(np.unique(img_list[i][img_list[i] > 0].ravel()))
)
)
# write animation frames
anim_code = FuncAnimation(
fig, update_frame, frames=len(img_list) - 1, interval=500, repeat_delay=1000
).to_html5_video()
plt.close("all")
HTML(anim_code)
# # Or a smaller kernel
# By using a smaller kernel (in this case where $\sqrt{x^2+y^2}\leq 1$), we cause the number of iterations needed to fill to increase and we prevent the last pixel from being grouped, since it is only connected diagonally
#
# | | | |
# |--:|--:|--:|
# | 0| 1| 0|
# | 1| 1| 1|
# | 0| 1| 0|
#
# In[14]:
last_img = idx_img.copy()
img_list = [last_img]
for iteration in range(99):
cur_img = last_img.copy()
for x in range(last_img.shape[0]):
for y in range(last_img.shape[1]):
if last_img[x, y] > 0:
i_xy = last_img[x, y]
for xp in [-1, 0, 1]:
if (x + xp < last_img.shape[0]) and (x + xp >= 0):
for yp in [-1, 0, 1]:
if np.abs(xp) + np.abs(yp) <= 1:
if (y + yp < last_img.shape[1]) and (y + yp >= 0):
i_xpyp = last_img[x + xp, y + yp]
if i_xpyp > 0:
new_val = min(i_xy, i_xpyp, cur_img[x, y])
if cur_img[x, y] != new_val:
cur_img[x, y] = new_val
img_list += [cur_img]
if (cur_img == last_img).all():
print("Done")
break
else:
print(
"Iteration",
iteration,
"Groups",
len(np.unique(cur_img[cur_img > 0].ravel())),
"Changes",
np.sum(cur_img != last_img),
)
last_img = cur_img
fig, c_ax = plt.subplots(1, 1, figsize=(6, 6), dpi=100)
def update_frame(i):
plt.cla()
sns.heatmap(
img_list[i],
annot=True,
fmt="d",
cmap="nipy_spectral",
ax=c_ax,
cbar=False,
vmin=img_list[0].min(),
vmax=img_list[0].max(),
)
c_ax.set_title(
"Iteration #{}, Groups {}".format(
i + 1, len(np.unique(img_list[i][img_list[i] > 0].ravel()))
)
)
# write animation frames
anim_code = FuncAnimation(
fig, update_frame, frames=len(img_list) - 1, interval=500, repeat_delay=1000
).to_html5_video()
plt.close("all")
HTML(anim_code)
# # Component Labeling: Beyond
#
#
# Now all the voxels which are connected have the same label. We can then perform simple metrics like
#
# - counting the number of voxels in each label to estimate volume.
# - looking at the change in volume during erosion or dilation to estimate surface area (a short added sketch after the labeling example below demonstrates both measurements)
# ### What we would like to do
#
# - Count the cells
# - Say something about the cells
# - Compare the cells in this image to another image
# - But where do we start?
#
# # COV: With a single object
#
# $$ I_{id}(x,y) =
# \begin{cases}
# 1, & L(x,y) = id \\
# 0, & \text{otherwise}
# \end{cases}$$
# In[15]:
from IPython.display import Markdown
from skimage.io import imread
from skimage.morphology import label
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
seg_img = imread("ext-figures/aachen_label.png") == 26
seg_img = seg_img[::4, ::4]
seg_img = seg_img[110:130:2, 370:420:3]
seg_img[9, 1] = 1
lab_img = label(seg_img)
_, (ax1) = plt.subplots(1, 1, figsize=(7, 7), dpi=150)
sns.heatmap(lab_img, annot=True, fmt="d", ax=ax1, cmap="nipy_spectral", cbar=False)
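# In[ ]:
# Added sketch (not part of the original notebook): the two measurements listed in
# "Component Labeling: Beyond" above, applied to `lab_img` from the previous cell.
from skimage.morphology import erosion, disk
import numpy as np
label_ids, voxel_counts = np.unique(lab_img[lab_img > 0], return_counts=True)
for lab_id, n_vox in zip(label_ids, voxel_counts):
    print("Label", lab_id, "-> volume estimate:", n_vox, "pixels")
# Surface-area estimate: pixels removed by a single erosion of the foreground
fg = lab_img > 0
surface_est = int(fg.sum() - erosion(fg, disk(1)).sum())
print("Surface-area estimate (one-pixel erosion shell):", surface_est, "pixels")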
# ### Define a center
# $$ \bar{x} = \frac{1}{N} \sum_{\vec{v}\in I_{id}} \vec{v}\cdot\vec{i} $$
# $$ \bar{y} = \frac{1}{N} \sum_{\vec{v}\in I_{id}} \vec{v}\cdot\vec{j} $$
# $$ \bar{z} = \frac{1}{N} \sum_{\vec{v}\in I_{id}} \vec{v}\cdot\vec{k} $$
#
# In[16]:
x_coord, y_coord = [], []
for x in range(seg_img.shape[0]):
for y in range(seg_img.shape[1]):
if seg_img[x, y] == 1:
x_coord += [x]
y_coord += [y]
print("x,y coordinates", list(zip(x_coord, y_coord)))
Markdown("$\\bar{x} = %2.2f, \\bar{y} = %2.2f $" % (np.mean(x_coord), np.mean(y_coord)))
# # COM: With a single object
#
# If the gray values are kept (or other meaningful ones are used), this can be seen as a weighted center of volume or center of mass (using $I_{gy}$ to distinguish it from the labels)
#
# ### Define a center
# $$ \Sigma I_{gy} = \frac{1}{N} \sum_{\vec{v}\in I_{id}} I_{gy}(\vec{v}) $$
# $$ \bar{x} = \frac{1}{\Sigma I_{gy}} \sum_{\vec{v}\in I_{id}} (\vec{v}\cdot\vec{i}) I_{gy}(\vec{v}) $$
# $$ \bar{y} = \frac{1}{\Sigma I_{gy}} \sum_{\vec{v}\in I_{id}} (\vec{v}\cdot\vec{j}) I_{gy}(\vec{v}) $$
# $$ \bar{z} = \frac{1}{\Sigma I_{gy}} \sum_{\vec{v}\in I_{id}} (\vec{v}\cdot\vec{k}) I_{gy}(\vec{v}) $$
#
# In[17]:
from IPython.display import Markdown, display
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
xx, yy = np.meshgrid(np.linspace(0, 10, 50), np.linspace(0, 10, 50))
gray_img = 100 * (np.abs(xx * yy - 7) + np.square(yy - 4)) + 0.25
gray_img *= np.abs(xx - 5) < 3
gray_img *= np.abs(yy - 5) < 3
gray_img[gray_img > 0] += 5
seg_img = (gray_img > 0).astype(int)
_, (ax1, ax2) = plt.subplots(1, 2, figsize=(14, 7), dpi=150)
sns.heatmap(gray_img, ax=ax1, cmap="bone_r", cbar=True)
ax1.set_title("Intensity Image")
sns.heatmap(seg_img, ax=ax2, cmap="bone", cbar=False)
ax2.set_title("Segmented Image")
# In[18]:
x_coord, y_coord, i_val = [], [], []
for x in range(seg_img.shape[0]):
for y in range(seg_img.shape[1]):
if seg_img[x, y] == 1:
x_coord += [x]
y_coord += [y]
i_val += [gray_img[x, y]]
x_coord = np.array(x_coord)
y_coord = np.array(y_coord)
i_val = np.array(i_val)
cov_x = np.mean(x_coord)
cov_y = np.mean(y_coord)
display(
Markdown(
"""## Center of Volume:
- $\\bar{x} = %2.2f$
- $\\bar{y} = %2.2f $"""
% (cov_x, cov_y)
)
)
com_x = np.sum(x_coord * i_val) / np.sum(i_val)
com_y = np.sum(y_coord * i_val) / np.sum(i_val)
display(
Markdown(
"""## Center of Mass:
- $\\bar{x}_m = %2.2f$
- $\\bar{y}_m = %2.2f $"""
% (com_x, com_y)
)
)
_, (ax1) = plt.subplots(1, 1, figsize=(7, 7), dpi=150)
ax1.matshow(gray_img, cmap="bone_r")
ax1.set_title("Intensity Image")
ax1.plot([cov_y], [cov_x], "ro", label="COV", markersize=20)
ax1.plot([com_y], [com_x], "bo", label="COM", markersize=20)
ax1.legend()
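# In[ ]:
# Added cross-check (assumes scipy is available in this environment; not part of
# the original notebook): scipy.ndimage.center_of_mass reproduces both centers.
from scipy import ndimage
print("COV (uniform weights):   ", ndimage.center_of_mass(seg_img))
print("COM (gray-value weights):", ndimage.center_of_mass(gray_img * seg_img))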
# In[19]:
from skimage.measure import regionprops
help(regionprops)
# In[20]:
from skimage.measure import regionprops
all_regs = regionprops(seg_img, intensity_image=gray_img)
for c_reg in all_regs:
display(Markdown("# Region: {}".format(c_reg.label)))
for k in dir(c_reg):
if not k.startswith("_") and ("image" not in k):
display(Markdown("- {} {}".format(k, getattr(c_reg, k))))
# # Extents: With a single object
#
# Extents or caliper lengths are the size of the object in a given direction. Since the coordinates of our image are $x$ and $y$, the extents are calculated in these directions
#
# Define extents as the minimum and maximum values along the projection of the shape in each direction
# $$ \text{Ext}_x = \left\{ \forall \vec{v}\in I_{id}: max(\vec{v}\cdot\vec{i})-min(\vec{v}\cdot\vec{i}) \right\} $$
# $$ \text{Ext}_y = \left\{ \forall \vec{v}\in I_{id}: max(\vec{v}\cdot\vec{j})-min(\vec{v}\cdot\vec{j}) \right\} $$
# $$ \text{Ext}_z = \left\{ \forall \vec{v}\in I_{id}: max(\vec{v}\cdot\vec{k})-min(\vec{v}\cdot\vec{k}) \right\} $$
#
# - Lots of information about each object now
# - But, I don't think a biologist has ever asked "How long is a cell in the $x$ direction? how about $y$?"
# In[21]:
from IPython.display import Markdown
from skimage.io import imread
from skimage.morphology import label
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
seg_img = imread("ext-figures/aachen_label.png") == 26
seg_img = seg_img[::4, ::4]
seg_img = seg_img[110:130:2, 378:420:3] > 0
seg_img = np.pad(seg_img, 3, mode="constant")
_, (ax1) = plt.subplots(1, 1, figsize=(7, 7), dpi=150)
ax1.matshow(seg_img, cmap="bone_r")
# In[22]:
x_coord, y_coord = [], []
for x in range(seg_img.shape[0]):
for y in range(seg_img.shape[1]):
if seg_img[x, y] == 1:
x_coord += [x]
y_coord += [y]
xmin = np.min(x_coord)
xmax = np.max(x_coord)
ymin = np.min(y_coord)
ymax = np.max(y_coord)
print("X -> ", "Min:", xmin, "Max:", xmax)
print("Y -> ", "Min:", ymin, "Max:", ymax)
# In[23]:
from matplotlib.collections import PatchCollection
from matplotlib.patches import Rectangle
_, (ax1) = plt.subplots(1, 1, figsize=(7, 7), dpi=150)
ax1.matshow(seg_img, cmap="bone_r")
xw = xmax - xmin
yw = ymax - ymin
c_bbox = [Rectangle(xy=(ymin, xmin), width=yw, height=xw)]
c_bb_patch = PatchCollection(
c_bbox, facecolor="none", edgecolor="red", linewidth=4, alpha=0.5
)
ax1.add_collection(c_bb_patch)
# # Concrete Example
# So how can we begin to apply the tools we have developed? We take the original car scene from before.
# In[24]:
from skimage.measure import regionprops, label
from skimage.io import imread
import numpy as np
import matplotlib.pyplot as plt
car_img = np.clip(imread("ext-figures/aachen_img.png")[75:150] * 2.0, 0, 255).astype(
np.uint8
)
lab_img = label(imread("ext-figures/aachen_label.png")[::4, ::4] == 26)[75:150]
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(20, 8))
ax1.imshow(car_img)
ax1.set_title("Input Image")
plt.colorbar(ax2.imshow(lab_img, cmap="nipy_spectral"))
ax2.set_title("Labeled Image")
# # Shape Analysis
# We can perform shape analysis on the image and calculate basic shape parameters for each object
# In[25]:
from skimage.measure import regionprops
from matplotlib.patches import Rectangle
from matplotlib.collections import PatchCollection
# shape analysis
all_regions = regionprops(lab_img)
fig, ax1 = plt.subplots(1, 1, figsize=(12, 6), dpi=100)
ax1.imshow(car_img)
print("Found ", len(all_regions), "regions")
bbox_list = []
for c_reg in all_regions:
ax1.plot(c_reg.centroid[1], c_reg.centroid[0], "o", markersize=5)
bbox_list += [
Rectangle(
xy=(c_reg.bbox[1], c_reg.bbox[0]),
width=c_reg.bbox[3] - c_reg.bbox[1],
height=c_reg.bbox[2] - c_reg.bbox[0],
)
]
c_bb_patch = PatchCollection(
bbox_list, facecolor="none", edgecolor="red", linewidth=4, alpha=0.5
)
ax1.add_collection(c_bb_patch)
# # Statistics
# We can then generate a table full of these basic parameters for each object. In this case, we add color as an additional description
# In[26]:
from sklearn.neighbors import KNeighborsClassifier
import webcolors
import pandas as pd
from skimage.morphology import erosion, disk
def ed_img(in_img):
# shrink an image to a few pixels
cur_img = in_img.copy()
while cur_img.max() > 0:
last_img = cur_img
cur_img = erosion(cur_img, disk(1))
return last_img
# guess color name based on rgb value
color_name_class =
("The storage engine for the table doesn't"
" support repair"),
'Msg_type': 'note',
'Op': 'repair'
}])
self.assertEqual(ret, expected)
ret = self.run_function(
'mysql.db_optimize',
name=dbname,
connection_user=self.user,
connection_pass=self.password
)
expected = []
for tablename, engine in iter(sorted(tablenames.iteritems())):
if engine == 'MYISAM':
expected.append([{
'Table': dbname+'.'+tablename,
'Msg_text': 'OK',
'Msg_type': 'status',
'Op': 'optimize'
}])
elif engine == 'InnoDB':
expected.append([{
'Table': dbname+'.'+tablename,
'Msg_text': ("Table does not support optimize, "
"doing recreate + analyze instead"),
'Msg_type': 'note',
'Op': 'optimize'
},
{
'Table': dbname+'.'+tablename,
'Msg_text': 'OK',
'Msg_type': 'status',
'Op': 'optimize'
}])
elif engine == 'MEMORY':
expected.append([{
'Table': dbname+'.'+tablename,
'Msg_text': ("The storage engine for the table doesn't"
" support optimize"),
'Msg_type': 'note',
'Op': 'optimize'
}])
self.assertEqual(ret, expected)
# Teardown, remove database
ret = self.run_function(
'mysql.db_remove',
name=dbname,
connection_user=self.user,
connection_pass=self.password
)
self.assertEqual(True, ret)
@skipIf(
NO_MYSQL,
'Please install MySQL bindings and a MySQL Server before running '
'MySQL integration tests.'
)
class MysqlModuleUserTest(integration.ModuleCase,
integration.SaltReturnAssertsMixIn):
'''
User Creation and connection tests
'''
user = 'root'
password = '<PASSWORD>'
@destructiveTest
def setUp(self):
'''
Test presence of MySQL server, enforce a root password
'''
super(MysqlModuleUserTest, self).setUp()
NO_MYSQL_SERVER = True
# now ensure we know the mysql root password
# one of theses two at least should work
ret1 = self.run_state(
'cmd.run',
name='mysqladmin --host="localhost" -u '
+ self.user
+ ' flush-privileges password "'
+ self.password
+ '"'
)
ret2 = self.run_state(
'cmd.run',
name='mysqladmin --host="localhost" -u '
+ self.user
+ ' --password="'
+ self.password
+ '" flush-privileges password "'
+ self.password
+ '"'
)
key, value = ret2.popitem()
if value['result']:
NO_MYSQL_SERVER = False
else:
self.skipTest('No MySQL Server running, or no root access on it.')
def _userCreationLoop(self,
uname,
host,
password=None,
new_password=None,
new_password_hash=None,
**kwargs):
'''
Perform some tests around creation of the given user
'''
# First silently remove it, in case of
ret = self.run_function(
'mysql.user_remove',
user=uname,
host=host,
**kwargs
)
# creation
ret = self.run_function(
'mysql.user_create',
user=uname,
host=host,
password=password,
**kwargs
)
self.assertEqual(True, ret, ('Calling user_create on'
' user {0!r} did not return True: {1}').format(
uname,
repr(ret)
))
# double creation failure
ret = self.run_function(
'mysql.user_create',
user=uname,
host=host,
password=password,
**kwargs
)
self.assertEqual(False, ret, ('Calling user_create a second time on'
' user {0!r} did not return False: {1}').format(
uname,
repr(ret)
))
# Alter password
if new_password is not None or new_password_hash is not None:
ret = self.run_function(
'mysql.user_chpass',
user=uname,
host=host,
password=<PASSWORD>,
password_hash=new_password_hash,
connection_user=self.user,
connection_pass=self.password,
connection_charset='utf8',
saltenv={"LC_ALL": "en_US.utf8"}
)
self.assertEqual(True, ret, ('Calling user_chpass on'
' user {0!r} did not return True: {1}').format(
uname,
repr(ret)
))
def _chck_userinfo(self, user, host, check_user, check_hash):
'''
Internal routine to check user_info returned results
'''
ret = self.run_function(
'mysql.user_info',
user=user,
host=host,
connection_user=self.user,
connection_pass=self.password,
connection_charset='utf8',
saltenv={"LC_ALL": "en_US.utf8"}
)
if not isinstance(ret, dict):
raise AssertionError(
'Unexpected result while retrieving user_info for {0!r}'.format(
user
)
)
self.assertEqual(ret['Host'], host)
self.assertEqual(ret['Password'], check_hash)
self.assertEqual(ret['User'], check_user)
def _chk_remove_user(self, user, host, **kwargs):
'''
Internal routine to check user_remove
'''
ret = self.run_function(
'mysql.user_remove',
user=user,
host=host,
**kwargs
)
self.assertEqual(True, ret, ('Assertion failed while removing user'
' {0!r} on host {1!r}: {2}').format(
user,
host,
repr(ret)
))
@destructiveTest
def test_user_management(self):
'''
Test various users creation settings
'''
# Create users with rights on this database
# and rights on other databases
user1 = "user '1"
user1_pwd = '<PASSWORD>'
user1_pwd_hash = <PASSWORD>'
# this is : user "2'標
user2 = 'user "2\'\xe6\xa8\x99'
user2_pwd = '<PASSWORD>'
user2_pwd_<PASSWORD> = <PASSWORD>'
user3 = 'user "3;,?:@=&/'
user3_pwd = 'user "3;,?:@=&/'
user3_pwd_<PASSWORD> = <PASSWORD>'
# this is : user ":=;4標 in unicode instead of utf-8
# if unicode char is counted as 1 char we hit the max user
# size (16)
user4 = u'user":;,?:@=&/4\u6a19'
user4_utf8 = 'user":;,?:@=&/4\xe6\xa8\x99'
user4_pwd = 'user "4;,?:@=&/'
user4_pwd_hash = <PASSWORD>'
user5 = u'user ``"5'
user5_utf8 = 'user ``"5'
# this is 標標標\
user5_pwd = <PASSWORD>\xa8\x99\\'
# this is password('標標\\')
user5_pwd_hash = <PASSWORD>'
user6 = u'user %--"6'
user6_utf8 = 'user %--"6'
# this is : --'"% SIX標b
user6_pwd_u = u' --\'"% S<PASSWORD>'
user6_pwd_utf8 = ' --\'"% SIX\xe6\xa8\x99b'
# this is password(' --\'"% SIX標b')
user6_pwd_hash = <PASSWORD>'
self._userCreationLoop(
uname=user1,
host='localhost',
password='<PASSWORD>',
new_password='<PASSWORD>`\'"1b',
connection_user=self.user,
connection_pass=self.password
)
# Now check for results
ret = self.run_function(
'mysql.user_exists',
user=user1,
host='localhost',
password=<PASSWORD>,
password_hash=<PASSWORD>,
connection_user=self.user,
connection_pass=self.password,
connection_charset='utf8',
saltenv={"LC_ALL": "en_US.utf8"}
)
self.assertEqual(True, ret, ('Testing final user {0!r} on host {1!r}'
' existence failed').format(user1, 'localhost')
)
self._userCreationLoop(
uname=user2,
host='localhost',
password=None,
# this is his name hash : user "2'標
password_hash='*<PASSWORD>',
# and this is the same with a 'b' added
new_password_hash=<PASSWORD>2_pwd_<PASSWORD>,
connection_user=self.user,
connection_pass=self.password,
connection_charset='utf8',
saltenv={"LC_ALL": "en_US.utf8"}
)
# user2 can connect from other places with other password
self._userCreationLoop(
uname=user2,
host='10.0.0.1',
allow_passwordless=True,
connection_user=self.user,
connection_pass=self.password,
connection_charset='utf8',
saltenv={"LC_ALL": "en_US.utf8"}
)
self._userCreationLoop(
uname=user2,
host='10.0.0.2',
allow_passwordless=True,
unix_socket=True,
connection_user=self.user,
connection_pass=self.password,
connection_charset='utf8',
saltenv={"LC_ALL": "en_US.utf8"}
)
# Now check for results
ret = self.run_function(
'mysql.user_exists',
user=user2,
host='localhost',
password=<PASSWORD>,
password_hash=<PASSWORD>,
connection_user=self.user,
connection_pass=self.password,
connection_charset='utf8',
saltenv={"LC_ALL": "en_US.utf8"}
)
self.assertEqual(True, ret, ('Testing final user {0!r} on host {1!r}'
' failed').format(user2, 'localhost')
)
ret = self.run_function(
'mysql.user_exists',
user=user2,
host='10.0.0.1',
allow_passwordless=True,
connection_user=self.user,
connection_pass=self.password,
connection_charset='utf8',
saltenv={"LC_ALL": "en_US.utf8"}
)
self.assertEqual(True, ret, ('Testing final user {0!r} on host {1!r}'
' without password failed').format(user2, '10.0.0.1')
)
ret = self.run_function(
'mysql.user_exists',
user=user2,
host='10.0.0.2',
allow_passwordless=True,
unix_socket=True,
connection_user=self.user,
connection_pass=self.password,
connection_charset='utf8',
saltenv={"LC_ALL": "en_US.utf8"}
)
self.assertEqual(True, ret, ('Testing final user {0!r} on host {1!r}'
' without password failed').format(user2, '10.0.0.2')
)
# Empty password is not passwordless (or is it a bug?)
self._userCreationLoop(
uname=user3,
host='localhost',
password='',
connection_user=self.user,
connection_pass=self.password
)
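# (On the MySQL versions this test was written against, PASSWORD('') is simply
# the empty string, so a user created with password='' ends up with an empty
# Password column; that is why the expected table further below shows no hash
# for user3@localhost.)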
# user 3 on another host with a password
self._userCreationLoop(
uname=user3,
host='%',
password='<PASSWORD>',
new_password=<PASSWORD>,
connection_user=self.user,
connection_pass=self.password
)
# Now check for results
ret = self.run_function(
'mysql.user_exists',
user=user3,
host='localhost',
password='',
connection_user=self.user,
connection_pass=self.password
)
self.assertEqual(True, ret, ('Testing final user {0!r} on host {1!r}'
' with empty password failed').format(user3, 'localhost')
)
ret = self.run_function(
'mysql.user_exists',
user=user3,
host='%',
password=<PASSWORD>,
connection_user=self.user,
connection_pass=self.password
)
self.assertEqual(True, ret, ('Testing final user {0!r} on host {1!r}'
' with password failed').format(user3, '%')
)
# check unicode name, and that password takes precedence over password_hash
self._userCreationLoop(
uname=user4,
host='%',
password=<PASSWORD>,
# this is password('<PASSWORD>')
password_hash='*<PASSWORD>0DBF',
connection_user=self.user,
connection_pass=self.password,
connection_charset='utf8',
saltenv={"LC_ALL": "en_US.utf8"}
)
# Now check for results
ret = self.run_function(
'mysql.user_exists',
user=user4_utf8,
host='%',
password=<PASSWORD>,
connection_user=self.user,
connection_pass=self.password,
connection_charset='utf8',
saltenv={"LC_ALL": "en_US.utf8"}
)
self.assertEqual(True, ret, ('Testing final user {0!r} on host {1!r}'
' with password taken from password and not password_hash'
' failed').format(user4_utf8, '%')
)
self._userCreationLoop(
uname=user5,
host='localhost',
password='\<PASSWORD>',
new_password=<PASSWORD>,
unix_socket=True,
connection_user=self.user,
connection_pass=self.password,
connection_charset='utf8',
saltenv={"LC_ALL": "en_US.utf8"}
)
ret = self.run_function(
'mysql.user_exists',
user=user5_utf8,
host='localhost',
password=<PASSWORD>,
connection_user=self.user,
connection_pass=self.password,
connection_charset='utf8',
saltenv={"LC_ALL": "en_US.utf8"}
)
self.assertEqual(True, ret, ('Testing final user {0!r} on host {1!r}'
' with utf8 password failed').format(user5_utf8, 'localhost')
)
# for this one we give password in unicode and check it in utf-8
self._userCreationLoop(
uname=user6,
host='10.0.0.1',
password=' <PASSWORD>',
new_password=user6_pwd_u,
connection_user=self.user,
connection_pass=self.password,
connection_charset='utf8',
saltenv={"LC_ALL": "en_US.utf8"}
)
# Now check for results
ret = self.run_function(
'mysql.user_exists',
user=user6_utf8,
host='10.0.0.1',
password=<PASSWORD>,
connection_user=self.user,
connection_pass=self.password,
connection_charset='utf8',
saltenv={"LC_ALL": "en_US.utf8"}
)
self.assertEqual(True, ret, ('Testing final user {0!r} on host {1!r}'
' with unicode password failed').format(user6_utf8, '10.0.0.1')
)
# Final result should be:
# mysql> select User, Host, Password from user where User like 'user%';
# +--------------------+-----------+-------------------------------+
# | User | Host | Password |
# +--------------------+-----------+-------------------------------+
# | user "2'標 | 10.0.0.1 | |
# | user "2'標 | 10.0.0.2 | |
# | user "2'標 | localhost | *3A38A7B94B0(...)60B7AA38FE61 |
# | user "3;,?:@=&/ | % | *AA3B1D4105(...)47EA349E1FD7D |
# | user "3;,?:@=&/ | localhost | |
# | user %--"6 | 10.0.0.1 | *90AE800593(...)E42D17EEA5369 |
# | user '1 | localhost | *4DF33B3B1(...)327877FAB2F4BA |
# | user ``"5 | localhost | *3752E65CD(...)FC6C998B12C376 |
# | user":;,?:@=&/4標 | % | *FC8EF8DBF(...)7478D5CF3DD57C |
# +--------------------+-----------+-------------------------------+
self._chck_userinfo(user=user2,
host='10.0.0.1',
check_user=user2,
check_hash=''
)
self._chck_userinfo(user=user2,
host='10.0.0.2',
check_user=user2,
check_hash=''
)
self._chck_userinfo(user=user2,
host='localhost',
check_user=user2,
check_hash=user2_pwd_hash
)
self._chck_userinfo(user=user3,
host='%',
check_user=user3,
check_hash=user3_pwd_hash
)
self._chck_userinfo(user=user3,
host='localhost',
check_user=user3,
check_hash=''
)
self._chck_userinfo(user=user4,
host='%',
check_user=user4_utf8,
check_hash=user4_pwd_hash
)
self._chck_userinfo(user=user6,
host='10.0.0.1',
check_user=user6_utf8,
check_hash=user6_pwd_hash
)
self._chck_userinfo(user=user1,
host='localhost',
check_user=user1,
check_hash=user1_pwd_hash
)
self._chck_userinfo(user=user5,
host='localhost',
check_user=user5_utf8,
check_hash=user5_pwd_hash
)
# check user_list function
ret = self.run_function(
'mysql.user_list',
connection_user=self.user,
connection_pass=self.password,
connection_charset='utf8',
saltenv={"LC_ALL": "en_US.utf8"}
)
self.assertIn({'Host': 'localhost', 'User': user1}, ret)
self.assertIn({'Host': 'localhost', 'User': user2}, ret)
self.assertIn({'Host': '10.0.0.1', 'User': user2}, ret)
self.assertIn({'Host': '10.0.0.2', 'User': user2}, ret)
self.assertIn({'Host': '%', 'User': user3}, ret)
self.assertIn({'Host': 'localhost', 'User': user3}, ret)
self.assertIn({'Host': '%', 'User': user4_utf8}, ret)