input | output
---|---
stringlengths 2.65k–237k | stringclasses 1 value
from operator import itemgetter
from functools import reduce
from launcher import parse_file
from frequency import get_freq_bounds_ryzen
def calc_share_ratios(list_prio, cores):
""" Basic Shares calculator"""
# Shares calculation logic
max_shares_per_app = 100
# collect all high priority apps
shares_per_app = None
if len(list_prio) <= cores:
total_shares = sum([r[2] for r in list_prio])
shares_per_app = [r[2]/total_shares for r in list_prio]
print(total_shares, shares_per_app)
elif len(list_prio) > cores:
        # we have more apps to run than cores
# Option 1 assume all High priority apps have the same shares
# Hence all of them will run under the same limit and
# linux scheduler can take care of them
shares_per_app = [max_shares_per_app/(max_shares_per_app*cores)]*cores
# we are done we can return shares_per_app
return shares_per_app
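# Hypothetical usage sketch (not part of the original module). It assumes the
# process tuples produced by parse_file() look like (name, pid, shares, priority),
# which matches the r[2]/r[3] indexing used throughout this file.
def _example_calc_share_ratios():
    procs = [("appA", 1, 50, -1), ("appB", 2, 150, -1)]
    # Two apps on four cores: ratios are proportional to the share values.
    return calc_share_ratios(procs, cores=4)  # expected [0.25, 0.75]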
def calc_share_ratios_2(list_prio, inc_cores, ratios):
    """Recompute share ratios over the apps still flagged in inc_cores.

    Excluded apps (inc_cores[i] is False) keep their previous value from
    ratios; the rest are renormalised over the remaining shares.
    """
    sum_shares = 0
for i, work in enumerate(list_prio):
if inc_cores[i]:
sum_shares += work[2]
shares = [r[2]/sum_shares if inc_cores[i] else ratios[i] for i, r in enumerate(list_prio)]
print("r2", sum_shares, shares)
return shares
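# Hypothetical usage sketch (not part of the original module): recompute the
# ratios only for the apps whose cores are still below the per-core cap,
# keeping the previous ratio for the ones that are already capped.
def _example_calc_share_ratios_2():
    procs = [("appA", 1, 50, -1), ("appB", 2, 150, -1)]
    prev_ratios = [0.25, 0.75]
    still_open = [True, False]  # appB already hit max_per_core
    return calc_share_ratios_2(procs, still_open, prev_ratios)  # expected [1.0, 0.75]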
def allocate_shares_loop(limit, _proc, max_per_core, cores, spill):
    """Distribute a total budget `limit` over up to `cores` apps in _proc,
    proportionally to their shares and capped at max_per_core per core,
    iteratively redistributing any leftover budget until it falls to `spill`.
    """
shares_app = calc_share_ratios(_proc, cores)
left_over_pwr = limit
limit_per_core = None
# allocate
if limit > min(len(_proc),cores)*max_per_core:
# Have more power than needs allocation
limit_per_core = [max_per_core]*min(len(_proc), cores)
left_over_pwr = left_over_pwr - min(len(_proc), cores)*max_per_core
return left_over_pwr, limit_per_core, shares_app
# Allocate and check
limit_per_core = [min(r*limit, max_per_core) for r in shares_app]
cores_to_include = [False if r >= max_per_core else True for r in limit_per_core]
count = 0
# Check for leftover power
left_over_pwr = left_over_pwr - sum(limit_per_core)
print("FIRST ALLOCATION", cores_to_include, limit_per_core, left_over_pwr)
while int(round(left_over_pwr, 0)) > spill:
# we have left over power
        if int(round(left_over_pwr, 0)) <= spill:
print("END")
break
if reduce((lambda x, y: x and y), cores_to_include):
left_over_pwr = left_over_pwr - sum(limit_per_core)
break
        # redistribute this power among the apps that didn't receive it in the last
        # round of proportional share calculations
# find apps and shares to consider
ncores = len([x for x in cores_to_include if x])
if ncores == 1:
limit_per_core = [min(l+(1*left_over_pwr), max_per_core) if c else l for l, c in zip(limit_per_core, cores_to_include)]
left_over_pwr = left_over_pwr - sum(limit_per_core)
break
elif ncores < 1:
# Excess power
break
else:
            shares_app = calc_share_ratios_2(_proc, cores_to_include, shares_app)
limit_per_core = [min(l+(r*left_over_pwr), max_per_core) if c else l for l, r, c in zip(limit_per_core, shares_app, cores_to_include)]
print(str(count), left_over_pwr, limit_per_core, shares_app)
count += 1
cores_to_include = [False if r >= max_per_core else True for r in limit_per_core]
left_over_pwr = left_over_pwr - sum(limit_per_core)
print("Entity left = ", left_over_pwr)
left_over_pwr = 0 if left_over_pwr < 0 else left_over_pwr
return left_over_pwr, limit_per_core, shares_app
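# Hypothetical usage sketch (not part of the original module): allocate a
# 15000 mW budget between two apps with 50 and 150 shares under a 10000 mW
# per-core cap. The proportional pass caps the larger app and the loop hands
# the leftover budget to the remaining one.
def _example_allocate_shares_loop():
    procs = [("appA", 1, 50, -1), ("appB", 2, 150, -1)]
    left_over, limits, shares = allocate_shares_loop(15000, procs, 10000, 4, 0)
    return left_over, limits, shares  # expected (0, [5000, 10000], [0.25, 0.75])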
def first_allocation(power, cores, app_file):
    """Parse app_file and split the power budget (power, nominally in W and
    scaled by 1000 internally) between high- and low-priority applications.

    Returns (leftover_power, high_set, low_set), where each set is a tuple
    (per-core limits, share ratios, list of apps).
    """
list_procs = parse_file(app_file)
list_procs.sort(key=itemgetter(3))
limit = power*1000
max_per_core = 10000
# cores = 4
high = [r for r in list_procs if r[3] < 0]
low = [r for r in list_procs if r[3] > 0]
high_set = None
low_set = None
if high is None:
        # we got no High Power applications
high_set = None
extra_pwr = limit
else:
high.sort(key=itemgetter(2))
extra_pwr, hi_limits, shares_high = allocate_shares_loop(limit, high, max_per_core, cores, 0)
print("Power left = ", extra_pwr)
high_set = (hi_limits, shares_high, high)
cores_avil = cores if high is None else cores-len(high)
# if int(round(extra_pwr, 0)) > 0 and not(low is None) and cores_avil > 0:
if not(low is None) and cores_avil > 0:
# We have power for low priority
        # First check if we have cores available
low.sort(key=itemgetter(2))
if int(round(extra_pwr, 0)) > 0 :
extra_pwr, lo_limits, shares_lo = allocate_shares_loop(extra_pwr, low, max_per_core, cores_avil, 0)
else:
            # get case for 1 W per available core
_,lo_limits, shares_lo = allocate_shares_loop(1000*cores_avil, low, max_per_core, cores_avil, 0)
extra_pwr = None
low_set = (lo_limits, shares_lo, low)
return extra_pwr, high_set, low_set
def get_list_limits(power, cores, app_file):
extra_power, high_set, low_set = first_allocation(power, cores, app_file)
all_limits = None
all_apps = None
    if high_set is not None:
        # We have high_prio apps
all_limits = high_set[0]
all_apps = high_set[2]
    if low_set is not None:
        # We have low_prio apps
        if extra_power is not None:
all_limits += low_set[0]
all_apps += low_set[2]
return all_apps, all_limits
def first_freq_allocation(power_limit, cores, app_file):
list_procs = parse_file(app_file)
list_procs.sort(key=itemgetter(3))
bounds = get_freq_bounds_ryzen()
high = [r for r in list_procs if r[3] < 0]
low = [r for r in list_procs if r[3] > 0]
TDP = 85*1000
alpha = 1
if power_limit*1000 < TDP:
alpha = power_limit*1000/float(TDP)
# WARN: hard-coding max frequency for 10 active cores
# add code to read directly from relevant MSR's
# if len(list_procs) > 10:
max_turbo = 3400000
max_per_core = min(get_freq_bounds_ryzen()[1],max_turbo)
freq_limit = alpha * max_per_core * cores
high_set = None
low_set = None
extra_freq = freq_limit
print("FREQ CONFIG: ", power_limit, freq_limit, alpha, max_per_core, max_turbo)
if high is None:
        # we got no High Power applications
high_set = None
extra_freq = freq_limit
else:
high.sort(key=itemgetter(2))
extra_freq, hi_limits, shares_high = allocate_shares_loop(extra_freq, high, max_per_core, min(cores, len(high)), 100000)
# WARN: Hack for fixing lower limit for frequency
hi_limits = [max(h, bounds[0]) for h in hi_limits]
print("freq left = ", extra_freq)
high_set = (hi_limits, shares_high, high)
    # First check if we have cores available
cores_avil = cores if high is None else cores-len(high)
# if int(round(extra_pwr, 0)) > 0 and not(low is None) and cores_avil > 0:
if not(low is None) and cores_avil > 0:
# We have power for low priority
low.sort(key=itemgetter(2))
if int(round(extra_freq, 0)) > 0 :
extra_freq, lo_limits, shares_lo = allocate_shares_loop(extra_freq, low, max_per_core, cores_avil, 100000)
else:
            # get case for 800 MHz per available core
_,lo_limits, shares_lo = allocate_shares_loop(800000*cores_avil, low, max_per_core, cores_avil, 100000)
# WARN: Hack for fixing lower limit for frequency
lo_limits = [max(l, bounds[0]) for l in lo_limits]
extra_freq = None
low_set = (lo_limits, shares_lo, low)
return extra_freq, high_set, low_set
def first_perf_allocation(power_limit, cores, app_file):
list_procs = parse_file(app_file)
list_procs.sort(key=itemgetter(3))
high = [r for r in list_procs if r[3] < 0]
low = [r for r in list_procs if r[3] > 0]
TDP = 85*1000
alpha = 1
if power_limit*1000 < TDP:
alpha = (power_limit*1000)/float(TDP)
max_per_core = 100
perf_limit = alpha * max_per_core * cores
high_set = None
low_set = None
extra_freq = perf_limit
print("PERF CONFIG: ", power_limit*1000, perf_limit, alpha, max_per_core)
if high is None:
        # we got no High Power applications
high_set = None
extra_freq = perf_limit
else:
high.sort(key=itemgetter(2))
extra_freq, hi_limits, shares_high = allocate_shares_loop(extra_freq, high, max_per_core, min(cores, len(high)), 1)
# WARN: Hack for fixing lower limit for frequency
# hi_limits = [max(h, 100) for h in hi_limits]
print("freq left = ", extra_freq)
high_set = (hi_limits, shares_high, high)
    # First check if we have cores available
cores_avil = cores if high is None else cores-len(high)
# if int(round(extra_pwr, 0)) > 0 and not(low is None) and cores_avil > 0:
if not(low is None) and cores_avil > 0:
# We have power for low priority
low.sort(key=itemgetter(2))
if int(round(extra_freq, 0)) > 0 :
extra_freq, lo_limits, shares_lo = allocate_shares_loop(extra_freq, low, max_per_core, cores_avil, 1)
else:
            # get case for 1 performance unit per available core
_,lo_limits, shares_lo = allocate_shares_loop(1*cores_avil, low, max_per_core, cores_avil, 1)
# WARN: Hack for fixing lower limit for frequency
# lo_limits = [max(l, 1) for l in lo_limits]
extra_freq = None
low_set = (lo_limits, shares_lo, low)
return extra_freq, high_set, low_set
def get_list_limits_cores(power, cores, app_file, opt="Power"):
high_set = None
low_set = None
all_limits = None
all_shares = None
high_apps = None
low_apps = None
high_cores = None
low_cores = None
high_limits = None
low_limits = None
high_shares = None
low_shares = None
start = 0
if opt == "Freq":
__, high_set, low_set = first_freq_allocation(power, cores, app_file)
elif opt == "Power":
__, high_set, low_set = first_allocation(power, cores, app_file)
elif opt == "Perf":
__, high_set, low_set = first_perf_allocation(power, cores, app_file)
    if high_set is not None:
        # We have high_prio apps
all_limits = high_set[0]
high_limits = high_set[0]
high_shares = high_set[1]
high_apps = high_set[2]
high_cores = [i*2 for i in range(start, len(all_limits))]
all_shares = high_set[1] # get high shares
start = len(all_limits)
    if low_set is not None:
        # We have low_prio apps
all_limits += low_set[0]
low_limits = low_set[0]
low_shares = low_set[1]
        low_apps = low_set[2]
<gh_stars>1-10
import libcst as cst
from libcst import CSTNode
import random
from lampion.components.engine import Engine
def test_create_engine_with_empty_config():
testobject = Engine({}, "PLACEHOLDER")
assert testobject
def test_create_engine_with_none_config():
testobject = Engine(None, "PLACEHOLDER_ENGINE_OUTPUT")
assert testobject
def test_default_engine_has_transformers():
testobject = Engine({}, "PLACEHOLDER")
assert len(testobject.get_transformers()) > 0
def test_run_with_default_transformers_gives_output():
testobject = Engine(None, "PLACEHOLDER_ENGINE_OUTPUT")
example_cst = [example()]
altered_cst = testobject.run(example_cst)[0]
assert altered_cst
def test_run_with_default_transformers_input_remains_unchanged():
testobject = Engine(None, "PLACEHOLDER_ENGINE_OUTPUT")
random.seed(1996)
example_cst = [example()]
initial_value = str(example_cst[0][1].code)
altered_csts = testobject.run(example_cst)
testobject.run(example_cst)
post_change_code = get_first_code(example_cst)
assert initial_value == post_change_code
def test_run_with_default_transformers_output_different_to_input():
testobject = Engine(None, "PLACEHOLDER_ENGINE_OUTPUT")
random.seed(1996)
example_cst = [example()]
initial_value = str(example_cst[0][1].code)
altered_csts = testobject.run(example_cst)
altered_cst = altered_csts[0][1]
assert altered_cst.code != initial_value
def test_run_with_default_transformers_with_two_CSTs_both_changed():
testobject = Engine({"transformations": 50}, "PLACEHOLDER_ENGINE_OUTPUT")
random.seed(1996)
example_cst_A = cst.parse_module("def hi(): \n\tprint(\"Hello World\")")
example_cst_B = cst.parse_module("def bye(): \n\tprint(\"Goodbye (cruel) World\")")
example_A_value = str(example_cst_A.code)
example_B_value = str(example_cst_B.code)
csts = [("PLACEHOLDER", example_cst_A), ("PLACEHOLDER", example_cst_B)]
altered_csts = testobject.run(csts)
altered_cst_A = get_first_code([x for x in altered_csts if "Hello" in x[1].code])
altered_cst_B = get_first_code([x for x in altered_csts if "Goodbye" in x[1].code])
assert altered_cst_A != example_A_value
assert altered_cst_B != example_B_value
def test_run_with_default_transformers_with_two_CSTs_output_has_paths_to_compare():
# This is a regression test, as when I wanted to print some of the changed code
# It started to explode around my head
testobject = Engine(None, "PLACEHOLDER_ENGINE_OUTPUT")
random.seed(1996)
example_cst_A = cst.parse_module("def hi(): \n\tprint(\"Hello World\")")
example_cst_B = cst.parse_module("def bye(): \n\tprint(\"Goodbye (cruel) World\")")
csts = [("PLACEHOLDER_A", example_cst_A), ("PLACEHOLDER_B", example_cst_B)]
altered_csts = testobject.run(csts)
assert len(altered_csts) == 2
first_altered_cst = altered_csts[0]
assert first_altered_cst[0]
assert first_altered_cst[1]
second_altered_cst = altered_csts[1]
assert second_altered_cst[0]
assert second_altered_cst[1]
def test_run_with_default_transformers_with_two_CSTs_both_inputs_stay_unchanged():
testobject = Engine(None, "PLACEHOLDER_ENGINE_OUTPUT")
random.seed(1996)
example_cst_A = cst.parse_module("def hi(): \n\tprint(\"Hello World\")")
example_cst_B = cst.parse_module("def bye(): \n\tprint(\"Goodbye (cruel) World\")")
example_A_value = str(example_cst_A.code)
example_B_value = str(example_cst_B.code)
csts = [("PLACEHOLDER", example_cst_A), ("PLACEHOLDER", example_cst_B)]
testobject.run(csts)
assert example_cst_A.code == example_A_value
assert example_cst_B.code == example_B_value
def test_with_one_file_after_transformation_path_is_the_same():
testobject = Engine(None, "PLACEHOLDER_ENGINE_OUTPUT")
random.seed(1996)
example_cst = [example()]
initial_path = str(example_cst[0][0])
altered_csts = testobject.run(example_cst)
post_change_path = altered_csts[0][0]
assert post_change_path == initial_path
def test_with_one_file_per_class_should_have_expected_transformations():
testobject = Engine({"transformationscope": "per_class", "transformations": 10}, None)
random.seed(1996)
example_cst = [example()]
altered_csts = testobject.run(example_cst)
assert testobject.get_successful_transformations() == 10
def test_with_one_file_global_scope_should_have_expected_transformations():
testobject = Engine({"transformationscope": "global", "transformations": 10}, None)
random.seed(1996)
example_cst = [example()]
altered_csts = testobject.run(example_cst)
assert testobject.get_successful_transformations() == 10
def test_with_one_file_per_class_should_have_expected_transformations_variant_b():
testobject = Engine({"transformationscope": "per_class", "transformations": 15}, None)
random.seed(1996)
example_cst = [example()]
altered_csts = testobject.run(example_cst)
assert testobject.get_successful_transformations() == 15
def test_run_with_two_csts_paths_match():
testobject = Engine(None, "PLACEHOLDER_ENGINE_OUTPUT")
random.seed(1996)
example_cst_A = cst.parse_module("def hi(): \n\tprint(\"Hello World\")")
example_cst_B = cst.parse_module("def bye(): \n\tprint(\"Goodbye (cruel) World\")")
csts = [("PLACEHOLDER_A", example_cst_A), ("PLACEHOLDER_B", example_cst_B)]
altered_csts = testobject.run(csts)
altered_cst_A = [x for x in altered_csts if "Hello" in x[1].code]
altered_cst_B = [x for x in altered_csts if "Goodbye" in x[1].code]
altered_cst_path_A = altered_cst_A[0][0]
altered_cst_path_B = altered_cst_B[0][0]
assert altered_cst_path_A == "PLACEHOLDER_A"
assert altered_cst_path_B == "PLACEHOLDER_B"
def test_run_with_two_csts_second_method_is_kept():
testobject = Engine(None, "PLACEHOLDER_ENGINE_OUTPUT")
random.seed(1996)
example_cst_A = cst.parse_module("def hi(): \n\tprint(\"Hello World\")")
example_cst_B = cst.parse_module("def bye(): \n\tprint(\"Goodbye (cruel) World\")")
csts = [("PLACEHOLDER_A", example_cst_A), ("PLACEHOLDER_B", example_cst_B)]
altered_csts = testobject.run(csts)
altered_cst_B = [x for x in altered_csts if "Goodbye" in x[1].code]
# If one of them is 0 and the other one is two,
    # then there was an issue putting them back into the engine's running ASTs
assert len(altered_cst_B) == 1
def test_run_with_two_csts_first_method_is_kept():
testobject = Engine(None, "PLACEHOLDER_ENGINE_OUTPUT")
random.seed(1996)
example_cst_A = cst.parse_module("def hi(): \n\tprint(\"Hello World\")")
example_cst_B = cst.parse_module("def bye(): \n\tprint(\"Goodbye (cruel) World\")")
csts = [("PLACEHOLDER_A", example_cst_A), ("PLACEHOLDER_B", example_cst_B)]
altered_csts = testobject.run(csts)
altered_cst_A = [x for x in altered_csts if "Hello" in x[1].code]
# If one of them is 0 and the other one is two,
    # then there was an issue putting them back into the engine's running ASTs
assert len(altered_cst_A) == 1
def test_run_with_two_csts_check_only_one_transformation_one_touched():
testobject = Engine(config={"transformations": 1, "transformationscope": "global"})
random.seed(1996)
example_cst_A = cst.parse_module("def hi(): \n\tprint(\"Hello World\")")
example_cst_B = cst.parse_module("def bye(): \n\tprint(\"Goodbye (cruel) World\")")
csts = [("PLACEHOLDER_A", example_cst_A), ("PLACEHOLDER_B", example_cst_B)]
testobject.run(csts)
assert len(testobject.get_touched_paths()) == 1
def test_run_with_two_csts_check_many_transformations_both_touched():
testobject = Engine(config={"transformations": 40, "transformationscope": "global"})
random.seed(1996)
example_cst_A = cst.parse_module("def hi(): \n\tprint(\"Hello World\")")
example_cst_B = cst.parse_module("def bye(): \n\tprint(\"Goodbye (cruel) World\")")
csts = [("PLACEHOLDER_A", example_cst_A), ("PLACEHOLDER_B", example_cst_B)]
testobject.run(csts)
assert len(testobject.get_touched_paths()) == 2
def test_run_global_with_two_csts_check_many_transformations_has_expected_number_of_transformations():
testobject = Engine(config={"transformations": 20, "transformationscope": "global"})
random.seed(1996)
example_cst_A = cst.parse_module("def hi(): \n\tprint(\"Hello World\")")
example_cst_B = cst.parse_module("def bye(): \n\tprint(\"Goodbye (cruel) World\")")
csts = [("PLACEHOLDER_A", example_cst_A), ("PLACEHOLDER_B", example_cst_B)]
testobject.run(csts)
assert testobject.get_successful_transformations() == 20
def test_run_global_with_two_csts_check_many_transformations_has_expected_number_of_transformations_variant_b():
testobject = Engine(config={"transformations": 50, "transformationscope": "global"})
random.seed(1996)
example_cst_A = cst.parse_module("def hi(): \n\tprint(\"Hello World\")")
example_cst_B = cst.parse_module("def bye(): \n\tprint(\"Goodbye (cruel) World\")")
csts = [("PLACEHOLDER_A", example_cst_A), ("PLACEHOLDER_B", example_cst_B)]
testobject.run(csts)
assert testobject.get_successful_transformations() == 50
def test_run_with_two_csts_no_transformations_none_touched():
testobject = Engine(config={"transformations": 0, "transformationscope": "global"})
random.seed(1996)
example_cst_A = cst.parse_module("def hi(): \n\tprint(\"Hello World\")")
example_cst_B = cst.parse_module("def bye(): \n\tprint(\"Goodbye (cruel) World\")")
csts = [("PLACEHOLDER_A", example_cst_A), ("PLACEHOLDER_B", example_cst_B)]
testobject.run(csts)
assert len(testobject.get_touched_paths()) == 0
def test_run_per_class_with_default_transformers_with_two_CSTs_should_have_2_csts():
testobject = Engine({"transformationscope": "per_class"}, None)
random.seed(1996)
example_cst_A = cst.parse_module("def hi(): \n\tprint(\"Hello World\")")
example_cst_B = cst.parse_module("def bye(): \n\tprint(\"Goodbye (cruel) World\")")
csts = [("PLACEHOLDER", example_cst_A), ("PLACEHOLDER", example_cst_B)]
altered_csts = testobject.run(csts)
assert len(altered_csts) == 2
def test_run_per_class_with_default_transformers_with_two_CSTs_both_touched():
testobject = Engine({"transformationscope": "per_class"}, None)
random.seed(1996)
example_cst_A = cst.parse_module("def hi(): \n\tprint(\"Hello World\")")
example_cst_B = cst.parse_module("def bye(): \n\tprint(\"Goodbye (cruel) World\")")
csts = [("PLACEHOLDER_A", example_cst_A), ("PLACEHOLDER_B", example_cst_B)]
testobject.run(csts)
assert len(testobject.get_touched_paths()) == 2
def test_run_per_class_with_default_transformers_with_two_CSTs_both_changed():
testobject = Engine({"transformationscope": "per_class"}, None)
random.seed(1996)
example_cst_A = cst.parse_module("def hi(): \n\tprint(\"Hello World\")")
example_cst_B = cst.parse_module("def bye(): \n\tprint(\"Goodbye (cruel) World\")")
example_A_value = str(example_cst_A.code)
example_B_value = str(example_cst_B.code)
csts = [("PLACEHOLDER_A", example_cst_A), ("PLACEHOLDER_B", example_cst_B)]
altered_csts = testobject.run(csts)
altered_cst_A = get_first_code([x for x in altered_csts if "Hello" in x[1].code])
altered_cst_B = get_first_code([x for x in altered_csts if "Goodbye" in x[1].code])
assert altered_cst_A != example_A_value
assert altered_cst_B != example_B_value
def test_run_per_class_with_default_transformers_with_two_CSTs_should_have_expected_number_of_transformations():
testobject = Engine({"transformationscope": "per_class", "transformations": 5}, None)
random.seed(1996)
example_cst_A = cst.parse_module("def hi(): \n\tprint(\"Hello World\")")
example_cst_B = cst.parse_module("def bye(): \n\tprint(\"Goodbye (cruel) World\")")
csts = [("PLACEHOLDER", example_cst_A), ("PLACEHOLDER", example_cst_B)]
testobject.run(csts)
assert testobject.get_successful_transformations() == 10
def test_run_per_class_with_default_transformers_with_two_CSTs_should_have_expected_number_of_transformations_variant_b():
testobject = Engine({"transformationscope": "per_class", "transformations": 10}, None)
random.seed(1996)
example_cst_A = cst.parse_module("def hi(): \n\tprint(\"Hello World\")")
example_cst_B = cst.parse_module("def bye(): \n\tprint(\"Goodbye (cruel) World\")")
csts = [("PLACEHOLDER", example_cst_A), ("PLACEHOLDER", example_cst_B)]
testobject.run(csts)
assert testobject.get_successful_transformations() == 20
def test_run_with_bad_scope_CSTs_should_stay_unchanged():
testobject = Engine({"transformationscope": "bad_scope", "transformations": 10}, None)
random.seed(1996)
example_cst_A = cst.parse_module("def hi(): \n\tprint(\"Hello World\")")
example_cst_B = cst.parse_module("def bye(): \n\tprint(\"Goodbye (cruel) World\")")
csts = [("PLACEHOLDER", example_cst_A), ("PLACEHOLDER", example_cst_B)]
altered_csts = testobject.run(csts)
altered_cst_A = get_first_code([x for x in altered_csts if "Hello" in x[1].code])
altered_cst_B = get_first_code([x for x in altered_csts if "Goodbye" in x[1].code])
assert testobject.get_successful_transformations() == 0
assert len(testobject.get_touched_paths()) == 0
assert altered_cst_A == example_cst_A.code
assert altered_cst_B == example_cst_B.code
## "Integration" Tests
# Testing a bit of the logic around
def test_engine_with_literal_transformers_cst_has_no_literals():
# Important things to test here:
# 1. Termination
# 2. No successful Transformations
# 3. Bad Transformations recorded
# 4. Code remains unchanged
# 5. No Files touched
# Config that allows "only" literal-related Transformers
config = {}
config["AddUnusedVariableTransformer"] = False
config["AddCommentTransformer"] = False
config["RenameParameterTransformer"] = False
config["RenameVariableTransformer"] = False
config["AddNeutralElementTransformer"] = True
config["LambdaIdentityTransformer"] = True
testobject = Engine(config)
example_cst = [("PLACEHOLDER",cst.parse_module("def some(): return Math.Pi"))]
altered_cst = testobject.run(example_cst)
assert testobject.get_successful_transformations() == 0
assert testobject.get_failed_transformations() > 0
assert len(testobject.get_touched_paths()) == 0
def test_engine_per_class_with_literal_transformers_cst_has_no_literals():
# Config that allows "only" literal-related Transformers
config = {}
config["AddUnusedVariableTransformer"] = False
config["AddCommentTransformer"] = False
config["RenameParameterTransformer"] = False
config["RenameVariableTransformer"] = False
config["AddNeutralElementTransformer"] = True
config["LambdaIdentityTransformer"] = True
config["transformationscope"] = "per_class"
testobject = Engine(config)
example_cst = [("PLACEHOLDER",cst.parse_module("def some(): return Math.Pi"))]
altered_cst = testobject.run(example_cst)
assert testobject.get_successful_transformations() == 0
assert testobject.get_failed_transformations() > 0
assert len(testobject.get_touched_paths()) == 0
def test_engine_per_class_with_unfailable_transformers_has_no_failures():
# These transformers can never fail
config = {}
config["AddUnusedVariableTransformer"] = True
config["AddCommentTransformer"] = False
config["RenameParameterTransformer"] = False
config["RenameVariableTransformer"] = False
config["AddNeutralElementTransformer"] = False
config["LambdaIdentityTransformer"] = False
config["transformationscope"] = "per_class"
config["transformations"] = 10
testobject = Engine(config)
example_cst = [("PLACEHOLDER",cst.parse_module("def some(): \n\t a = 'bla' \n\t b = 'bla' \n\t c = 'bla' \n\t d = 'bla' \n\t e = 'bla' \n\t return Math.Pi"))]
altered_cst = testobject.run(example_cst)
assert testobject.get_successful_transformations() == 10
assert testobject.get_failed_transformations() == 0
def test_engine_with_unfailable_transformers_has_no_failures():
# These transformers can never fail
config = {}
config["AddUnusedVariableTransformer"] = True
config["AddCommentTransformer"] = False
config["RenameParameterTransformer"] = False
config["RenameVariableTransformer"] = False
config["AddNeutralElementTransformer"] = False
config["LambdaIdentityTransformer"] = False
config["transformationscope"] = "global"
config["transformations"] = 3
testobject = Engine(config)
example_cst = [("PLACEHOLDER",cst.parse_module("def some(): \n\t a = 'bla' \n\t b = 'bla' \n\t c = 'bla' \n\t d = 'bla' \n\t e = 'bla' \n\t return Math.Pi"))]
altered_cst = testobject.run(example_cst)
assert 3 == testobject.get_successful_transformations()
assert 0 == testobject.get_failed_transformations()
## Tests for Helpers, internals and Configs
def
Milk</option>
<option value="KF-Hip-House-14-Dj-Milk">19/12/12 - KF Hip House #14 - Dj Milk</option>
<option value="Gabe-Wrecked-Machines">07/01/12 - Gabe - Wrecked Machines</option>
<option value="Aniversario-do-Kengao">13/01/12 - Aniversário do Kengão</option>
<option value="Tribe-Club-Summer-Edition">21/01/12 - Tribe Club - Summer Edition</option>
<option value="La-Madre-DJ-Robson-Nogueira">27/01/12 - La Madre - DJ <NAME></option>
<option value="Summer-After">26/02/12 - Summer After</option>
<option value="Reinauguracao-Pos-Reforma">24/02/12 - Reinauguração Pós Reforma</option>
<option value="Pub-in-the-House-BDay-Sabrina-Rabelo">29/02/12 - Pub in the House + B.Day Sabrina Rabelo</option>
<option value="House-no-Deck-Dj-Nicodemo-Banda-All-Star-40">02/03/12 - House no Deck - Dj Nicodemo + Banda All Star 40</option>
<option value="House-no-Deck-Niver-Allan-Mello-Magno-Nascimento-Vanessa-Cabral">29/06/12 - House no Deck - Niver Allan Mello, Magno Nascimento, Vanessa Cabral</option>
<option value="QUARTA-FEIRA">04/09/13 - QUARTA-FEIRA</option>
<option value="House-no-Deck-com-Du-Aoki-Marco-Aoki-e-Diego-Colombini">16/03/12 - House no Deck com Du Aoki, Marco Aoki e Diego Colombini</option>
<option value="House-no-Deck-AfterFest-2a-Edicao">09/03/12 - House no Deck - AfterFest 2ª Edição + B.Day Plínio Boucault</option>
<option value="House-4-Friends-DJ-Felix-Mixer-Live">17/03/12 - House 4 Friends - DJ Felix + Mixer Live</option>
<option value="Beach-Conception-Dj-Felix-Dj-Pedro-Scarpa">24/03/12 - Beach Conception - Dj Felix + Dj Pedro Scarpa</option>
<option value="Electrance-Preview-Indoor">29/01/12 - Electrance Preview Indoor</option>
<option value="Island-Pool-Party-II">17/03/12 - Island Pool Party II</option>
<option value="House-no-Deck-com-DJ-Robson-Nogueira">23/03/12 - House no Deck com DJ Robson Nogueira</option>
<option value="House-no-Deck-Dj-Du-Aoki-Niver-Juliete-Leal">11/05/12 - House no Deck - Dj Du Aoki + Niver Juliete Leal</option>
<option value="Ministry-of-Sound-Campinas-Camarote-Perfect-Life">30/03/12 - Ministry of Sound Campinas - Camarote Perfect Life</option>
<option value="House-no-Deck-AfterFest-3-Edicao-BDay-Rick-Afterfest-Dj-Felix-e-Juliana-Minini">27/04/12 - House no Deck - AfterFest 3ª Edição + B.Day Rick Afterfest, Dj Felix e Juliana Minini</option>
<option value="House-no-Deck-Niver-Danielle-Ferri-e-Maiara-Nozari-Dj-Robson-Nogueira-Dj-Marcelo-Tromboni">18/05/12 - House no Deck - Niver Danielle Ferri e Maiara Nozari - Dj <NAME>, Dj Marcelo Tromboni</option>
<option value="House-no-Deck-Warm-Up-Fire-Up-Lounge-Dj-Edu-Zottini">21/06/13 - House no Deck - Warm Up Fire Up Lounge - Dj Edu Zottini</option>
<option value="Hip-House-Guten">01/06/12 - Hip-House Guten</option>
<option value="House-No-Deck-Dj-Robson-Nogueira">01/06/12 - House No Deck - Dj Robson Nogueira</option>
</select> </div>
<div id="comum">
<div class="info">
<h2>
Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 </h2>
<div class="compartilhar">
<div class="addthis_toolbox addthis_pill_combo">
<a class="addthis_button_tweet" tw:count="horizontal"></a>
<a class="addthis_button_facebook_like"></a>
<a class="addthis_counter addthis_pill_style"></a>
</div>
<script type="text/javascript" src="http://s7.addthis.com/js/250/addthis_widget.js#username=afterfest"></script> </div>
</div>
<div id="listaFotos">
<a href="/uploads/coberturas/XV-Edicao/20130919-084632-2.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 1" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084632-2.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 1"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 1"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084632-3.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 2" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084632-3.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 2"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 2"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084632-4.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 3" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084632-4.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 3"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 3"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084632-5.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 4" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084632-5.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 4"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 4"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084632-6.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 5" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084632-6.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 5"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 5"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084632-7.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 6" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084632-7.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 6"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 6"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084632-8.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 7" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084632-8.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 7"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 7"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084632-9.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 8" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084632-9.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 8"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 8"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084632-10.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 9" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084632-10.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 9"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 9"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084632-11.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 10" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084632-11.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 10"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 10"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084632-12.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 11" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084632-12.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 11"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 11"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084632-13.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 12" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084632-13.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 12"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 12"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084632-14.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 13" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084632-14.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 13"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 13"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084632-15.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 14" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084632-15.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 14"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 14"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084632-16.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 15" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084632-16.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 15"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 15"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084632-17.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 16" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084632-17.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 16"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 16"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084713-2.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 17" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084713-2.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 17"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 17"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084713-3.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 18" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084713-3.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 18"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 18"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084713-4.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 19" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084713-4.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 19"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 19"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084713-5.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 20" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084713-5.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 20"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 20"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084713-6.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 21" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084713-6.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 21"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 21"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084713-7.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 22" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084713-7.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 22"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 22"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084713-8.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 23" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084713-8.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 23"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 23"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084713-9.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 24" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084713-9.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 24"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 24"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084713-10.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 25" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084713-10.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 25"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 25"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084713-11.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 26" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084713-11.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 26"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 26"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084713-12.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 27" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084713-12.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 27"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 27"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084713-13.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 28" rel="galeria">
<img src="/uploads/coberturas/XV-Edicao/mini.20130919-084713-13.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 28"
alt="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - Foto 28"/></a>
<a href="/uploads/coberturas/XV-Edicao/20130919-084713-14.jpg"
title="Fotos BIXURRASCO - XV Edicao - FATEC - 14/09/2013 - | |
<reponame>mbaak/Eskapade
import numpy as np
from sklearn import preprocessing
import string
import pandas as pd
import scipy
def generate_unordered_categorical_random_data(n_obs, p, dtype=np.str):
"""
Generates unordered categorical random data.
:param int n_obs: Number of data points (rows) to generate
:param np.2darray p: The probabilities associated with each category per dimension. The length of p determines the
number of dimensions, the length of the j-th element of p (np.array) is the number of
categories for dimension j and p[j] are the probabilities for the categories of dimension j.
:param type dtype: The type of the data (str or int)
:return: The generated data
:rtype: np.ndarray
"""
n_dim = p.shape[0]
alphabet = np.array(list(string.ascii_lowercase))
data = np.empty((n_obs, n_dim), dtype=dtype)
for j in range(n_dim):
if dtype == np.str:
data[:, j] = alphabet[np.random.choice(np.arange(0, len(p[j])), n_obs, p=p[j])]
elif dtype == np.int:
data[:, j] = np.random.choice(np.arange(0, len(p[j])), n_obs, p=p[j])
else:
raise NotImplementedError
return data
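# Hypothetical usage sketch (not part of the original module): two unordered
# categorical dimensions, each with two categories; rows come out as letters
# ('a'/'b') because dtype defaults to np.str.
def _example_unordered_categorical():
    p = np.array([[0.5, 0.5], [0.2, 0.8]])  # p[j] = category probabilities for dimension j
    return generate_unordered_categorical_random_data(100, p)  # shape (100, 2)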
def generate_ordered_categorical_random_data(n_obs, p):
"""
Generates ordered categorical random data
:param int n_obs: Number of data points (rows) to generate
:param np.2darray p: The probabilities associated with each category per dimension. The length of p determines the
number of dimensions, the length of the j-th element of p (np.array) is the number of
categories for dimension j and p[j] are the probabilities for the categories of dimension j.
:return: The generated data
:rtype: np.ndarray
"""
n_dim = p.shape[0]
data = np.empty((n_obs, n_dim), dtype=np.int)
for j in range(n_dim):
data[:, j] = np.random.choice(np.arange(0, len(p[j])), n_obs, p=p[j])
return data
def generate_continuous_random_data(n_obs, means_stds):
"""
Generates continuous random data. The data is pulled from a gaussian distribution plus a uniform distribution.
    The lower and upper boundaries of the uniform distribution are equal to -m - 5*s and m + 5*s respectively, where m
and s are the mean and standard deviation of the gaussian distribution respectively.
NB: m should be positive!
:param int n_obs: Number of data points (rows) to generate
:param np.2darray means_stds: The length of means_stds determines the number of dimensions. means_stds[0]
(np.array) are the means for each dimension. means_stds[1] (np.array) are the
standard deviations for each dimension
:return: The generated data
:rtype: np.ndarray
"""
means = means_stds[0]
stds = means_stds[1]
try:
assert len(means) == len(stds)
except AssertionError:
        print('length of means is not equal to length of standard deviations')
n_dim = len(means)
data = np.empty((n_obs, n_dim), dtype=np.float)
for j in range(n_dim):
m = means[j]
s = stds[j]
data[:, j] = np.random.normal(m, s, n_obs) + np.random.uniform(low=-m - 5 * s, high=m + 5 * s, size=(n_obs,))
return data
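# Hypothetical usage sketch (not part of the original module): two continuous
# dimensions with means (1.0, 4.0) and standard deviations (0.5, 2.0).
def _example_continuous():
    means_stds = np.array([[1.0, 4.0], [0.5, 2.0]])  # row 0: means, row 1: stds
    return generate_continuous_random_data(1000, means_stds)  # shape (1000, 2)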
def generate_data(n_obs=None, p_unordered=None, p_ordered=None, means_stds=None,
dtype_unordered_categorical_data=np.str):
"""
Generates unordered categorical, ordered categorical and continuous random data.
See the docs of the functions generate_unordered_categorical_random_data, generate_ordered_categorical_random_data
and generate_continuous_random_data for more explanation about the p_unordered, p_ordered and means_stds
parameters.
The column names are alphabetically ascending starting with the continuous columns, then the unordered
categorical columns and finally the ordered categorical columns.
:param int n_obs: Number of data points (rows) to generate
:param np.2darray p_unordered: The probabilities associated with each category per unordered categorical dimension
:param np.2darray p_ordered: The probabilities associated with each category per ordered categorical dimension
:param np.2darray means_stds: The means and standard deviations per continuous dimension
:param type dtype_unordered_categorical_data: The type of the unordered categorical data (str or int)
:return: The generated data
:rtype: pd.DataFrame
"""
# Input checking
assert n_obs is not None and n_obs != 0, 'n_obs is 0 or None'
assert p_unordered is not None or p_ordered is not None or means_stds is not None, \
'p_unordered is None, p_ordered is None and means_stds is None. Please set one of these values'
if p_unordered is not None:
unordered_categorical_data = generate_unordered_categorical_random_data(n_obs,
p_unordered,
dtype=dtype_unordered_categorical_data)
else:
unordered_categorical_data = np.array([[]])
if p_ordered is not None:
ordered_categorical_data = generate_ordered_categorical_random_data(n_obs, p_ordered)
else:
ordered_categorical_data = np.array([[]])
if means_stds is not None:
continuous_data = generate_continuous_random_data(n_obs, means_stds)
else:
continuous_data = np.array([[]])
alphabet = np.array(list(string.ascii_lowercase))
columns1 = list(alphabet[0:continuous_data.shape[1]])
columns2 = list(alphabet[continuous_data.shape[1]:
continuous_data.shape[1] + unordered_categorical_data.shape[1]])
columns3 = list(alphabet[continuous_data.shape[1] + unordered_categorical_data.shape[1]:
continuous_data.shape[1] + unordered_categorical_data.shape[1] +
ordered_categorical_data.shape[1]])
df1 = pd.DataFrame(continuous_data, columns=columns1)
df2 = pd.DataFrame(unordered_categorical_data, columns=columns2)
df3 = pd.DataFrame(ordered_categorical_data, columns=columns3)
df = pd.concat([df1, df2, df3], axis=1)
return df
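# Hypothetical usage sketch (not part of the original module): one continuous,
# one unordered categorical and one ordered categorical column. Column names
# are assigned alphabetically: 'a' continuous, 'b' unordered, 'c' ordered.
def _example_generate_data():
    df = generate_data(n_obs=500,
                       p_unordered=np.array([[0.3, 0.7]]),
                       p_ordered=np.array([[0.1, 0.4, 0.5]]),
                       means_stds=np.array([[8.0], [2.5]]))
    return df  # pd.DataFrame with columns ['a', 'b', 'c']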
def find_peaks(data, continuous_i, count=1):
"""
    Finds peaks in a set of data points. A peak is a set of data points with more than 'count' data points with equal
value.
:param np.ndarray data: the data
:param iterable continuous_i: column indices. In these columns, this function searches for peaks.
    :param int count: the minimum number of data points per unique value for the value to be flagged as a peak
:return: (column index, np.array of peak values) as key-value pairs
:rtype: dict
"""
peaks = {}
for d in continuous_i:
u, c = np.unique(data[:, d], return_counts=True)
peaks[d] = u[c > count]
return peaks
def smooth_peaks(data, peaks, smoothing_fraction=0.0002):
"""
Smooths the peaks in the data. All data points in the peaks (data points with equal value) are replaced by a sample
drawn from a normal distribution. The mean of this normal distribution is equal to the value of the peak and the
standard deviation of this normal distribution is equal to the smoothing_fraction times the range of the data (
for the column holding the peak).
:param np.ndarray data: the data
:param dict peaks: (column index, np.array of peak values) as key-value pairs
:param float smoothing_fraction: fraction of the range (of the column) to use for smoothing
:return: smoothed data
:rtype: np.ndarray
"""
data_smoothed = data.copy()
for d, vs in peaks.items():
for v in vs:
i = np.where(data[:, d] == v)[0]
s = (data[:, d].max() - data[:, d].min()) * smoothing_fraction
data_smoothed[i, d] = np.random.normal(v, s, size=len(i))
return data_smoothed
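# Hypothetical usage sketch (not part of the original module): column 0 of the
# sample below has 200 identical values at 3.0, which find_peaks flags and
# smooth_peaks replaces with draws from a narrow gaussian around 3.0.
def _example_smooth_peaks():
    data = np.random.normal(0, 1, size=(1000, 1))
    data[:200, 0] = 3.0
    peaks = find_peaks(data, continuous_i=[0], count=1)  # {0: array([3.0])}
    return smooth_peaks(data, peaks)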
def remove_nans(data_smoothed):
"""
Removes np.nan from data_smoothed. If a row contains at least one np.nan then the whole row is removed.
:param np.ndarray data_smoothed: the data
:return: the data without np.nan's
:rtype: np.ndarray
"""
data_no_nans = data_smoothed.copy()
data_no_nans = data_no_nans[~np.isnan(data_no_nans).any(axis=1)]
return data_no_nans
def make_extremes(x, fraction=0.15):
"""
Calculates extremes: extreme_max = max + fraction * range, extreme_min = min - fraction * range
:param np.ndarray x: the data
:param float fraction: fraction of range to add to min and max
:return: extreme_min and extreme_max
:rtype: tuple
"""
xmin = []
xmax = []
xdiff = []
for i in range(x.shape[1]):
y = x[..., i]
y = y[~np.isnan(y)]
xmin.append(np.min(y))
xmax.append(np.max(y))
xdiff.append((xmax[i] - xmin[i]))
for i in range(x.shape[1]):
xmin[i] -= fraction * xdiff[i]
xmax[i] += fraction * xdiff[i]
return xmin, xmax
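# Hypothetical usage sketch (not part of the original module): for a single
# column ranging from 0 to 10 and the default fraction of 0.15, the extremes
# are the min/max padded by 15% of the range.
def _example_make_extremes():
    x = np.linspace(0.0, 10.0, 101).reshape(-1, 1)
    return make_extremes(x)  # ([-1.5], [11.5])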
def append_extremes(data_continuous, fraction=0.15):
"""
Appends extremes to the data.
:param np.ndarray data_continuous: the data
:param float fraction: fraction to use for calculation of the extremes
:return: the extremes appended to data_continuous
:rtype: tuple
"""
xmin, xmax = make_extremes(data_continuous, fraction=fraction)
# extremes are added because we want to extend the range on which the data is transformed to a normal distribution.
data_extremes = np.append(data_continuous, [xmin, xmax], axis=0).copy()
    # save indices, we want to remove the min and max after quantile transformation
imin = np.argmin(data_extremes, axis=0)
imax = np.argmax(data_extremes, axis=0)
return data_extremes, imin, imax
def transform_to_normal(data_extremes, imin, imax):
"""
Transforming a random distribution to a normal distribution can be done in the following way:
1. Compute the values of the CDF. These values are the percentiles. These are (always) uniformly distributed.
2. Use the percent point function (inverse of cdf) of a normal distribution to transform the uniform\
distribution to a normal distribution.
:param np.ndarray data_extremes: the continuous data columns with smoothed peaks and extremes appended
:param np.array imin: indices of the minimum per continuous column
:param np.array imax: indices of the maximum per continuous column
:return: the continuous columns normalized
:rtype: tuple (np.ndarray, list of trained sklearn.preprocessing.data.QuantileTransformer)
"""
qts = []
data_normalized_ = []
for d in range(0, data_extremes.shape[1]):
qt = preprocessing.QuantileTransformer(n_quantiles=len(data_extremes), subsample=len(data_extremes),
output_distribution='normal', copy=True)
a = qt.fit_transform(data_extremes[:, d].reshape(-1, 1))
a = np.delete(a, np.array([imin[d], imax[d]]))
data_normalized_.append(a)
qts.append(qt)
if data_normalized_:
data_normalized = np.stack(data_normalized_, axis=-1)
else:
data_normalized = np.empty((0,0))
return data_normalized, qts
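# Hypothetical usage sketch (not part of the original module): the
# append_extremes / transform_to_normal pipeline applied to one skewed
# continuous column; the output is approximately standard-normal and the
# fitted QuantileTransformer is kept for the inverse transform.
def _example_transform_to_normal():
    data_continuous = np.random.exponential(2.0, size=(1000, 1))
    data_extremes, imin, imax = append_extremes(data_continuous, fraction=0.15)
    data_normalized, qts = transform_to_normal(data_extremes, imin, imax)
    return data_normalized.shape, len(qts)  # ((1000, 1), 1)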
def insert_back_nans(data_normalized, data, unordered_categorical_i, ordered_categorical_i,
continuous_i):
"""
    Insert np.nan's back into the transformed continuous variables (data_normalized) before resampling and
<gh_stars>10-100
"""
Transform the parse tree produced by parser.py to a higher-level tree using
recursive descent.
"""
from magpieparsers.parser_common import *
from magpieparsers.types.evaluator import evaluate
from magpieparsers.types.infogripper import *
from magpieparsers.cplusplus.normalise import normalise_type_list
from astexpr import get_expression
import operator
class Error(Exception):
pass
CLASS_SPECIFIERS = ('union', 'struct', 'class', 'enum_specifier')
# AST helper functions
def any_child(node, name_list):
"""
Return the first child of "node" with a name matching any
in "name_list"
"""
for child in node.children:
if child.name in name_list:
return child
def any_in_list(list1, list2):
""" Return the item if any items in list1 are present in list2"""
for item in list1:
if item in list2:
return item
def all_children(node, names):
for child_name in node.get_child_names():
if child_name not in names:
return False
return True
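# Hypothetical usage sketch (not part of the original module), using a minimal
# stand-in for the parse-tree node type so the helpers above can be exercised
# in isolation; the real nodes come from magpieparsers.parser_common.
class _FakeNode(object):
    def __init__(self, name, children=None):
        self.name = name
        self.children = children or []
    def get_child_names(self):
        return [c.name for c in self.children]

def _example_ast_helpers():
    pt = _FakeNode('declaration', [_FakeNode('declaration_specifiers'),
                                   _FakeNode('init_declarator_list')])
    first = any_child(pt, CLASS_SPECIFIERS + ('declaration_specifiers',))
    both = all_children(pt, ('declaration_specifiers', 'init_declarator_list'))
    return first.name, both  # ('declaration_specifiers', True)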
# AST generator
class ASTGen(object):
def __init__(self, types_only = True, experimental = False):
"""
If types_only is set to True, only typedefs are converted.
Conversion of entities other than typedefs is experimental
and is likely to fail with an exception.
If experimental is set to True, a slightly different (new-format)
data structure will be produced.
"""
self.types_only = types_only
self.experimental = experimental
def convert(self, ast, pt, filename, types_only = True):
""" Convert 'pt' to 'ast'.
"""
return self.translation_unit(ast, pt, filename)
def translation_unit(self, baseast, pt, filename):
#print 'PT'
#pt.print_tree()
#print 'PT done'
#ast = Node(baseast, "translation_unit")
#ast.leaf = filename
ast = baseast # Hmm - nfd
#ref_node = ast
for node in pt.children:
if node.name == 'declaration':
#print '*******************************\n\
# processing tree: '
#node.print_tree()
#decl_types = self.declaration(ref_node, node)
decl_types = self.declaration(ast, node)
ast.add_children(decl_types)
#ref_node = decl_types[-1]
#print '*******************************\n\
# new starting-tree: '
#ref_node.print_tree()
#print '*******************************\n\
# processing final-tree: '
#ast.print_tree()
elif node.name == 'function_definition':
if not self.types_only:
self.function_definition(ast, node)
#ast.add_child(Node(ast, 'unfinished_function_definition', source = node))
elif node.name == 'enum':
if not self.types_only:
self.enum(ast, node)
else:
ast.add_child(UnknownNode(ast, node, 'translation_unit', source = node))
return ast
def _wrap_node(self, name, parentast, ast):
result = Node(parentast, name, source = parentast)
result.add_child(ast)
return result
def enum(self, parent, pt):
"""
We look for:
enum
enum_specifier
enumerator_list
enumerator
...
enumerator
...
init_declarator_list (optional)
"""
# FIXME: Stub
ast = Node(parent, 'unfinished')
return ast
def function_definition(self, parent, pt):
"""
Yay a function! We expect it to look like this:
function_definition
declaration_specifiers - 'inline' etc plus return type
function_declarator - function name and parameters
declaration_list - function parameters (K&R only)
compound_statement - function body
"""
def walk_to_fdd(fd_pt):
""" Walks a chain of function declarators, collecting
indirection, until we get to function_direct_declarator.
"""
indirection = ''
while fd_pt:
if fd_pt.leaf == '*':
indirection += '*'
fdd_pt = fd_pt.get_child('function_direct_declarator')
fd_pt = fd_pt.get_child('function_declarator')
return indirection, fdd_pt
# Stick the node in the AST.
node = Node(parent, 'function', source = pt)
parent.add_child(node)
# Parts of the parse tree
# ... function_direct_declarator holds param list & name
retval_indirection, function_direct_declarator = \
walk_to_fdd(pt.get_child('function_declarator'))
declaration_specifiers = pt.get_child('declaration_specifiers')
compound_statement = pt.get_child('compound_statement')
# Get function name...
node.leaf = function_direct_declarator.leaf
# Function return type...
return_type_ast = Node(node, 'return_type', source = declaration_specifiers)
return_type_ast.add_attribute('indirection', retval_indirection)
return_type_ast.add_child(self._wrap_node('target', node,
self.get_type(node, declaration_specifiers)))
node.add_child(return_type_ast)
# Function parameters...
parameter_list = function_direct_declarator.get_child('parameter_list')
if parameter_list:
parameter_declaration_list = parameter_list.get_child('parameter_declaration_list')
for parameter_declaration in parameter_declaration_list.get_children_named('parameter_declaration'):
parameter_ast = self._function_parameter(node, parameter_declaration)
node.add_child(parameter_ast)
# We ignore K&R-style declaration list
# Function body.
body_ast = Node(node, 'body', source = compound_statement)
node.add_child(body_ast)
body_ast.add_children(self._compound_statement(body_ast, compound_statement))
def get_type(self, parentast, pt):
"""
Given a PT containing a type reference as a child, return the AST node
describing this type. The AST node may already exist, or it may be
created here.
"""
if pt.has_child('simple_type_specifier'):
#type_name = ' '.join(pt.get_child('simple_type_specifier').leaf)
#return TypeNode(parentast, None, type_name)
simple_type_specifier = pt.get_children_named('simple_type_specifier')[0]
type_list = [child_pt.name for child_pt in simple_type_specifier.children]
typenode = self.get_type_from_list(parentast, type_list)
elif any_child(pt, CLASS_SPECIFIERS):
child_pt = any_child(pt, CLASS_SPECIFIERS)
# We're either declaring an instance of an existing class, or
# creating a new one.
typenode = self.find_or_create_type(parentast, child_pt)
elif pt.my_children_are('enum_specifier'):
# FIXME: We still have bad handling of enums.
typenode = getTypenode('int', parentast)
else:
pt.print_tree()
parentast.print_tree()
raise Exception()
if typenode is None:
print "Couldn't find type node in here:"
pt.print_tree()
print "My parent is:"
parentast.print_tree()
assert typenode is not None
return typenode
def get_type_from_list(self, parentast, type_list):
type_list = normalise_type_list(type_list)
type_name = ' '.join(type_list)
return getTypenode(type_name, parentast)
def find_or_create_type(self, parentast, pt):
# Called when we're not sure if the current type must be created
# or not.
# FIXME: We could/should extend this to other types.
        assert pt.name in CLASS_SPECIFIERS
# The class may exist
ast = None
if pt.leaf:
ast = getTypenode(pt.leaf, parentast)
# There is one case where getTypenode can succeed and yet the type
# can be defined below: when a previous forward declaration is
# being defined.
if ast and not ast.get_child('members') and pt.get_child('members'):
# We're defining it here - so forget about the previous reference
# and use this one.
ast = None
# By this point the name isn't declared, or we need to define a
# forward reference.
if not ast:
# The class doesn't exist - create it
ast = self.create_type(parentast, pt)
# Done: if we're None at this point something is broken.
assert ast is not None
return ast
def create_type(self, parentast, pt):
"""
Create a new type from a list of member_declarations
"""
# We know how to construct the following types:
# structs
# unions
# enums
# forward declarations of structs
if pt.name == 'struct':
handler = self.create_type_struct
elif pt.name == 'union':
handler = self.create_type_union
elif pt.name == 'enum_specifier':
handler = self.create_type_enum
else:
raise Error("Unknown parsed type '%s'" % (pt.type))
return handler(parentast, pt)
def create_type_struct(self, parentast, pt):
members_pt = pt.get_child('members')
if pt.leaf and members_pt and getTypenode(pt.leaf, parentast) is not None:
# This is a forward declaration!
# Modify the forward declaration in-place.
ast = getTypenode(pt.leaf, parentast)
else:
ast = TypeNode(parentast, None, leaf = pt.leaf, source = members_pt)
ast.add_attribute('meta_type', 'struct')
# We may not have a "members" tree if this was a forward decl
if members_pt:
ast.add_child(Node(ast, 'members', source = members_pt))
decls_list = self.make_declarations_list(ast, members_pt)
ast.get_child('members').add_children(decls_list)
return ast
def create_type_union(self, parentast, pt):
members_pt = pt.get_child('members')
ast = TypeNode(parentast, None, source = members_pt)
ast.add_attribute('meta_type', 'union')
if members_pt:
decls_list = self.make_declarations_list(ast, members_pt)
ast.add_child(Node(ast, 'members', children = decls_list, leaf = pt.leaf, source = members_pt))
ast.add_attribute('switch_type', None)
return ast
def create_type_enum(self, parentast, pt):
"""
FIXME: The code below is truly broken. In C++ enums can look like this:
enum Blah{...} blah;
enum {...} blah;
enum {...};
Where the {...} is a list of IDs with (optional) assignment to integers.
A helpful site:
http://cpptips.hyperformix.com/Enums.html
"""
members_pt = pt.get_child('enumerator_list')
ast = TypeNode(parentast, None, source = members_pt)
ast.add_attribute('meta_type', 'enum')
decls_list = self.make_declarations_list(ast, members_pt)
#print "There is only one Power in this world that knows all about the Rings and their effects.\n -> Use this Power to solve the BUG!"
ast.add_children(decls_list)
return ast
def make_declarations_list(self, ast, members_pt):
decls = []
for child in members_pt.children:
if child.name in ('declaration', 'member_declaration'):
decls.extend(self.declaration(ast, child))
return decls
def _is_declaration(self, pt):
return pt.name in ('declaration', 'using_declaration', 'linkage_specification')
def declaration(self, parentast, pt):
"""
Process a declaration.
Complicated because the declaration could be:
* A simple variable declaration
* A definition
* A forward declaration
It also supports member declarators (inside enums, typed class members, typedefs)
in addition to init declarators (all other types of declaration)
"""
# The first part of the declaration specifies the type.
# (the "int" part of "int x")
decl_specifier = pt.get_child('declaration_specifiers')
# This function is used for both member declarations (lists of declarators
# inside structs) and init declarators (values on the RHS of a declaration)
# so we test for both. This list contains intialisers and names.
# (the "x" part of "int x")
init_decl_list = pt.get_child('init_declarator_list')
if not init_decl_list:
init_decl_list = pt.get_child('member_declarator_list')
# Bail early if this doesn't look at all right.
if not decl_specifier:
return [UnknownNode(None, pt, name = 'declaration_type')]
if init_decl_list:
# The type referenced may already exist, or it may be declared here.
declarators = self._init_declarator_list(parentast, init_decl_list)
else:
# If there are no init declarators, that means that either:
# 1. This is a new type, or
# 2. This is a forward declaration.
declarators = []
# Now we're ready to create declarations. We create as many declarations
# as there are names members in the initialiser list.
newNodes = []
decl_type = [ds.name for ds in decl_specifier.children]
decl_type_node = self.get_type(parentast, decl_specifier)
def _any_combination(target, acceptable):
"""
Returns True if target has any combination of "acceptable" in it
(including zero elements - ie if target is [])
"""
for item in target:
if item not in acceptable:
return False
return True
if 'typedef' in decl_type:
node_template = TypeNode(None, source = init_decl_list)
node_template.add_attribute('meta_type', 'alias')
elif 'const' in decl_type:
node_template = Node(None, 'type_instance', source = init_decl_list)
node_template.add_attribute('meta_type', 'const')
elif init_decl_list and _any_combination(decl_type, ['static', 'inline']):
node_template = Node(None, 'type_instance', source = init_decl_list)
elif any_in_list(CLASS_SPECIFIERS, decl_type):
node_template = Node(None, 'type_instance', source = init_decl_list)
elif init_decl_list is not None:
node_template = Node(None, 'type_instance', source = init_decl_list)
if init_decl_list is None:
# Forward declaration.
newNodes.append(decl_type_node)
else:
# Build declarations from the node template.
for decl in declarators:
if decl.name == 'attribute':
# FIXME: Attributes are ignored right now
continue
newNode = node_template.copy()
newNode.leaf = decl.leaf
newNode.add_attribute('indirection',
decl.get_single_attribute('indirection', ''))
newNode.add_child(self._wrap_node('target', newNode, decl_type_node))
newNode.add_child(decl)
if 'static' in decl_type:
newNode.attributes['c_static'] = True
if 'inline' in decl_type:
newNode.attributes['c_inline'] = True
newNodes.append(newNode)
return newNodes
def _init_declarator_list(self, parentast, pt):
return self._some_declarator_list(parentast, pt)
def _some_declarator_list(self, parentast, pt):
"""
Init declarators are separated in source using commas.
"""
assert pt.name in ("init_declarator_list", "member_declarator_list")
init_declarators = []
for child in pt.children:
init_declarators.append(self.init_declarator(parentast, child))
return init_declarators
def init_declarator(self, parentast, pt):
"""
Return an init_declarator node.
We used to return:
name
expression
result
indirection
This now becomes a Node with name "name", child "expression", attribute
"indirection", a result, and possibly other children
"""
decl = Node(None, 'declarator')
if pt.name == 'member_declarator_bitfield':
decl.leaf = pt.leaf
decl.add_child(pt.get_child('expression'))
try:
decl.result = evaluate(pt.get_child('expression'), parentast)
except Exception:
decl.result = pt.get_child('expression').result
elif pt.name == 'attribute':
attribute_pt = pt.children[0]
# FIXME: Attribute support
decl.set_name('attribute')
decl.set_leaf(attribute_pt.name)
elif pt.name in ("init_declarator", "member_declarator"):
indirection = ''
# Now we want this:
# declarator *
# direct_declarator ID
#
# ... or any of these :
# direct_declarator ID <- function declaration
# parameter_list
# type_qualifier
# exception_specification
            # direct_declarator ID <- class instantiation / array declaration
# expression
            # direct_declarator ID
# Source: Bertware/microdot-phat
# -*- coding: utf-8 -*-
tinynumbers = [
[0b11111,0b11111],
[0b11111],
[0b10111,0b11101],
[0b10101,0b11111],
[0b11110,0b00011],
[0b11101,0b10111],
[0b11111,0b00111],
[0b10000,0b11111],
[0b10101,0b10101],
[0b11100,0b11111]
]
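# Hypothetical helper, not part of the original module: decodes one tinynumbers
# entry into a printable bitmap so the packed column format is easier to see.
# Each byte is assumed to hold one 5-pixel column; which bit maps to the top
# pixel is an assumption and may differ from the actual display orientation.
def _print_tiny_number(digit):
    for row in range(5):
        line = ''
        for column in tinynumbers[digit]:
            line += '#' if (column >> row) & 1 else '.'
        print(line)
# Example: _print_tiny_number(1) prints a single solid column.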
# add japanese
font = {
32: [0x00, 0x00, 0x00, 0x00, 0x00], # (space)
33: [0x00, 0x00, 0x5f, 0x00, 0x00], # !
34: [0x00, 0x07, 0x00, 0x07, 0x00], # "
35: [0x14, 0x7f, 0x14, 0x7f, 0x14], # #
36: [0x24, 0x2a, 0x7f, 0x2a, 0x12], # $
37: [0x23, 0x13, 0x08, 0x64, 0x62], # %
38: [0x36, 0x49, 0x55, 0x22, 0x50], # &
39: [0x00, 0x05, 0x03, 0x00, 0x00], # '
40: [0x00, 0x1c, 0x22, 0x41, 0x00], # (
41: [0x00, 0x41, 0x22, 0x1c, 0x00], # )
42: [0x08, 0x2a, 0x1c, 0x2a, 0x08], # *
43: [0x08, 0x08, 0x3e, 0x08, 0x08], # +
44: [0x00, 0x50, 0x30, 0x00, 0x00], # ,
45: [0x08, 0x08, 0x08, 0x08, 0x08], # -
46: [0x00, 0x60, 0x60, 0x00, 0x00], # .
47: [0x20, 0x10, 0x08, 0x04, 0x02], # /
48: [0x3e, 0x51, 0x49, 0x45, 0x3e], # 0
49: [0x00, 0x42, 0x7f, 0x40, 0x00], # 1
50: [0x42, 0x61, 0x51, 0x49, 0x46], # 2
51: [0x21, 0x41, 0x45, 0x4b, 0x31], # 3
52: [0x18, 0x14, 0x12, 0x7f, 0x10], # 4
53: [0x27, 0x45, 0x45, 0x45, 0x39], # 5
54: [0x3c, 0x4a, 0x49, 0x49, 0x30], # 6
55: [0x01, 0x71, 0x09, 0x05, 0x03], # 7
56: [0x36, 0x49, 0x49, 0x49, 0x36], # 8
57: [0x06, 0x49, 0x49, 0x29, 0x1e], # 9
58: [0x00, 0x36, 0x36, 0x00, 0x00], # :
59: [0x00, 0x56, 0x36, 0x00, 0x00], # ;
60: [0x00, 0x08, 0x14, 0x22, 0x41], # <
61: [0x14, 0x14, 0x14, 0x14, 0x14], # =
62: [0x41, 0x22, 0x14, 0x08, 0x00], # >
63: [0x02, 0x01, 0x51, 0x09, 0x06], # ?
64: [0x32, 0x49, 0x79, 0x41, 0x3e], # @
65: [0x7e, 0x11, 0x11, 0x11, 0x7e], # A
66: [0x7f, 0x49, 0x49, 0x49, 0x36], # B
67: [0x3e, 0x41, 0x41, 0x41, 0x22], # C
68: [0x7f, 0x41, 0x41, 0x22, 0x1c], # D
69: [0x7f, 0x49, 0x49, 0x49, 0x41], # E
70: [0x7f, 0x09, 0x09, 0x01, 0x01], # F
71: [0x3e, 0x41, 0x41, 0x51, 0x32], # G
72: [0x7f, 0x08, 0x08, 0x08, 0x7f], # H
73: [0x00, 0x41, 0x7f, 0x41, 0x00], # I
74: [0x20, 0x40, 0x41, 0x3f, 0x01], # J
75: [0x7f, 0x08, 0x14, 0x22, 0x41], # K
76: [0x7f, 0x40, 0x40, 0x40, 0x40], # L
77: [0x7f, 0x02, 0x04, 0x02, 0x7f], # M
78: [0x7f, 0x04, 0x08, 0x10, 0x7f], # N
79: [0x3e, 0x41, 0x41, 0x41, 0x3e], # O
80: [0x7f, 0x09, 0x09, 0x09, 0x06], # P
81: [0x3e, 0x41, 0x51, 0x21, 0x5e], # Q
82: [0x7f, 0x09, 0x19, 0x29, 0x46], # R
83: [0x46, 0x49, 0x49, 0x49, 0x31], # S
84: [0x01, 0x01, 0x7f, 0x01, 0x01], # T
85: [0x3f, 0x40, 0x40, 0x40, 0x3f], # U
86: [0x1f, 0x20, 0x40, 0x20, 0x1f], # V
87: [0x7f, 0x20, 0x18, 0x20, 0x7f], # W
88: [0x63, 0x14, 0x08, 0x14, 0x63], # X
89: [0x03, 0x04, 0x78, 0x04, 0x03], # Y
90: [0x61, 0x51, 0x49, 0x45, 0x43], # Z
91: [0x00, 0x00, 0x7f, 0x41, 0x41], # [
92: [0x02, 0x04, 0x08, 0x10, 0x20], # \
93: [0x41, 0x41, 0x7f, 0x00, 0x00], # ]
94: [0x04, 0x02, 0x01, 0x02, 0x04], # ^
95: [0x40, 0x40, 0x40, 0x40, 0x40], # _
96: [0x00, 0x01, 0x02, 0x04, 0x00], # `
97: [0x20, 0x54, 0x54, 0x54, 0x78], # a
98: [0x7f, 0x48, 0x44, 0x44, 0x38], # b
99: [0x38, 0x44, 0x44, 0x44, 0x20], # c
100: [0x38, 0x44, 0x44, 0x48, 0x7f], # d
101: [0x38, 0x54, 0x54, 0x54, 0x18], # e
102: [0x08, 0x7e, 0x09, 0x01, 0x02], # f
103: [0x08, 0x14, 0x54, 0x54, 0x3c], # g
104: [0x7f, 0x08, 0x04, 0x04, 0x78], # h
105: [0x00, 0x44, 0x7d, 0x40, 0x00], # i
106: [0x20, 0x40, 0x44, 0x3d, 0x00], # j
107: [0x00, 0x7f, 0x10, 0x28, 0x44], # k
108: [0x00, 0x41, 0x7f, 0x40, 0x00], # l
109: [0x7c, 0x04, 0x18, 0x04, 0x78], # m
110: [0x7c, 0x08, 0x04, 0x04, 0x78], # n
111: [0x38, 0x44, 0x44, 0x44, 0x38], # o
112: [0x7c, 0x14, 0x14, 0x14, 0x08], # p
113: [0x08, 0x14, 0x14, 0x18, 0x7c], # q
114: [0x7c, 0x08, 0x04, 0x04, 0x08], # r
115: [0x48, 0x54, 0x54, 0x54, 0x20], # s
116: [0x04, 0x3f, 0x44, 0x40, 0x20], # t
117: [0x3c, 0x40, 0x40, 0x20, 0x7c], # u
118: [0x1c, 0x20, 0x40, 0x20, 0x1c], # v
119: [0x3c, 0x40, 0x30, 0x40, 0x3c], # w
120: [0x44, 0x28, 0x10, 0x28, 0x44], # x
121: [0x0c, 0x50, 0x50, 0x50, 0x3c], # y
122: [0x44, 0x64, 0x54, 0x4c, 0x44], # z
123: [0x00, 0x08, 0x36, 0x41, 0x00], # {
124: [0x00, 0x00, 0x7f, 0x00, 0x00], # |
125: [0x00, 0x41, 0x36, 0x08, 0x00], # }
126: [0x08, 0x08, 0x2a, 0x1c, 0x08], # ~
8221: [0x00, 0x07, 0x00, 0x07, 0x00], # ”
8592: [0x08, 0x1C, 0x2A, 0x08, 0x08], # ←
8593: [0x08, 0x04, 0x7E, 0x04, 0x08], # ↑
8594: [0x08, 0x08, 0x2A, 0x1C, 0x08], # →
8595: [0x08, 0x10, 0x3F, 0x10, 0x08], # ↓
9472: [0x08, 0x08, 0x08, 0x08, 0x08], # ─
9474: [0x00, 0x00, 0x7F, 0x00, 0x00], # │
9484: [0x00, 0x00, 0x78, 0x08, 0x08], # ┌
9488: [0x08, 0x08, 0x78, 0x00, 0x00], # ┐
9492: [0x00, 0x00, 0x0F, 0x08, 0x08], # └
9496: [0x08, 0x08, 0x0F, 0x00, 0x00], # ┘
9500: [0x00, 0x00, 0x7F, 0x08, 0x08], # ├
9508: [0x08, 0x08, 0x7F, 0x00, 0x00], # ┤
9516: [0x08, 0x08, 0x78, 0x08, 0x08], # ┬
9524: [0x08, 0x08, 0x0F, 0x08, 0x08], # ┴
9532: [0x08, 0x08, 0x7F, 0x08, 0x08], # ┼
9632: [0x7F, 0x7F, 0x7F, 0x7F, 0x7F], # ■
9633: [0x7F, 0x41, 0x41, 0x41, 0x7F], # □
9650: [0x10, 0x1C, 0x1E, 0x1C, 0x10], # ▲
9651: [0x10, 0x1C, 0x12, 0x1C, 0x10], # △
9660: [0x04, 0x1C, 0x3C, 0x1C, 0x04], # ▼
9661: [0x04, 0x1C, 0x24, 0x1C, 0x04], # ▽
9670: [0x08, 0x1C, 0x3E, 0x1C, 0x08], # ◆
9675: [0x1C, 0x22, 0x22, 0x22, 0x1C], # ○
9679: [0x1C, 0x3E, 0x3E, 0x3E, 0x1C], # ●
9733: [0x64, 0x3E, 0x1F, 0x3E, 0x64], # ★
12288: [0x00, 0x00, 0x00, 0x00, 0x00], #
12289: [0x10, 0x20, 0x40, 0x00, 0x00], # 、
12290: [0x70, 0x50, 0x70, 0x00, 0x00], # 。
12300: [0x00, 0x0F, 0x01, 0x01, 0x01], # 「
12301: [0x40, 0x40, 0x40, 0x78, 0x00], # 」
12316: [0x02, 0x01, 0x02, 0x04, 0x02], # 〜
12353: [0x28, 0x58, 0x3C, 0x68, 0x00], # ぁ
12354: [0x32, 0x7A, 0x2F, 0x1A, 0x72], # あ
12355: [0x38, 0x40, 0x08, 0x30, 0x00], # ぃ
12356: [0x3E, 0x40, 0x20, 0x00, 0x1C], # い
12357: [0x10, 0x0C, 0x4C, 0x30, 0x00], # ぅ
12358: [0x04, 0x05, 0x45, 0x25, 0x18], # う
12359: [0x48, 0x2C, 0x5C, 0x48, 0x00], # ぇ
12360: [0x44, 0x24, 0x35, 0x4D, 0x44], # え
12361: [0x68, 0x7C, 0x10, 0x64, 0x00], # ぉ
12362: [0x32, 0x7F, 0x0A, 0x48, 0x32], # お
12363: [0x34, 0x0F, 0x44, 0x38, 0x06], # か
12365: [0x22, 0x4A, 0x4F, 0x5A, 0x08], # き
12367: [0x00, 0x18, 0x24, 0x43, 0x00], # く
12369: [0x3E, 0x00, 0x44, 0x3F, 0x04], # け
12371: [0x22, 0x52, 0x42, 0x42, 0x44], # こ
12373: [0x24, 0x44, 0x47, 0x5C, 0x12], # さ
12375: [0x3F, 0x40, 0x40, 0x20, 0x10], # し
12377: [0x02, 0x0A, 0x56, 0x3F, 0x02], # す
12379: [0x04, 0x3E, 0x44, 0x5F, 0x44], # せ
12381: [0x08, 0x2D, 0x5B, 0x45, 0x44], # そ
12383: [0x72, 0x0F, 0x22, 0x4A, 0x48], # た
12385: [0x0A, 0x0E, 0x4B, 0x4A, 0x32], # ち
12387: [0x08, 0x48, 0x48, 0x30, 0x00], # っ
12388: [0x02, 0x02, 0x42, 0x22, 0x1C], # つ
12390: [0x02, 0x32, 0x4A, 0x46, 0x42], # て
12392: [0x30, 0x4B, 0x44, 0x44, 0x42], # と
12394: [0x0A, 0x27, 0x52, 0x38, 0x26], # な
12395: [0x3E, 0x00, 0x22, 0x42, 0x42], # に
12396: [0x38, 0x6F, 0x1C, 0x67, 0x78], # ぬ
12397: [0x12, 0x7F, 0x0A, 0x64, 0x78], # ね
    12398: [0x3C,
# Source: BatmanAoD/oil, core/state.py
#!/usr/bin/env python
# Copyright 2016 <NAME>. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
from __future__ import print_function
"""
state.py -- Interpreter state
"""
import cStringIO
import os
from core import args
from core import legacy
from osh.meta import runtime
from core import util
from osh.meta import Id
from osh.meta import ast
part_value_e = runtime.part_value_e
value_e = runtime.value_e
lvalue_e = runtime.lvalue_e
scope_e = runtime.scope_e
var_flags_e = runtime.var_flags_e
log = util.log
e_die = util.e_die
class _ErrExit(object):
"""Manages the errexit setting.
- The user can change it with builtin 'set' at any point in the code.
- These constructs implicitly disable 'errexit':
- if / while / until conditions
- ! (part of pipeline)
- && ||
An _ErrExit object prevents these two mechanisms from clobbering each other.
"""
def __init__(self):
self.errexit = False # the setting
self.stack = []
def Push(self):
if self.errexit:
self.errexit = False
self.stack.append(True) # value to restore
else:
self.stack.append(False)
def Pop(self):
self.errexit = self.stack.pop()
def Set(self, b):
"""User code calls this."""
if True in self.stack: # are we in a temporary state?
# TODO: Add error context.
e_die("Can't set 'errexit' in a context where it's disabled "
"(if, !, && ||, while/until conditions)")
self.errexit = b
def Disable(self):
"""For bash compatibility in command sub."""
self.errexit = False
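# A minimal usage sketch, not part of the original module: mirrors how the
# executor is expected to bracket an 'if'/'while' condition with Push/Pop so
# that the user's errexit setting is suspended and then restored.
def _errexit_sketch():
  e = _ErrExit()
  e.Set(True)             # user ran 'set -e'
  e.Push()                # entering a condition: errexit temporarily off
  assert e.errexit is False
  # Calling e.Set(True) here would hit e_die(), because the stack records a
  # temporarily-disabled state.
  e.Pop()                 # leaving the condition: previous setting restored
  assert e.errexit is True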
# Used by builtin
SET_OPTIONS = [
('e', 'errexit'),
('n', 'noexec'),
('u', 'nounset'),
('x', 'xtrace'),
('f', 'noglob'),
('C', 'noclobber'),
('h', 'hashall'),
(None, 'pipefail'),
(None, 'debug-completion'),
(None, 'strict-control-flow'),
(None, 'strict-errexit'),
(None, 'strict-array'),
(None, 'vi'),
(None, 'emacs'),
# TODO: Add strict-arg-parse? For example, 'trap 1 2 3' shouldn't be
# valid, because it has an extra argument. Builtins are inconsistent about
# checking this.
]
_SET_OPTION_NAMES = set(name for _, name in SET_OPTIONS)
class ExecOpts(object):
def __init__(self, mem):
"""
Args:
mem: state.Mem, for SHELLOPTS
"""
self.mem = mem
# Depends on the shell invocation (sh -i, etc.) This is not technically an
# 'set' option, but it appears in $-.
self.interactive = False
# set -o / set +o
self.errexit = _ErrExit() # -e
self.nounset = False # -u
self.pipefail = False
self.xtrace = False # NOTE: uses PS4
self.noglob = False # -f
self.noexec = False # -n
self.noclobber = False # -C
# We don't do anything with this yet. But Aboriginal calls 'set +h'.
self.hashall = True # -h is true by default.
# OSH-specific options.
self.debug_completion = False
self.strict_control_flow = False
# strict_errexit makes 'local foo=$(false)' and echo $(false) fail.
    # By default, we mimic bash's undesirable behavior of ignoring
# these failures, since ash copied it, and Alpine's abuild relies on it.
#
# bash 4.4 also has shopt -s inherit_errexit, which says that command subs
# inherit the value of errexit. # I don't believe it is strict enough --
# local still needs to fail.
self.strict_errexit = False
# Several problems:
# - foo="$@" not allowed because it decays. Should be foo=( "$@" ).
# - ${a} not ${a[0]}
# - possibly disallow $* "$*" altogether.
# - do not allow [[ "$@" == "${a[@]}" ]]
self.strict_array = False
# This comes after all the 'set' options.
shellopts = self.mem.GetVar('SHELLOPTS')
assert shellopts.tag == value_e.Str, shellopts
self._InitOptionsFromEnv(shellopts.s)
# shopt -s / -u. NOTE: bash uses $BASHOPTS rather than $SHELLOPTS for
# these.
self.nullglob = False
self.failglob = False
#
# OSH-specific options that are not yet implemented.
#
self.strict_arith = False # e.g. $(( x )) where x doesn't look like integer
self.strict_word = False # word splitting, etc.
self.strict_scope = False # disable dynamic scope
# TODO: strict_bool. Some of this is covered by arithmetic, e.g. -eq.
# Don't need flags -e and -n. -e is $'\n', and -n is write.
self.sane_echo = False
# Used for 'set -o vi/emacs'
# Set by the Executor, if available
self.readline = None
def _InitOptionsFromEnv(self, shellopts):
# e.g. errexit:nounset:pipefail
lookup = set(shellopts.split(':'))
for _, name in SET_OPTIONS:
if name in lookup:
self._SetOption(name, True)
def ErrExit(self):
return self.errexit.errexit
def GetDollarHyphen(self):
chars = []
if self.interactive:
chars.append('i')
if self.ErrExit():
chars.append('e')
if self.nounset:
chars.append('u')
# NO letter for pipefail?
if self.xtrace:
chars.append('x')
if self.noexec:
chars.append('n')
# bash has:
# - c for sh -c, i for sh -i (mksh also has this)
# - h for hashing (mksh also has this)
# - B for brace expansion
return ''.join(chars)
def _SetOption(self, opt_name, b):
"""Private version for synchronizing from SHELLOPTS."""
assert '_' not in opt_name
if opt_name not in _SET_OPTION_NAMES:
raise args.UsageError('Invalid option %r' % opt_name)
if opt_name == 'errexit':
self.errexit.Set(b)
elif opt_name in ('vi', 'emacs'):
if self.readline:
self.readline.parse_and_bind("set editing-mode " + opt_name);
else:
# TODO error message copied from 'cmd_exec.py'; refactor?
util.error('Oil was not built with readline/completion.')
else:
# strict-control-flow -> strict_control_flow
opt_name = opt_name.replace('-', '_')
setattr(self, opt_name, b)
def SetOption(self, opt_name, b):
""" For set -o, set +o, or shopt -s/-u -o. """
self._SetOption(opt_name, b)
val = self.mem.GetVar('SHELLOPTS')
assert val.tag == value_e.Str
shellopts = val.s
# Now check if SHELLOPTS needs to be updated. It may be exported.
#
    # NOTE: It might be better to skip rewriting SHELLOPTS in the common case
# where it is not used. We could do it lazily upon GET.
# Also, it would be slightly more efficient to update SHELLOPTS if
    # settings were batched. Examples:
# - set -eu
# - shopt -s foo bar
if b:
if opt_name not in shellopts:
new_val = runtime.Str('%s:%s' % (shellopts, opt_name))
self.mem.InternalSetGlobal('SHELLOPTS', new_val)
else:
if opt_name in shellopts:
names = [n for n in shellopts.split(':') if n != opt_name]
new_val = runtime.Str(':'.join(names))
self.mem.InternalSetGlobal('SHELLOPTS', new_val)
SHOPT_OPTIONS = ('nullglob', 'failglob')
def SetShoptOption(self, opt_name, b):
""" For shopt -s/-u. """
if opt_name not in self.SHOPT_OPTIONS:
raise args.UsageError('Invalid option %r' % opt_name)
setattr(self, opt_name, b)
def ShowOptions(self, opt_names):
""" For 'set -o' and 'shopt -p -o' """
# TODO: Maybe sort them differently?
opt_names = opt_names or _SET_OPTION_NAMES
for opt_name in opt_names:
if opt_name == 'errexit':
b = self.errexit.errexit
else:
attr = opt_name.replace('-', '_')
b = getattr(self, attr)
print('set %so %s' % ('-' if b else '+', opt_name))
def ShowShoptOptions(self, opt_names):
""" For 'shopt -p' """
opt_names = opt_names or self.SHOPT_OPTIONS # show all
for opt_name in opt_names:
b = getattr(self, opt_name)
print('shopt -%s %s' % ('s' if b else 'u', opt_name))
class _ArgFrame(object):
"""Stack frame for arguments array."""
def __init__(self, argv):
self.argv = argv
self.num_shifted = 0
def __repr__(self):
return '<_ArgFrame %s %d at %x>' % (self.argv, self.num_shifted, id(self))
def GetArgNum(self, arg_num):
index = self.num_shifted + arg_num - 1
if index >= len(self.argv):
return runtime.Undef()
return runtime.Str(str(self.argv[index]))
def GetArgv(self):
return self.argv[self.num_shifted : ]
def GetNumArgs(self):
return len(self.argv) - self.num_shifted
def SetArgv(self, argv):
self.argv = argv
self.num_shifted = 0
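# A small sketch, not part of the original module: shows how a 'shift' builtin
# would interact with an argument frame by bumping num_shifted.
def _argframe_sketch():
  frame = _ArgFrame(['a', 'b', 'c'])
  frame.num_shifted = 1             # what one 'shift' would do
  assert frame.GetArgv() == ['b', 'c']
  assert frame.GetNumArgs() == 2
  # frame.GetArgNum(1) now returns runtime.Str('b'); out-of-range positions
  # return runtime.Undef().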
class _StackFrame(object):
def __init__(self, readonly=False):
self.vars = {} # string -> runtime.cell
self.readonly = readonly
def __repr__(self):
f = cStringIO.StringIO()
f.write('<_StackFrame readonly:%s' % self.readonly)
for name, cell in self.vars.iteritems():
f.write(' %s = ' % name)
f.write(' %s' % cell)
f.write('\n')
f.write('>')
return f.getvalue()
class DirStack(object):
"""For pushd/popd/dirs."""
def __init__(self):
self.stack = []
self.Reset()
def Reset(self):
self.stack[:] = [os.getcwd()]
def Push(self, entry):
self.stack.append(entry)
def Pop(self):
if len(self.stack) <= 1:
return None
return self.stack.pop()
def Iter(self):
"""Iterate in reverse order."""
return reversed(self.stack)
def _FormatStack(var_stack):
"""Temporary debugging.
TODO: Turn this into a real JSON dump or something.
"""
f = cStringIO.StringIO()
for i, entry in enumerate(var_stack):
f.write('[%d] %s' % (i, entry))
f.write('\n')
return f.getvalue()
class Mem(object):
"""For storing variables.
Mem is better than "Env" -- Env implies OS stuff.
Callers:
User code: assigning and evaluating variables, in command context or
arithmetic context.
Completion engine: for COMP_WORDS, etc.
Builtins call it implicitly: read, cd for $PWD, $OLDPWD, etc.
Modules: cmd_exec, word_eval, expr_eval, completion
"""
def __init__(self, argv0, argv, environ, arena):
top = _StackFrame()
self.var_stack = [top]
self.argv0 = argv0
self.argv_stack = [_ArgFrame(argv)]
# NOTE: could use deque and appendleft/popleft, but:
# 1. ASDL type checking of StrArray doesn't | |
# Source: yonghoonlee/dymos
import numpy as np
import openmdao.api as om
from ...transcriptions.grid_data import GridData
from ...utils.lgl import lgl
class VandermondeControlInterpComp(om.ExplicitComponent):
"""
A component which interpolates control values in 1D using Vandermonde interpolation.
Takes training values for control variables at given _input_ nodes,
    broadcasts them to _discretization_ nodes, and then interpolates the discretization values
to provide a control variable at a given segment tau or phase tau.
For dynamic controls, the current segment is given as a discrete input and the interpolation is
a smooth polynomial along the given segment.
OpenMDAO assumes sizes of variables at setup time, and we don't want to need to change the
size of the control input nodes when we evaluate different segments. Instead, this component
will take in the control values of all segments and internally use the appropriate one.
Parameters
----------
grid_data : GridData
A GridData instance that details information on how the control input and discretization
        nodes are laid out.
control_options : dict of {str: ControlOptionsDictionary}
A mapping that maps the name of each control to a ControlOptionsDictionary of its options.
polynomial_control_options : dict of {str: PolynomialControlOptionsDictionary}
A mapping that maps the name of each polynomial control to an OptionsDictionary of its options.
time_units : str
The time units pertaining to the control rates.
standalone_mode : bool
If True, this component runs its configuration steps during setup. This is useful for
unittests in which the component does not exist in a larger group.
**kwargs
Keyword arguments passed to ExplicitComponent.
"""
def __init__(self, grid_data, control_options=None, polynomial_control_options=None,
time_units=None, standalone_mode=False, **kwargs):
self._grid_data = grid_data
self._control_options = {} if control_options is None else control_options
self._polynomial_control_options = {} if polynomial_control_options is None else polynomial_control_options
self._time_units = time_units
self._standalone_mode = standalone_mode
# Storage for the Vandermonde matrix and its inverse for each control
self._V_hat = {}
self._V_hat_inv = {}
# Storage for factors used in the derivatives of Vandermonde matrices.
self._fac = {}
# Cache formatted strings: { control_name : (input_name, output_name) }
self._control_io_names = {}
# The Lagrange interpolation matrix L_id maps control values as given at the input nodes
# to values at the discretization nodes.
num_disc_nodes = grid_data.subset_num_nodes['control_disc']
num_input_nodes = grid_data.subset_num_nodes['control_input']
self._L_id = np.zeros((num_disc_nodes, num_input_nodes), dtype=float)
self._L_id[np.arange(num_disc_nodes, dtype=int),
self._grid_data.input_maps['dynamic_control_input_to_disc']] = 1.0
super().__init__(**kwargs)
def initialize(self):
"""
Declare component options.
"""
self.options.declare('segment_index', types=int, desc='index of the current segment')
self.options.declare('vec_size', types=int, default=1,
                             desc='number of points at which the control will be evaluated. This is not '
'necessarily the same as the number of nodes in the GridData.')
def _configure_controls(self):
vec_size = self.options['vec_size']
gd = self._grid_data
self._V_hat = {}
self._V_hat_inv = {}
self._disc_node_idxs_by_segment = []
self._input_node_idxs_by_segment = []
if not self._control_options:
return
first_disc_node_in_seg = 0
for seg_idx in range(gd.num_segments):
# Number of control discretization nodes per segment
ncdnps = gd.subset_num_nodes_per_segment['control_disc'][seg_idx]
ar_control_disc_nodes = np.arange(ncdnps, dtype=int)
disc_idxs_in_seg = first_disc_node_in_seg + ar_control_disc_nodes
first_disc_node_in_seg += ncdnps
# The indices of the discretization node u vector pertaining to the given segment
self._disc_node_idxs_by_segment.append(disc_idxs_in_seg)
# The indices of the input u vector pertaining to the given segment
self._input_node_idxs_by_segment.append(gd.input_maps['dynamic_control_input_to_disc'][disc_idxs_in_seg])
# Indices of the control disc nodes belonging to the current segment
control_disc_seg_idxs = gd.subset_segment_indices['control_disc'][seg_idx]
# Segment tau values for the control disc nodes in the phase
control_disc_stau = gd.node_stau[gd.subset_node_indices['control_disc']]
# Segment tau values for the control disc nodes in the given segment
control_disc_seg_stau = control_disc_stau[control_disc_seg_idxs[0]:
control_disc_seg_idxs[1]]
seg_control_order = gd.transcription_order[seg_idx] - 1
if seg_control_order not in self._V_hat:
self._V_hat[seg_control_order] = np.vander(control_disc_seg_stau, increasing=True)
self._V_hat_inv[seg_control_order] = np.linalg.inv(self._V_hat[seg_control_order])
if seg_control_order + 1 not in self._fac:
self._fac[seg_control_order + 1] = np.arange(seg_control_order + 1, dtype=int)
num_uhat_nodes = gd.subset_num_nodes['control_input']
ar = np.arange(vec_size, dtype=int)
for control_name, options in self._control_options.items():
shape = options['shape']
units = options['units']
input_name = f'controls:{control_name}'
output_name = f'control_values:{control_name}'
rate_name = f'control_rates:{control_name}_rate'
rate2_name = f'control_rates:{control_name}_rate2'
uhat_shape = (num_uhat_nodes,) + shape
output_shape = (vec_size,) + shape
self.add_input(input_name, shape=uhat_shape, units=units)
self.add_output(output_name, shape=output_shape, units=units)
self.add_output(rate_name, shape=output_shape, units=units)
self.add_output(rate2_name, shape=output_shape, units=units)
self._control_io_names[control_name] = (input_name, output_name, rate_name, rate2_name)
self.declare_partials(of=output_name, wrt=input_name)
self.declare_partials(of=output_name, wrt='stau', rows=ar, cols=ar)
self.declare_partials(of=rate_name, wrt=input_name)
self.declare_partials(of=rate_name, wrt='stau', rows=ar, cols=ar)
self.declare_partials(of=rate_name, wrt='dstau_dt')
self.declare_partials(of=rate2_name, wrt=input_name)
self.declare_partials(of=rate2_name, wrt='stau', rows=ar, cols=ar)
self.declare_partials(of=rate2_name, wrt='dstau_dt')
def _configure_polynomial_controls(self):
vec_size = self.options['vec_size']
ar = np.arange(vec_size, dtype=int)
for pc_name, options in self._polynomial_control_options.items():
order = options['order']
shape = options['shape']
units = options['units']
input_name = f'polynomial_controls:{pc_name}'
output_name = f'polynomial_control_values:{pc_name}'
rate_name = f'polynomial_control_rates:{pc_name}_rate'
rate2_name = f'polynomial_control_rates:{pc_name}_rate2'
input_shape = (order + 1,) + shape
output_shape = (vec_size,) + shape
self.add_input(input_name, shape=input_shape, units=units)
self.add_output(output_name, shape=output_shape, units=units)
self.add_output(rate_name, shape=output_shape, units=units)
self.add_output(rate2_name, shape=output_shape, units=units)
self._control_io_names[pc_name] = (input_name, output_name, rate_name, rate2_name)
self.declare_partials(of=output_name, wrt=input_name)
self.declare_partials(of=output_name, wrt='ptau', rows=ar, cols=ar)
self.declare_partials(of=rate_name, wrt=input_name)
self.declare_partials(of=rate_name, wrt='ptau', rows=ar, cols=ar)
self.declare_partials(of=rate_name, wrt='t_duration')
self.declare_partials(of=rate2_name, wrt=input_name)
self.declare_partials(of=rate2_name, wrt='ptau', rows=ar, cols=ar)
self.declare_partials(of=rate2_name, wrt='t_duration')
if order not in self._V_hat:
pc_disc_seg_ptau, _ = lgl(order + 1)
self._V_hat[order] = np.vander(pc_disc_seg_ptau, increasing=True)
self._V_hat_inv[order] = np.linalg.inv(self._V_hat[order])
if order + 1 not in self._fac:
self._fac[order + 1] = np.arange(order + 1, dtype=int)
def setup(self):
"""
Perform the I/O creation if operating in _standalone_mode.
"""
if self._standalone_mode:
self.configure_io()
def configure_io(self):
"""
I/O creation is delayed until configure so we can determine shape and units for the controls.
"""
vec_size = self.options['vec_size']
self._V_hat = {}
self._V_hat_inv = {}
# self.add_discrete_input('segment_index', val=0, desc='index of the segment')
self.add_input('stau', shape=(vec_size,), units=None)
self.add_input('dstau_dt', val=1.0, units=f'1/{self._time_units}')
self.add_input('t_duration', val=1.0, units=self._time_units)
self.add_input('ptau', shape=(vec_size,), units=None)
self._configure_controls()
self._configure_polynomial_controls()
def _dvander(self, v):
"""
Return the derivative of a Vandermonde matrix wrt the independent variable _in increasing order_.
Parameters
----------
v : np.array
The Vandermonde matrix for which the derivatives are requested.
Returns
-------
dV : np.array
The derivative of v with respect to the independent variable.
dv2 : np.array
The second derivative of v wrt the independent variable.
dv3 : np.array
The third derivative of v wrt the independent variable.
"""
p, n = v.shape
dv = np.zeros_like(v)
dv2 = dv.copy()
dv3 = dv.copy()
dv[:, 1:] = v[:, :-1]
dv2[:, 2:] = v[:, :-2]
dv3[:, 3:] = v[:, :-3]
fac = self._fac[n]
fac2 = fac[:-1]
fac3 = fac[:-2]
dv[:, :] = dv * fac[np.newaxis, :]
dv2[:, 1:] = dv2[:, 1:] * fac2[np.newaxis, :] * fac[np.newaxis, 1:]
dv3[:, 2:] = dv3[:, 2:] * fac3[np.newaxis, :] * fac2[np.newaxis, 1:] * fac[np.newaxis, 2:]
return dv, dv2, dv3
def compute(self, inputs, outputs, discrete_inputs=None, discrete_outputs=None):
"""
Compute interpolated control values and rates.
Parameters
----------
inputs : `Vector`
`Vector` containing inputs.
outputs : `Vector`
`Vector` containing outputs.
discrete_inputs : `Vector`
`Vector` containing discrete_inputs.
discrete_outputs : `Vector`
`Vector` containing discrete_outputs.
"""
seg_idx = self.options['segment_index']
n = self._grid_data.transcription_order[seg_idx]
stau = inputs['stau']
dstau_dt = inputs['dstau_dt']
ptau = inputs['ptau']
dptau_dt = 2 / inputs['t_duration']
if self._control_options:
seg_order = n - 1
disc_node_idxs = self._disc_node_idxs_by_segment[seg_idx]
input_node_idxs = self._input_node_idxs_by_segment[seg_idx]
V_stau = np.vander(stau, N=n, increasing=True)
dV_stau, dV2_stau, _ = self._dvander(V_stau)
L_seg = self._L_id[disc_node_idxs[0]:disc_node_idxs[0] + len(disc_node_idxs),
input_node_idxs[0]:input_node_idxs[0] + len(input_node_idxs)]
for control_name, options in self._control_options.items():
input_name, output_name, rate_name, rate2_name = self._control_io_names[control_name]
u_hat = np.dot(L_seg, inputs[input_name][input_node_idxs])
a = np.atleast_2d(self._V_hat_inv[seg_order] @ u_hat)
outputs[output_name] = V_stau @ a
outputs[rate_name] = dstau_dt * (dV_stau @ a)
outputs[rate2_name] = dstau_dt**2 * (dV2_stau @ a)
for pc_name, options in self._polynomial_control_options.items():
input_name, output_name, rate_name, rate2_name = self._control_io_names[pc_name]
order = options['order']
V_ptau = np.vander(ptau, N=order+1, increasing=True)
dV_ptau, dV2_ptau, _ = self._dvander(V_ptau)
a = np.atleast_2d(self._V_hat_inv[order] @ inputs[input_name])
outputs[output_name] = V_ptau @ a
outputs[rate_name] = dptau_dt * (dV_ptau @ a)
outputs[rate2_name] = dptau_dt**2 * (dV2_ptau @ a)
def compute_partials(self, inputs, partials, discrete_inputs=None):
"""
Compute derivatives interpolated control values and rates wrt the inputs.
Parameters
----------
inputs : Vector
Unscaled, dimensional input variables read via inputs[key].
partials : Jacobian
Subjac components written to partials[output_name, input_name].
discrete_inputs : Vector
Unscaled, discrete input variables keyed by variable name.
"""
seg_idx = self.options['segment_index']
n = self._grid_data.transcription_order[seg_idx]
stau = inputs['stau'].real
dstau_dt = inputs['dstau_dt'].real
ptau = inputs['ptau'].real
t_duration = inputs['t_duration'].real
dptau_dt = 2.0 / t_duration
ddptau_dt_dtduration = -2.0 / t_duration**2
if self._control_options:
u_idxs = self._input_node_idxs_by_segment[seg_idx]
seg_order = self._grid_data.transcription_order[seg_idx] - 1
V_stau = np.vander(stau, N=n, increasing=True)
dV_stau, dV2_stau, dV3_stau = self._dvander(V_stau)
disc_node_idxs = self._disc_node_idxs_by_segment[seg_idx]
input_node_idxs = self._input_node_idxs_by_segment[seg_idx]
L_seg = self._L_id[disc_node_idxs[0]:disc_node_idxs[0] + len(disc_node_idxs),
input_node_idxs[0]:input_node_idxs[0] + len(input_node_idxs)]
for control_name, options in self._control_options.items():
input_name, output_name, rate_name, rate2_name = self._control_io_names[control_name]
u_hat = np.dot(L_seg, inputs[input_name][input_node_idxs].real)
                a = np.atleast_2d(self._V_hat_inv[seg_order] @ u_hat)
# <NAME>, March 2020
# Common code for PyTorch implementation of Copy-Pasting GAN
import copy
import itertools
import matplotlib.pyplot as plt
import numpy as np
import os, platform, time
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
from PIL import Image, ImageDraw
from torch.utils.data import Dataset
from tqdm import tqdm
def read_image_robust(img_path, monochromatic=False):
''' Returns an image that meets conditions along with a success flag, in order to avoid crashing. '''
try:
# image = plt.imread(img_path).copy()
image = np.array(Image.open(img_path)).copy() # always uint8
success = True
if np.any(np.array(image.strides) < 0):
success = False # still negative stride
elif not(monochromatic) and (image.ndim != 3 or image.shape[2] != 3):
success = False # not RGB
elif monochromatic:
# width, height = image.shape[1], image.shape[0]
# image = np.broadcast_to(x[:, :, np.newaxis], (height, width, 3))
image = image[:, :, np.newaxis] # one channel <=> only one ground truth
except IOError:
# Probably corrupt file
image = None
success = False
return image, success
def paint_squares(image, noisy=False, channels=10):
'''
Paints one or more squares at random locations to create an artificial foreground image.
Generates multiple associated ground truth masks; one per object.
'''
width, height = image.shape[1], image.shape[0]
image = image.copy() # do not overwrite background
object_count = np.random.randint(1, 5) # [1, 4] inclusive
masks = np.zeros((height, width, channels), dtype=np.uint8)
for i in range(object_count):
sq_w, sq_h = 9, 9
x1 = np.random.randint(0, width - sq_w + 1)
y1 = np.random.randint(0, height - sq_h + 1)
x2 = x1 + sq_w
y2 = y1 + sq_h
masks[y1:y2, x1:x2, i] = 255
if not(noisy):
# Pick one fixed (not necessarily saturated) color for the whole square
clr = np.random.randint(0, 256, 3)
image[y1:y2, x1:x2] = clr
else:
# Pick a random fully saturated (extremal) color for every pixel
image[y1:y2, x1:x2] = np.random.choice([0, 255], (sq_h, sq_w, 3))
return image, masks, object_count
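# A quick sketch, not part of the original file: paints squares onto a random
# background and checks the shapes promised by the docstring. Uses the numpy
# import already at the top of this module.
def _paint_squares_demo():
    background = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)
    image, masks, count = paint_squares(background)
    assert image.shape == (64, 64, 3)
    assert masks.shape == (64, 64, 10)   # one ground-truth channel per object slot
    assert 1 <= count <= 4
    return image, masks, count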
def create_random_gfake_mask(width, height):
''' See Appendix D. '''
x0, y0 = np.random.rand(2) * 0.8 + 0.1
num_verts = np.random.randint(4, 7)
# TODO possible improvement: allow up to more vertices?
# TODO possible improvement: encourage convex (currently many "sharp" objects)
radii = np.random.rand(num_verts) * 0.4 + 0.1
# radii = np.random.rand(num_verts) * 0.8 + 0.2 # TODO: not very clear from paper
angles = np.sort(np.random.rand(num_verts)) * 2.0 * np.pi
poly_polar = list(zip(radii, angles))
poly_cart = [(int(width * (x0 + r * np.cos(a)) / 1),
int(height * (y0 + r * np.sin(a)) / 1)) for (r, a) in poly_polar]
# poly_cart = [(x1, y1), (x2, y2), ...]
img = Image.new('L', (width, height), 0)
ImageDraw.Draw(img).polygon(poly_cart, outline=1, fill=255)
mask = np.array(img, dtype='uint8')
assert(mask.shape == (height, width))
return mask
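# A quick sketch, not part of the original file: draws one grounded-fake polygon
# mask and checks its size and dtype.
def _gfake_mask_demo(width=64, height=48):
    mask = create_random_gfake_mask(width, height)
    assert mask.shape == (height, width)
    assert mask.dtype == np.uint8        # filled with 255, outlined with 1
    return mask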
def apply_border_zero(masks):
ndim = len(masks.shape)
if ndim == 2:
masks[0, :] = 0
masks[-1, :] = 0
masks[:, 0] = 0
masks[:, -1] = 0
elif ndim == 3:
masks[:, 0, :] = 0
masks[:, -1, :] = 0
masks[:, :, 0] = 0
masks[:, :, -1] = 0
elif ndim == 4:
masks[:, :, 0, :] = 0
masks[:, :, -1, :] = 0
masks[:, :, :, 0] = 0
masks[:, :, :, -1] = 0
else:
raise Exception('Mask has too many dimensions')
return masks
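# A quick sketch, not part of the original file: border zeroing keeps only the
# interior of a mask. Note that apply_border_zero mutates its argument in place.
def _border_zero_demo():
    mask = torch.ones(1, 5, 5)
    zeroed = apply_border_zero(mask)
    assert zeroed.sum().item() == 9      # only the 3x3 interior survives
    return zeroed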
def copy_paste(fores, masks, backs, border_zero=True):
# TODO possible improvement: poisson blending
# if hard_thres > 0:
# used_masks = (masks > hard_thres).float() # force binary
# else:
used_masks = masks.clone()
# Border zeroing implemented in April 2020
if border_zero:
used_masks = apply_border_zero(used_masks)
return used_masks * fores + (1.0 - used_masks) * backs
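# A usage sketch, not part of the original file: composites a random foreground
# onto a random background with a half-and-half mask, using (C, H, W) torch
# tensors as the dataset classes below do.
def _copy_paste_demo():
    fore = torch.rand(3, 32, 32)
    back = torch.rand(3, 32, 32)
    mask = torch.zeros(1, 32, 32)
    mask[:, :, 16:] = 1.0                # right half comes from the foreground
    comp = copy_paste(fore, mask, back)
    assert comp.shape == (3, 32, 32)
    # Because of border zeroing, the outermost rows/columns are pure background.
    assert torch.allclose(comp[:, 0, :], back[:, 0, :])
    return comp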
class MyCopyPasteDataset(Dataset):
'''
Custom dataset class with foreground, background, and optional mask folders as image sources.
Only one object may appear per image, since the object count is not kept track of.
Returns irrelevant foreground anti-shortcuts as well. Enforces color (RGB) images.
'''
def __init__(self, fore_dir, back_dir, mask_dir=None, rand_horz_flip=True, post_resize=-1, center_crop=False):
self.fore_dir = fore_dir
self.back_dir = back_dir
self.rand_horz_flip = rand_horz_flip
if post_resize <= 0:
self.post_tf = transforms.ToTensor() # converts [0, 255] to [0.0, 1.0]
elif center_crop:
# Resize + square center crop
self.post_tf = transforms.Compose([
transforms.ToPILImage(),
transforms.Resize(post_resize),
transforms.CenterCrop(post_resize),
transforms.ToTensor()
])
else:
# Resize both dimensions, possibly distorting the images
self.post_tf = transforms.Compose([
transforms.ToPILImage(),
transforms.Resize((post_resize, post_resize)),
transforms.ToTensor()
])
self.has_masks = (mask_dir is not None)
# Load all file paths; file names must be the same across all 2 or 3 given directories
# self.all_fore_files = []
# self.all_mask_files = []
# self.all_back_files = []
# for fn in os.listdir(fore_dir):
# fore_fp = os.path.join(fore_dir, fn)
# if os.path.isfile(fore_fp):
# back_fp = os.path.join(back_dir, fn)
# assert(os.path.isfile(back_fp))
# self.all_fore_files.append(fore_fp)
# self.all_back_files.append(back_fp)
# if self.has_masks:
# mask_fp = os.path.join(mask_dir, fn)
# assert(os.path.isfile(mask_fp))
# self.all_mask_files.append(mask_fp)
# Load all file paths; file names must be the same across foreground and segmentation masks
self.all_fore_files = []
self.all_mask_files = []
self.all_back_files = []
for fn in os.listdir(fore_dir):
fore_fp = os.path.join(fore_dir, fn)
self.all_fore_files.append(fore_fp)
if self.has_masks:
mask_fp_jpg = os.path.join(mask_dir, fn[:-4] + '.jpg')
mask_fp_png = os.path.join(mask_dir, fn[:-4] + '.png')
if os.path.isfile(mask_fp_jpg):
self.all_mask_files.append(mask_fp_jpg)
elif os.path.isfile(mask_fp_png):
self.all_mask_files.append(mask_fp_png)
else:
raise Exception('No matching mask file found for ' + fore_fp)
for fn in os.listdir(back_dir):
back_fp = os.path.join(back_dir, fn)
self.all_back_files.append(back_fp)
self.fore_count = len(self.all_fore_files)
self.back_count = len(self.all_back_files)
print('Image file count: ' + str(self.fore_count) + ' foreground, ' + str(self.back_count) + ' background, has masks: ' + str(self.has_masks))
def __len__(self):
return self.fore_count
def __getitem__(self, idx):
# Force randomness (especially if num_workers > 0)
np.random.seed(idx + int((time.time() * 654321) % 123456))
# Read random pair of images from file system
success = False
while not(success):
file_idx = np.random.choice(self.fore_count)
fp = self.all_fore_files[file_idx]
fore, success = read_image_robust(fp)
if not(success):
continue
if self.has_masks:
fp = self.all_mask_files[file_idx]
mask, success = read_image_robust(fp, monochromatic=True)
assert(success) # must match fore
# mask = ((mask > 0) * 255.0).astype('uint8') # convert soft masks to hard
else:
mask = None
# Read random background image
success = False
while not(success):
file_idx2 = np.random.choice(self.back_count)
fp = self.all_back_files[file_idx2]
back, success = read_image_robust(fp)
# Read irrelevant foreground image
success = False
while not(success):
file_idx3 = np.random.choice(self.fore_count)
if file_idx3 == file_idx:
continue # try again, cannot pick same image
fp = self.all_fore_files[file_idx3]
irrel, success = read_image_robust(fp)
# Transform foregrounds (+ masks) and backgrounds
# NOTE: identical random choices must be made for some images
if self.rand_horz_flip:
if np.random.rand() < 0.5:
fore = fore[:, ::-1, :].copy()
if self.has_masks:
mask = mask[:, ::-1, :].copy()
if np.random.rand() < 0.5:
irrel = irrel[:, ::-1, :].copy()
if np.random.rand() < 0.5:
back = back[:, ::-1, :].copy()
fore = self.post_tf(fore)
irrel = self.post_tf(irrel)
back = self.post_tf(back)
if self.has_masks:
mask = self.post_tf(mask)
# Verify sizes
assert(fore.shape[1:] == irrel.shape[1:])
assert(fore.shape[1:] == back.shape[1:])
if self.has_masks:
assert(fore.shape[1:] == mask.shape[1:])
# Create grounded fake mask and composite
width, height = fore.shape[2], fore.shape[1] # fore is (C, H, W)
gfake_mask = self.post_tf(create_random_gfake_mask(width, height))
comp_gfake = copy_paste(fore, gfake_mask, back)
# Construct dictionary; object count is unknown
result = {'fore': fore, 'back': back, 'irrel': irrel, 'object_cnt': 1, 'gfake_mask': gfake_mask, 'comp_gfake': comp_gfake}
if self.has_masks:
result['mask'] = mask # don't set None, otherwise crash
return result
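# A hypothetical usage sketch, not part of the original file. The directory
# paths are placeholders and must point at real image folders (with matching
# file names between the foreground and mask folders) for this to run.
def _copy_paste_dataset_sketch():
    dataset = MyCopyPasteDataset('data/fore', 'data/back', mask_dir='data/mask',
                                 post_resize=64, center_crop=True)
    sample = dataset[0]
    # Each sample bundles fore/back/irrel tensors, the grounded-fake mask and
    # its composite, the object count, and the real mask when masks are given.
    return sample['comp_gfake'].shape, sample['mask'].shape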
class MySquaresDataset(Dataset):
'''
Custom dataset class with just a collection of background images as source.
One or more artificial objects are painted to create a foreground, keeping track of object count.
Returns irrelevant foreground anti-shortcuts as well. Enforces color (RGB) images.
'''
def __init__(self, back_dir, rand_horz_flip=True, noisy=False, max_objects=10):
self.back_dir = back_dir
self.rand_horz_flip = rand_horz_flip
self.post_tf = transforms.ToTensor() # converts [0, 255] to [0.0, 1.0]
self.noisy = noisy
self.max_objects = max_objects
# Load all file paths; file names must be the same across all 2 or 3 given directories
self.all_back_files = []
for fn in os.listdir(back_dir):
back_fp = os.path.join(back_dir, fn)
self.all_back_files.append(back_fp)
self.file_count = len(self.all_back_files)
print('Image file count: ' + str(self.file_count) + ', noisy: ' + str(self.noisy) + ', max objects: ' + str(self.max_objects))
def __len__(self):
return self.file_count
def __getitem__(self, idx):
# Read a random triplet (relevant + background + irrelevant) of non-overlapping backgrounds from file system
success = False
while not(success):
file_idx = np.random.choice(self.file_count)
fp = self.all_back_files[file_idx]
fore, success = read_image_robust(fp)
success = False
while not(success):
file_idx2 = np.random.choice(self.file_count)
if file_idx2 == file_idx:
                continue
# Source: algogalaxy/cluster/kmeans.py
"""
@author: <NAME>
@email : <EMAIL>
@date : 29/6/21 5:44 pm
@desc :
@ref :
"""
import numpy as np
from algogalaxy.base import BaseEstimator
from utils.util_metric_funcs import euclidean_dist, angle, sq_euclidean_dist
# =============================================================================
# Types and constants
# =============================================================================
# Options for metric measure: Euclidean distance and angle size.
METRIC_FUNCS = {"euclidean": euclidean_dist, "angle": angle}
# =============================================================================
# Initialisation functions
# =============================================================================
def init_forgy(X, n_clusters):
"""
Initialise a specified number of centroids using Forgy's method, which
randomly samples data points from the data as the centroids.
Parameters
----------
X :
n_clusters :
Returns
-------
"""
return X[np.random.choice(range(X.shape[0]), replace=False, size=n_clusters), :]
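# A quick sketch, not part of the original file: Forgy initialisation simply
# samples k distinct rows of X as the starting centroids.
def _init_forgy_demo():
    X = np.random.rand(100, 2)
    centroids = init_forgy(X, n_clusters=3)
    assert centroids.shape == (3, 2)
    return centroids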
def init_random_partition(X, n_clusters, lower_limit, upper_limit):
"""
    Initialise random cluster-membership weights using the random partition
    method. Every sample in the input data receives its own random weight vector
    over the clusters, normalised to sum to 1, so the first dimension of the
    generated array equals the number of samples and the second dimension equals
    the specified number of clusters.
    Parameters
    ----------
    X : array-like
        Training instances.
    n_clusters : int
        The number of clusters to generate weights for.
    lower_limit : int
        Lower bound of the random integers drawn before normalisation.
    upper_limit : int
        Upper bound (exclusive) of the random integers drawn before normalisation.
    Returns
    -------
    ndarray of shape (n_samples, n_clusters)
        The 2-d array of random, row-normalised membership weights.
Notes
-----
According to Hamerly et al. [1]_, the Random Partition method is
generally preferable for algorithms such as the k-harmonic means and
fuzzy k-means.
References
----------
.. [1] <NAME>. and <NAME>., 2002, November. Alternatives to the
k-means algorithm that find better clusterings. In Proceedings
of the eleventh international conference on Information and
knowledge management (pp. 600-607).
"""
dots = []
for i in range(0, len(X)):
# dummy = [random.random() for i in range(n_clusters)]
dummy = np.random.randint(low=int(lower_limit), high=int(upper_limit), size=int(n_clusters))
dummy_sum = np.sum(dummy)
dot_temp = [d / dummy_sum for d in dummy]
print("1st is:\n", dot_temp)
# dummy = as_float_array(dummy) # Include cases: dummy.astype(np.float32) and dummy.astype(np.float64)
# for j in range(0, self.n_clusters):
# dummy[j] = dummy[j] / dummy_sum
# print("2nd is:\n", dummy)
dots.append(dot_temp)
return np.asarray(dots)
def init_rand_partition(X, n_clusters):
"""
Initialise a specified number of centroids using random partition method,
which randomly assign each data point to a cluster and then calculate the
average value of each cluster to get the centroids.
Parameters
----------
X
n_clusters
Returns
-------
"""
idxs = np.random.choice(range(0, n_clusters), replace=True, size=X.shape[0])
means = []
for j in range(n_clusters):
means.append(X[idxs == j].mean(axis=0))
return np.concatenate([mean[None, :] for mean in means], axis=0)
# NB: "None" above means that the current dimension is not sliced, but treated as a whole as an array element.
def _kmeans_plusplus(X, n_clusters, x_squared_norms, random_state, n_local_trials=None):
"""
Initialise a specified number of centroids using the k-means++ method (heuristic).
Prior validation of data is assumed.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
The data to pick seeds for.
n_clusters : int
The number of seeds to choose.
x_squared_norms : ndarray of shape (n_samples,)
Squared Euclidean norm of each data point.
random_state : RandomState instance
The generator used to initialize the centers.
n_local_trials : int, default=None
The number of seeding trials for each center (except the first),
of which the one reducing inertia the most is greedily chosen.
Set to None to make the number of trials depend logarithmically
on the number of seeds (2+log(k)); this is the default.
Returns
-------
centers : ndarray of shape (n_clusters, n_features)
The initial centers for k-means.
indexes : ndarray of shape (n_clusters,)
The index location of the chosen centers in the data array X. For a
given index and center, X[index] = center.
Notes
-----
K-means++ is an algorithm for choosing the initial values (or "seeds") for
the k-means clustering algorithm. It was proposed in 2007 by <NAME>
and <NAME> [1]_, as an approximation algorithm for the NP-hard
k-means problem—a way of avoiding the sometimes poor clusterings found by
the standard k-means algorithm. It is similar to the first of three seeding
methods proposed, in independent work, in 2006 by <NAME>, <NAME>, <NAME> and <NAME> [2]_. (The distribution of the
first seed is different.)
The exact k-means++ algorithm is as follows:
1. Choose one center uniformly at random among the data points.
2. For each data point `x` not chosen yet, compute :math:`D(x)`, the
distance between `x` and the nearest center that has already been chosen.
3. Choose one new data point at random as a new center, using a weighted
probability distribution where a point `x` is chosen with probability
proportional to :math:`D(x)^2`.
4. Repeat Steps 2 and 3 until `k` centers have been chosen.
5. Now that the initial centers have been chosen, proceed using standard
k-means clustering.
References
----------
.. [1] <NAME>. and <NAME>., 2006. k-means++: The advantages
of careful seeding. Stanford.
.. [2] <NAME>., <NAME>., <NAME>. and <NAME>., 2013. The
effectiveness of Lloyd-type methods for the k-means problem. Journal
of the ACM (JACM), 59(6), pp.1-22.
"""
# TODO: Simplify the function's arguments.
centers = []
X = np.array(X)
# Sample the first data point.
initial_idx = np.random.choice(range(X.shape[0]), )
centers.append(X[initial_idx, :].tolist())
print('max: ', np.max(np.sum((X - np.array(centers)) ** 2)))
# Loop and select the remaining points
for i in range(n_clusters - 1):
print(i)
distance = sq_euclidean_dist(X, np.array(centers))
if i == 0:
pdf = distance / np.sum(distance)
centroid_new = X[np.random.choice(range(X.shape[0]), replace=False, p=pdf.flatten())]
else:
# Calculate the distance of each data point from its nearest centroid.
dist_min = np.min(distance, axis=1)
# if pdf_method == True:
if n_local_trials is not None:
pdf = dist_min / np.sum(dist_min)
# Sample one data point from the given distribution.
centroid_new = X[np.random.choice(range(X.shape[0]), replace=False, p=pdf)]
else:
idx_max = np.argmax(dist_min, axis=0)
centroid_new = X[idx_max, :]
centers.append(centroid_new.tolist())
return np.array(centers)
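# A usage sketch, not part of the original file: seeds three centres with the
# private helper above. x_squared_norms and random_state are accepted but not
# used by this implementation, so None is passed for both; any non-None
# n_local_trials selects the probabilistic (D(x)^2-weighted) sampling branch.
def _kmeans_plusplus_demo():
    X = np.random.rand(150, 2)
    centers = _kmeans_plusplus(X, n_clusters=3, x_squared_norms=None,
                               random_state=None, n_local_trials=1)
    assert centers.shape == (3, 2)
    return centers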
def kmeans_plusplus(X, n_clusters, *, x_squared_norms=None,
random_state=None, n_local_trials=None):
"""
Initialise a specified number of seeds using the k-means++ method.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data to pick seeds from.
n_clusters : int
The number of centroids to initialize
x_squared_norms : array-like of shape (n_samples,), default=None
Squared Euclidean norm of each data point.
random_state : int or RandomState instance, default=None
Determines random number generation for centroid initialization. Pass
an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
n_local_trials : int, default=None
The number of seeding trials for each center (except the first),
of which the one reducing inertia the most is greedily chosen.
Set to None to make the number of trials depend logarithmically
on the number of seeds (2+log(k)).
Returns
-------
centers : ndarray of shape (n_clusters, n_features)
The inital centers for k-means.
indices : ndarray of shape (n_clusters,)
The index location of the chosen centers in the data array X. For a
given index and center, X[index] = center.
Notes
-----
Selects initial cluster centers for k-mean clustering in a smart way
to speed up convergence. see: <NAME>. and <NAME>.
"k-means++: the advantages of careful seeding". ACM-SIAM symposium
on Discrete algorithms. 2007
"""
pass # TODO: Simplify the function's arguments; Do validation and call the private function _kmeans_plusplus().
# =============================================================================
# Public k-means batch estimation functions
# =============================================================================
# All functions are based on EM(expectation maximisation).
def k_means(X, n_clusters, *, sample_weight=None, init_method='k-means++',
precompute_distances='deprecated', n_init=10, max_iter=300,
verbose=False, tol=1e-4, random_state=None, copy_x=True,
n_jobs='deprecated', algorithm="auto", return_n_iter=False,
metric="euclidean"):
"""
K-means clustering algorithm (standard k-means).
Parameters
----------
X : array-like of shape (n_samples, n_features)
The observations to cluster. It must be noted that the data
will be converted to C ordering, which will cause a memory copy
if the given data is not C-contiguous.
n_clusters : int
The number of clusters to form as well as the number of
centroids to generate.
sample_weight : array-like of shape (n_samples,), default=None
The weights for each observation in X. If None, all observations
are assigned equal weight.
init_method : {'k-means++', 'random'}, callable or array-like of shape \
(n_clusters, n_features), default='k-means++'
Method for initialization:
'k-means++' : selects initial cluster centers for k-mean
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': choose `n_clusters` observations (rows) at random from data
for the initial centroids.
If an array is passed, it should be of shape (n_clusters, n_features)
and gives the | |
import logging
import logging.handlers
import queue
from multiprocessing import JoinableQueue, Process, active_children
from time import sleep
# IMPORTS FOR TYPE HINTING
from typing import Callable, List, Optional, Type, Union
class PipelineManager:
"""
Class encapsulating multiprocessing library in Python.
This class is meant to encapsulate the multiprocessing
functionality already present in Python and combine it
with several other useful libraries, to serve as a way
to have both convenience and functionality.
Parameters
----------
name : str
The name of the PipelineManager. Meant for differentiation.
input_queue : multiprocessing.JoinableQueue
The JoinableQueue from which Processes will be pulled from.
See the Multiprocessing documentation for more information.
output_queue : multiprocessing.JoinableQueue
The JoinableQueue which Processes will place finished items in.
See the Multiprocessing documentation for more information.
error_queue : multiprocessing.JoinableQueue
The JoinableQueue which Processes will place bugged/unfinished items in.
See the Multiprocessing documentation for more information.
slack_queue : SlackBot.SlackQueue
A wrapper class around multiprocessing.JoinableQueue that allows for
asyncio-friendly operations.
logging_queue : multiprocessing.JoinableQueue
The JoinableQueue which is meant to handle logging messages.
See the Multiprocessing documentation for more information.
num_processes : int
The number of Process objects that will be made by this PipelineManager.
process_job : function
The function that each Process will be given to work on.
Make sure that this lines up with what the input/output Queues expect.
timeout_duration : int
The number of seconds which a Process will attempt to get an item from
input_queue before throwing a TimeoutError.
"""
def __init__(self,
input_queue: JoinableQueue,
output_queue: JoinableQueue,
error_queue: JoinableQueue,
slack_queue: 'SlackBot.SlackQueue',
logging_queue: JoinableQueue,
process_job: Callable[[Type['Task.Task']], Type['Task.Task']],
name: str ="PipelineManager",
num_processes: int = 1,
timeout_duration: int = 1) -> None:
"""
Initialize a PipelineManager Object.
Initializes an instance of PipelineManager with a name,
given pair of input/output queues (important for chaining),
the number of processes the PipelineManager will deal with,
the actual target function used (see Process in multiprocessing),
and how long a given Process should wait until returning a TimeoutError.
        NOTE: Requires multiprocessing package
Parameters
----------
name : str
The name of the PipelineManager. Meant for differentiation.
input_queue : multiprocessing.JoinableQueue
The JoinableQueue from which Processes will be pulled from.
See the Multiprocessing documentation for more information.
output_queue : multiprocessing.JoinableQueue
The JoinableQueue which Processes will place finished items in.
See the Multiprocessing documentation for more information.
error_queue : multiprocessing.JoinableQueue
The JoinableQueue which Processes will place bugged/unfinished items in.
See the Multiprocessing documentation for more information.
slack_queue : SlackBot.SlackQueue
A wrapper class around multiprocessing.JoinableQueue that allows for
asyncio-friendly operations.
logging_queue : multiprocessing.JoinableQueue
The JoinableQueue which is meant to handle logging messages.
See the Multiprocessing documentation for more information.
num_processes : int
The number of Process objects that will be made by this PipelineManager.
process_job : function
The function that each Process will be given to work on.
Make sure that this lines up with what the input/output Queues expect.
timeout_duration : int
The number of seconds which a Process will attempt to get an item from
input_queue before throwing a TimeoutError.
Returns
-------
self : PipelineManager
A PipelineManager object.
Raises
------
None.
"""
self.name = name
#An attempt to idiot-proof the PipelineManager by instantiating a JoinableQueue() if one didn't exist already.
self.input_queue = input_queue if input_queue else JoinableQueue()
self.output_queue = output_queue if output_queue else JoinableQueue()
self.error_queue = error_queue if error_queue else JoinableQueue()
self.slack_queue = slack_queue
self.logging_queue = logging_queue
self.num_processes = num_processes
self.process_job = process_job
self.timeout_duration = timeout_duration
#A list of active processes comprised of Process objects
self.process_list: List[Process] = []
#An internal restart flag (used when all processes managed die)
self.restart_required = False
self.logger = logging.getLogger(self.name)
self.logger.setLevel(logging.DEBUG)
def setup_manager(self) -> None:
"""
Create Processes to be run by the PipelineManager.
This function will create a number of Process objects,
but does not run them immediately. This function is meant
to be used if data is not yet ready in the JoinableQueue.
Parameters
----------
None.
Returns
-------
None.
"""
#Clean out the process list.
self.process_list.clear()
for _ in range(self.num_processes):
p = Process(target=self.multiprocessing_job,
args=(self.process_job,))
self.process_list.append(p)
self.restart_required = False
def run_manager(self) -> None:
"""
Begin running the PipelineManager.
This function will start any Process objects
inside of the PipelineManager. From there,
        it will poll every second to check if any of
the Process objects are still active.
Parameters
----------
None.
Returns
-------
None.
"""
        # Iterate over a copy: the except branch mutates self.process_list,
        # which would otherwise corrupt the iteration.
        for p in list(self.process_list):
            try:
                p.daemon = True
                p.start()
            except Exception:
                # Replace a process that could not be started.
                self.process_list.remove(p)
                p = Process(target=self.multiprocessing_job, args=(self.process_job,))
                p.daemon = True
                self.process_list.append(p)
                p.start()
        # Every second, check for active Processes.
        while True:
            sleep(1)
            running = any(p.is_alive() for p in self.process_list)
            if not running or not active_children():
self.restart_required = True
break
self.logger.info(self.name + " has finished managing.")
def end_manager(self) -> None:
"""
Send poison pills to the processes under the manager.
Sends None Tasks into the JoinableQueue, which
will cause the Process that picks them
up to terminate.
Parameters
----------
None.
Returns
-------
None.
"""
for _ in range(self.num_processes):
self.input_queue.put(None)
def kill_manager(self) -> None:
"""
Kill all processes under the manager
in the event of interruption.
This function will terminate all processes
and attempt to release any resources held
in the event of interruption to avoid potential
leakage and orphaned processes.
Parameters
----------
None.
Returns
-------
None.
"""
for p in self.process_list:
p.terminate()
# NOTE: Seems Python does not appreciate if close is called too quickly.
sleep(0.5)
            # Release the resources held by the Process (Python 3.7 and up)
p.close()
def purge_manager(self) -> None:
"""
Purge the manager in the event of interruption.
This function flushes the input queue and attempts
to place any pending tasks into the error queue for
further handling/disposal, flushing the queue afterwards.
Parameters
----------
None.
Returns
-------
None.
"""
self.logger.debug(f"Beginning purging of {self.name}")
# Don't need to join an empty queue, likely will result in more issues if you do.
if self.input_queue.empty():
self.logger.debug(f"Input Queue of {self.name} is empty.")
else:
# Try to process all the remaining values put in (no need to change behavior around poison pills)
while not self.input_queue.empty():
try:
task = self.input_queue.get_nowait()
self.logger.debug(f"Receiving Task to purge: {task.get_job_id()}")
self.error_queue.put(task)
# NOTE: This sleep call may be unnecessary but I placed it here to err on the side of caution.
sleep(1)
self.input_queue.task_done()
except:
break
# NOTE: This loop is a bit hacky probably, but it does ensure that the correct number of task_done calls are made.
# NOTE: This is meant to handle any "unfinished tasks" (meaning ones that haven't had their task_done calls).
while not self.input_queue._unfinished_tasks._semlock._is_zero():
self.input_queue.task_done()
self.restart_required = True
def cleanup_manager(self) -> None:
"""
Clean up the manager by releasing the resources held by each process.
Parameters
----------
None.
Returns
-------
None.
"""
for p in self.process_list:
if p.is_alive():
p.terminate()
sleep(1)
p.close()
def print_process_list(self) -> None:
"""
Print the process list.
A convenience function that
prints the processes under
this PipelineManager.
Parameters
----------
None.
Returns
-------
None.
"""
print(f"Process List: {self.process_list}")
def print(self) -> None:
"""
Print all relevant information on this PipelineManager.
Prints out all arguments (except the JoinableQueue objects) that
are associated with this PipelineManager. Meant for debugging
purposes.
Parameters
----------
None.
Returns
-------
None.
"""
print("Name: {}".format(self.name))
print("Input Queue: {}".format(self.input_queue))
print("Output Queue: {}".format(self.output_queue))
print("Restart Required: {}".format(str(self.restart_required)))
print("Number of Processes: {}".format(str(self.num_processes)))
print("Process Job: {}".format(self.process_job.__name__))
print("Timeout Duration: {}".format(str(self.timeout_duration)))
self.print_process_list()
def _to_string(self) -> str:
"""
Generate a string representation of the object.
Generates a string that contains all the information
about the object found inside of the __dict__ attribute.
This includes variable names and values, and is predominately
used for logging purposes.
Parameters
----------
None.
Returns
-------
result : str
A string that contains all information about an object found in the __dict__ attribute.
"""
string_list = []
for key, value in self.__dict__.items():
if isinstance(value, dict):
string_list.append(key)
                string_list.append('\n'.join(["Key: {:24}\tValue: {}".format(_key, _value) for _key, _value in value.items()]))
else:
string_list.append("Key: {:24}\tValue: {}\n".format(key, value))
return ''.join(string_list)
def push_information_to_slack_bot(self, q: 'SlackBot.SlackQueue', item: List[Optional[Union[str, int]]]) -> None:
"""
Push some information into the Slack Bot.
        """
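# --- Hedged usage sketch (not part of the original module) -------------------
# Illustrates how the class above is wired together. It assumes the rest of the
# class (notably the multiprocessing_job worker loop and the truncated
# push_information_to_slack_bot body) exists as in the full source; `echo_job`
# and the None slack queue are illustrative stand-ins, not real pipeline pieces.
def echo_job(task):
    # Trivial stand-in for a real process_job: pass the task through unchanged.
    return task

if __name__ == "__main__":
    manager = PipelineManager(
        input_queue=JoinableQueue(),
        output_queue=JoinableQueue(),
        error_queue=JoinableQueue(),
        slack_queue=None,               # assumption: a SlackBot.SlackQueue in production
        logging_queue=JoinableQueue(),
        process_job=echo_job,
        name="DemoManager",
        num_processes=2,
    )
    manager.setup_manager()
    manager.end_manager()               # queue poison pills so the demo exits quickly
    manager.run_manager()
    manager.cleanup_manager()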
# Repository: autodidact-m/Projects
# coding: utf-8
# In[1]:
import csv
import pandas as pd
import os
pd.options.mode.chained_assignment = None
import numpy as np
import boto3
from boto3.s3.transfer import S3Transfer
import sys
# In[5]:
def readFile():
homepath = os.path.expanduser('~')
indicator_data = pd.read_csv('./Data/Clustering/Indicators_Clustering_Combined.csv', low_memory=False)
return indicator_data
# # Handling Missing values for Australia
# In[25]:
def australia():
indicator_data = readFile()
australia_df_ind1 = indicator_data[(indicator_data['IndicatorCode'].isin(['AG.LND.AGRI.ZS'])) & (indicator_data['CountryCode'] == 'AU')]
australia_df_ind2 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.DYN.CBRT.IN'])) & (indicator_data['CountryCode'] == 'AU')]
australia_df_ind3 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.DPND'])) & (indicator_data['CountryCode'] == 'AU')]
australia_df_ind4 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.EXP.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'AU')]
australia_df_ind5 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.IMP.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'AU')]
australia_df_ind6 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.CD'])) & (indicator_data['CountryCode'] == 'AU')]
australia_df_ind7 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.KD.ZG'])) & (indicator_data['CountryCode'] == 'AU')]
australia_df_ind8 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.GROW'])) & (indicator_data['CountryCode'] == 'AU')]
australia_df_ind9 = indicator_data[(indicator_data['IndicatorCode'].isin(['FI.RES.TOTL.CD'])) & (indicator_data['CountryCode'] == 'AU')]
australia_df_ind10 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.TRD.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'AU')]
australia_df_ind1['Value'] = australia_df_ind1['Value'].fillna(method='bfill', axis = 0)
australia_df_ind2['Value'] = australia_df_ind2['Value'].fillna(method='bfill', axis = 0)
    # Combining all the Australia Dataframes
australia_df = pd.concat([australia_df_ind1, australia_df_ind2, australia_df_ind3, australia_df_ind4, australia_df_ind5, australia_df_ind6, australia_df_ind7, australia_df_ind8, australia_df_ind9, australia_df_ind10])
print('Clustering Wrangling completed for Australia!!', '\n')
return australia_df
# # Handling Missing values for Canada
# In[26]:
def canada():
indicator_data = readFile()
canada_df_ind1 = indicator_data[(indicator_data['IndicatorCode'].isin(['AG.LND.AGRI.ZS'])) & (indicator_data['CountryCode'] == 'CA')]
canada_df_ind2 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.DYN.CBRT.IN'])) & (indicator_data['CountryCode'] == 'CA')]
canada_df_ind3 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.DPND'])) & (indicator_data['CountryCode'] == 'CA')]
canada_df_ind4 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.EXP.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'CA')]
canada_df_ind5 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.IMP.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'CA')]
canada_df_ind6 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.CD'])) & (indicator_data['CountryCode'] == 'CA')]
canada_df_ind7 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.KD.ZG'])) & (indicator_data['CountryCode'] == 'CA')]
canada_df_ind8 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.GROW'])) & (indicator_data['CountryCode'] == 'CA')]
canada_df_ind9 = indicator_data[(indicator_data['IndicatorCode'].isin(['FI.RES.TOTL.CD'])) & (indicator_data['CountryCode'] == 'CA')]
canada_df_ind10 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.TRD.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'CA')]
canada_df_ind1['Value'] = canada_df_ind1['Value'].fillna(method='bfill', axis = 0)
canada_df_ind2['Value'] = canada_df_ind2['Value'].fillna(method='bfill', axis = 0)
    # Combining all the Canada Dataframes
canada_df = pd.concat([canada_df_ind1, canada_df_ind2, canada_df_ind3, canada_df_ind4, canada_df_ind5, canada_df_ind6, canada_df_ind7, canada_df_ind8, canada_df_ind9, canada_df_ind10])
print('Clustering Wrangling completed for Canada!!', '\n')
return canada_df
# # Handling Missing values for Saudi Arabia
# In[27]:
def saudi_Arabia():
indicator_data = readFile()
saudi_arabia_df_ind1 = indicator_data[(indicator_data['IndicatorCode'].isin(['AG.LND.AGRI.ZS'])) & (indicator_data['CountryCode'] == 'SA')]
saudi_arabia_df_ind2 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.DYN.CBRT.IN'])) & (indicator_data['CountryCode'] == 'SA')]
saudi_arabia_df_ind3 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.DPND'])) & (indicator_data['CountryCode'] == 'SA')]
saudi_arabia_df_ind4 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.EXP.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'SA')]
saudi_arabia_df_ind5 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.IMP.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'SA')]
saudi_arabia_df_ind6 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.CD'])) & (indicator_data['CountryCode'] == 'SA')]
saudi_arabia_df_ind7 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.KD.ZG'])) & (indicator_data['CountryCode'] == 'SA')]
saudi_arabia_df_ind8 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.GROW'])) & (indicator_data['CountryCode'] == 'SA')]
saudi_arabia_df_ind9 = indicator_data[(indicator_data['IndicatorCode'].isin(['FI.RES.TOTL.CD'])) & (indicator_data['CountryCode'] == 'SA')]
saudi_arabia_df_ind10 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.TRD.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'SA')]
saudi_arabia_df_ind1['Value'] = saudi_arabia_df_ind1['Value'].fillna(method='bfill', axis = 0)
saudi_arabia_df_ind2['Value'] = saudi_arabia_df_ind2['Value'].fillna(method='bfill', axis = 0)
    # Combining all the Saudi Arabia Dataframes
saudi_arabia_df = pd.concat([saudi_arabia_df_ind1, saudi_arabia_df_ind2, saudi_arabia_df_ind3, saudi_arabia_df_ind4, saudi_arabia_df_ind5, saudi_arabia_df_ind6, saudi_arabia_df_ind7, saudi_arabia_df_ind8, saudi_arabia_df_ind9, saudi_arabia_df_ind10])
print('Clustering Wrangling completed for Saudi Arabia!!', '\n')
return saudi_arabia_df
# # Handling Missing values for United States
# In[28]:
def united_States():
indicator_data = readFile()
united_states_df_ind1 = indicator_data[(indicator_data['IndicatorCode'].isin(['AG.LND.AGRI.ZS'])) & (indicator_data['CountryCode'] == 'US')]
united_states_df_ind2 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.DYN.CBRT.IN'])) & (indicator_data['CountryCode'] == 'US')]
united_states_df_ind3 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.DPND'])) & (indicator_data['CountryCode'] == 'US')]
united_states_df_ind4 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.EXP.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'US')]
united_states_df_ind5 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.IMP.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'US')]
united_states_df_ind6 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.CD'])) & (indicator_data['CountryCode'] == 'US')]
united_states_df_ind7 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.KD.ZG'])) & (indicator_data['CountryCode'] == 'US')]
united_states_df_ind8 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.GROW'])) & (indicator_data['CountryCode'] == 'US')]
united_states_df_ind9 = indicator_data[(indicator_data['IndicatorCode'].isin(['FI.RES.TOTL.CD'])) & (indicator_data['CountryCode'] == 'US')]
united_states_df_ind10 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.TRD.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'US')]
united_states_df_ind1['Value'] = united_states_df_ind1['Value'].fillna(method='bfill', axis = 0)
united_states_df_ind2['Value'] = united_states_df_ind2['Value'].fillna(method='bfill', axis = 0)
united_states_df_ind4['Value'] = united_states_df_ind4['Value'].fillna(method='bfill', axis = 0)
united_states_df_ind5['Value'] = united_states_df_ind5['Value'].fillna(method='bfill', axis = 0)
united_states_df_ind10['Value'] = united_states_df_ind10['Value'].fillna(method='bfill', axis = 0)
    # Combining all the United States Dataframes
united_states_df = pd.concat([united_states_df_ind1, united_states_df_ind2, united_states_df_ind3, united_states_df_ind4, united_states_df_ind5, united_states_df_ind6, united_states_df_ind7, united_states_df_ind8, united_states_df_ind9, united_states_df_ind10])
print('Clustering Wrangling completed for United States!!', '\n')
return united_states_df
# # Handling Missing values for India
# In[10]:
def india():
indicator_data = readFile()
india_df_ind1 = indicator_data[(indicator_data['IndicatorCode'].isin(['AG.LND.AGRI.ZS'])) & (indicator_data['CountryCode'] == 'IN')]
india_df_ind2 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.DYN.CBRT.IN'])) & (indicator_data['CountryCode'] == 'IN')]
india_df_ind3 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.DPND'])) & (indicator_data['CountryCode'] == 'IN')]
india_df_ind4 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.EXP.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'IN')]
india_df_ind5 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.IMP.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'IN')]
india_df_ind6 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.CD'])) & (indicator_data['CountryCode'] == 'IN')]
india_df_ind7 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.KD.ZG'])) & (indicator_data['CountryCode'] == 'IN')]
india_df_ind8 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.GROW'])) & (indicator_data['CountryCode'] == 'IN')]
india_df_ind9 = indicator_data[(indicator_data['IndicatorCode'].isin(['FI.RES.TOTL.CD'])) & (indicator_data['CountryCode'] == 'IN')]
india_df_ind10 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.TRD.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'IN')]
india_df_ind1['Value'] = india_df_ind1['Value'].fillna(method='bfill', axis = 0)
india_df_ind2['Value'] = india_df_ind2['Value'].fillna(method='bfill', axis = 0)
# Combining all the India Dataframes
india_df = pd.concat([india_df_ind1, india_df_ind2, india_df_ind3, india_df_ind4, india_df_ind5, india_df_ind6, india_df_ind7, india_df_ind8, india_df_ind9, india_df_ind10])
print('Clustering Wrangling completed for India!!', '\n')
return india_df
# # Handling Missing values for Russia
# In[11]:
def russia():
indicator_data = readFile()
russia_df_ind1 = indicator_data[(indicator_data['IndicatorCode'].isin(['AG.LND.AGRI.ZS'])) & (indicator_data['CountryCode'] == 'RU')]
russia_df_ind2 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.DYN.CBRT.IN'])) & (indicator_data['CountryCode'] == 'RU')]
russia_df_ind3 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.DPND'])) & (indicator_data['CountryCode'] == 'RU')]
russia_df_ind4 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.EXP.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'RU')]
russia_df_ind5 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.IMP.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'RU')]
russia_df_ind6 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.CD'])) & (indicator_data['CountryCode'] == 'RU')]
russia_df_ind7 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.KD.ZG'])) & (indicator_data['CountryCode'] == 'RU')]
russia_df_ind8 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.GROW'])) & (indicator_data['CountryCode'] == 'RU')]
russia_df_ind9 = indicator_data[(indicator_data['IndicatorCode'].isin(['FI.RES.TOTL.CD'])) & (indicator_data['CountryCode'] == 'RU')]
russia_df_ind10 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.TRD.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'RU')]
russia_df_ind1['Value'] = russia_df_ind1['Value'].fillna(method='bfill', axis = 0)
russia_df_ind2['Value'] = russia_df_ind2['Value'].fillna(method='bfill', axis = 0)
    # Combining all the Russia Dataframes
russia_df = pd.concat([russia_df_ind1, russia_df_ind2, russia_df_ind3, russia_df_ind4, russia_df_ind5, russia_df_ind6, russia_df_ind7, russia_df_ind8, russia_df_ind9, russia_df_ind10])
print('Clustering Wrangling completed for Russia!!', '\n')
return russia_df
# # Handling Missing values for South Africa
# In[12]:
def south_Africa():
indicator_data = readFile()
south_africa_df_ind1 = indicator_data[(indicator_data['IndicatorCode'].isin(['AG.LND.AGRI.ZS'])) & (indicator_data['CountryCode'] == 'ZA')]
south_africa_df_ind2 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.DYN.CBRT.IN'])) & (indicator_data['CountryCode'] == 'ZA')]
south_africa_df_ind3 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.DPND'])) & (indicator_data['CountryCode'] == 'ZA')]
south_africa_df_ind4 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.EXP.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'ZA')]
south_africa_df_ind5 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.IMP.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'ZA')]
south_africa_df_ind6 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.CD'])) & (indicator_data['CountryCode'] == 'ZA')]
south_africa_df_ind7 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.KD.ZG'])) & (indicator_data['CountryCode'] == 'ZA')]
south_africa_df_ind8 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.GROW'])) & (indicator_data['CountryCode'] == 'ZA')]
south_africa_df_ind9 = indicator_data[(indicator_data['IndicatorCode'].isin(['FI.RES.TOTL.CD'])) & (indicator_data['CountryCode'] == 'ZA')]
south_africa_df_ind10 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.TRD.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'ZA')]
south_africa_df_ind1['Value'] = south_africa_df_ind1['Value'].fillna(method='bfill', axis = 0)
south_africa_df_ind2['Value'] = south_africa_df_ind2['Value'].fillna(method='bfill', axis = 0)
# Combining all the South_Africa Dataframes
south_africa_df = pd.concat([south_africa_df_ind1, south_africa_df_ind2, south_africa_df_ind3, south_africa_df_ind4, south_africa_df_ind5, south_africa_df_ind6, south_africa_df_ind7, south_africa_df_ind8, south_africa_df_ind9, south_africa_df_ind10])
print('Clustering Wrangling completed for South Africa!!', '\n')
return south_africa_df
# # Handling Missing values for Turkey
# In[13]:
def turkey():
indicator_data = readFile()
turkey_df_ind1 = indicator_data[(indicator_data['IndicatorCode'].isin(['AG.LND.AGRI.ZS'])) & (indicator_data['CountryCode'] == 'TR')]
turkey_df_ind2 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.DYN.CBRT.IN'])) & (indicator_data['CountryCode'] == 'TR')]
turkey_df_ind3 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.DPND'])) & (indicator_data['CountryCode'] == 'TR')]
turkey_df_ind4 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.EXP.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'TR')]
turkey_df_ind5 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.IMP.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'TR')]
turkey_df_ind6 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.CD'])) & (indicator_data['CountryCode'] == 'TR')]
turkey_df_ind7 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.KD.ZG'])) & (indicator_data['CountryCode'] == 'TR')]
turkey_df_ind8 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.GROW'])) & (indicator_data['CountryCode'] == 'TR')]
turkey_df_ind9 = indicator_data[(indicator_data['IndicatorCode'].isin(['FI.RES.TOTL.CD'])) & (indicator_data['CountryCode'] == 'TR')]
turkey_df_ind10 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.TRD.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'TR')]
turkey_df_ind1['Value'] = turkey_df_ind1['Value'].fillna(method='bfill', axis = 0)
turkey_df_ind2['Value'] = turkey_df_ind2['Value'].fillna(method='bfill', axis = 0)
    # Combining all the Turkey Dataframes
turkey_df = pd.concat([turkey_df_ind1, turkey_df_ind2, turkey_df_ind3, turkey_df_ind4, turkey_df_ind5, turkey_df_ind6, turkey_df_ind7, turkey_df_ind8, turkey_df_ind9, turkey_df_ind10])
print('Clustering Wrangling completed for Turkey!!', '\n')
return turkey_df
# # Handling Missing values for Argentina
# In[14]:
def argentina():
indicator_data = readFile()
argentina_df_ind1 = indicator_data[(indicator_data['IndicatorCode'].isin(['AG.LND.AGRI.ZS'])) & (indicator_data['CountryCode'] == 'AR')]
argentina_df_ind2 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.DYN.CBRT.IN'])) & (indicator_data['CountryCode'] == 'AR')]
argentina_df_ind3 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.DPND'])) & (indicator_data['CountryCode'] == 'AR')]
argentina_df_ind4 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.EXP.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'AR')]
argentina_df_ind5 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.IMP.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'AR')]
argentina_df_ind6 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.CD'])) & (indicator_data['CountryCode'] == 'AR')]
argentina_df_ind7 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.KD.ZG'])) & (indicator_data['CountryCode'] == 'AR')]
argentina_df_ind8 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.GROW'])) & (indicator_data['CountryCode'] == 'AR')]
argentina_df_ind9 = indicator_data[(indicator_data['IndicatorCode'].isin(['FI.RES.TOTL.CD'])) & (indicator_data['CountryCode'] == 'AR')]
argentina_df_ind10 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.TRD.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'AR')]
argentina_df_ind1['Value'] = argentina_df_ind1['Value'].fillna(method='bfill', axis = 0)
argentina_df_ind2['Value'] = argentina_df_ind2['Value'].fillna(method='bfill', axis = 0)
# Combining all the Argentina Dataframes
argentina_df = pd.concat([argentina_df_ind1, argentina_df_ind2, argentina_df_ind3, argentina_df_ind4, argentina_df_ind5, argentina_df_ind6, argentina_df_ind7, argentina_df_ind8, argentina_df_ind9, argentina_df_ind10])
print('Clustering Wrangling completed for Argentina!!', '\n')
return argentina_df
# # Handling Missing values for Brazil
# In[17]:
def brazil():
indicator_data = readFile()
brazil_df_ind1 = indicator_data[(indicator_data['IndicatorCode'].isin(['AG.LND.AGRI.ZS'])) & (indicator_data['CountryCode'] == 'BR')]
brazil_df_ind2 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.DYN.CBRT.IN'])) & (indicator_data['CountryCode'] == 'BR')]
brazil_df_ind3 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.DPND'])) & (indicator_data['CountryCode'] == 'BR')]
brazil_df_ind4 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.EXP.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'BR')]
brazil_df_ind5 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.IMP.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'BR')]
brazil_df_ind6 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.CD'])) & (indicator_data['CountryCode'] == 'BR')]
brazil_df_ind7 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.KD.ZG'])) & (indicator_data['CountryCode'] == 'BR')]
brazil_df_ind8 = indicator_data[(indicator_data['IndicatorCode'].isin(['SP.POP.GROW'])) & (indicator_data['CountryCode'] == 'BR')]
brazil_df_ind9 = indicator_data[(indicator_data['IndicatorCode'].isin(['FI.RES.TOTL.CD'])) & (indicator_data['CountryCode'] == 'BR')]
brazil_df_ind10 = indicator_data[(indicator_data['IndicatorCode'].isin(['NE.TRD.GNFS.ZS'])) & (indicator_data['CountryCode'] == 'BR')]
brazil_df_ind1['Value'] = brazil_df_ind1['Value'].fillna(method='bfill', axis = 0)
brazil_df_ind2['Value'] = brazil_df_ind2['Value'].fillna(method='bfill', axis = 0)
# Combining all the Brazil Dataframes
brazil_df = pd.concat([brazil_df_ind1, brazil_df_ind2, brazil_df_ind3, brazil_df_ind4, brazil_df_ind5, brazil_df_ind6, brazil_df_ind7, brazil_df_ind8, brazil_df_ind9, brazil_df_ind10])
print('Clustering Wrangling completed for Brazil!!', '\n')
return brazil_df
# # Handling Missing values for Mexico
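# --- Hedged refactoring sketch (not in the original notebook) ----------------
# The script is truncated at the Mexico section above. Every country function
# repeats the same filter / backfill / concat pattern, so a single
# parameterised helper could replace them. The INDICATORS list mirrors the ten
# codes used above; the helper name `wrangle_country` and its backfill_codes
# default are illustrative assumptions, not part of the original code.
INDICATORS = ['AG.LND.AGRI.ZS', 'SP.DYN.CBRT.IN', 'SP.POP.DPND', 'NE.EXP.GNFS.ZS',
              'NE.IMP.GNFS.ZS', 'NY.GDP.MKTP.CD', 'NY.GDP.MKTP.KD.ZG', 'SP.POP.GROW',
              'FI.RES.TOTL.CD', 'NE.TRD.GNFS.ZS']

def wrangle_country(country_code, backfill_codes=('AG.LND.AGRI.ZS', 'SP.DYN.CBRT.IN')):
    indicator_data = readFile()
    frames = []
    for code in INDICATORS:
        frame = indicator_data[(indicator_data['IndicatorCode'] == code) &
                               (indicator_data['CountryCode'] == country_code)].copy()
        if code in backfill_codes:
            frame['Value'] = frame['Value'].fillna(method='bfill', axis=0)
        frames.append(frame)
    print('Clustering Wrangling completed for %s!!' % country_code, '\n')
    return pd.concat(frames)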
# Filename: old_game/maps.py
import pygame
from . import image
import weakref
from . import characters
import math
from . import pfov
import collections
from . import pygwrap
from . import enchantments
from . import items
from . import context
import random
from . import monsters
from . import container
# Enumerated constants for sprite sheets.
SPRITE_GROUND, SPRITE_WALL, SPRITE_BORDER, SPRITE_INTERIOR, SPRITE_FLOOR, \
SPRITE_DECOR, SPRITE_CHEST, SPRITE_SIGNS = list(range( 8))
class SingTerrain( object ):
# A singleton terrain class; use these objects as tokens for maps.
def __init__( self, ident, spritesheet = SPRITE_GROUND, block_vision = False, block_walk = False, block_fly = False, frame = 0 ):
# ident should be the module-level name of this stat.
self.ident = ident
self.spritesheet = spritesheet
self.block_vision = block_vision
self.block_walk = block_walk
self.block_fly = block_fly
self.frame = frame
def render( self, screen, dest, view, data ):
view.sprites[ self.spritesheet ].render( screen, dest, self.frame )
def prerender( self, screen, dest, view, data ):
"""Some wall types need a border that gets drawn first."""
pass
def get_data( self, view, x, y ):
"""Pre-generate display data for this tile."""
return None
def place( self, scene, pos ):
if scene.on_the_map( *pos ):
scene.map[pos[0]][pos[1]].decor = self
def __str__( self ):
return self.ident
def __reduce__( self ):
return self.ident
class VariableTerrain( SingTerrain ):
def __init__( self, ident, spritesheet = SPRITE_FLOOR, block_vision = False, block_walk = False, block_fly = False, frames = (0,1,) ):
# ident should be the module-level name of this stat.
self.ident = ident
self.spritesheet = spritesheet
self.block_vision = block_vision
self.block_walk = block_walk
self.block_fly = block_fly
self.frames = frames
def render( self, screen, dest, view, data ):
view.sprites[ self.spritesheet ].render( screen, dest, self.frames[ data ] )
def get_data( self, view, x, y ):
"""Pre-generate display data for this tile."""
return view.get_pseudo_random() % len(self.frames)
class GroundTerrain( SingTerrain ):
# A singleton terrain class; use these objects as tokens for maps.
def __init__( self, ident, spritesheet = SPRITE_GROUND, block_vision = False, block_walk = False, block_fly = False, frame = 0, edge = None ):
# ident should be the module-level name of this stat.
self.ident = ident
self.spritesheet = spritesheet
self.block_vision = block_vision
self.block_walk = block_walk
self.block_fly = block_fly
self.frame = frame
self.edge = edge
def render( self, screen, dest, view, data ):
view.sprites[ self.spritesheet ].render( screen, dest, self.frame + data )
def get_data( self, view, x, y ):
"""Pre-generate display data for this tile- frame offset."""
n = view.calc_floor_score( x , y , self.edge )
if n > 0:
n += 6
else:
n = view.get_pseudo_random() % 7
return n
class WaterTerrain( SingTerrain ):
# A singleton terrain class; use these objects as tokens for maps.
def __init__( self, ident, spritesheet = SPRITE_GROUND, block_vision = False, block_walk = True, block_fly = False, frame = 0 ):
# ident should be the module-level name of this stat.
self.ident = ident
self.spritesheet = spritesheet
self.block_vision = block_vision
self.block_walk = block_walk
self.block_fly = block_fly
self.frame = frame
def get_data( self, view, x, y ):
"""Pre-generate display data for this tile- phase offset."""
return ( x + y ) % 2
def render( self, screen, dest, view, data ):
view.sprites[ self.spritesheet ].render( screen, dest, self.frame + ( view.phase // 5 + data ) % 2 )
class WallTerrain( SingTerrain ):
# A singleton terrain class; use these objects as tokens for maps.
def __init__( self, ident, spritesheet = SPRITE_WALL, block_vision = True, block_walk = True, block_fly = True ):
# ident should be the module-level name of this stat.
self.ident = ident
self.spritesheet = spritesheet
self.block_vision = block_vision
self.block_walk = block_walk
self.block_fly = block_fly
def prerender( self, screen, dest, view, data ):
if data[0] != None:
view.sprites[ SPRITE_BORDER ].render( screen, dest, data[0] )
def render( self, screen, dest, view, data ):
if data[1] != None:
view.sprites[ self.spritesheet ].render( screen, dest, data[1] )
def get_data( self, view, x, y ):
"""Pre-generate display data for this tile- border frame, wall frame."""
bor = view.calc_border_score( x, y )
if bor == -1:
bor = None
if bor == 14:
wal = None
else:
wal = view.calc_wall_score( x, y )
return (bor,wal)
class DoorTerrain( WallTerrain ):
# A singleton terrain class; use these objects as tokens for maps.
def __init__( self, ident, spritesheet = SPRITE_WALL, block_vision = True, block_walk = True, block_fly = True, frame=0 ):
# ident should be the module-level name of this stat.
self.ident = ident
self.spritesheet = spritesheet
self.block_vision = block_vision
self.block_walk = block_walk
self.block_fly = block_fly
self.frame = frame
def render( self, screen, dest, view, data ):
if data[1] != None:
view.sprites[ self.spritesheet ].render( screen, dest, self.frame + data[1] )
def get_data( self, view, x, y ):
"""Pre-generate display data for this tile- border frame, wall frame."""
bor = view.calc_border_score( x, y )
if bor == -1:
bor = None
if view.space_to_south( x, y ):
wal = 1
else:
wal = 0
return (bor,wal)
class OnTheWallTerrain( SingTerrain ):
def __init__( self, ident, spritesheet = SPRITE_DECOR, block_vision = False, block_walk = False, block_fly = False, frame = 0 ):
# ident should be the module-level name of this stat.
self.ident = ident
self.spritesheet = spritesheet
self.block_vision = block_vision
self.block_walk = block_walk
self.block_fly = block_fly
self.frame = frame
ATTACH_TO_WALL = True
def render( self, screen, dest, view, data ):
view.sprites[ self.spritesheet ].render( screen, dest, self.frame + data )
def get_data( self, view, x, y ):
"""Pre-generate display data for this tile- facing offset."""
if view.space_to_south( x, y ):
return 1
else:
return 0
class OnTheWallVariable( SingTerrain ):
def __init__( self, ident, spritesheet = SPRITE_DECOR, block_vision = False, block_walk = False, block_fly = False, frames = (0,1) ):
# ident should be the module-level name of this stat.
self.ident = ident
self.spritesheet = spritesheet
self.block_vision = block_vision
self.block_walk = block_walk
self.block_fly = block_fly
self.frames = frames
ATTACH_TO_WALL = True
def render( self, screen, dest, view, data ):
view.sprites[ self.spritesheet ].render( screen, dest, data )
def get_data( self, view, x, y ):
"""Pre-generate display data for this tile- facing offset."""
if view.space_to_south( x, y ):
return self.frames[ view.get_pseudo_random() % len( self.frames ) ] + 1
else:
return self.frames[ view.get_pseudo_random() % len( self.frames ) ]
class BedHeadTerrain( SingTerrain ):
def __init__( self, ident, spritesheet = SPRITE_INTERIOR, block_vision = False, block_walk = False, block_fly = False, frame = 0, partner=None ):
# ident should be the module-level name of this stat.
self.ident = ident
self.spritesheet = spritesheet
self.block_vision = block_vision
self.block_walk = block_walk
self.block_fly = block_fly
self.frame = frame
self.partner = partner
def render( self, screen, dest, view, data ):
view.sprites[ self.spritesheet ].render( screen, dest, self.frame + data )
def get_data( self, view, x, y ):
"""Pre-generate display data for this tile- facing offset."""
if view.scene.get_decor( x, y+1 ) == self.partner:
return 1
else:
return 0
class BedFootTerrain( SingTerrain ):
def __init__( self, ident, spritesheet = SPRITE_INTERIOR, block_vision = False, block_walk = False, block_fly = False, frame = 0, partner=None ):
# ident should be the module-level name of this stat.
self.ident = ident
self.spritesheet = spritesheet
self.block_vision = block_vision
self.block_walk = block_walk
self.block_fly = block_fly
self.frame = frame
self.partner = partner
def render( self, screen, dest, view, data ):
view.sprites[ self.spritesheet ].render( screen, dest, self.frame + data )
def get_data( self, view, x, y ):
"""Pre-generate display data for this tile- facing offset."""
if view.scene.get_decor( x, y-1 ) == self.partner:
return 1
else:
return 0
class CrowdTerrain( SingTerrain ):
def __init__( self, ident, spritesheet = SPRITE_GROUND, block_vision = False, block_walk = True, block_fly = True, inner_frames = (0,1), outer_frames=(2,3) ):
# ident should be the module-level name of this stat.
self.ident = ident
self.spritesheet = spritesheet
self.block_vision = block_vision
self.block_walk = block_walk
self.block_fly = block_fly
self.inner_frames = inner_frames
self.outer_frames = outer_frames
def render( self, screen, dest, view, data ):
view.sprites[ self.spritesheet ].render( screen, dest, data )
def get_data( self, view, x, y ):
"""Pre-generate display data for this tile- facing offset."""
if view.space_nearby( x, y ):
return self.outer_frames[ view.get_pseudo_random() % len(self.outer_frames) ]
else:
return self.inner_frames[ view.get_pseudo_random() % len(self.inner_frames) ]
# GROUND
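# Hedged illustration (the original file is truncated at this point): terrain
# singletons such as these are how the classes above are used as map tokens.
# The identifiers and frame numbers below are placeholders, not the game's
# real sprite indices.
GRASS = VariableTerrain("GRASS", spritesheet=SPRITE_GROUND, frames=(0, 1, 2))
WATER = WaterTerrain("WATER", spritesheet=SPRITE_GROUND, frame=10)
BASIC_WALL = WallTerrain("BASIC_WALL", spritesheet=SPRITE_WALL)
CLOSED_DOOR = DoorTerrain("CLOSED_DOOR", spritesheet=SPRITE_WALL, frame=4)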
= self._create_excel_writer_styles(excel_writer_styles)
def __str__(self):
return f'{self.__class__.__name__}(name={self.name})'
@classmethod
def _create_excel_writer_styles(cls, styles=None):
"""
        Sets the styles for the output Excel file.
Ensures that at least cls.excel_styles are included in the style dictionary.
Parameters
----------
styles : dict, optional
The input dictionary to override the default styles.
Returns
-------
format_kwargs : dict
The input styles dictionary with any missing keys from
DataSource.excel_styles added.
"""
format_kwargs = styles if styles is not None else {}
for key, value in cls.excel_styles.items():
if key not in format_kwargs:
format_kwargs[key] = value
return format_kwargs
def _validate_target_columns(self):
"""
Ensures that the target columns and function names for each Function are valid.
Raises
------
ValueError
Raises ValueError if the target column for a Function does not exist, or if
two Function objects have the same name. If the target column does not
exist, it can either be due to that target not existing, or that the
functions are calculated in the wrong order.
"""
target_error = ('"{target}" is not an available column for {function} to modify. '
'Check that {self} a) has the correct function order and/or b) has '
'the correct unique_variables specified.')
unique_keys = set(self.unique_variables)
for function in (self.preprocess_functions
+ self.calculation_functions
+ self.sample_summary_functions
+ self.dataset_summary_functions):
# ensure function names are unique
if function.name in unique_keys:
raise ValueError((
f'The name "{function.name}" is associated with two '
f'different objects for {self}, which is not allowed.'
))
# ensure targets exist
for target in function.target_columns:
if target not in unique_keys:
raise ValueError(target_error.format(target=target, self=self,
function=function))
# remove keys for columns removed by PreprocessFunction
if isinstance(function, PreprocessFunction):
for key in function.deleted_columns:
if key in unique_keys:
unique_keys.remove(key)
else:
raise ValueError(target_error.format(target=key, self=self,
function=function))
# ensure columns exist if function modifies columns
elif not isinstance(function.added_columns, int):
for target in function.added_columns:
if target not in unique_keys:
raise ValueError(target_error.format(target=target, self=self,
function=function))
# ensure summary functions either add columns or modify other summary columns
if (isinstance(function, SummaryFunction)
and not isinstance(function.added_columns, int)):
if function.sample_summary:
sum_funcs = [function.name for function in self.sample_summary_functions]
else:
sum_funcs = [function.name for function in self.dataset_summary_functions]
if any(column not in sum_funcs for column in function.added_columns):
raise ValueError((
f'Error with {function}. SummaryFunctions can only modify columns '
'for other SummaryFunctions with matching sample_summary attributes.'
))
unique_keys.add(function.name)
def _create_references(self, dataset, import_values):
"""Creates a dictionary to reference the column indices."""
references = [[] for sample in dataset]
for i, sample in enumerate(dataset):
for j, dataframe in enumerate(sample):
reference = {key: [value] for key, value in import_values[i][j].items()}
start_index = len(dataframe.columns)
for function in self.calculation_functions:
if isinstance(function.added_columns, int):
end_index = start_index + function.added_columns
reference[function.name] = list(range(start_index, end_index))
for num in range(start_index, end_index):
dataframe[num] = pd.Series(np.nan, dtype=np.float32)
start_index = end_index
else:
reference[function.name] = []
for target in function.added_columns:
reference[function.name].extend(reference[target])
references[i].append(reference)
return references
def _add_summary_dataframes(self, dataset, references):
"""Adds the dataframes for summary functions and creates references for them."""
if self.sample_summary_functions:
for reference in references:
reference.append({})
for i, sample in enumerate(dataset):
start_index = 0
data = {}
for function in self.sample_summary_functions:
if isinstance(function.added_columns, int):
end_index = start_index + function.added_columns
references[i][-1][function.name] = list(range(start_index, end_index))
for num in range(start_index, end_index):
data[num] = np.nan
start_index = end_index
else:
references[i][-1][function.name] = []
for target in function.added_columns:
references[i][-1][function.name].extend(
references[i][-1][target]
)
sample.append(pd.DataFrame(data, index=[0], dtype=np.float32))
if self.dataset_summary_functions:
references.append([{}])
start_index = 0
data = {}
for function in self.dataset_summary_functions:
if isinstance(function.added_columns, int):
end_index = start_index + function.added_columns
references[-1][-1][function.name] = list(
range(start_index, end_index)
)
for num in range(start_index, end_index):
data[num] = np.nan
start_index = end_index
else:
references[-1][-1][function.name] = []
for target in function.added_columns:
references[-1][-1][function.name].extend(
references[-1][-1][target]
)
dataset.append([pd.DataFrame(data, index=[0], dtype=np.float32)])
def _merge_references(self, dataframes, references):
"""Merges all the references for the merged dataframe."""
functions = (self.calculation_functions
+ self.sample_summary_functions
+ self.dataset_summary_functions)
merged_references = []
for i, dataset in enumerate(dataframes):
start_index = 0
merged_reference = {
key: [] for key in (self.unique_variables + [func.name for func in functions])
}
for j, sample in enumerate(dataset):
for key in merged_reference:
merged_reference[key].append([])
for k, entry in enumerate(sample):
for key, value in references[i][j][k].items():
merged_reference[key][j].extend([index + start_index for index in value])
start_index += len(entry.columns)
merged_references.append(merged_reference)
return merged_references
def _set_references(self, dataframes, import_values):
"""
Creates a dictionary to reference the column indices for calculations.
Also adds the necessary columns to the input dataframes for all calculations,
creates dataframes for the SummaryCalculations, and adds spacing between
samples and entries.
Assigns the merged references to the references attribute.
Parameters
----------
dataframes : list(list(list(pd.DataFrame)))
A list of lists of lists of dataframes.
import_values : list
A list of lists of dictionaries containing the values used to import the data
from files. The relevant keys are the DataSource's unique variables
"""
# create references, add summary dataframes, and add in empty spacing columns
references = []
for i, dataset in enumerate(dataframes):
reference = self._create_references(dataset, import_values[i])
self._add_summary_dataframes(dataset, reference)
references.append(reference)
# add entry spacings
for sample in dataset:
for k, entry in enumerate(sample):
if k < len(sample) - 1:
start_index = len(entry.columns)
for num in range(start_index, start_index + self.entry_separation):
entry[num] = pd.Series(np.nan, dtype=np.float16)
# add sample spacings
start_index = len(sample[-1].columns)
for num in range(start_index, start_index + self.sample_separation):
sample[-1][num] = pd.Series(np.nan, dtype=np.float16)
# merges the references into one for each dataset
self.references = self._merge_references(dataframes, references)
self._added_separators = True
def _do_preprocessing(self, dataframes, import_values):
"""
Performs the function for all PreprocessFunctions.
Parameters
----------
dataframes : list(list(list(pd.DataFrame)))
A list of lists of lists of dataframes.
import_values : list
A list of lists of dictionaries containing the values used to import the data
from files. The relevant keys are the DataSource's unique variables
Returns
-------
new_dataframes : list(list(list(pd.DataFrame)))
The list of lists of lists of dataframes, after performing the
preprocessing.
new_import_values : list
The list of lists of dictionaries containing the values used to
import the data from files, after performing the
preprocessing.
"""
new_dataframes = []
new_import_values = []
for i, dataset in enumerate(dataframes):
for function in self.preprocess_functions:
dataset, import_values[i] = function._preprocess_data(
dataset, import_values[i]
)
new_dataframes.append(dataset)
new_import_values.append(import_values[i])
self._remove_unneeded_variables()
return new_dataframes, new_import_values
def _remove_unneeded_variables(self):
"""Removes unique variables that are not needed for processing."""
for function in self.preprocess_functions:
for variable in function.deleted_columns:
if variable in self.unique_variables:
variable_index = self.unique_variables.index(variable)
column_index = self.unique_variable_indices[variable_index]
for i, col_num in enumerate(self.unique_variable_indices):
if col_num > column_index:
self.unique_variable_indices[i] -= 1
self.unique_variables.pop(variable_index)
self.unique_variable_indices.pop(variable_index)
self.column_numbers.pop(column_index)
self._deleted_columns = 0
def merge_datasets(self, dataframes):
"""
Merges all entries and samples into one dataframe for each dataset.
        Also sets the `lengths` attribute, which will later be used to split each
        merged dataframe back into the individual dataframes for each entry.
Parameters
----------
dataframes : list(list(list(pd.DataFrame)))
A nested list of list of lists of dataframes.
Returns
-------
merged_dataframes : list(pd.DataFrame)
A list of dataframes.
"""
merged_dataframes = []
# length of each individual entry for splitting later
lengths = [[[] for sample in dataset] for dataset in dataframes]
for i, dataset in enumerate(dataframes):
for j, sample in enumerate(dataset):
lengths[i][j] = [len(entry.columns) for entry in sample]
# merges all dataframes in the dataset using generators
dataset_dataframe = pd.concat(
(pd.concat((entry for entry in sample), axis=1) for sample in dataset),
axis=1
)
dataset_dataframe.columns = list(range(len(dataset_dataframe.columns)))
merged_dataframes.append(dataset_dataframe)
self.lengths = lengths
return merged_dataframes
def _do_functions(self, dataframes, index):
"""
Performs the function for all CalculationFunctions and SummaryFunctions.
Parameters
----------
dataframes : list(pd.DataFrame)
A list of dataframes, one per dataset.
index : int
If index is 0, will perform the Excel functions; if index is 1, will
perform the python functions.
Returns
-------
dataframes : list(pd.DataFrame)
The list of dataframes after processing.
Notes
-----
The start row is set to self.excel_row_offset + 3 since openpyxl is 1-based
and there are two header rows. The start column is also set to
self.excel_column_offset + 1 since openpyxl is 1-based.
All dataframes are overwritten for each processing step so that no copies
are made.
"""
functions = (self.calculation_functions + self.sample_summary_functions
+ self.dataset_summary_functions)
first_column = self.excel_column_offset + 1
for i, dataset in enumerate(dataframes):
if index == 1:
excel_columns = None
else:
excel_columns = [
utils.excel_column_name(num) for num in range(first_column, len(dataset.columns) + first_column)
]
for function in functions:
dataset = function._do_function(
                dataset, self.references[i], index,
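                # ---- Hedged sketch (illustrative, not part of the original class) ----
                # merge_datasets() stores self.lengths precisely so that a merged
                # dataset can later be split back into per-entry dataframes. A helper
                # doing that could look like the following (the name `split_dataset`
                # and the use of iloc column slicing are assumptions, not the
                # library's real API):
                #
                #   def split_dataset(self, merged, dataset_index):
                #       entries, start = [], 0
                #       for sample_lengths in self.lengths[dataset_index]:
                #           sample = []
                #           for length in sample_lengths:
                #               sample.append(merged.iloc[:, start:start + length])
                #               start += length
                #           entries.append(sample)
                #       return entries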
# Miscellaneous functions used to search collect lyrics for all Billboard
# Hot 100 number one songs from 1959 - 2011
# Lyric data scraped from lyricsfly.com where available
from django.conf import settings
from django.template import Template, Context
from gdutils.scrape import post, get
import BeautifulSoup as BS # next time, just use html5lib from the start =/
import html5lib as hl
import codecs
import datetime
import re
import sys
import time
import urllib, urllib2
UASTRING = "GreaterDebater Crawler - report abuse to <EMAIL>"
YEARS = range(1959, 2012)
MONTHS = ['January', 'February', 'March', 'April', 'May', 'June', 'July', \
'August', 'September', 'October', 'November', 'December']
SEARCH_URL = 'http://lyricsfly.com/search/search.php'
RESULT_URL = 'http://lyricsfly.com/search/'
ERR_TEMPLATE = 'errtemplate.xml'
ERR_FILE = '/home/gabe/python/selfishmusic/errors.xml'
DATA_PATH = '/home/gabe/data/music/'
def make_urls():
"""generate the wikipedia urls for the pages of lists of number one singles from 1959 - 2011"""
urlfile = open('/home/gabe/data/music/urls.txt', 'w')
urls = ["http://en.wikipedia.org/wiki/List_of_Hot_100_number-one_singles_of_%i_(U.S.)\n" % year for year in range(1959, 2012)]
for url in urls:
urlfile.write(url)
urlfile.close()
def get_text(cell):
""" get stripped text from a BeautifulSoup td object"""
return ''.join([x.strip() + ' ' for x in cell.findAll(text=True)]).strip()
def read_wiki():
"""read the saved wikipedia pages to get song titles, artist, and first date they hit #1"""
base_path = '/home/gabe/data/music/'
songdata = []
for year in range(1959, 2012):
filename = "List_of_Hot_100_number-one_singles_of_%i_(U.S.)" % year
f = open(base_path + filename)
soup = BS.BeautifulSoup(f.read())
if year < 2010:
table = soup.table
else:
table = soup.find('table', {'class': 'wikitable'})
# remove superscripts
ss = table.findAll('sup')
[s.extract() for s in ss]
for row in table.findAll('tr'):
song = {}
cells = row.findAll('td')
try:
txt = get_text(cells[0])
if txt.find("Issue") != -1:
continue
song['date'] = get_text(cells[0]) + ', ' + str(year)
song['title'] = get_text(cells[1])
song['artist'] = get_text(cells[2])
songdata.append(song)
except IndexError:
pass
# remove duplicates
checked = {}
for song in songdata:
date = [x.strip(' ,').encode('ascii', 'ignore') for x in song['date'].split(' ')]
song['date'] = datetime.date(month=MONTHS.index(date[0]) + 1,
day=int(date[1]),
year=int(date[2]))
song['title'] = song['title'].strip('" ')
checked[(song['artist'], song['title'])] = song
songs = checked.values()
songs.sort(key=lambda x: x['date'])
return songs
def save_songs(songs, template="songtemplate.xml", ofile="songs.xml"):
""" save songs to xml file """
try:
settings.configure(DEBUG=True,
TEMPLATE_DEBUG=True,
TEMPLATE_DIRS='/home/gabe/python/selfishmusic/templates/',
DATE_FORMAT='F d, Y')
except RuntimeError:
# running in interpreter and have already loaded settings
pass
song_t_file = open(template)
song_t = Template(song_t_file.read())
song_c = Context({'songs': songs})
# outfile = open(DATA_PATH + ofile, 'w')
outfile = codecs.open(DATA_PATH + ofile,
encoding='utf-8', mode='w')
outfile.write(song_t.render(song_c))
outfile.close()
print "Wrote %i songs to file %s" % (len(songs), ofile)
def read_songs(filename='songs.xml'):
""" read song data from xml file to a list of dictionaries """
songfile = open(DATA_PATH + filename)
soup = BS.BeautifulSoup(songfile.read(), convertEntities=BS.BeautifulSoup.ALL_ENTITIES)
songsxml = soup.findAll('song')
songs = []
for song in songsxml:
sd = {}
sd['title'] = get_text(song.title)
sd['artist'] = get_text(song.artist)
date = get_text(song.date)
date = [x.strip(' ,') for x in date.split(' ')]
sd['date'] = datetime.date(month=MONTHS.index(date[0]) + 1,
day=int(date[1]),
year=int(date[2]))
sd['lyrics'] = get_text(song.lyrics)
sd['found_title'] = get_text(song.found_title)
sd['found_artist'] = get_text(song.found_artist)
try:
sd['mr'] = float(get_text(song.mr))
except:
pass
songs.append(sd)
songfile.close()
return songs
def search_songs(search, songs, field):
results = []
for song in songs:
if search.lower() in song[field].lower():
results.append(song)
for index, song in enumerate(results):
print "%i: %s by %s" % (index, song['title'], song['artist'])
return results
def read_err_songs():
""" read song data from xml file to a list of dictionaries """
songfile = open('/home/gabe/python/selfishmusic/errors.xml')
soup = BS.BeautifulSoup(songfile.read())
songsxml = soup.findAll('song')
songs = []
for song in songsxml:
sd = {}
sd['songnum'] = int(get_text(song.songnum))
sd['title'] = get_text(song.title)
sd['artist'] = get_text(song.artist)
date = get_text(song.date)
date = [x.strip(' ,') for x in date.split(' ')]
sd['date'] = datetime.date(month=MONTHS.index(date[0]) + 1,
day=int(date[1]),
year=int(date[2]))
sd['lyrics'] = get_text(song.lyrics)
sd['found_title'] = get_text(song.found_title)
sd['found_artist'] = get_text(song.found_artist)
songs.append(sd)
songfile.close()
return songs
def save_err_songs(indexes, songs):
allbad = [songs[i] for i in indexes]
save_songs(allbad, template=ERR_TEMPLATE, ofile=ERR_FILE)
def get_search_results(song, options=1, sort=3, aside=False):
""" retrive the list of rows from the search results table for a given song lyrics"""
# search lyricsfly by song title
title = song['title']
if aside and ('/' in title):
# if aside is true the title is both a and b sides of a single
# split on the slash to search for the a-side only
title = title.split('/')[0].strip()
postdata = {'keywords': title.encode('ascii', errors='ignore'), 'options':options, 'sort':sort}
def search(url):
response = post(url, urllib.urlencode(postdata))
try:
soup = BS.BeautifulSoup(response, convertEntities=BS.BeautifulSoup.HTML_ENTITIES)
except TypeError:
soup = hl.parse(response, treebuilder='beautifulsoup')
cell = soup.find('td', {'class': 'list'})
if not cell:
return -1, -1
results_table = cell.parent.parent
rows = results_table.findAll('tr')
return rows, soup
rows, soup = search(SEARCH_URL)
if rows == -1: return -1
# check for a second page of results
# This should be more general, for n pages, but I think lyricsfly
# only ever returns 2 pages at most.
page2 = soup.find('form', {'name': 'search2'})
url2 = SEARCH_URL + '?page=2'
if page2:
# wait 1.1 seconds before requesting next page
time.sleep(1.1)
rows2, soup = search(url2)
if rows2 == -1:
return rows
rows.extend(rows2)
return rows
def get_lyrics(song, rows):
"""find the best match for title and artist among a list of rows from the search results table
then retrieve the lyrics from the url in that row"""
best_artist = 1
best_title = 1
best_index = -1
song_artist_words = song['artist'].split(' ')
song_title_words = song['title'].split(' ')
# titles and artists may not match up exactly
# pick the result that has the most words in common for both
# the artist and the title
for i, row in enumerate(rows):
cells = row.findAll('td')
row_artist_words = get_text(cells[0])
row_title_words = get_text(cells[1])
artist_count = 0
title_count = 0
for word in song_artist_words:
if word in row_artist_words:
artist_count += 1
for word in song_title_words:
if word in row_title_words:
title_count += 1
if artist_count >= best_artist and title_count >= best_title:
best_index = i
best_artist = artist_count
best_title = title_count
if best_index == -1:
return best_index, -1, -1
lyrics_url = RESULT_URL + rows[best_index].findAll('td')[1].a['href']
print lyrics_url
found_title = get_text(rows[best_index].findAll('td')[1])
found_artist = get_text(rows[best_index].findAll('td')[0])
lyrics_page = get(lyrics_url)
try:
soup = BS.BeautifulSoup(lyrics_page, convertEntities=BS.BeautifulSoup.HTML_ENTITIES)
except TypeError:
soup = hl.parse(lyrics_page, treebuilder='beautifulsoup')
span = soup.findAll('span', {'class':'adbriteinline'})[0]
# remove linebreaks
br = soup.findAll('br')
[b.extract() for b in br]
try:
lyrics = ''.join(span.contents)
except TypeError:
lyrics = ''.join(soup.p.contents)
if not lyrics:
lyrics = ''.join(soup.p.contents)
lyrics = lyrics.replace(u"\ufffd", '')
return lyrics, found_title, found_artist
def get_all_lyrics(songs):
"""loop through all songs and search lyricsfly for lyrics"""
for songnum, song in enumerate(songs):
print "Song %i getting lyrics for %s by %s" % (songnum, song['title'], song['artist'])
try:
rows = get_search_results(song)
if rows == -1:
print "\tSong %i No results for %s by %s" % (songnum, song['title'], song['artist'])
song['lyrics'] = 'Not found'
continue
lyrics, found_title, found_artist = get_lyrics(song, rows)
time.sleep(1.1)
if lyrics == -1:
print "\t Song %i No match for %s by %s" % (songnum, song['title'], song['artist'])
song['lyrics'] = "No match"
continue
song['lyrics'] = lyrics
song['found_artist'] = found_artist
song['found_title'] = found_title
print "\tFound %s by %s -- saving lyrics" % (found_title, found_artist)
time.sleep(1.1)
except:
print "\t Song %i Error fetching lyrics for %s by %s - skipping" % (songnum, song['title'], song['artist'])
song['lyrics'] = "Error"
return songs
def find_errors(songs):
notfound = []
nomatch = []
err = []
empty = []
for index, song in enumerate(songs):
song['songnum'] = index
if song['lyrics'] == 'Not found':
notfound.append(index)
elif song['lyrics'] == 'No match':
nomatch.append(index)
elif song['lyrics'] == 'Error':
err.append(index)
elif song['lyrics'] == '':
empty.append(index)
return notfound, nomatch, err, empty
def fix_empties(empties, songs, aside=False):
"""retry search for lyrics to songs where none were found or there was an error"""
for e in empties:
songnum = e
print "Song %i getting lyrics for %s by %s" % (songnum, songs[e]['title'], songs[e]['artist'])
if aside and (not '/' in songs[e]['title']): continue
try:
rows = get_search_results(songs[e], aside=aside)
if rows == -1:
print "\tSong %i No results for %s by %s" % (songnum, title, songs[e]['artist'])
songs[e]['lyrics'] = 'Not found'
continue
lyrics, found_title, found_artist = get_lyrics(songs[e], rows)
time.sleep(1.1)
if lyrics == -1:
print "\t Song %i No match for %s by %s" % (songnum, songs[e]['title'], songs[e]['artist'])
songs[e]['lyrics'] = "No match"
continue
songs[e]['lyrics'] = lyrics
songs[e]['found_artist'] = found_artist
songs[e]['found_title'] = found_title
print "\tFound %s by %s -- saving lyrics" % (found_title, found_artist)
time.sleep(1.1)
except:
print "\t Song %pi Error fetching lyrics for %s by %s - skipping" % (songnum, songs[e]['title'], songs[e]['artist'])
            songs[e]['lyrics'] = "Error"
"""
Cadastre - Dialog classes
This plugin helps users to import the French land registry ('cadastre')
into a database. It is meant to ease the use of the data in QGIS
by providing search tools and appropriate layer symbology.
begin : 2013-06-11
copyright : (C) 2013,2019 by 3liz
email : <EMAIL>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
import os
import re
import sys
from pathlib import Path
from typing import Any, Dict, List, Union
from db_manager.db_plugins.plugin import BaseError, ConnectionError
from db_manager.db_plugins.postgis.connector import (
DBConnector,
PostGisDBConnector,
)
from qgis.core import (
Qgis,
QgsDataSourceUri,
QgsMapLayer,
QgsMessageLog,
QgsProject,
)
from qgis.PyQt.QtCore import QObject
def hasSpatialiteSupport() -> bool:
"""
    Check whether SpatiaLite support is available.
"""
try:
from db_manager.db_plugins.spatialite.connector import ( # NOQA
SpatiaLiteDBConnector,
)
return True
except ImportError:
return False
pass
def openFile(filename: str) -> None:
"""
Opens a file with default system app
"""
import subprocess
if sys.platform == "win32":
os.startfile(filename)
else:
opener = "open" if sys.platform == "darwin" else "xdg-open"
subprocess.call([opener, filename])
def getLayerFromLegendByTableProps(project: QgsProject, tableName: str, geomCol: str = 'geom',
sql: str = '') -> QgsMapLayer:
"""
Get the layer from QGIS legend
corresponding to a database
table name (postgis or sqlite)
"""
import re
layer = None
lr = project
for lid, l in list(lr.mapLayers().items()):
if not hasattr(l, 'providerType'):
continue
if hasattr(l, 'type') and l.type() != QgsMapLayer.VectorLayer:
# Ignore this layer as it's not a vector
# QgsMapLayer.VectorLayer is an equivalent to QgsMapLayerType.VectorLayer since 3.8
continue
if not l.providerType() in ('postgres', 'spatialite'):
# Ignore this layer as it's not a postgres or spatialite vector layer
continue
connectionParams = getConnectionParameterFromDbLayer(l)
reg = r'(\.| )?(%s)' % tableName
if connectionParams and \
( \
connectionParams['table'] == tableName or \
(re.findall(reg, '%s' % connectionParams['table']) and
re.findall(reg, '%s' % connectionParams['table'])[0]) \
) and \
connectionParams['geocol'] == geomCol:
# and connectionParams['sql'] == sql:
return l
return layer
def getConnectionParameterFromDbLayer(layer: QgsMapLayer) -> Dict[str, str]:
"""
Get connection parameters
from the layer datasource
"""
connectionParams = None
if layer.providerType() == 'postgres':
dbType = 'postgis'
else:
dbType = 'spatialite'
src = layer.source()
uri = QgsDataSourceUri(src)
# TODO Use immutable namedtuple
connectionParams = {
'service': uri.service(),
'dbname': uri.database(),
'host': uri.host(),
'port': uri.port(),
'user': uri.username(),
        'password': uri.password(),
'sslmode': uri.sslMode(),
'key': uri.keyColumn(),
'estimatedmetadata': str(uri.useEstimatedMetadata()),
'checkPrimaryKeyUnicity': '',
'srid': uri.srid(),
'type': uri.wkbType(),
'schema': uri.schema(),
'table': uri.table(),
'geocol': uri.geometryColumn(),
'sql': uri.sql(),
'dbType': dbType
}
return connectionParams
def setSearchPath(sql: str, schema: str) -> str:
"""
Set the search_path parameters if postgis database
"""
prefix = 'SET search_path = "%s", public, pg_catalog;' % schema
if re.search('^BEGIN;', sql):
sql = sql.replace('BEGIN;', 'BEGIN;%s' % prefix)
else:
sql = prefix + sql
return sql
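# Illustrative sketch only (hypothetical schema and statements): what the
# helper above produces for a plain statement and for one wrapped in BEGIN.
# >>> setSearchPath('SELECT * FROM parcelle;', 'cadastre')
# 'SET search_path = "cadastre", public, pg_catalog;SELECT * FROM parcelle;'
# >>> setSearchPath('BEGIN;DROP TABLE tmp;COMMIT;', 'cadastre')
# 'BEGIN;SET search_path = "cadastre", public, pg_catalog;DROP TABLE tmp;COMMIT;'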
def fetchDataFromSqlQuery(connector: 'DBConnector',
sql: str, schema: str = None) -> List[Any]:
"""
Execute a SQL query and
return [header, data, rowCount]
NB: commit qgis/QGIS@14ab5eb changes QGIS DBmanager behaviour
"""
# print(sql)
data = []
header = []
rowCount = 0
c = None
ok = True
# print "run query"
try:
c = connector._execute(None, str(sql))
data = []
header = connector._get_cursor_columns(c)
if header is None:
header = []
if len(header) > 0:
data = connector._fetchall(c)
rowCount = len(data)
except BaseError as e:
ok = False
QgsMessageLog.logMessage(
"Error while fetching data from database : {}".format(str(e.msg)),
"cadastre",
Qgis.Critical
)
QgsMessageLog.logMessage(sql, "cadastre", Qgis.Info)
finally:
if c:
# print "close connection"
c.close()
del c
# TODO: Return tuple
return [header, data, rowCount, ok]
def getConnectorFromUri(connectionParams: Dict[str, str]) -> 'DBConnector':
"""
Set connector property
for the given database type
and parameters
"""
connector = None
uri = QgsDataSourceUri()
if connectionParams['dbType'] == 'postgis':
if connectionParams['host']:
uri.setConnection(
connectionParams['host'],
connectionParams['port'],
connectionParams['dbname'],
connectionParams['user'],
connectionParams['password']
)
if connectionParams['service']:
uri.setConnection(
connectionParams['service'],
connectionParams['dbname'],
connectionParams['user'],
connectionParams['password']
)
if Qgis.QGIS_VERSION_INT >= 31200:
# we need a fake DBPlugin object
# with connectionName and providerName methods
obj = QObject()
obj.connectionName = lambda: 'fake'
obj.providerName = lambda: 'postgres'
connector = PostGisDBConnector(uri, obj)
else:
connector = PostGisDBConnector(uri)
if connectionParams['dbType'] == 'spatialite':
uri.setConnection('', '', connectionParams['dbname'], '', '')
if hasSpatialiteSupport():
from db_manager.db_plugins.spatialite.connector import (
SpatiaLiteDBConnector,
)
            # There is an obvious bug here if there is no spatialite support: what about SpatiaLiteDBConnector?
try:
connector = SpatiaLiteDBConnector(uri)
except ConnectionError as e:
QgsMessageLog.logMessage(
"Erreur lors de la récupération du fichier SQLite : {}".format(str(e)),
'cadastre',
Qgis.Critical)
return connector
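# The following function is a hedged usage sketch added for illustration; it is
# not part of the original plugin. It shows how the helpers above can be chained
# (layer -> connection parameters -> connector -> query); the function name and
# the SQL text are hypothetical.
def exampleQueryLayerDatabase(layer: QgsMapLayer) -> List[Any]:
    params = getConnectionParameterFromDbLayer(layer)   # datasource -> dict
    connector = getConnectorFromUri(params)             # dict -> DBConnector
    if connector is None:
        # e.g. no spatialite support available
        return []
    sql = 'SELECT count(*) FROM geo_commune'            # hypothetical table name
    if params['dbType'] == 'postgis' and params['schema']:
        sql = setSearchPath(sql, params['schema'])
    header, data, rowCount, ok = fetchDataFromSqlQuery(connector, sql)
    return data if ok else []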
def postgisToSpatialite(sql: str, targetSrid: str = '2154') -> str:
"""
Convert postgis SQL statement
into spatialite compatible
statements
"""
# delete some incompatible options
# replace other by spatialite syntax
replaceDict = [
# delete
{'in': r'with\(oids=.+\)', 'out': ''},
{'in': r'comment on [^;]+;', 'out': ''},
{'in': r'alter table ([^;]+) add primary key( )+\(([^;]+)\);',
'out': r'create index idx_\1_\3 on \1 (\3);'},
{'in': r'alter table ([^;]+) add constraint [^;]+ primary key( )+\(([^;]+)\);',
'out': r'create index idx_\1_\3 on \1 (\3);'},
{'in': r'alter table [^;]+drop column[^;]+;', 'out': ''},
{'in': r'alter table [^;]+drop constraint[^;]+;', 'out': ''},
# ~ {'in': r'^analyse [^;]+;', 'out': ''},
# replace
{'in': r'truncate (bati|fanr|lloc|nbat|pdll|prop)',
'out': r'drop table \1;create table \1 (tmp text)'},
{'in': r'truncate ', 'out': 'delete from '},
{'in': r'distinct on *\([a-z, ]+\)', 'out': 'distinct'},
{'in': r'serial', 'out': 'INTEGER PRIMARY KEY AUTOINCREMENT'},
{'in': r'string_agg', 'out': 'group_concat'},
{'in': r'current_schema::text, ', 'out': ''},
{'in': r'substring', 'out': 'SUBSTR'},
{'in': r"(to_char\()([^']+) *, *'[09]+' *\)", 'out': r"CAST(\2 AS TEXT)"},
{'in': r"(to_number\()([^']+) *, *'[09]+' *\)", 'out': r"CAST(\2 AS float)"},
{'in': r"(to_date\()([^']+) *, *'DDMMYYYY' *\)",
'out': r"date(substr(\2, 5, 4) || '-' || substr(\2, 3, 2) || '-' || substr(\2, 1, 2))"},
{'in': r"(to_date\()([^']+) *, *'DD/MM/YYYY' *\)",
'out': r"date(substr(\2, 7, 4) || '-' || substr(\2, 4, 2) || '-' || substr(\2, 1, 2))"},
{'in': r"(to_date\()([^']+) *, *'YYYYMMDD' *\)",
'out': r"date(substr(\2, 1, 4) || '-' || substr(\2, 5, 2) || '-' || substr(\2, 7, 2))"},
{'in': r"(to_char\()([^']+) *, *'dd/mm/YYYY' *\)",
'out': r"strftime('%d/%m/%Y', \2)"},
{'in': r"ST_MakeValid\(geom\)",
'out': r"CASE WHEN ST_IsValid(geom) THEN geom ELSE ST_Buffer(geom,0) END"},
{'in': r"ST_MakeValid\(p\.geom\)",
'out': r"CASE WHEN ST_IsValid(p.geom) THEN p.geom ELSE ST_Buffer(p.geom,0) END"},
{'in': r' ~ ', 'out': ' regexp '}
]
for a in replaceDict:
r = re.compile(a['in'], re.IGNORECASE | re.MULTILINE)
sql = r.sub(a['out'], sql)
# index spatiaux
r = re.compile(r'(create index [^;]+ ON )([^;]+)( USING +)(gist +)?\(([^;]+)\);', re.IGNORECASE | re.MULTILINE)
sql = r.sub(r"SELECT createSpatialIndex('\2', '\5');", sql)
# replace postgresql "update from" statement
r = re.compile(r'(update [^;=]+)(=)([^;=]+ FROM [^;]+)(;)', re.IGNORECASE | re.MULTILINE)
sql = r.sub(r'\1=(SELECT \3);', sql)
return sql
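# Illustrative sketch only (hypothetical statements): a couple of the rewrites
# performed by postgisToSpatialite() above.
# >>> postgisToSpatialite("TRUNCATE geo_label;")
# 'delete from geo_label;'
# >>> postgisToSpatialite("SELECT to_char(dnupla, '0000') FROM parcelle;")
# 'SELECT CAST(dnupla AS TEXT) FROM parcelle;'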
def postgisToSpatialiteLocal10(sql: str, dataYear: str) -> str:
# majic formatage : replace multiple column update for loca10
r = re.compile(r'update local10 set[^;]+;', re.IGNORECASE | re.MULTILINE)
res = r.findall(sql)
replaceBy = ''
for statement in res:
replaceBy = '''
DROP TABLE IF EXISTS ll;
CREATE TABLE ll AS
SELECT DISTINCT l.invar, l.ccopre , l.ccosec, l.dnupla, l.ccoriv, l.ccovoi, l.dnvoiri,
l10.ccodep || l10.ccodir || l10.invar AS local00,
REPLACE(l10.ccodep || l10.ccodir || l10.ccocom || l.ccopre || l.ccosec || l.dnupla,' ', '0') AS parcelle,
REPLACE(l10.ccodep || l10.ccodir || l10.ccocom || l.ccovoi,' ', '0') AS voie
FROM local00 l
INNER JOIN local10 AS l10 ON l.invar = l10.invar AND l.annee = l10.annee
WHERE l10.annee='?';
CREATE INDEX idx_ll_invar ON ll (invar);
UPDATE local10 SET ccopre = (SELECT DISTINCT ll.ccopre FROM ll WHERE ll.invar = local10.invar)
WHERE local10.annee = '?';
UPDATE local10 SET ccosec = (SELECT DISTINCT ll.ccosec FROM ll WHERE ll.invar = local10.invar)
WHERE local10.annee = '?';
UPDATE local10 SET dnupla = (SELECT DISTINCT ll.dnupla FROM ll WHERE ll.invar = local10.invar)
WHERE local10.annee = '?';
UPDATE local10 SET ccoriv = (SELECT DISTINCT ll.ccoriv FROM ll WHERE ll.invar = local10.invar)
WHERE local10.annee = '?';
UPDATE local10 SET ccovoi = (SELECT DISTINCT ll.ccovoi FROM ll WHERE ll.invar = local10.invar)
WHERE local10.annee = '?';
UPDATE local10 SET dnvoiri = (SELECT DISTINCT ll.dnvoiri FROM ll WHERE ll.invar = local10.invar)
WHERE local10.annee = '?';
UPDATE local10 SET local00 = (SELECT DISTINCT ll.local00 FROM ll WHERE ll.invar = local10.invar)
WHERE local10.annee = '?';
UPDATE local10 SET parcelle = (SELECT DISTINCT ll.parcelle FROM ll WHERE ll.invar = local10.invar)
WHERE local10.annee = '?';
        UPDATE local10 SET voie = (SELECT DISTINCT ll.voie FROM ll WHERE ll.invar = local10.invar)
        WHERE local10.annee = '?';
Returns: true.
"""
pass
def IsControlElementCore(self, *args): #cannot find CLR method
"""
IsControlElementCore(self: UIElementAutomationPeer) -> bool
Gets or sets a value that indicates whether the System.Windows.UIElement that
is associated with this System.Windows.Automation.Peers.UIElementAutomationPeer
is understood by the end user as interactive. Optionally, the user might
understand the System.Windows.UIElement as contributing to the logical
structure of the control in the GUI. This method is called by
System.Windows.Automation.Peers.AutomationPeer.IsControlElement.
Returns: true.
"""
pass
def IsEnabledCore(self, *args): #cannot find CLR method
"""
IsEnabledCore(self: UIElementAutomationPeer) -> bool
Gets a value that indicates whether the System.Windows.UIElement that is
associated with this System.Windows.Automation.Peers.UIElementAutomationPeer
        is enabled. This method is called by
        System.Windows.Automation.Peers.AutomationPeer.IsEnabled.
Returns: A boolean that contains the value of System.Windows.UIElement.IsEnabled.
"""
pass
def IsKeyboardFocusableCore(self, *args): #cannot find CLR method
"""
IsKeyboardFocusableCore(self: UIElementAutomationPeer) -> bool
Gets a value that indicates whether the System.Windows.UIElement that is
associated with this System.Windows.Automation.Peers.UIElementAutomationPeer
can accept keyboard focus. This method is called by
System.Windows.Automation.Peers.AutomationPeer.IsKeyboardFocusable.
Returns: true if the element is focusable by the keyboard; otherwise false.
"""
pass
def IsOffscreenCore(self, *args): #cannot find CLR method
"""
IsOffscreenCore(self: UIElementAutomationPeer) -> bool
Gets a value that indicates whether the System.Windows.UIElement that is
associated with this System.Windows.Automation.Peers.UIElementAutomationPeer is
off the screen. This method is called by
System.Windows.Automation.Peers.AutomationPeer.IsOffscreen.
Returns: true if the element is not on the screen; otherwise, false.
"""
pass
def IsPasswordCore(self, *args): #cannot find CLR method
"""
IsPasswordCore(self: UIElementAutomationPeer) -> bool
Gets a value that indicates whether the System.Windows.UIElement that is
associated with this System.Windows.Automation.Peers.UIElementAutomationPeer
contains protected content. This method is called by
System.Windows.Automation.Peers.AutomationPeer.IsPassword.
Returns: false.
"""
pass
def IsRequiredForFormCore(self, *args): #cannot find CLR method
"""
IsRequiredForFormCore(self: UIElementAutomationPeer) -> bool
Gets a value that indicates whether the System.Windows.UIElement that is
associated with this System.Windows.Automation.Peers.UIElementAutomationPeer is
required to be completed on a form. This method is called by
System.Windows.Automation.Peers.AutomationPeer.IsRequiredForForm.
Returns: A boolean that contains the value that is returned by
System.Windows.Automation.AutomationProperties.GetIsRequiredForForm(System.Windo
ws.DependencyObject), if it's set; otherwise false.
"""
pass
def PeerFromProvider(self, *args): #cannot find CLR method
"""
PeerFromProvider(self: AutomationPeer, provider: IRawElementProviderSimple) -> AutomationPeer
Gets an System.Windows.Automation.Peers.AutomationPeer for the specified
System.Windows.Automation.Provider.IRawElementProviderSimple proxy.
provider: The class that implements
System.Windows.Automation.Provider.IRawElementProviderSimple.
Returns: The System.Windows.Automation.Peers.AutomationPeer.
"""
pass
def ProviderFromPeer(self, *args): #cannot find CLR method
"""
ProviderFromPeer(self: AutomationPeer, peer: AutomationPeer) -> IRawElementProviderSimple
Gets the System.Windows.Automation.Provider.IRawElementProviderSimple for the
specified System.Windows.Automation.Peers.AutomationPeer.
peer: The automation peer.
Returns: The proxy.
"""
pass
def SetFocusCore(self, *args): #cannot find CLR method
"""
SetFocusCore(self: UIElementAutomationPeer)
Sets the keyboard input focus on the System.Windows.UIElement that is
associated with this System.Windows.Automation.Peers.UIElementAutomationPeer.
This method is called by
System.Windows.Automation.Peers.AutomationPeer.SetFocus.
"""
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod # known case of __new__
def __new__(self, owner):
""" __new__(cls: type, owner: TabControl) """
pass
IsHwndHost = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets a value that indicates whether the element that is associated with this System.Windows.Automation.Peers.AutomationPeer hosts hwnds in Windows Presentation Foundation (WPF).
"""
IsVirtualized = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
class TabItemAutomationPeer(SelectorItemAutomationPeer, IVirtualizedItemProvider, ISelectionItemProvider):
"""
Exposes System.Windows.Controls.TabItem types to UI Automation.
TabItemAutomationPeer(owner: object, tabControlAutomationPeer: TabControlAutomationPeer)
"""
def GetAcceleratorKeyCore(self, *args): #cannot find CLR method
"""
GetAcceleratorKeyCore(self: ItemAutomationPeer) -> str
Gets the accelerator key for the System.Windows.UIElement that corresponds to
the data item in the System.Windows.Controls.ItemsControl.Items collection that
is associated with this System.Windows.Automation.Peers.ItemAutomationPeer.
Returns: The accelerator key.
"""
pass
def GetAccessKeyCore(self, *args): #cannot find CLR method
"""
GetAccessKeyCore(self: ItemAutomationPeer) -> str
Gets the access key for the System.Windows.UIElement that corresponds to the
data item in the System.Windows.Controls.ItemsControl.Items collection that is
associated with this System.Windows.Automation.Peers.ItemAutomationPeer.
Returns: The access key.
"""
pass
def GetAutomationControlTypeCore(self, *args): #cannot find CLR method
"""
GetAutomationControlTypeCore(self: TabItemAutomationPeer) -> AutomationControlType
Gets the control type for the System.Windows.Controls.TabItem that is
associated with this System.Windows.Automation.Peers.TabItemAutomationPeer.
Called by
System.Windows.Automation.Peers.AutomationPeer.GetAutomationControlType.
Returns: System.Windows.Automation.Peers.AutomationControlType.TabItem
"""
pass
def GetAutomationIdCore(self, *args): #cannot find CLR method
"""
GetAutomationIdCore(self: ItemAutomationPeer) -> str
Gets the string that uniquely identifies the System.Windows.UIElement that
corresponds to the data item in the System.Windows.Controls.ItemsControl.Items
collection that is associated with this
System.Windows.Automation.Peers.ItemAutomationPeer.
        Returns: A string that contains the UI Automation identifier.
"""
pass
def GetBoundingRectangleCore(self, *args): #cannot find CLR method
"""
GetBoundingRectangleCore(self: ItemAutomationPeer) -> Rect
Gets the System.Windows.Rect that represents the bounding rectangle of the
specified System.Windows.UIElement.
Returns: The bounding rectangle.
"""
pass
def GetChildrenCore(self, *args): #cannot find CLR method
"""
GetChildrenCore(self: TabItemAutomationPeer) -> List[AutomationPeer]
Gets the collection of child elements of the System.Windows.Controls.TabItem
that is associated with this
System.Windows.Automation.Peers.TabItemAutomationPeer. Called by
System.Windows.Automation.Peers.AutomationPeer.GetChildren.
Returns: The collection of child elements.
"""
pass
def GetClassNameCore(self, *args): #cannot find CLR method
"""
GetClassNameCore(self: TabItemAutomationPeer) -> str
Gets the name of the System.Windows.Controls.TabItem that is associated with
this System.Windows.Automation.Peers.TabItemAutomationPeer. Called by
System.Windows.Automation.Peers.AutomationPeer.GetClassName.
Returns: A string that contains "TabItem".
"""
pass
def GetClickablePointCore(self, *args): #cannot find CLR method
"""
GetClickablePointCore(self: ItemAutomationPeer) -> Point
Gets a System.Windows.Point that represents the clickable space that is on the
specified System.Windows.UIElement.
Returns: The point that represents the clickable space that is on the specified
System.Windows.UIElement.
"""
pass
def GetHelpTextCore(self, *args): #cannot find CLR method
"""
GetHelpTextCore(self: ItemAutomationPeer) -> str
Gets the string that describes the functionality of the
System.Windows.UIElement that corresponds to the data item in the
System.Windows.Controls.ItemsControl.Items collection that is associated with
this System.Windows.Automation.Peers.ItemAutomationPeer.
Returns: The help text.
"""
pass
def GetHostRawElementProviderCore(self, *args): #cannot find CLR method
"""
GetHostRawElementProviderCore(self: AutomationPeer) -> HostedWindowWrapper
Tells UI Automation where in the UI Automation tree to place the hwnd being
hosted by a Windows Presentation Foundation (WPF) element.
Returns: This method returns the hosted hwnd to UI Automation for controls that host
hwnd objects.
"""
pass
def GetItemStatusCore(self, *args): #cannot find CLR method
"""
GetItemStatusCore(self: ItemAutomationPeer) -> str
Gets a string that conveys the visual status of the specified
System.Windows.UIElement.
Returns: The status. Examples include "Busy" or "Online".
"""
pass
def GetItemTypeCore(self, *args): #cannot find CLR method
"""
GetItemTypeCore(self: ItemAutomationPeer) -> str
Gets a human-readable string that contains the type of item that the specified
System.Windows.UIElement represents.
Returns: The item type. An example includes "Mail Message" or "Contact".
"""
pass
def GetLabeledByCore(self, *args): #cannot find CLR method
"""
GetLabeledByCore(self: ItemAutomationPeer) -> AutomationPeer
Gets the System.Windows.Automation.Peers.AutomationPeer for the
System.Windows.Controls.Label that is targeted to the specified
System.Windows.UIElement.
Returns: The System.Windows.Automation.Peers.LabelAutomationPeer for the element that is
targeted by the System.Windows.Controls.Label.
"""
pass
def GetLocalizedControlTypeCore(self, *args): #cannot find CLR method
"""
GetLocalizedControlTypeCore(self: AutomationPeer) -> str
When overridden in a derived class, is called by
System.Windows.Automation.Peers.AutomationPeer.GetLocalizedControlType.
Returns: The type of the control.
"""
pass
def GetNameCore(self, *args): #cannot find CLR method
"""
GetNameCore(self: TabItemAutomationPeer) -> str
Gets the text label of the System.Windows.Controls.TabItem that is associated
with this System.Windows.Automation.Peers.TabItemAutomationPeer. Called by
System.Windows.Automation.Peers.AutomationPeer.GetName.
Returns: The string that contains the label. If set, this method returns the value of
the System.Windows.Automation.AutomationProperties.Name property; otherwise
this method will return the value of the
System.Windows.Controls.HeaderedContentControl.Header property.
"""
pass
def GetOrientationCore(self, *args): #cannot find CLR method
"""
GetOrientationCore(self: ItemAutomationPeer) -> AutomationOrientation
Gets a value that indicates whether the specified System.Windows.UIElement is
laid out in a particular direction.
Returns: The direction of the specified System.Windows.UIElement. Optionally, the method
        returns System.Windows.Automation.Peers.AutomationOrientation.None if the element is not laid out in a particular direction.
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# "Probabilistic Grammar Fuzzing" - a chapter of "The Fuzzing Book"
# Web site: https://www.fuzzingbook.org/html/ProbabilisticGrammarFuzzer.html
# Last change: 2022-02-09 08:26:36+01:00
#
# Copyright (c) 2021 CISPA Helmholtz Center for Information Security
# Copyright (c) 2018-2020 Saarland University, authors, and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
r'''
The Fuzzing Book - Probabilistic Grammar Fuzzing
This file can be _executed_ as a script, running all experiments:
$ python ProbabilisticGrammarFuzzer.py
or _imported_ as a package, providing classes, functions, and constants:
>>> from fuzzingbook.ProbabilisticGrammarFuzzer import <identifier>
but before you do so, _read_ it and _interact_ with it at:
https://www.fuzzingbook.org/html/ProbabilisticGrammarFuzzer.html
A _probabilistic_ grammar allows attaching individual _probabilities_ to production rules. To set the probability of an individual expansion `S` to the value `X` (between 0 and 1), replace it with a pair
(S, opts(prob=X))
If we want to ensure that 90% of phone numbers generated have an area code starting with `9`, we can write:
>>> from Grammars import US_PHONE_GRAMMAR, extend_grammar, opts
>>> PROBABILISTIC_US_PHONE_GRAMMAR: Grammar = extend_grammar(US_PHONE_GRAMMAR,
>>> {
>>>      "<lead-digit>": [
>>> "2", "3", "4", "5", "6", "7", "8",
>>> ("9", opts(prob=0.9))
>>> ],
>>> })
A `ProbabilisticGrammarFuzzer` will extract and interpret these options. Here is an example:
>>> probabilistic_us_phone_fuzzer = ProbabilisticGrammarFuzzer(PROBABILISTIC_US_PHONE_GRAMMAR)
>>> [probabilistic_us_phone_fuzzer.fuzz() for i in range(5)]
['(918)925-2501',
'(981)925-0792',
'(934)995-5029',
'(955)999-7801',
'(964)927-0877']
As you can see, the large majority of area codes now starts with `9`.
For more details, source, and documentation, see
"The Fuzzing Book - Probabilistic Grammar Fuzzing"
at https://www.fuzzingbook.org/html/ProbabilisticGrammarFuzzer.html
'''
# Allow to use 'from . import <module>' when run as script (cf. PEP 366)
if __name__ == '__main__' and __package__ is None:
__package__ = 'fuzzingbook'
# Probabilistic Grammar Fuzzing
# =============================
if __name__ == '__main__':
print('# Probabilistic Grammar Fuzzing')
if __name__ == '__main__':
from .bookutils import YouTubeVideo
YouTubeVideo('XzJqzawOubo')
## Synopsis
## --------
if __name__ == '__main__':
print('\n## Synopsis')
## The Law of Leading Digits
## -------------------------
if __name__ == '__main__':
print('\n## The Law of Leading Digits')
def first_digit_via_string(x: int) -> int:
return ord(repr(x)[0]) - ord('0')
if __name__ == '__main__':
first_digit_via_string(2001)
import math
def first_digit_via_log(x: int) -> int:
frac, whole = math.modf(math.log10(x))
return int(10 ** frac)
if __name__ == '__main__':
first_digit_via_log(2001)
if __name__ == '__main__':
(math.log10(1), math.log10(2))
if __name__ == '__main__':
(math.log10(2), math.log10(3))
def prob_leading_digit(d: int) -> float:
return math.log10(d + 1) - math.log10(d)
if __name__ == '__main__':
digit_probs = [prob_leading_digit(d) for d in range(1, 10)]
[(d, "%.2f" % digit_probs[d - 1]) for d in range(1, 10)]
if __name__ == '__main__':
import matplotlib.pyplot as plt # type: ignore
if __name__ == '__main__':
labels = range(1, 10)
fig1, ax1 = plt.subplots()
ax1.pie(digit_probs, labels=labels, shadow=True, autopct='%1.1f%%',
counterclock=False, startangle=90)
ax1.axis('equal');
## Specifying Probabilities
## ------------------------
if __name__ == '__main__':
print('\n## Specifying Probabilities')
if __name__ == '__main__':
# We use the same fixed seed as the notebook to ensure consistency
import random
random.seed(2001)
from .Fuzzer import Fuzzer
from .GrammarFuzzer import GrammarFuzzer, all_terminals, display_tree, DerivationTree
from .Grammars import is_valid_grammar, EXPR_GRAMMAR, START_SYMBOL, crange
from .Grammars import opts, exp_string, exp_opt, set_opts
from .Grammars import Grammar, Expansion
from typing import List, Dict, Set, Optional, cast, Any, Tuple
PROBABILISTIC_EXPR_GRAMMAR: Grammar = {
"<start>":
["<expr>"],
"<expr>":
[("<term> + <expr>", opts(prob=0.1)),
("<term> - <expr>", opts(prob=0.2)),
"<term>"],
"<term>":
[("<factor> * <term>", opts(prob=0.1)),
("<factor> / <term>", opts(prob=0.1)),
"<factor>"
],
"<factor>":
["+<factor>", "-<factor>", "(<expr>)",
"<leadinteger>", "<leadinteger>.<integer>"],
"<leadinteger>":
["<leaddigit><integer>", "<leaddigit>"],
# Benford's law: frequency distribution of leading digits
"<leaddigit>":
[("1", opts(prob=0.301)),
("2", opts(prob=0.176)),
("3", opts(prob=0.125)),
("4", opts(prob=0.097)),
("5", opts(prob=0.079)),
("6", opts(prob=0.067)),
("7", opts(prob=0.058)),
("8", opts(prob=0.051)),
("9", opts(prob=0.046)),
],
# Remaining digits are equally distributed
"<integer>":
["<digit><integer>", "<digit>"],
"<digit>":
["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"],
}
if __name__ == '__main__':
assert is_valid_grammar(PROBABILISTIC_EXPR_GRAMMAR, supported_opts={'prob'})
if __name__ == '__main__':
leaddigits: List[Expansion] = PROBABILISTIC_EXPR_GRAMMAR["<leaddigit>"]
leaddigits
if __name__ == '__main__':
leaddigit_expansion = leaddigits[0]
leaddigit_expansion
if __name__ == '__main__':
exp_string(leaddigit_expansion)
def exp_prob(expansion: Expansion) -> float:
"""Return the options of an expansion"""
return exp_opt(expansion, 'prob')
if __name__ == '__main__':
exp_prob(leaddigit_expansion)
if __name__ == '__main__':
f = GrammarFuzzer(PROBABILISTIC_EXPR_GRAMMAR)
f.fuzz()
from .GrammarCoverageFuzzer import GrammarCoverageFuzzer # minor dependency
if __name__ == '__main__':
f = GrammarCoverageFuzzer(PROBABILISTIC_EXPR_GRAMMAR)
f.fuzz()
## Computing Probabilities
## -----------------------
if __name__ == '__main__':
print('\n## Computing Probabilities')
### Distributing Probabilities
if __name__ == '__main__':
print('\n### Distributing Probabilities')
def exp_probabilities(expansions: List[Expansion],
nonterminal: str ="<symbol>") \
-> Dict[Expansion, float]:
probabilities = [exp_prob(expansion) for expansion in expansions]
prob_dist = prob_distribution(probabilities, nonterminal) # type: ignore
prob_mapping: Dict[Expansion, float] = {}
for i in range(len(expansions)):
expansion = exp_string(expansions[i])
prob_mapping[expansion] = prob_dist[i]
return prob_mapping
def prob_distribution(probabilities: List[Optional[float]],
nonterminal: str = "<symbol>"):
epsilon = 0.00001
number_of_unspecified_probabilities = probabilities.count(None)
if number_of_unspecified_probabilities == 0:
sum_probabilities = cast(float, sum(probabilities))
assert abs(sum_probabilities - 1.0) < epsilon, \
nonterminal + ": sum of probabilities must be 1.0"
return probabilities
sum_of_specified_probabilities = 0.0
for p in probabilities:
if p is not None:
sum_of_specified_probabilities += p
assert 0 <= sum_of_specified_probabilities <= 1.0, \
nonterminal + ": sum of specified probabilities must be between 0.0 and 1.0"
default_probability = ((1.0 - sum_of_specified_probabilities)
/ number_of_unspecified_probabilities)
all_probabilities = []
for p in probabilities:
if p is None:
p = default_probability
all_probabilities.append(p)
assert abs(sum(all_probabilities) - 1.0) < epsilon
return all_probabilities
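if __name__ == '__main__':
    # Illustrative check, not from the original notebook: two of four expansions
    # carry explicit probabilities (0.5 in total), so the remaining 0.5 is split
    # evenly among the two unspecified ones -> [0.2, 0.25, 0.3, 0.25]
    print(prob_distribution([0.2, None, 0.3, None], "<example>"))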
if __name__ == '__main__':
print(exp_probabilities(PROBABILISTIC_EXPR_GRAMMAR["<leaddigit>"]))
if __name__ == '__main__':
print(exp_probabilities(PROBABILISTIC_EXPR_GRAMMAR["<digit>"]))
if __name__ == '__main__':
exp_probabilities(PROBABILISTIC_EXPR_GRAMMAR["<expr>"])
### Checking Probabilities
if __name__ == '__main__':
print('\n### Checking Probabilities')
def is_valid_probabilistic_grammar(grammar: Grammar,
start_symbol: str = START_SYMBOL) -> bool:
if not is_valid_grammar(grammar, start_symbol):
return False
for nonterminal in grammar:
expansions = grammar[nonterminal]
_ = exp_probabilities(expansions, nonterminal)
return True
if __name__ == '__main__':
assert is_valid_probabilistic_grammar(PROBABILISTIC_EXPR_GRAMMAR)
if __name__ == '__main__':
assert is_valid_probabilistic_grammar(EXPR_GRAMMAR)
from .ExpectError import ExpectError
if __name__ == '__main__':
with ExpectError():
assert is_valid_probabilistic_grammar({"<start>": [("1", opts(prob=0.5))]})
if __name__ == '__main__':
with ExpectError():
assert is_valid_probabilistic_grammar(
{"<start>": [("1", opts(prob=1.5)), "2"]})
## Expanding by Probability
## ------------------------
if __name__ == '__main__':
print('\n## Expanding by Probability')
import random
class ProbabilisticGrammarFuzzer(GrammarFuzzer):
"""A grammar-based fuzzer respecting probabilities in grammars."""
def check_grammar(self) -> None:
super().check_grammar()
assert is_valid_probabilistic_grammar(self.grammar)
def supported_opts(self) -> Set[str]:
return super().supported_opts() | {'prob'}
class ProbabilisticGrammarFuzzer(ProbabilisticGrammarFuzzer):
def choose_node_expansion(self, node: DerivationTree,
children_alternatives: List[Any]) -> int:
(symbol, tree) = node
expansions = self.grammar[symbol]
probabilities = exp_probabilities(expansions)
weights: List[float] = []
for children in children_alternatives:
expansion = all_terminals((symbol, children))
children_weight = probabilities[expansion]
if self.log:
print(repr(expansion), "p =", children_weight)
weights.append(children_weight)
if sum(weights) == 0:
# No alternative (probably expanding at minimum cost)
return random.choices(
range(len(children_alternatives)))[0]
else:
return random.choices(
range(len(children_alternatives)), weights=weights)[0]
if __name__ == '__main__':
natural_fuzzer = ProbabilisticGrammarFuzzer(
PROBABILISTIC_EXPR_GRAMMAR, start_symbol="<leadinteger>")
print([natural_fuzzer.fuzz() for i in range(20)])
if __name__ == '__main__':
integer_fuzzer = GrammarFuzzer(
PROBABILISTIC_EXPR_GRAMMAR, start_symbol="<leadinteger>")
print([integer_fuzzer.fuzz() for i in range(20)])
if __name__ == '__main__':
leaddigit_fuzzer = ProbabilisticGrammarFuzzer(
PROBABILISTIC_EXPR_GRAMMAR, start_symbol="<leaddigit>")
leaddigit_fuzzer.fuzz()
if __name__ == '__main__':
trials = 10000
count = {}
for c in crange('0', '9'):
count[c] = 0
for i in range(trials):
count[leaddigit_fuzzer.fuzz()] += 1
print([(digit, count[digit] / trials) for digit in count])
## Directed Fuzzing
## ----------------
if __name__ == '__main__':
print('\n## Directed Fuzzing')
def set_prob(grammar: Grammar, symbol: str,
expansion: Expansion, prob: Optional[float]) -> None:
"""Set the probability of the given expansion of grammar[symbol]"""
set_opts(grammar, symbol, expansion, opts(prob=prob))
from .Grammars import URL_GRAMMAR, extend_grammar
if __name__ == '__main__':
probabilistic_url_grammar = extend_grammar(URL_GRAMMAR)
set_prob(probabilistic_url_grammar, "<scheme>", "ftps", 0.8)
assert is_valid_probabilistic_grammar(probabilistic_url_grammar)
if __name__ == '__main__':
probabilistic_url_grammar["<scheme>"]
if __name__ == '__main__':
prob_url_fuzzer = ProbabilisticGrammarFuzzer(probabilistic_url_grammar)
for i in range(10):
print(prob_url_fuzzer.fuzz())
if __name__ == '__main__':
set_prob(probabilistic_url_grammar, "<scheme>", "ftps", 0.0)
assert is_valid_probabilistic_grammar(probabilistic_url_grammar)
if __name__ == '__main__':
prob_url_fuzzer = ProbabilisticGrammarFuzzer(probabilistic_url_grammar)
for i in range(10):
print(prob_url_fuzzer.fuzz())
from .Grammars import EXPR_GRAMMAR
if __name__ == '__main__':
probabilistic_expr_grammar = extend_grammar(EXPR_GRAMMAR)
probabilistic_expr_grammar["<expr>"]
if __name__ == '__main__':
set_prob(probabilistic_expr_grammar, "<expr>", "<term>", 0.0)
assert is_valid_probabilistic_grammar(probabilistic_expr_grammar)
if __name__ == '__main__':
prob_expr_fuzzer = ProbabilisticGrammarFuzzer(probabilistic_expr_grammar)
prob_expr_fuzzer.fuzz()
## Probabilities in Context
## ------------------------
if __name__ == '__main__':
print('\n## Probabilities in Context')
def decrange(start: int, end: int) -> List[Expansion]:
"""Return a list with string | |
########
# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import os
import json
import shlex
import shutil
import zipfile
import subprocess
import contextlib
from cloudify.workflows import ctx
from cloudify import constants, manager
from . import constants as snapshot_constants
from .constants import SECURITY_FILE_LOCATION, SECURITY_FILENAME
from cloudify.utils import ManagerVersion, get_local_rest_certificate
from cloudify.utils import get_tenant_name, internal as internal_utils
# Path to python binary in the manager environment
PYTHON_MANAGER_ENV = '/opt/manager/env/bin/python'
# Path to database migration script
SCHEMA_SCRIPT = '/opt/manager/resources/cloudify/migrations/schema.py'
class DictToAttributes(object):
def __init__(self, dic):
self._dict = dic
def __getattr__(self, name):
return self._dict[name]
def __str__(self):
try:
# try to convert to json,
# may fail on UTF-8 and stuff, don't sweat on it..
return json.dumps(self._dict)
except Exception:
return self._dict
def copy_files_between_manager_and_snapshot(archive_root,
config,
to_archive=True,
tenant_name=None):
"""
Copy files/dirs between snapshot/manager and manager/snapshot.
:param archive_root: Path to the snapshot archive root.
:param config: Config of manager.
:param to_archive: If True then copying is from manager to snapshot,
otherwise from snapshot to manager.
:param tenant_name: If passed, will restore files to this tenant name.
Expected to be used only for 3.x upgrades.
"""
ctx.logger.info('Copying files/directories...')
data_to_copy = [
constants.FILE_SERVER_BLUEPRINTS_FOLDER,
constants.FILE_SERVER_DEPLOYMENTS_FOLDER,
constants.FILE_SERVER_UPLOADED_BLUEPRINTS_FOLDER,
constants.FILE_SERVER_PLUGINS_FOLDER,
]
# To work with cert dir logic for archiving
if tenant_name:
# This is a 3.x install, files go in tenant folders
data_to_copy = [
(
# The root path to copy the files to in the manager for each
# type of restored file
# e.g. blueprints/<tenant_name>/
os.path.join(path, tenant_name)
# Plugins are an exception as they are all stored in one path
# under UUIDs without tenant names
if path != constants.FILE_SERVER_PLUGINS_FOLDER else path,
# The path of the file type in the snapshot
path,
) for path in data_to_copy
]
else:
# This is a 4.x+ install, files go where they went.
data_to_copy = [(path, path) for path in data_to_copy]
local_cert_dir = os.path.dirname(get_local_rest_certificate())
if to_archive:
data_to_copy.append((local_cert_dir,
snapshot_constants.ARCHIVE_CERT_DIR))
data_to_copy.append((SECURITY_FILE_LOCATION, SECURITY_FILENAME))
ctx.logger.info(str(data_to_copy))
for p1, p2 in data_to_copy:
# first expand relative paths
if p1[0] != '/':
p1 = os.path.join(config.file_server_root, p1)
if p2[0] != '/':
p2 = os.path.join(archive_root, p2)
# make p1 to always point to source and p2 to target of copying
if not to_archive:
p1, p2 = p2, p1
copy_snapshot_path(p1, p2)
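# Hedged illustration (hypothetical tenant name): when restoring a 3.x snapshot
# with tenant_name='default_tenant', each pair above becomes
# (manager path, snapshot path), e.g. ('blueprints/default_tenant', 'blueprints'),
# while the plugins folder keeps the flat ('plugins', 'plugins') mapping.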
def copy_stage_files(archive_root):
"""Copy Cloudify Stage files into the snapshot"""
stage_data = [
snapshot_constants.STAGE_CONFIG_FOLDER,
snapshot_constants.STAGE_USERDATA_FOLDER
]
for folder in stage_data:
copy_snapshot_path(
os.path.join(snapshot_constants.STAGE_BASE_FOLDER, folder),
os.path.join(archive_root, 'stage', folder))
def restore_stage_files(archive_root, override=False):
"""Copy Cloudify Stage files from the snapshot archive to stage folder.
Note that only the stage user can write into the stage directory,
so we use sudo to run a script (created during bootstrap) that copies
the restored files.
"""
stage_archive = os.path.join(archive_root, 'stage')
if not os.path.exists(stage_archive):
# no stage files in the snapshot archive - nothing to do
# (perhaps the snapshot was made before stage was included in it?)
return
# let's not give everyone full read access to the snapshot, instead,
# copy only the stage-related parts and give the stage user read access
# to those
stage_tempdir = '{0}_stage'.format(archive_root)
shutil.copytree(stage_archive, stage_tempdir)
run(['/bin/chmod', 'a+r', '-R', stage_tempdir])
try:
sudo(
[
snapshot_constants.MANAGER_PYTHON,
snapshot_constants.STAGE_TOKEN_SCRIPT
],
user=snapshot_constants.STAGE_USER,
)
restore_command = [snapshot_constants.STAGE_RESTORE_SCRIPT,
stage_tempdir]
if override:
restore_command.append('--override-existing')
sudo(restore_command,
user=snapshot_constants.STAGE_USER)
finally:
shutil.rmtree(stage_tempdir)
sudo(['/usr/bin/systemctl', 'restart', 'cloudify-stage'],
ignore_failures=True)
def copy_composer_files(archive_root):
"""Copy Cloudify Composer files into the snapshot"""
composer_data = [
snapshot_constants.COMPOSER_CONFIG_FOLDER,
snapshot_constants.COMPOSER_BLUEPRINTS_FOLDER,
]
for folder in composer_data:
copy_snapshot_path(
os.path.join(snapshot_constants.COMPOSER_BASE_FOLDER, folder),
os.path.join(archive_root, 'composer', folder))
def restore_composer_files(archive_root):
"""Copy Composer files from the snapshot archive to Composer folder.
"""
composer_archive = os.path.join(archive_root, 'composer')
if not os.path.exists(composer_archive):
# no composer files in the snapshot archive - nothing to do
# (perhaps the snapshot was made before composer was included in it?)
return
composer_data = [
snapshot_constants.COMPOSER_CONFIG_FOLDER,
snapshot_constants.COMPOSER_BLUEPRINTS_FOLDER,
]
for folder in composer_data:
dest_path = os.path.join(snapshot_constants.COMPOSER_BASE_FOLDER,
folder)
copied = copy_snapshot_path(
os.path.join(archive_root, 'composer', folder),
dest_path)
if copied:
run(['/bin/chmod', '-R', 'g+w', dest_path])
def copy_snapshot_path(source, destination):
# source doesn't need to exist, then ignore
if not os.path.exists(source):
ctx.logger.warning('Source not found: {0}. Skipping...'.format(source))
return False
ctx.logger.debug(
'Copying from dump: {0} to: {1}..'.format(source, destination))
# copy data
if os.path.isfile(source):
shutil.copy(source, destination)
else:
if os.path.exists(destination):
shutil.rmtree(destination)
shutil.copytree(source, destination)
return True
def copy(source, destination):
ctx.logger.debug('Copying {0} to {1}..'.format(source,
destination))
destination_dir = os.path.dirname(destination)
if not os.path.exists(destination_dir):
ctx.logger.debug(
'Path does not exist: {0}. Creating it...'.format(
destination_dir))
sudo(['mkdir', '-p', destination_dir])
sudo(['cp', '-rp', source, destination])
def sudo(command, user=None, ignore_failures=False):
if isinstance(command, str):
command = shlex.split(command)
if user is not None:
command = ['sudo', '-u', user] + command
else:
command.insert(0, 'sudo')
return run(command=command, ignore_failures=ignore_failures)
def run(command, ignore_failures=False, redirect_output_path=None, cwd=None):
if isinstance(command, str):
command = shlex.split(command)
command_str = ' '.join(command)
ctx.logger.debug('Running command: {0}'.format(command_str))
stderr = subprocess.PIPE
stdout = subprocess.PIPE
if redirect_output_path:
ctx.logger.debug('Command: {0} Redirect output to: {1}'.
format(' '.join(command), redirect_output_path))
with open(redirect_output_path, 'a') as output:
proc = subprocess.Popen(command, stdout=output, stderr=stderr,
cwd=cwd)
else:
proc = subprocess.Popen(command, stdout=stdout, stderr=stderr, cwd=cwd)
proc.aggr_stdout, proc.aggr_stderr = proc.communicate()
if proc and proc.returncode != 0:
if not ignore_failures:
msg = 'Failed running command: {0} ({1}).'.format(
command_str, proc.aggr_stderr)
raise RuntimeError(msg)
return proc
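# Hedged usage sketch (requires a Cloudify workflow context for ctx.logger;
# the commands shown are hypothetical):
#   proc = run(['ls', '-l', '/opt/manager'])          # run as the current user
#   sudo(['/usr/bin/systemctl', 'restart', 'nginx'],
#        ignore_failures=True)                        # same, but prefixed with sudo
#   print(proc.aggr_stdout)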
def get_manager_version(client):
return ManagerVersion(client.manager.get_version()['version'])
def get_tenants_list():
client = manager.get_rest_client(snapshot_constants.DEFAULT_TENANT_NAME)
version = client.manager.get_version()
if version['edition'] != 'premium':
return [snapshot_constants.DEFAULT_TENANT_NAME]
tenants = client.tenants.list(_include=['name'], _get_all_results=True)
return [tenant.name for tenant in tenants]
def get_dep_contexts(version):
deps = {}
tenants = [get_tenant_name()] if version < snapshot_constants.V_4_0_0 \
else get_tenants_list()
for tenant_name in tenants:
# Temporarily assign the context a different tenant name so that
# we can retrieve that tenant's deployment contexts
with internal_utils._change_tenant(ctx, tenant_name):
# We have to zero this out each time or the cached version for
# the previous tenant will be used
ctx._dep_contexts = None
# Get deployment contexts for this tenant
deps[tenant_name] = ctx.deployments_contexts
return deps.items()
def is_compute(node):
return constants.COMPUTE_NODE_TYPE in node.type_hierarchy
def make_zip64_archive(zip_filename, directory):
"""Create zip64 archive that contains all files in a directory.
zip64 is a set of extensions on top of the zip file format that allows to
have files larger than 2GB. This is important in snapshots where the amount
    of data to back up might be huge.
Note that `shutil` provides a method to register new formats based on the
extension of the target file, since a `.zip` extension is still desired,
using such an extension mechanism is not an option to avoid patching the
already registered zip format.
In any case, this function is heavily inspired in stdlib's
`shutil._make_zipfile`.
:param zip_filename: Path to the zip file to be created
:type zip_filename: str
    :param directory: Path to the directory where all files to compress are located
:type directory: str
"""
zip_context_manager = zipfile.ZipFile(
zip_filename,
'w',
compression=zipfile.ZIP_DEFLATED,
allowZip64=True,
)
with zip_context_manager as zip_file:
path = os.path.normpath(directory)
ctx.logger.debug('Creating zip archive of: {0}'.format(path))
base_dir = path
for dirpath, dirnames, filenames in os.walk(directory):
for dirname in sorted(dirnames):
path = os.path.normpath(os.path.join(dirpath, dirname))
zip_file.write(path, os.path.relpath(path, base_dir))
for filename in filenames:
path = os.path.normpath(os.path.join(dirpath, filename))
# Not sure why this check is needed,
# but it's in the original stdlib's implementation
if os.path.isfile(path):
zip_file.write(path, os.path.relpath(path, base_dir))
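# Hedged usage sketch (paths are hypothetical): pack a snapshot staging
# directory into a zip64 archive so that files larger than 2GB are supported.
#   make_zip64_archive('/opt/manager/snapshots/snap1.zip',
#                      '/opt/manager/snapshots/snap1-tmp')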
@contextlib.contextmanager
def db_schema(revision, config=None):
"""Downgrade schema to desired revision to perform operation and upgrade.
Used when restoring a snapshot to make sure the restore operation happens
    with the same version of the schema that was used when the snapshot was
created.
:param revision: Revision to downgrade to before performing any operation.
:type revision: str
"""
db_schema_downgrade(revision, config=config)
yield
db_schema_upgrade(config=config)
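# Hedged usage sketch (the revision string and the restore step are hypothetical):
# pin the schema to the revision recorded in the snapshot while restoring.
#   with db_schema('9516df019579', config=config):
#       restore_database_dump()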
def db_schema_downgrade(revision='-1', config=None):
"""Downgrade database schema.
Used before restoring a snapshot to make sure that the schema matches the
one that was used when the snapshot was created.
:param revision: Revision to downgrade to.
:type revision: str
"""
_schema(config, ['downgrade', revision])
def db_schema_upgrade(revision='head', config=None):
"""Upgrade database schema.
Used after restoring snapshot to get an up-to-date schema.
:param revision: Revision to upgrade to.
:type revision: str
"""
    _schema(config, ['upgrade', revision])
#############################################################################
#
#
# webGen module to BFA c7
#
#
#############################################################################
""" This is a simple HTML/JS/CSS generator/framework mainly for the purpose of maintaining the webclient of bfa.
It's borrowing some ideas from the model-view-presenter scheme to produce 'views' containing html, css, js.
This package is still under heavy-development and will continue to change a lot. The next concept that should be
integrated is that of 'subviews', not actually served from the webservice on their own but, that can concatenate
together to produce 'views' actually served from the webservice.
Dependencies:
webgen -> framework
note:: Author(s): Mitch last-check: 07.07.2021 """
from __future__ import annotations
from os.path import exists
from bfassist.webgen.framework import *
from bfassist.webgen.framework.html import HTML_Node
from bfassist.webgen.framework.css import CSS_Style
from bfassist.webgen.framework.js import JS_Function
# noinspection PyUnusedLocal
def __preload__(forClient: bool = True):
pass
# noinspection PyUnusedLocal
def __postload__(forClient: bool = True):
pass
class View:
""" Represents a specific view from the browser at the webclient.
:param Name: Name and importantly also the name of the folder/package that's used for production.
:param DisplayName: Name of the view when displayed e.g. in the navigation.
:param Description: A description of this view.
:param HTML_DOCUMENT: HTML document to produce this view.
:param STYLE_SHEET: "Private" CSS style sheet of this view.
:param SCRIPT: "Private" JS script of this view.
:param Stylesheets: Dictionary of stylesheets used in this view with the view the stylesheets originally
belong to as keys.
:param Scripts: Dictionary of scripts used in this view with names as keys.
:param cached: A cached version of the view. A list containing the html, css and js.
:param exported: An exported version of the view. A list containing the paths to the html, css and js.
note:: Author(s): Mitch """
def __init__(self, Name: str, DisplayName: str, Description: str = "",
HTML_DOCUMENT: HTML_Document = None, STYLE_SHEET: CSS_Stylesheet = None, SCRIPT: JS_Script = None,
Stylesheets: dict = None, Scripts: dict = None, cached: list = None, exported: list = None):
self.Name = Name
self.DisplayName = DisplayName
self.Description = Description
if HTML_DOCUMENT is None:
self.HTML_DOCUMENT = HTML_Document()
else:
self.HTML_DOCUMENT = HTML_DOCUMENT
if STYLE_SHEET is None:
self.STYLE_SHEET = CSS_Stylesheet(self.Name + 'Styles.css')
else:
self.STYLE_SHEET = STYLE_SHEET
if SCRIPT is None:
self.SCRIPT = JS_Script(self.Name + 'Script.js')
else:
self.SCRIPT = SCRIPT
if Stylesheets is None:
self.Stylesheets = {self: self.STYLE_SHEET}
else:
self.Stylesheets = Stylesheets
for view in self.Stylesheets:
if view == self:
self.HTML_DOCUMENT.addStyleSheet(self.styleSheetToNode())
else:
self.addScriptFromForeignView(view)
if Scripts is None:
self.Scripts = {self: self.SCRIPT}
else:
self.Scripts = Scripts
for view in self.Scripts:
if view == self:
self.HTML_DOCUMENT.addScript(self.scripToNode())
else:
self.addScriptFromForeignView(view)
self.cached = cached
self.exported = exported
def setTitle(self, title: str):
self.HTML_DOCUMENT.setTitle(title)
def __iadd__(self, other):
""" Truly magic function that can receive a single HTML_Node, CSS_Style or JS_Function and correctly append it
to the HTML, CSS or JS of this view. It can also receive tuples of valid inner HTML or JS functions as well as
sets of CSS styles and even tuples containing 3 of these in a random order.
:param other: Value as specified in the description.
:return: The return value of the respective __iadd__ called.
note:: Author(s): Mitch """
if isinstance(other, HTML_Node):
self.HTML_DOCUMENT.__iadd__(other)
return self
elif isinstance(other, CSS_Style):
self.STYLE_SHEET.__iadd__(other)
return self
elif isinstance(other, JS_Function):
self.SCRIPT.__iadd__(other)
return self
elif isinstance(other, tuple):
if HTML_Node.isValidInnerHTML(other):
self.HTML_DOCUMENT.__iadd__(other)
return self
elif JS_Script.allFunctionsAreFunctions(other):
self.SCRIPT.__iadd__(other)
return self
else:
if 1 <= len(other) <= 3:
try:
for elem in other:
self.__iadd__(elem)
return self
except TypeError:
raise TypeError("Can only += tuples containing HTML_Node, CSS_Style, JS_Function or tuples"
"containing tuples of valid inner HTML or exclusively JS functions or sets of"
"CSS styles. But " + str(other) + " is not.")
else:
raise TypeError("Can only += tuples with valid innerHTML or containing only JS_Function but " +
str(other) + " is neither.")
elif isinstance(other, set):
if CSS_Stylesheet.allStylesAreStyles(other):
self.STYLE_SHEET.__iadd__(other)
return self
else:
raise TypeError("Can only += sets containing only CSS_Style but " + str(other) + " is not.")
else:
raise TypeError("Can only += values of type HTML_Node, CSS_Style or JS_Function but value was of type " +
str(type(other)))
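    # Hedged usage sketch (the node/style/function objects below are placeholders,
    # not actual framework constructor calls): a single += can mix HTML, CSS and JS.
    #   view = View('demo', 'Demo')
    #   view += (some_html_node, {some_css_style}, some_js_function)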
@staticmethod
def build():
""" Function to be overridden for building the view so its code can be read after the rest of the application
was loaded.
note:: Author(s): Mitch """
pass
def styleSheetToNode(self):
""" Function to turn the style sheet of this view into a node for linking it in the head of its HTML document.
:return: The node that links to the style sheet.
note:: Author(s): Mitch """
return self.STYLE_SHEET.toNode(self.Name + '/' + self.STYLE_SHEET.filename)
def asForeignStyleSheetToNode(self, foreignView: View):
""" Function to turn the sheet of this view into a node for a foreign view.
:param foreignView: The foreign view that wants to link this style sheet.
:return: The node links to the style sheet.
note:: Author(s): Mitch """
depth = foreignView.Name.count('/') + 1
return self.STYLE_SHEET.toNode('../'*depth + self.Name + '/' + self.STYLE_SHEET.filename)
def addStyleSheetFromForeignView(self, foreignView: View):
""" Function to add a foreign stylesheet to this view.
:param foreignView: The view to add the stylesheet from.
note:: Author(s): Mitch """
self.Stylesheets[foreignView] = foreignView.STYLE_SHEET
self.HTML_DOCUMENT.addStyleSheet(foreignView.asForeignStyleSheetToNode(self))
def scripToNode(self):
""" Function to turn the script of this view into a node for linking it in the head of a HTML document.
:return: The node that links to the script.
note:: Author(s): Mitch """
return self.SCRIPT.toNode(self.Name + '/' + self.SCRIPT.filename)
def asForeignScriptToNode(self, foreignView: View):
""" Function to turn the script of this view into a node for a foreign view.
:param foreignView: The foreign view that wants to link this script.
        :return: The node that links to the script.
note:: Author(s): Mitch """
depth = foreignView.Name.count('/') + 1
return self.SCRIPT.toNode('../' * depth + self.Name + '/' + self.SCRIPT.filename)
def addScriptFromForeignView(self, foreignView: View):
""" Function to add a foreign already exported script to this view.
:param foreignView: The view to add the script from.
note:: Author(s): Mitch """
self.Scripts[foreignView] = foreignView.SCRIPT
self.HTML_DOCUMENT.addScript(foreignView.asForeignScriptToNode(self))
def exportHTML(self):
""" Function to export the HTML document of this view.
note:: Author(s): Mitch """
with open('bfassist/standalone/webclient/' + self.Name + '/' + self.HTML_DOCUMENT.filename, 'w') as htmlFile:
htmlFile.write(self.HTML_DOCUMENT.toString())
def exportStyleSheet(self):
""" Function to export the stylesheets attached to this view.
note:: Author(s): Mitch """
with open('bfassist/standalone/webclient/' + self.Name + '/' + self.STYLE_SHEET.filename, 'w') as cssFile:
cssFile.write(self.STYLE_SHEET.toString())
def exportScript(self):
""" Function to export the scripts attached to this view.
note:: Author(s): Mitch """
with open('bfassist/standalone/webclient/' + self.Name + '/' + self.SCRIPT.filename, 'w') as jsFile:
jsFile.write(self.SCRIPT.toString())
def export(self):
""" Function to export all documents attached to this view.
note:: Author(s): Mitch """
try:
if not exists('bfassist/standalone/webclient/' + self.Name + '/' + self.HTML_DOCUMENT.filename):
self.exportHTML()
else:
with open('bfassist/standalone/webclient/' + self.Name + '/' + self.HTML_DOCUMENT.filename, 'r') \
as htmlFile:
HTML = htmlFile.read()
if len(HTML) != len(self.HTML_DOCUMENT.toString()):
self.exportHTML()
if self.exported:
self.exported[0] = 'bfassist/standalone/webclient/' + self.Name + '/' + self.HTML_DOCUMENT.filename
else:
self.exported = ['bfassist/standalone/webclient/' + self.Name + '/' + self.HTML_DOCUMENT.filename, None,
None]
if not exists('bfassist/standalone/webclient/' + self.Name + '/' + self.STYLE_SHEET.filename):
self.exportStyleSheet()
else:
with open('bfassist/standalone/webclient/' + self.Name + '/' + self.STYLE_SHEET.filename, 'r') \
as cssFile:
CSS = cssFile.read()
if len(CSS) != len(self.STYLE_SHEET.toString()):
self.exportStyleSheet()
if self.exported:
self.exported[1] = 'bfassist/standalone/webclient/' + self.Name + '/' + self.STYLE_SHEET.filename
else:
self.exported = [None, 'bfassist/standalone/webclient/' + self.Name + '/' + self.STYLE_SHEET.filename,
None]
if not exists('bfassist/standalone/webclient/' + self.Name + '/' + self.SCRIPT.filename):
self.exportScript()
else:
with open('bfassist/standalone/webclient/' + self.Name + '/' + self.SCRIPT.filename, 'r') as jsFile:
JS = jsFile.read()
if len(JS) != len(self.SCRIPT.toString()):
self.exportScript()
if self.exported:
self.exported[2] = 'bfassist/standalone/webclient/' + self.Name + '/' + self.SCRIPT.filename
else:
self.exported = [None, None, 'bfassist/standalone/webclient/' + self.Name + '/' + self.SCRIPT.filename]
except FileNotFoundError:
print("Using module outside of valid bfa environment. Commencing without exporting the " +
self.DisplayName + " view.")
def cacheExported(self):
""" Function to cache the exported documents of this view.
note:: Author(s): Mitch """
with open(self.exported[0], 'r') as htmlFile:
HTML = htmlFile.read()
with open(self.exported[1], 'r') as cssFile:
CSS = cssFile.read()
with open(self.exported[2], 'r') as jsFile:
JS = jsFile.read()
self.cached = [HTML, CSS, JS]
def serveHTML(self):
""" Function to serve the HTML document of this view.
:return: The HTML document of this view as string.
note:: Author(s): Mitch """
if self.cached:
            return self.cached[0]
    I1IiIiiIiIII = lisp_core_commands["lisp debug"]
    I1IiIiiIiIII = I1IiIiiIiIII[1]

    o0O0O0ooo0oOO, iI11, I1IiIiiIiIII = lisp_syntax_check(I1IiIiiIiIII, clause)
    if (o0O0O0ooo0oOO == True): return (iI11)

    O0O0oo = {"itr": "lisp-itr", "etr": "lisp-etr", "rtr": "lisp-rtr",
        "map-resolver": "lisp-mr", "map-server": "lisp-ms",
        "ddt-node": "lisp-ddt", "core": ""}

    for oo00o0 in I1IiIiiIiIII:
        Oo0 = O0O0oo[oo00o0]
        if (single_process and single_process != Oo0): continue

        OOOO0o0 = I1IiIiiIiIII[oo00o0]
        oOOI11I = ("lisp debug {\n" + " {} = {}\n".format(oo00o0, OOOO0o0) + "}\n")

        if (oo00o0 == "core"):
            lisp_process_command(None, None, oOOI11I, None, [None])
            continue

        oOOI11I = lisp.lisp_command_ipc(oOOI11I, "lisp-core")
        lisp.lisp_ipc(oOOI11I, lisp_socket, Oo0)
        if (single_process): break

    return (iI11)

def lisp_replace_password_in_clause(clause, keyword_string):
    O0OO0O = clause.find(keyword_string)

    if (O0OO0O == -1): return (clause)

    O0OO0O += len(keyword_string)
    o0o0O0O00oOOo = clause[O0OO0O::].find("\n")
    o0o0O0O00oOOo += O0OO0O
    Ii1IIi = clause[O0OO0O:o0o0O0O00oOOo].replace(" ", "")

    if (len(Ii1IIi) != 0 and Ii1IIi[0] == "="): return (clause)

    Ii1IIi = Ii1IIi.replace(" ", "")
    Ii1IIi = Ii1IIi.replace("\t", "")
    Ii1IIi = lisp_hash_password(Ii1IIi)
    clause = clause[0:O0OO0O] + " =" + Ii1IIi + clause[o0o0O0O00oOOo::]

    return (clause)
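
# Illustration only (not part of the original source): a minimal, self-contained
# sketch of the same "hash the plaintext after 'password ='" idea implemented by
# lisp_replace_password_in_clause() above. It uses a SHA-256 stand-in because the
# real lisp_hash_password() is not defined in this file; the clause text is made up.
def _example_replace_password_in_clause():
    import hashlib

    def _hash_password(pw):
        # Stand-in for lisp_hash_password(); the real hash format may differ.
        return hashlib.sha256(pw.encode()).hexdigest()

    clause = "lisp user-account {\n    username = admin\n    password = secret\n}\n"
    key = "password ="
    start = clause.find(key) + len(key)
    end = clause[start:].find("\n") + start
    plaintext = clause[start:end].strip()
    if not plaintext.startswith("="):
        # Values already stored as "=<hash>" are left alone, mirroring the early
        # return in lisp_replace_password_in_clause().
        clause = clause[:start] + " =" + _hash_password(plaintext) + clause[end:]
    return clause
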
def lisp_user_account_command(clause):

    oOOI11I = clause.split(" ")
    oOOI11I = oOOI11I[0] + " " + oOOI11I[1]

    I1IiIiiIiIII = lisp_core_commands["lisp user-account"]
    I1IiIiiIiIII = I1IiIiiIiIII[1]

    o0O0O0ooo0oOO, iI11, I1IiIiiIiIII = lisp_syntax_check(I1IiIiiIiIII, clause)

    if (o0O0O0ooo0oOO == False):
        iI11 = lisp_replace_password_in_clause(iI11, "password =")

    return (iI11)

def lisp_rtr_list_command(clause):
    oOOI11I = clause.split(" ")
    oOOI11I = oOOI11I[0] + " " + oOOI11I[1]

    I1IiIiiIiIII = lisp_core_commands["lisp rtr-list"]
    I1IiIiiIiIII = I1IiIiiIiIII[1]

    o0O0O0ooo0oOO, iI11, I1IiIiiIiIII = lisp_syntax_check(I1IiIiiIiIII, clause)

    if (o0O0O0ooo0oOO): return (iI11)
+ eY**2 + fXY
BiQuadratic solution requires a minimum of 6 data pairs
QuadraticLinear Table Equation: Output = a + bX + cX**2 + dY + eXY + fX**2Y
QuadraticLinear solution requires a minimum of 6 data pairs
"""
_schema = {'extensible-fields': OrderedDict([(u'x value',
{'name': u'X Value',
'pyname': u'x_value',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'y value',
{'name': u'Y Value',
'pyname': u'y_value',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'output value',
{'name': u'Output Value',
'pyname': u'output_value',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real'})]),
'fields': OrderedDict([(u'name',
{'name': u'Name',
'pyname': u'name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'alpha'}),
(u'curve type',
{'name': u'Curve Type',
'pyname': u'curve_type',
'required-field': True,
'autosizable': False,
'accepted-values': [u'BiQuadratic',
u'QuadraticLinear'],
'autocalculatable': False,
'type': 'alpha'}),
(u'interpolation method',
{'name': u'Interpolation Method',
'pyname': u'interpolation_method',
'default': u'LagrangeInterpolationLinearExtrapolation',
'required-field': False,
'autosizable': False,
'accepted-values': [u'LinearInterpolationOfTable',
u'EvaluateCurveToLimits',
u'LagrangeInterpolationLinearExtrapolation'],
'autocalculatable': False,
'type': 'alpha'}),
(u'minimum value of x',
{'name': u'Minimum Value of X',
'pyname': u'minimum_value_of_x',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'maximum value of x',
{'name': u'Maximum Value of X',
'pyname': u'maximum_value_of_x',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'minimum value of y',
{'name': u'Minimum Value of Y',
'pyname': u'minimum_value_of_y',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'maximum value of y',
{'name': u'Maximum Value of Y',
'pyname': u'maximum_value_of_y',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'minimum table output',
{'name': u'Minimum Table Output',
'pyname': u'minimum_table_output',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'maximum table output',
{'name': u'Maximum Table Output',
'pyname': u'maximum_table_output',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'input unit type for x',
{'name': u'Input Unit Type for X',
'pyname': u'input_unit_type_for_x',
'default': u'Dimensionless',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Dimensionless',
u'Temperature',
u'VolumetricFlow',
u'MassFlow',
u'Power',
u'Distance'],
'autocalculatable': False,
'type': 'alpha'}),
(u'input unit type for y',
{'name': u'Input Unit Type for Y',
'pyname': u'input_unit_type_for_y',
'default': u'Dimensionless',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Dimensionless',
u'Temperature',
u'VolumetricFlow',
u'MassFlow',
u'Power',
u'Distance'],
'autocalculatable': False,
'type': 'alpha'}),
(u'output unit type',
{'name': u'Output Unit Type',
'pyname': u'output_unit_type',
'default': u'Dimensionless',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Dimensionless',
u'Capacity',
u'Power'],
'autocalculatable': False,
'type': 'alpha'}),
(u'normalization reference',
{'name': u'Normalization Reference',
'pyname': u'normalization_reference',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'})]),
'format': None,
'group': u'Performance Tables',
'min-fields': 22,
'name': u'Table:TwoIndependentVariables',
'pyname': u'TableTwoIndependentVariables',
'required-object': False,
'unique-object': False}
@property
def name(self):
"""field `Name`
Args:
value (str): value for IDD Field `Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `name` or None if not set
"""
return self["Name"]
@name.setter
def name(self, value=None):
"""Corresponds to IDD field `Name`"""
self["Name"] = value
@property
def curve_type(self):
"""field `Curve Type`
Args:
value (str): value for IDD Field `Curve Type`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `curve_type` or None if not set
"""
return self["Curve Type"]
@curve_type.setter
def curve_type(self, value=None):
"""Corresponds to IDD field `Curve Type`"""
self["Curve Type"] = value
@property
def interpolation_method(self):
"""field `Interpolation Method`
| Default value: LagrangeInterpolationLinearExtrapolation
Args:
value (str): value for IDD Field `Interpolation Method`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `interpolation_method` or None if not set
"""
return self["Interpolation Method"]
@interpolation_method.setter
def interpolation_method(
self,
value="LagrangeInterpolationLinearExtrapolation"):
"""Corresponds to IDD field `Interpolation Method`"""
self["Interpolation Method"] = value
@property
def minimum_value_of_x(self):
"""field `Minimum Value of X`
| Units are based on field `A4`
Args:
value (float): value for IDD Field `Minimum Value of X`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_value_of_x` or None if not set
"""
return self["Minimum Value of X"]
@minimum_value_of_x.setter
def minimum_value_of_x(self, value=None):
"""Corresponds to IDD field `Minimum Value of X`"""
self["Minimum Value of X"] = value
@property
def maximum_value_of_x(self):
"""field `Maximum Value of X`
| Units are based on field `A4`
Args:
value (float): value for IDD Field `Maximum Value of X`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_value_of_x` or None if not set
"""
return self["Maximum Value of X"]
@maximum_value_of_x.setter
def maximum_value_of_x(self, value=None):
"""Corresponds to IDD field `Maximum Value of X`"""
self["Maximum Value of X"] = value
@property
def minimum_value_of_y(self):
"""field `Minimum Value of Y`
| Units are based on field `A5`
Args:
value (float): value for IDD Field `Minimum Value of Y`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_value_of_y` or None if not set
"""
return self["Minimum Value of Y"]
@minimum_value_of_y.setter
def minimum_value_of_y(self, value=None):
"""Corresponds to IDD field `Minimum Value of Y`"""
self["Minimum Value of Y"] = value
@property
def maximum_value_of_y(self):
"""field `Maximum Value of Y`
| Units are based on field `A5`
Args:
value (float): value for IDD Field `Maximum Value of Y`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_value_of_y` or None if not set
"""
return self["Maximum Value of Y"]
@maximum_value_of_y.setter
def maximum_value_of_y(self, value=None):
"""Corresponds to IDD field `Maximum Value of Y`"""
self["Maximum Value of Y"] = value
@property
def minimum_table_output(self):
"""field `Minimum Table Output`
| Specify the minimum value calculated by this table lookup object
| Units are based on field `A6`
Args:
value (float): value for IDD Field `Minimum Table Output`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_table_output` or None if not set
"""
return self["Minimum Table Output"]
@minimum_table_output.setter
def minimum_table_output(self, value=None):
"""Corresponds to IDD field `Minimum Table Output`"""
self["Minimum Table Output"] = value
@property
def maximum_table_output(self):
"""field `Maximum Table Output`
| Specify the maximum value calculated by this table lookup object
| Units are based on field `A6`
Args:
value (float): value for IDD Field `Maximum Table Output`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_table_output` or None if not set
"""
return self["Maximum Table Output"]
@maximum_table_output.setter
def maximum_table_output(self, value=None):
"""Corresponds to IDD field `Maximum Table Output`"""
self["Maximum Table Output"] = value
@property
def input_unit_type_for_x(self):
"""field `Input Unit Type for X`
| Default value: Dimensionless
Args:
value (str): value for IDD Field `Input Unit Type for X`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `input_unit_type_for_x` or None if not set
"""
return self["Input Unit Type for X"]
@input_unit_type_for_x.setter
def input_unit_type_for_x(self, value="Dimensionless"):
"""Corresponds to IDD field `Input Unit Type for X`"""
self["Input Unit Type for X"] = value
@property
def input_unit_type_for_y(self):
"""field `Input Unit Type for Y`
| Default value: Dimensionless
Args:
value (str): value for IDD Field `Input Unit Type for Y`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `input_unit_type_for_y` or None if not set
"""
return self["Input Unit Type for Y"]
@input_unit_type_for_y.setter
def input_unit_type_for_y(self, value="Dimensionless"):
"""Corresponds to IDD field `Input Unit Type for Y`"""
self["Input Unit Type for Y"] = value
@property
def output_unit_type(self):
"""field `Output Unit Type`
| Default value: Dimensionless
Args:
value (str): value for IDD Field `Output Unit Type`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `output_unit_type` or None if not set
"""
return self["Output Unit Type"]
@output_unit_type.setter
def output_unit_type(self, value="Dimensionless"):
"""Corresponds to IDD field `Output Unit Type`"""
self["Output Unit Type"] = value
@property
def normalization_reference(self):
"""field `Normalization Reference`
| This field is used to normalize the following output data.
| The minimum and maximum table output fields are also normalized.
| If this field is blank or 1, the table data presented below will be used.
Args:
value (float): value for IDD Field `Normalization Reference`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `normalization_reference` or None if not set
"""
return self["Normalization Reference"]
@normalization_reference.setter
def normalization_reference(self, value=None):
"""Corresponds to IDD field `Normalization Reference`"""
self["Normalization Reference"] = value
def add_extensible(self,
x_value=None,
y_value=None,
output_value=None,
):
"""Add values for extensible fields.
Args:
x_value (float): value for IDD Field `X Value`
                if `value` is None it will not be checked
stoptimestamp timestamp without time zone,
startremarks character varying(256),
stopremarks character varying(256),
dbupdatedtimestamp timestamp without time zone NOT NULL,
logfile character varying(256) NOT NULL);
"""))
op.execute(textwrap.dedent("""
CREATE TABLE tier2_switch_history (
lctn character varying(25) NOT NULL,
state character varying(1) NOT NULL,
sernum character varying(20),
type character varying(20),
owner character varying(1) NOT NULL,
dbupdatedtimestamp timestamp without time zone NOT NULL,
lastchgtimestamp timestamp without time zone NOT NULL);
"""))
op.execute(textwrap.dedent("""
CREATE TABLE tier2_wlmreservation_history (
reservationname character varying(35) NOT NULL,
users character varying(100) NOT NULL,
nodes character varying(100),
starttimestamp timestamp without time zone NOT NULL,
endtimestamp timestamp without time zone,
deletedtimestamp timestamp without time zone,
lastchgtimestamp timestamp without time zone NOT NULL,
dbupdatedtimestamp timestamp without time zone NOT NULL,
lastchgadaptertype character varying(20) NOT NULL,
lastchgworkitemid bigint NOT NULL);
"""))
op.execute(textwrap.dedent("""
CREATE TABLE tier2_workitem_history (
queue character varying(20) NOT NULL,
workingadaptertype character varying(20) NOT NULL,
id bigint NOT NULL,
worktobedone character varying(40) NOT NULL,
parameters character varying(15000),
notifywhenfinished character varying(1) NOT NULL,
state character varying(1) NOT NULL,
requestingworkitemid bigint NOT NULL,
requestingadaptertype character varying(20) NOT NULL,
workingadapterid bigint,
workingresults character varying(15000),
results character varying(262144),
starttimestamp timestamp without time zone NOT NULL,
dbupdatedtimestamp timestamp without time zone NOT NULL,
endtimestamp timestamp without time zone,
rowinsertedintohistory character varying(1) NOT NULL);
"""))
op.execute(textwrap.dedent("""
ALTER TABLE ONLY tier2_aggregatedenvdata
ADD CONSTRAINT tier2_aggregatedenvdata_pkey PRIMARY KEY (lctn, type, "timestamp");
"""))
op.execute(textwrap.dedent("""
ALTER TABLE ONLY tier2_diag
ADD CONSTRAINT tier2_diag_pkey PRIMARY KEY (diagid);
"""))
op.execute(textwrap.dedent("""
ALTER TABLE ONLY tier2_inventorysnapshot
ADD CONSTRAINT tier2_inventorysnapshot_pkey PRIMARY KEY (lctn, snapshottimestamp);
"""))
op.execute(textwrap.dedent("""
ALTER TABLE ONLY tier2_rasmetadata
ADD CONSTRAINT tier2_rasmetadata_pkey PRIMARY KEY (eventtype);
"""))
op.execute(textwrap.dedent("""
CREATE INDEX aggregatedenvdata_timelctn ON tier2_aggregatedenvdata USING btree ("timestamp", lctn);
"""))
op.execute(textwrap.dedent("""
CREATE INDEX computenode_dbupdatedtime ON tier2_computenode_history USING btree (dbupdatedtimestamp);
"""))
op.execute(textwrap.dedent("""
CREATE INDEX computenode_lastchgtimelctn ON tier2_computenode_history USING btree (lastchgtimestamp, lctn);
"""))
op.execute(textwrap.dedent("""
CREATE INDEX computenode_seqnumdbupdatedtime ON tier2_computenode_history USING btree (sequencenumber, dbupdatedtimestamp);
"""))
op.execute(textwrap.dedent("""
CREATE INDEX diag_endtimediagid ON tier2_diag USING btree (endtimestamp, diagid);
"""))
op.execute(textwrap.dedent("""
CREATE INDEX diag_startendtimediagid ON tier2_diag USING btree (starttimestamp, endtimestamp, diagid);
"""))
op.execute(textwrap.dedent("""
CREATE INDEX jobhistory_dbupdatedtime ON tier2_job_history USING btree (dbupdatedtimestamp);
"""))
op.execute(textwrap.dedent("""
CREATE INDEX jobhistory_lastchgtime ON tier2_job_history USING btree (lastchgtimestamp);
"""))
op.execute(textwrap.dedent("""
CREATE INDEX rasevent_dbupdatedtime ON tier2_rasevent USING btree (dbupdatedtimestamp);
"""))
op.execute(textwrap.dedent("""
CREATE INDEX rasevent_dbupdatedtimeeventtypeid ON tier2_rasevent USING btree (dbupdatedtimestamp desc, eventtype, id);
"""))
op.execute(textwrap.dedent("""
CREATE INDEX rasmetadata_eventtype ON tier2_rasmetadata USING btree (eventtype);
"""))
def create_functions():
op.execute(textwrap.dedent("""
CREATE OR REPLACE FUNCTION aggregatedenvdatalistattime(p_start_time timestamp without time zone, p_end_time timestamp without time zone) RETURNS SETOF tier2_aggregatedenvdata
LANGUAGE plpgsql
AS $$
BEGIN
if (p_start_time is not null) then
return query
select * from Tier2_AggregatedEnvData
where Timestamp <= coalesce(p_end_time, current_timestamp) and
Timestamp >= p_start_time
order by Lctn, Timestamp desc LIMIT 200;
else
return query
select * from Tier2_AggregatedEnvData
where Timestamp <= coalesce(p_end_time, current_timestamp)
order by Timestamp desc LIMIT 200;
end if;
return;
END
$$;
"""))
op.execute(textwrap.dedent("""
CREATE OR REPLACE FUNCTION aggregatedenvdatastore(p_location character varying, p_timestamp timestamp without time zone, p_type character varying, p_max_value double precision, p_min_value double precision, p_avg_value double precision, p_adapter_type character varying, p_work_item_id bigint) RETURNS void
LANGUAGE sql
AS $$
insert into Tier2_AggregatedEnvData(
Lctn,
Timestamp,
Type,
MaximumValue,
MinimumValue,
AverageValue,
AdapterType,
WorkItemId)
values(
p_location,
p_timestamp,
p_type,
p_max_value,
p_min_value,
p_avg_value,
p_adapter_type,
p_work_item_id);
$$;
"""))
op.execute(textwrap.dedent("""
CREATE OR REPLACE FUNCTION computenodehistorylistofstateattime(p_start_time timestamp without time zone, p_end_time timestamp without time zone) RETURNS TABLE(lctn character varying, state character varying)
LANGUAGE plpgsql
AS $$
DECLARE
prev_lctn varchar(20) := '';
BEGIN
if (p_end_time is null) then
for Lctn, State in
select CN.Lctn, CN.State from Tier2_ComputeNode_History CN
order by Lctn, LastChgTimestamp desc
loop
if (Lctn <> prev_lctn) then
prev_lctn := Lctn;
return next;
end if;
end loop;
elsif (p_start_time is null) then
for Lctn, State in
select CN.Lctn, CN.State from Tier2_ComputeNode_History CN
where LastChgTimestamp <= p_end_time
order by Lctn, LastChgTimestamp desc
loop
if (Lctn <> prev_lctn) then
prev_lctn := Lctn;
return next;
end if;
end loop;
else
for Lctn, State in
select CN.Lctn, CN.State from Tier2_ComputeNode_History CN
where LastChgTimestamp <= p_end_time and
LastChgTimestamp >= p_start_time
order by Lctn, LastChgTimestamp desc
loop
if (Lctn <> prev_lctn) then
prev_lctn := Lctn;
return next;
end if;
end loop;
end if;
return;
END
$$;
"""))
op.execute(textwrap.dedent("""
CREATE OR REPLACE FUNCTION computenodehistoryoldesttimestamp() RETURNS timestamp without time zone
LANGUAGE sql
AS $$
select min(LastChgTimestamp) from Tier2_ComputeNode_History;
$$;
"""))
op.execute(textwrap.dedent("""
CREATE OR REPLACE FUNCTION computenodeinventorylist(p_sequence_num integer) RETURNS SETOF tier2_computenode_history
LANGUAGE sql
AS $$
select DISTINCT ON (sequencenumber) lctn, sequencenumber, state, hostname, sernum, bootimageid, environment, ipaddr, macaddr, type, bmcipaddr,
bmcmacaddr, bmchostname, dbupdatedtimestamp, lastchgtimestamp, lastchgadaptertype, lastchgworkitemid, owner, inventoryinfo
from tier2_computenode_history where sequencenumber > p_sequence_num order by sequencenumber, dbupdatedtimestamp desc limit 500;
$$;
"""))
op.execute(textwrap.dedent("""
CREATE OR REPLACE FUNCTION dbchgtimestamps() RETURNS TABLE(key character varying, value timestamp without time zone)
LANGUAGE plpgsql
AS $$
BEGIN
return query
select 'Node_Max_LastChgTimestamp'::varchar,
max(LastChgTimestamp)
from Tier2_ComputeNode_History;
return query
select 'Node_Max_DbUpdatedTimestamp'::varchar,
max(DbUpdatedTimestamp)
from Tier2_ComputeNode_History;
return query
select 'Ras_Max_DbUpdatedTimestamp'::varchar,
max(DbUpdatedTimestamp)
from Tier2_RasEvent;
return query
select 'Job_Max_LastChgTimestamp'::varchar,
max(LastChgTimestamp)
from Tier2_Job_History;
return query
select 'Job_Max_DbUpdatedTimestamp'::varchar,
max(DbUpdatedTimestamp)
from Tier2_Job_History;
return query
select 'Reservation_Max_DbUpdatedTimestamp'::varchar,
max(DbUpdatedTimestamp)
from Tier2_wlmreservation_history;
return query
select 'Env_Max_Timestamp'::varchar,
max("timestamp")
from Tier2_aggregatedenvdata;
return query
select 'Inv_Max_Timestamp'::varchar,
max(dbupdatedtimestamp)
from Tier2_computenode_history;
return query
select 'InvSS_Max_Timestamp'::varchar,
max(snapshottimestamp)
from Tier2_inventorysnapshot;
return query
select 'Diags_Max_Timestamp'::varchar,
max(starttimestamp)
from Tier2_diag;
return query
select 'Replacement_Max_Timestamp'::varchar,
max(dbupdatedtimestamp)
from Tier2_replacement_history;
return;
END
$$;
"""))
op.execute(textwrap.dedent("""
CREATE OR REPLACE FUNCTION diaglistofactivediagsattime(p_end_time timestamp without time zone) RETURNS SETOF tier2_diag
LANGUAGE sql
AS $$
select * from Tier2_Diag
where StartTimestamp <= coalesce(p_end_time, current_timestamp) and
(EndTimestamp is null or
EndTimestamp > coalesce(p_end_time, current_timestamp))
order by DiagId desc;
$$;
"""))
op.execute(textwrap.dedent("""
CREATE OR REPLACE FUNCTION diaglistofnonactivediagsattime(p_end_time timestamp without time zone) RETURNS SETOF tier2_diag
LANGUAGE sql
AS $$
select * from Tier2_Diag
where EndTimestamp <= coalesce(p_end_time, current_timestamp)
order by DiagId desc;
$$;
"""))
op.execute(textwrap.dedent("""
CREATE OR REPLACE FUNCTION getlistnodelctns(p_job_nodes bytea) RETURNS character varying
LANGUAGE plpgsql
AS $$
DECLARE
v_lctn varchar;
v_list varchar;
v_first boolean := true;
v_num_bits integer;
v_bit_index integer;
BEGIN
v_num_bits := length(p_job_nodes) * 8;
for i in 0 .. v_num_bits - 1 loop
v_bit_index := v_num_bits - 1 - i;
if get_bit(p_job_nodes, v_bit_index) = 1 then
select Lctn into v_lctn
from Tier2_ComputeNode_History
where SequenceNumber = i
order by DbUpdatedTimestamp
limit 1;
if v_lctn is null then
raise exception 'GetListNodeLctns - can''t find corresponding Lctn string for node sequence number = %!', i;
end if;
if v_first then
v_list := v_lctn;
v_first := false;
else
v_list := v_list || ' ' || v_lctn;
end if;
end if;
end loop;
return v_list;
END
$$;
"""))
op.execute(textwrap.dedent("""
CREATE OR REPLACE FUNCTION getmanifestcontent(OUT manifestcontent character varying) RETURNS character varying
LANGUAGE sql
AS $$
select manifestcontent from tier2_machine_history order by dbupdatedtimestamp desc limit 1;
$$;
"""))
op.execute(textwrap.dedent("""
CREATE OR REPLACE FUNCTION insertorupdaterasmetadata(p_eventtype character varying, p_descriptivename character varying, p_severity character varying, p_category character varying, p_component character varying, p_controloperation character varying, p_msg character varying, p_dbupdatedtimestamp timestamp without time zone) RETURNS void
LANGUAGE sql
AS $$
insert into Tier2_RasMetaData(
EventType,
DescriptiveName,
Severity,
Category,
Component,
ControlOperation,
Msg,
DbUpdatedTimestamp)
values(
p_eventtype,
p_descriptivename,
p_severity,
p_category,
p_component,
p_controloperation,
p_msg,
p_dbupdatedtimestamp)
on conflict(EventType) do update set
DescriptiveName = p_descriptivename,
Severity = p_severity,
Category = p_category,
Component = p_component,
ControlOperation = p_controloperation,
Msg = p_msg,
DbUpdatedTimestamp = p_dbupdatedtimestamp;
$$;
"""))
op.execute(textwrap.dedent("""
CREATE OR REPLACE FUNCTION jobhistorylistofactivejobsattime(p_end_time timestamp without time zone) RETURNS SETOF jobactivetype
LANGUAGE plpgsql
AS $$
DECLARE
v_prev_job_id varchar := '';
v_node_list varchar;
v_job Tier2_Job_History;
v_rec JobActiveType%rowtype;
BEGIN
for v_job in
select *
from Tier2_Job_History
where LastChgTimestamp <= coalesce(p_end_time, current_timestamp)
order by JobId desc,
LastChgTimestamp desc,
DbUpdatedTimestamp desc
loop
if v_job.JobId <> v_prev_job_id then
v_prev_job_id := v_job.JobId;
if v_job.State = 'S' then
v_rec.JobId := v_job.JobId;
v_rec.JobName := v_job.JobName;
v_rec.State := v_job.State;
v_rec.Bsn := v_job.Bsn;
v_rec.UserName := v_job.UserName;
v_rec.StartTimestamp := v_job.StartTimestamp;
v_rec.NumNodes := v_job.NumNodes;
v_rec.Nodes := GetListNodeLctns(v_job.Nodes);
v_rec.Wlmjobstate := v_job.Wlmjobstate;
return next v_rec;
end if;
end if;
end loop;
return;
END
$$;
"""))
op.execute(textwrap.dedent("""
CREATE OR REPLACE FUNCTION jobhistorylistofnonactivejobsattime(p_end_time timestamp without time zone) RETURNS SETOF jobnonactivetype
LANGUAGE plpgsql
AS $$
DECLARE
v_prev_job_id varchar := '';
v_job Tier2_Job_History;
v_rec JobNonActiveType%rowtype;
BEGIN
for v_job in
select *
from Tier2_Job_History
            where LastChgTimestamp <= coalesce(p_end_time,
Bit direction
[0,1], # top-left position contains white cell
[0,1], # top-center position contains white cell
[0,1], # top-right position contains white cell
[0,1], # mid-left position contains white cell
[0,1], # mid-center position contains white cell
[0,1], # mid-right position contains white cell
[0,1], # bottom-left position contains white cell
[0,1], # bottom-center position contains white cell
[0,1], # bottom-right position contains white cell
range(len(self.y_classes)+3), # Target color
range(len(self.y_classes)+3), # Action color
[0,1,2,3], # Target layer
[0,1,2,3]] # Action layer
i = 0
for y in range(self.size[0]):
for x in range(self.size[1]):
                # Mutate this cell only if the self.mutate_proba event is picked.
                chance_to_mutate = random.random() < self.mutate_proba
                if chance_to_mutate:
                    child.world[y][x] = random.choice(mutate_map[x])
                i += 1
        child_len = child.world.shape[0] * child.world.shape[1]
        assert i == child_len,\
            'All data points were not iterated over. Got: {} Expected: {}'\
            .format(i, child_len)
return child
def birth_child(self, child):
'''
Add a child to the next population generation.
'''
self.next_pop.append(child)
return self
def update_pop(self):
'''
Replace the current pop with nextGen pop.
'''
self.current_pop = self.next_pop
self.next_pop = []
return self
def run(self):
if not self.intitialized:
self.initialize()
while True:
self.selection()
self.reproduction()
# Save self.best_child
self.best_child = self.current_pop[np.argmax(
self.current_pop_fitness)]
self.generation_list.append(self.best_child)
# Get fitness of best child
gen = len(self.generation_list)
# Gen is used to determine bag for sampling
random.seed(gen)
bag_index = random.choice(range(len(self.bags)))
selected_bag = self.bags[bag_index]
f = Fitness(self.best_child,
json_data=self.json_data,
gen=gen,
data=selected_bag,
y_classes=self.y_classes,
fitness_func=self.fitness_func)
f.run()
best_child_fitness = round(f.fitness_value, 2)
self.generation_fitness.append(best_child_fitness)
if len(self.generation_list) % 1 == 0:
if not self.silent:
print('Generation: {} Fitness: {}'.format(
len(self.generation_list), best_child_fitness))
if self.auto_plot:
# Plays through data in a pygame window
Viewer.Playback(f.fitness_data_array,
text='Generation: {} Fitness: {}'.\
format(len(self.generation_list),best_child_fitness),
dna=f.world.world).run()
# Update to the next generation
self.update_pop()
# If the max_epochs is met, then stop.
if self.max_epochs != -1:
if len(self.generation_list) > self.max_epochs:
break
return self
class ProbSpace():
'''
Builds a LIST of each LIST of problem spaces in the
shuffle split bag to be used by cellular automata.
Expects:
X np.ndarray int32|int64|float64 training data features
y np.ndarray int64|int32 training data labels
training BOOL (default: True) if False, return single space
quantize_size TUPLE x,y dim of the problem space
class_weight STR "auto" preserves strafication
of classes in population
splits
'''
def __init__(self,
X=np.array([]),
y=np.array([]),
training=True,
quantize_size=(15,15), # INHERIT THIS ONCE FIT
class_weight='auto',
splits=None):
assert isinstance(X, np.ndarray),\
'Expected X type np.array, got: {}'.format(type(X))
assert X.shape[0] >= 1, \
'Expected X to have at least 1 sample.'
assert training in [True, False], \
'Expected training to be type BOOL'
if X.dtype not in ['int32', 'int64', 'float32', 'float64']:
raise TypeError('X is incorrect dtype')
assert isinstance(quantize_size,tuple),\
'Expected quantize_size to be tuple. e.g. (15,15)'
assert len(quantize_size) == 2,\
'Expected quantize_size to have len 2, got: {}'\
.format(len(quantize_size))
if training:
assert isinstance(y, np.ndarray), \
                'Expected y type np.ndarray, got: {}'.format(type(y))
assert y.shape[0] >= 1, 'y was an empty array'
assert y.shape[1] == 1, 'y must use the shape ({},1)'.format(y.shape[1])
if y.dtype not in ['int32', 'int64']:
raise TypeError('y must be type INT, got TYPE {}'.format(y.dtype))
assert len(np.unique(y)) > 1, \
'There was a single class in y. Expected more than 1, ({})' \
.format(np.unique(y))
assert class_weight in ['balanced', 'auto'], \
'Expected class weight to be in ["balanced","auto"], got {}' \
.format(class_weight)
assert isinstance(splits, int), \
'Expected splits INT to be set, got: {}'.format(splits)
assert splits > 0, \
'Expected number of splits to be greater than 0'
assert X.shape[0] == y.shape[0], \
'X and y have incompatible shapes {},{}'.format(X.shape, y.shape)
self.X = X
self.y = y
self.training = training
self.quantize_size = quantize_size
self.class_weight = class_weight
self.splits = splits
def generate_space(self,
X=np.array([]),
y=None):
'''
Expects:
X np.ndarray a single X sample
y is ignored
Returns a LIST of problem spaces for all X samples.
'''
# X,y are assumed to have a shape of (,1)
assert len(X.shape) == 1,\
'Expected single sample of X, got dimensions {}'.format(X.shape)
numFeatures = X.shape[0]
width = (numFeatures * 4) + 3
# Initialize 2d space
d = [np.zeros((width,self.quantize_size[1], 3)) for i in range(4)]
# Set starting whites
# Layer 0
for i in range(numFeatures):
xPos = int(i * 4)-1
d[0][3+int(i *4)-1][0] = (255, 255, 255)
d[0][3+int(i * 4)][0] = (255, 255, 255)
d[0][3 + int(i * 4)+1][0] = (255, 255, 255)
d[0][3+int(i * 4) - 1][-1] = (255, 255, 255)
d[0][3+int(i * 4)][-1] = (255, 255, 255)
d[0][3 + int(i * 4)+1][-1] = (255, 255, 255)
d[0][3 + int(i * 4) - 1][-2] = (255, 255, 255)
d[0][3 + int(i * 4)][-2] = (255, 255, 255)
d[0][3 + int(i * 4) + 1][-2] = (255, 255, 255)
# Layer 1
# for i in range(7):
# for ii in range(5):
# d[1][0 + i][0 + ii] = (255, 255, 255)
for i in range(numFeatures):
d[1][3 + int(i * 4) - 1][1] = (255, 255, 255)
d[1][3 + int(i * 4)][1] = (255, 255, 255)
d[1][3 + int(i * 4) + 1][1] = (255, 255, 255)
d[1][3 + int(i * 4) - 1][0] = (255, 255, 255)
d[1][3 + int(i * 4)][0] = (255, 255, 255)
d[1][3 + int(i * 4) + 1][0] = (255, 255, 255)
d[1][3 + int(i * 4) - 1][-1] = (255, 255, 255)
d[1][3 + int(i * 4)][-1] = (255, 255, 255)
d[1][3 + int(i * 4) + 1][-1] = (255, 255, 255)
# Layer 2
for i in range(4):
for ii in range(5):
d[2][0 + i][0 + ii] = (255, 255, 255)
# Layer 3
for i in range(5):
for ii in range(7):
d[3][0 + i][0 + ii] = (255, 255, 255)
# Assert that there are 4 layers
assert len(d) == 4, 'Expected 4 layers in d'
# For each feature, attempt to fill layer 0
for i in range(numFeatures):
xPos = int(i * 4)
# If value is outside problem space range (i.e.
# value was not seen in training data), set to
# max value of y-axis self.quantize_size.
if X[i] > (self.quantize_size[1] - 3):
d[0][xPos+3,2:int(self.quantize_size[1])] = (0, 255, 0)
# or min value
elif X[i] < 0:
d[0][xPos+3,0:3] = (0, 255, 0)
else:
d[0][xPos+3,3:int(3+X[i])] = (0, 255, 0)
return d
def stratified_training_bags(self,
X=np.array([]),
y=np.array([])):
'''
Creates bags of training data which transform X,y into
the expected {'space','label'} format used in the Fitness()
and CellularAutomata() classes.
Works best when there are at least 100 samples per 2 classes
Expects:
X np.ndarray int32|int64|float64 training data features
y np.ndarray int64|int32 training data labels
Returns:
bags LIST has a shape (max_epochs, X.shape[0],2)
bags format:
[bag_1 LIST, bag_2 LIST, ... bag_n LIST]
bag_n format:
{'space': np.ndarray, 'label': np.ndarray}
'''
bags = []
if self.splits:
for i in range(self.splits):
training_bag = []
if self.class_weight == 'auto':
# Use class weights to determine how samples are stratified.
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.30, random_state=i, stratify=y)
else:
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.30, random_state=i)
for ii in range(X_test.shape[0]):
space = self.generate_space(X_test[ii])
assert isinstance(space,list)
training_bag.append({'space':space,
'label':y_test[ii]})
bags.append(training_bag)
assert bags, 'training bags were empty'
else:
for i in range(X.shape[0]):
space = self.generate_space(X[i])
bags.append({'space':space,'label':y})
bags = [bags]
if not self.training:
assert bags, 'space was not generated for X'
return bags
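
# Illustration only (not part of the original source): a minimal sketch of building
# training bags with ProbSpace, assuming numpy and scikit-learn's train_test_split are
# importable as in the rest of this module. Shapes follow the asserts in __init__ above:
# X is 2-D float data and y is an integer array of shape (n_samples, 1) with >= 2 classes.
def _example_probspace_bags():
    import numpy as np

    rng = np.random.RandomState(0)
    X = rng.rand(100, 3)                  # 100 samples, 3 features
    y = rng.randint(0, 2, size=(100, 1))  # two classes, shape (100, 1)

    space = ProbSpace(X=X, y=y, training=True,
                      quantize_size=(15, 15),
                      class_weight='auto',
                      splits=5)
    bags = space.stratified_training_bags(X=X, y=y)
    # bags is a list of 5 bags; each bag is a list of {'space': ..., 'label': ...} dicts.
    return bags
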
class Fitness():
'''
    Defines a viewer object that is able to move through the world.
Expects:
world np.ndarray Containing the DNA of the current child
json_data OBJECT Containing some of the metadata used in model (e.g. color maps)
gen INT The generation of the current child
data LIST A LIST of samples in the
DICT form {'space',LIST,'label',np.ndarray}
fitness_func STR accuracy | accuracy_proba for determining fitness
'''
def __init__(self, world,
json_data=None,
gen=None,
data=[],
y_classes=[],
fitness_func='accuracy'):
try:
self.world = copy.deepcopy(world)
except NotImplementedError:
self.world = copy.copy(world)
assert json_data, 'Requires valid json_data for running CellularA()'
assert isinstance(data,list),\
' Expected data to be type LIST, got {}'.format(type(data))
assert data, 'data was empty'
assert 'space' in data[0]
assert y_classes, \
'y_classes must be set in Fitness()'
assert isinstance(fitness_func,str),\
            'Expected fitness_func to be type STR, got: {}'.format(type(fitness_func))
sol18 = Eq(f(x), C1 + C2 * x + C3 * x ** 2 + C4 * exp(-3 * x))
sol19 = Eq(f(x), C1 + C2 * x + C3 * exp(x * sqrt(2)) + C4 * exp(-x * sqrt(2)))
sol20 = Eq(f(x), (C1 + C2 * x) * exp(-3 * x) + (C3 + C4 * x) * exp(2 * x))
sol21 = Eq(
f(x),
C1 * exp(x / 2)
+ C2 * exp(-x)
+ C3 * exp(-x / 3)
+ C4 * exp(x * Rational(5, 6)),
)
sol22 = Eq(f(x), (C1 + C2 * x) * exp(-2 * x) + (C3 + C4 * x) * exp(2 * x))
sol23 = Eq(f(x), (C1 * sin(2 * x) + C2 * cos(2 * x)) * exp(x))
sol24 = Eq(
f(x), (C1 * sin(x * sqrt(3) / 2) + C2 * cos(x * sqrt(3) / 2)) * exp(x / 2)
)
sol25 = Eq(
f(x),
C1 * cos(x * sqrt(3))
+ C2 * sin(x * sqrt(3))
+ C3 * sin(x * sqrt(2))
+ C4 * cos(x * sqrt(2)),
)
sol26 = Eq(f(x), (C1 * sin(4 * x) + C2 * cos(4 * x)) * exp(2 * x))
sol27 = Eq(
f(x), (C1 + C2 * x) * sin(x * sqrt(2)) + (C3 + C4 * x) * cos(x * sqrt(2))
)
sol28 = Eq(
f(x),
(C1 * sin(x * sqrt(3)) + C2 * cos(x * sqrt(3))) * exp(x) + C3 * exp(-2 * x),
)
sol29 = Eq(f(x), C1 + C2 * sin(2 * x) + C3 * cos(2 * x) + C4 * x)
sol30 = Eq(f(x), C1 + (C2 + C3 * x) * sin(x) + (C4 + C5 * x) * cos(x))
sol31 = Eq(
f(x),
(C1 * sin(sqrt(3) * x / 2) + C2 * cos(sqrt(3) * x / 2)) / sqrt(exp(x))
+ (C3 * sin(sqrt(3) * x / 2) + C4 * cos(sqrt(3) * x / 2)) * sqrt(exp(x)),
)
sol32 = Eq(
f(x),
C1 * sin(x * sqrt(-sqrt(3) + 2))
+ C2 * sin(x * sqrt(sqrt(3) + 2))
+ C3 * cos(x * sqrt(-sqrt(3) + 2))
+ C4 * cos(x * sqrt(sqrt(3) + 2)),
)
sol1s = constant_renumber(sol1)
sol2s = constant_renumber(sol2)
sol3s = constant_renumber(sol3)
sol4s = constant_renumber(sol4)
sol5s = constant_renumber(sol5)
sol6s = constant_renumber(sol6)
sol7s = constant_renumber(sol7)
sol8s = constant_renumber(sol8)
sol9s = constant_renumber(sol9)
sol10s = constant_renumber(sol10)
sol11s = constant_renumber(sol11)
sol12s = constant_renumber(sol12)
sol13s = constant_renumber(sol13)
sol14s = constant_renumber(sol14)
sol15s = constant_renumber(sol15)
sol16s = constant_renumber(sol16)
sol17s = constant_renumber(sol17)
sol18s = constant_renumber(sol18)
sol19s = constant_renumber(sol19)
sol20s = constant_renumber(sol20)
sol21s = constant_renumber(sol21)
sol22s = constant_renumber(sol22)
sol23s = constant_renumber(sol23)
sol24s = constant_renumber(sol24)
sol25s = constant_renumber(sol25)
sol26s = constant_renumber(sol26)
sol27s = constant_renumber(sol27)
sol28s = constant_renumber(sol28)
sol29s = constant_renumber(sol29)
sol30s = constant_renumber(sol30)
assert dsolve(eq1) in (sol1, sol1s)
assert dsolve(eq2) in (sol2, sol2s)
assert dsolve(eq3) in (sol3, sol3s)
assert dsolve(eq4) in (sol4, sol4s)
assert dsolve(eq5) in (sol5, sol5s)
assert dsolve(eq6) in (sol6, sol6s)
assert dsolve(eq7) in (sol7, sol7s)
assert dsolve(eq8) in (sol8, sol8s)
assert dsolve(eq9) in (sol9, sol9s)
assert dsolve(eq10) in (sol10, sol10s)
assert dsolve(eq11) in (sol11, sol11s)
assert dsolve(eq12) in (sol12, sol12s)
assert dsolve(eq13) in (sol13, sol13s)
assert dsolve(eq14) in (sol14, sol14s)
assert dsolve(eq15) in (sol15, sol15s)
assert dsolve(eq16) in (sol16, sol16s)
assert dsolve(eq17) in (sol17, sol17s)
assert dsolve(eq18) in (sol18, sol18s)
assert dsolve(eq19) in (sol19, sol19s)
assert dsolve(eq20) in (sol20, sol20s)
assert dsolve(eq21) in (sol21, sol21s)
assert dsolve(eq22) in (sol22, sol22s)
assert dsolve(eq23) in (sol23, sol23s)
assert dsolve(eq24) in (sol24, sol24s)
assert dsolve(eq25) in (sol25, sol25s)
assert dsolve(eq26) in (sol26, sol26s)
assert dsolve(eq27) in (sol27, sol27s)
assert dsolve(eq28) in (sol28, sol28s)
assert dsolve(eq29) in (sol29, sol29s)
assert dsolve(eq30) in (sol30, sol30s)
assert dsolve(eq31) in (sol31,)
assert dsolve(eq32) in (sol32,)
assert checkodesol(eq1, sol1, order=2, solve_for_func=False)[0]
assert checkodesol(eq2, sol2, order=2, solve_for_func=False)[0]
assert checkodesol(eq3, sol3, order=2, solve_for_func=False)[0]
assert checkodesol(eq4, sol4, order=3, solve_for_func=False)[0]
assert checkodesol(eq5, sol5, order=2, solve_for_func=False)[0]
assert checkodesol(eq6, sol6, order=2, solve_for_func=False)[0]
assert checkodesol(eq7, sol7, order=3, solve_for_func=False)[0]
assert checkodesol(eq8, sol8, order=4, solve_for_func=False)[0]
assert checkodesol(eq9, sol9, order=4, solve_for_func=False)[0]
assert checkodesol(eq10, sol10, order=4, solve_for_func=False)[0]
assert checkodesol(eq11, sol11, order=2, solve_for_func=False)[0]
assert checkodesol(eq12, sol12, order=2, solve_for_func=False)[0]
assert checkodesol(eq13, sol13, order=4, solve_for_func=False)[0]
assert checkodesol(eq14, sol14, order=2, solve_for_func=False)[0]
assert checkodesol(eq15, sol15, order=3, solve_for_func=False)[0]
assert checkodesol(eq16, sol16, order=3, solve_for_func=False)[0]
assert checkodesol(eq17, sol17, order=2, solve_for_func=False)[0]
assert checkodesol(eq18, sol18, order=4, solve_for_func=False)[0]
assert checkodesol(eq19, sol19, order=4, solve_for_func=False)[0]
assert checkodesol(eq20, sol20, order=4, solve_for_func=False)[0]
assert checkodesol(eq21, sol21, order=4, solve_for_func=False)[0]
assert checkodesol(eq22, sol22, order=4, solve_for_func=False)[0]
assert checkodesol(eq23, sol23, order=2, solve_for_func=False)[0]
assert checkodesol(eq24, sol24, order=2, solve_for_func=False)[0]
assert checkodesol(eq25, sol25, order=4, solve_for_func=False)[0]
assert checkodesol(eq26, sol26, order=2, solve_for_func=False)[0]
assert checkodesol(eq27, sol27, order=4, solve_for_func=False)[0]
assert checkodesol(eq28, sol28, order=3, solve_for_func=False)[0]
assert checkodesol(eq29, sol29, order=4, solve_for_func=False)[0]
assert checkodesol(eq30, sol30, order=5, solve_for_func=False)[0]
assert checkodesol(eq31, sol31, order=4, solve_for_func=False)[0]
assert checkodesol(eq32, sol32, order=4, solve_for_func=False)[0]
# Issue #15237
eqn = Derivative(x * f(x), x, x, x)
hint = "nth_linear_constant_coeff_homogeneous"
raises(ValueError, lambda: dsolve(eqn, f(x), hint, prep=True))
raises(ValueError, lambda: dsolve(eqn, f(x), hint, prep=False))
def test_nth_linear_constant_coeff_homogeneous_rootof():
# One real root, two complex conjugate pairs
eq = f(x).diff(x, 5) + 11 * f(x).diff(x) - 2 * f(x)
r1, r2, r3, r4, r5 = [rootof(x ** 5 + 11 * x - 2, n) for n in range(5)]
sol = Eq(
f(x),
C5 * exp(r1 * x)
+ exp(re(r2) * x) * (C1 * sin(im(r2) * x) + C2 * cos(im(r2) * x))
+ exp(re(r4) * x) * (C3 * sin(im(r4) * x) + C4 * cos(im(r4) * x)),
)
assert dsolve(eq) == sol
# FIXME: assert checkodesol(eq, sol) == (True, [0]) # Hangs...
# Three real roots, one complex conjugate pair
eq = f(x).diff(x, 5) - 3 * f(x).diff(x) + f(x)
r1, r2, r3, r4, r5 = [rootof(x ** 5 - 3 * x + 1, n) for n in range(5)]
sol = Eq(
f(x),
C3 * exp(r1 * x)
+ C4 * exp(r2 * x)
+ C5 * exp(r3 * x)
+ exp(re(r4) * x) * (C1 * sin(im(r4) * x) + C2 * cos(im(r4) * x)),
)
assert dsolve(eq) == sol
# FIXME: assert checkodesol(eq, sol) == (True, [0]) # Hangs...
# Five distinct real roots
eq = f(x).diff(x, 5) - 100 * f(x).diff(x, 3) + 1000 * f(x).diff(x) + f(x)
r1, r2, r3, r4, r5 = [
rootof(x ** 5 - 100 * x ** 3 + 1000 * x + 1, n) for n in range(5)
]
sol = Eq(
f(x),
C1 * exp(r1 * x)
+ C2 * exp(r2 * x)
+ C3 * exp(r3 * x)
+ C4 * exp(r4 * x)
+ C5 * exp(r5 * x),
)
assert dsolve(eq) == sol
# FIXME: assert checkodesol(eq, sol) == (True, [0]) # Hangs...
# Rational root and unsolvable quintic
eq = (
f(x).diff(x, 6)
- 6 * f(x).diff(x, 5)
+ 5 * f(x).diff(x, 4)
+ 10 * f(x).diff(x)
- 50 * f(x)
)
r2, r3, r4, r5, r6 = [rootof(x ** 5 - x ** 4 + 10, n) for n in range(5)]
sol = Eq(
f(x),
C5 * exp(5 * x)
+ C6 * exp(x * r2)
+ exp(re(r3) * x) * (C1 * sin(im(r3) * x) + C2 * cos(im(r3) * x))
+ exp(re(r5) * x) * (C3 * sin(im(r5) * x) + C4 * cos(im(r5) * x)),
)
assert dsolve(eq) == sol
# FIXME: assert checkodesol(eq, sol) == (True, [0]) # Hangs...
# Five double roots (this is (x**5 - x + 1)**2)
eq = (
f(x).diff(x, 10)
- 2 * f(x).diff(x, 6)
+ 2 * f(x).diff(x, 5)
+ f(x).diff(x, 2)
- 2 * f(x).diff(x, 1)
+ f(x)
)
r1, r2, r3, r4, r5 = [rootof(x ** 5 - x + 1, n) for n in range(5)]
sol = Eq(
f(x),
(C1 + C2 * x) * exp(r1 * x)
+ exp(re(r2) * x)
* ((C3 + C4 * x) * sin(im(r2) * x) + (C5 + C6 * x) * cos(im(r2) | |
import abc
import asyncio
import enum
import json
import logging
import re
import ssl
from base64 import b64encode
from collections import defaultdict
from collections.abc import AsyncIterator, Callable, Iterable, Sequence
from contextlib import suppress
from dataclasses import dataclass, field, replace
from datetime import datetime
from enum import Enum
from math import ceil
from pathlib import Path, PurePath
from types import TracebackType
from typing import Any, ClassVar, NoReturn, Optional, Union
from urllib.parse import urlsplit
import aiohttp
import iso8601
from aiohttp import WSMsgType
from async_timeout import timeout
from multidict import MultiDict
from yarl import URL
from platform_api.utils.stream import Stream
from .job_request import (
Container,
ContainerResources,
ContainerTPUResource,
ContainerVolume,
DiskContainerVolume,
JobError,
JobNotFoundException,
JobRequest,
Secret,
SecretContainerVolume,
)
from .kube_config import KubeClientAuthType
logger = logging.getLogger(__name__)
class ServiceType(str, enum.Enum):
CLUSTER_IP = "ClusterIP"
NODE_PORT = "NodePort"
LOAD_BALANCER = "LoadBalancer"
class KubeClientException(Exception):
pass
class StatusException(KubeClientException):
pass
class AlreadyExistsException(StatusException):
pass
class NotFoundException(StatusException):
pass
class ResourceGoneException(KubeClientException):
pass
def _raise_status_job_exception(pod: dict[str, Any], job_id: Optional[str]) -> NoReturn:
    if pod["code"] == 409:
        raise AlreadyExistsException(pod.get("reason", "job already exists"))
    elif pod["code"] == 404:
        raise JobNotFoundException(f"job {job_id} was not found")
    elif pod["code"] == 422:
        raise JobError(f"cannot create job with id {job_id}")
    else:
        raise JobError(f"unexpected error, status code {pod['code']}")
class GroupVersion(str, Enum):
NETWORKING_V1 = "networking.k8s.io/v1"
@dataclass(frozen=True)
class APIResource:
group_version: str
resources: Sequence[str]
@property
def has_ingress(self) -> bool:
return self.has_resource("ingresses")
def has_resource(self, resource_name: str) -> bool:
return resource_name in self.resources
@classmethod
def from_primitive(cls, payload: dict[str, Any]) -> "APIResource":
return cls(
group_version=payload["groupVersion"],
resources=[p["name"] for p in payload["resources"]],
)
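
# Illustration only (not part of the original source): how APIResource.from_primitive
# consumes the payload shape returned by a Kubernetes /apis/<group>/<version> discovery
# call; the payload below is a hand-written example, not captured API output.
def _example_api_resource() -> bool:
    payload = {
        "groupVersion": "networking.k8s.io/v1",
        "resources": [{"name": "ingresses"}, {"name": "networkpolicies"}],
    }
    resource = APIResource.from_primitive(payload)
    # True: the "ingresses" resource is present in this group version.
    return resource.has_ingress
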
class APIResources(dict[str, APIResource]):
group_versions: list[str] = [GroupVersion.NETWORKING_V1]
@property
def networking_v1(self) -> Optional[APIResource]:
return self.get(GroupVersion.NETWORKING_V1)
@property
def has_networking_v1_ingress(self) -> bool:
return self.networking_v1 is not None and self.networking_v1.has_ingress
@dataclass(frozen=True)
class Volume(metaclass=abc.ABCMeta):
name: str
def create_mount(
self,
container_volume: ContainerVolume,
mount_sub_path: Optional[PurePath] = None,
) -> "VolumeMount":
raise NotImplementedError("Cannot create mount for abstract Volume type.")
def to_primitive(self) -> dict[str, Any]:
raise NotImplementedError
@dataclass(frozen=True)
class PathVolume(Volume):
# None for cluster storage.
# /org for organization/additional storage.
path: Optional[PurePath]
def create_mount(
self,
container_volume: ContainerVolume,
mount_sub_path: Optional[PurePath] = None,
) -> "VolumeMount":
try:
sub_path = container_volume.src_path.relative_to(
"/" if self.path is None else str(self.path)
)
except ValueError:
sub_path = container_volume.src_path.relative_to("/")
mount_sub_path = mount_sub_path or PurePath("")
return VolumeMount(
volume=self,
mount_path=container_volume.dst_path / mount_sub_path,
sub_path=sub_path,
read_only=container_volume.read_only,
)
@dataclass(frozen=True)
class HostVolume(PathVolume):
host_path: PurePath
def to_primitive(self) -> dict[str, Any]:
return {
"name": self.name,
"hostPath": {"path": str(self.host_path), "type": "Directory"},
}
@dataclass(frozen=True)
class SharedMemoryVolume(Volume):
def to_primitive(self) -> dict[str, Any]:
return {"name": self.name, "emptyDir": {"medium": "Memory"}}
def create_mount(
self,
container_volume: ContainerVolume,
mount_sub_path: Optional[PurePath] = None,
) -> "VolumeMount":
mount_sub_path = mount_sub_path or PurePath("")
return VolumeMount(
volume=self,
mount_path=container_volume.dst_path / mount_sub_path,
sub_path=PurePath(""),
read_only=container_volume.read_only,
)
@dataclass(frozen=True)
class NfsVolume(PathVolume):
server: str
export_path: PurePath
def to_primitive(self) -> dict[str, Any]:
return {
"name": self.name,
"nfs": {"server": self.server, "path": str(self.export_path)},
}
@dataclass(frozen=True)
class PVCVolume(PathVolume):
claim_name: str
def to_primitive(self) -> dict[str, Any]:
return {
"name": self.name,
"persistentVolumeClaim": {"claimName": self.claim_name},
}
@dataclass(frozen=True)
class SecretEnvVar:
name: str
secret: Secret
@classmethod
def create(cls, name: str, secret: Secret) -> "SecretEnvVar":
return cls(name=name, secret=secret)
def to_primitive(self) -> dict[str, Any]:
return {
"name": self.name,
"valueFrom": {
"secretKeyRef": {
"name": self.secret.k8s_secret_name,
"key": self.secret.secret_key,
}
},
}
@dataclass(frozen=True)
class VolumeMount:
volume: Volume
mount_path: PurePath
sub_path: PurePath = PurePath("")
read_only: bool = False
def to_primitive(self) -> dict[str, Any]:
sub_path = str(self.sub_path)
raw = {
"name": self.volume.name,
"mountPath": str(self.mount_path),
"readOnly": self.read_only,
}
if sub_path:
raw["subPath"] = sub_path
return raw
@dataclass(frozen=True)
class SecretVolume(Volume):
k8s_secret_name: str
def create_secret_mount(self, sec_volume: SecretContainerVolume) -> "VolumeMount":
return VolumeMount(
volume=self,
mount_path=sec_volume.dst_path,
sub_path=PurePath(sec_volume.secret.secret_key),
read_only=True,
)
def to_primitive(self) -> dict[str, Any]:
return {
"name": self.name,
"secret": {"secretName": self.k8s_secret_name, "defaultMode": 0o400},
}
@dataclass(frozen=True)
class PVCDiskVolume(Volume):
claim_name: str
def create_disk_mount(self, disk_volume: DiskContainerVolume) -> "VolumeMount":
return VolumeMount(
volume=self,
mount_path=disk_volume.dst_path,
read_only=disk_volume.read_only,
)
def to_primitive(self) -> dict[str, Any]:
return {
"name": self.name,
"persistentVolumeClaim": {"claimName": self.claim_name},
}
@dataclass(frozen=True)
class Resources:
cpu: float
memory: int
memory_request: Optional[int] = None
gpu: Optional[int] = None
shm: Optional[bool] = None
tpu_version: Optional[str] = None
tpu_cores: Optional[int] = None
gpu_key: ClassVar[str] = "nvidia.com/gpu"
tpu_key_prefix: ClassVar[str] = "cloud-tpus.google.com/"
def __post_init__(self) -> None:
if bool(self.tpu_version) ^ bool(self.tpu_cores):
raise ValueError("invalid TPU configuration")
@property
def cpu_mcores(self) -> int:
return int(self.cpu * 1000)
@property
def memory_mib(self) -> str:
return f"{self.memory}Mi"
@property
def memory_request_mib(self) -> str:
return f"{self.memory_request}Mi"
@property
def tpu_key(self) -> str:
assert self.tpu_version
return self.tpu_key_prefix + self.tpu_version
def to_primitive(self) -> dict[str, Any]:
payload: dict[str, Any] = {
"requests": {"cpu": f"{self.cpu_mcores}m", "memory": self.memory_mib},
"limits": {"cpu": f"{self.cpu_mcores}m", "memory": self.memory_mib},
}
if self.gpu:
payload["requests"][self.gpu_key] = self.gpu
payload["limits"][self.gpu_key] = self.gpu
if self.tpu_version:
payload["requests"][self.tpu_key] = self.tpu_cores
payload["limits"][self.tpu_key] = self.tpu_cores
if self.memory_request:
payload["requests"]["memory"] = self.memory_request_mib
return payload
@classmethod
def from_primitive(cls, payload: dict[str, Any]) -> "Resources":
requests = payload.get("requests", {})
gpu = None
if cls.gpu_key in requests:
gpu = int(requests[cls.gpu_key])
tpu_version, tpu_cores = cls._parse_tpu(requests)
return cls(
cpu=cls.parse_cpu(requests.get("cpu", "0")),
memory=cls.parse_memory(requests.get("memory", "0Mi")),
gpu=gpu,
tpu_version=tpu_version,
tpu_cores=tpu_cores,
)
@classmethod
def parse_cpu(cls, cpu: str) -> float:
try:
return float(cpu)
except ValueError:
return float(cpu[:-1]) / 1000
@classmethod
def parse_memory(cls, memory: str) -> int:
try:
memory_b = int(memory)
except ValueError:
if memory.endswith("Ki"):
memory_b = int(memory[:-2]) * 1024
elif memory.endswith("K"):
memory_b = int(memory[:-1]) * 1000
elif memory.endswith("Mi"):
return int(memory[:-2])
elif memory.endswith("M"):
memory_b = int(memory[:-1]) * 1000**2
elif memory.endswith("Gi"):
memory_b = int(memory[:-2]) * 1024**3
elif memory.endswith("G"):
memory_b = int(memory[:-1]) * 1000**3
else:
raise ValueError(f"{memory!r} memory format is not supported")
return ceil(memory_b / 1024**2)
@classmethod
def _parse_tpu(cls, payload: dict[str, Any]) -> tuple[Optional[str], Optional[int]]:
for key, value in payload.items():
if key.startswith(cls.tpu_key_prefix):
return key.split("/")[-1], int(value)
return None, None
@classmethod
def _parse_tpu_resource(cls, tpu: ContainerTPUResource) -> tuple[str, int]:
try:
tpu_version, tpu_cores = tpu.type.rsplit("-", 1)
return tpu_version, int(tpu_cores)
except (ValueError, TypeError):
raise ValueError(f"invalid TPU type format: '{tpu.type}'")
@classmethod
def from_container_resources(cls, resources: ContainerResources) -> "Resources":
kwargs: dict[str, Any] = {}
if resources.tpu:
kwargs["tpu_version"], kwargs["tpu_cores"] = cls._parse_tpu_resource(
resources.tpu
)
return cls(
cpu=resources.cpu,
memory=resources.memory_mb,
gpu=resources.gpu,
shm=resources.shm,
**kwargs,
)
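# Illustrative sketch (values assumed, not from the original module): the shape of the
# payload produced by Resources.to_primitive() for a container requesting one GPU.
#
#     >>> Resources(cpu=0.5, memory=1024, gpu=1).to_primitive()
#     {'requests': {'cpu': '500m', 'memory': '1024Mi', 'nvidia.com/gpu': 1},
#      'limits':   {'cpu': '500m', 'memory': '1024Mi', 'nvidia.com/gpu': 1}}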
@dataclass(frozen=True)
class Service:
name: str
target_port: Optional[int]
uid: Optional[str] = None
selector: dict[str, str] = field(default_factory=dict)
port: int = 80
service_type: ServiceType = ServiceType.CLUSTER_IP
cluster_ip: Optional[str] = None
labels: dict[str, str] = field(default_factory=dict)
def _add_port_map(
self,
port: Optional[int],
target_port: Optional[int],
port_name: str,
ports: list[dict[str, Any]],
) -> None:
if target_port:
ports.append({"port": port, "targetPort": target_port, "name": port_name})
def to_primitive(self) -> dict[str, Any]:
service_descriptor: dict[str, Any] = {
"metadata": {"name": self.name},
"spec": {
"type": self.service_type.value,
"ports": [],
"selector": self.selector,
},
}
if self.cluster_ip:
service_descriptor["spec"]["clusterIP"] = self.cluster_ip
if self.labels:
service_descriptor["metadata"]["labels"] = self.labels.copy()
self._add_port_map(
self.port, self.target_port, "http", service_descriptor["spec"]["ports"]
)
return service_descriptor
@classmethod
def create_for_pod(cls, pod: "PodDescriptor") -> "Service":
return cls(
name=pod.name,
selector=pod.labels,
target_port=pod.port,
labels=pod.labels,
)
@classmethod
def create_headless_for_pod(cls, pod: "PodDescriptor") -> "Service":
http_port = pod.port or cls.port
return cls(
name=pod.name,
selector=pod.labels,
cluster_ip="None",
target_port=http_port,
labels=pod.labels,
)
def make_named(self, name: str) -> "Service":
return replace(self, name=name)
@classmethod
def _find_port_by_name(
cls, name: str, port_mappings: list[dict[str, Any]]
) -> dict[str, Any]:
for port_mapping in port_mappings:
if port_mapping.get("name", None) == name:
return port_mapping
return {}
@classmethod
def from_primitive(cls, payload: dict[str, Any]) -> "Service":
http_payload = cls._find_port_by_name("http", payload["spec"]["ports"])
service_type = payload["spec"].get("type", Service.service_type.value)
return cls(
name=payload["metadata"]["name"],
uid=payload["metadata"]["uid"],
selector=payload["spec"].get("selector", {}),
target_port=http_payload.get("targetPort", None),
port=http_payload.get("port", Service.port),
service_type=ServiceType(service_type),
cluster_ip=payload["spec"].get("clusterIP"),
labels=payload["metadata"].get("labels", {}),
)
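# Illustrative sketch (values assumed): a minimal Service and the manifest it renders,
# assuming ServiceType.CLUSTER_IP.value == "ClusterIP".
#
#     >>> Service(name="web", target_port=8080).to_primitive()
#     {'metadata': {'name': 'web'},
#      'spec': {'type': 'ClusterIP',
#               'ports': [{'port': 80, 'targetPort': 8080, 'name': 'http'}],
#               'selector': {}}}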
@dataclass(frozen=True)
class IngressRule:
host: str
service_name: Optional[str] = None
service_port: Optional[int] = None
@classmethod
def from_v1beta1_primitive(cls, payload: dict[str, Any]) -> "IngressRule":
http_paths = payload.get("http", {}).get("paths", [])
http_path = http_paths[0] if http_paths else {}
backend = http_path.get("backend", {})
service_name = backend.get("serviceName")
service_port = backend.get("servicePort")
return cls(
host=payload.get("host", ""),
service_name=service_name,
service_port=service_port,
)
@classmethod
def from_v1_primitive(cls, payload: dict[str, Any]) -> "IngressRule":
http_paths = payload.get("http", {}).get("paths", [])
http_path = http_paths[0] if http_paths else {}
service = http_path.get("backend", {}).get("service", {})
service_name = service.get("name")
service_port = service.get("port", {}).get("number")
return cls(
host=payload.get("host", ""),
service_name=service_name,
service_port=service_port,
)
def to_v1beta1_primitive(self) -> dict[str, Any]:
payload: dict[str, Any] = {"host": self.host}
if self.service_name:
payload["http"] = {
"paths": [
{
"backend": {
"serviceName": self.service_name,
"servicePort": self.service_port,
}
}
]
}
return payload
def to_v1_primitive(self) -> dict[str, Any]:
payload: dict[str, Any] = {"host": self.host}
if self.service_name:
payload["http"] = {
"paths": [
{
"pathType": "ImplementationSpecific",
"backend": {
"service": {
"name": self.service_name,
"port": {"number": self.service_port},
}
},
}
]
}
return payload
@classmethod
def from_service(cls, host: str, service: Service) -> "IngressRule":
return cls(host=host, service_name=service.name, service_port=service.port)
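# Illustrative sketch (values assumed): the same rule rendered against both Ingress API
# versions handled above.
#
#     >>> rule = IngressRule(host="example.org", service_name="web", service_port=80)
#     >>> rule.to_v1beta1_primitive()
#     {'host': 'example.org',
#      'http': {'paths': [{'backend': {'serviceName': 'web', 'servicePort': 80}}]}}
#     >>> rule.to_v1_primitive()
#     {'host': 'example.org',
#      'http': {'paths': [{'pathType': 'ImplementationSpecific',
#                          'backend': {'service': {'name': 'web',
#                                                  'port': {'number': 80}}}}]}}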
@dataclass(frozen=True)
class Ingress:
name: str
ingress_class: Optional[str] = None
rules: list[IngressRule] = field(default_factory=list)
annotations: dict[str, str] = field(default_factory=dict)
labels: dict[str, str] = field(default_factory=dict)
def to_v1beta1_primitive(self) -> dict[str, Any]:
rules: list[Any] = [rule.to_v1beta1_primitive() for rule in self.rules] or [
None
]
annotations = self.annotations.copy()
if self.ingress_class:
annotations["kubernetes.io/ingress.class"] = self.ingress_class
metadata = {"name": self.name, "annotations": annotations}
if self.labels:
metadata["labels"] = self.labels.copy()
primitive = {"metadata": metadata, "spec": {"rules": rules}}
return primitive
def to_v1_primitive(self) -> dict[str, Any]:
        # The original body of this method was truncated in the source. The code below is a
        # reconstruction by analogy with to_v1beta1_primitive above; the networking.k8s.io/v1
        # API expresses the ingress class via spec.ingressClassName instead of an annotation.
        rules: list[Any] = [rule.to_v1_primitive() for rule in self.rules] or [None]
        metadata: dict[str, Any] = {"name": self.name, "annotations": self.annotations.copy()}
        if self.labels:
            metadata["labels"] = self.labels.copy()
        primitive: dict[str, Any] = {"metadata": metadata, "spec": {"rules": rules}}
        if self.ingress_class:
            primitive["spec"]["ingressClassName"] = self.ingress_class
        return primitive
await delete_article_response.release()
async def metadata(self, path, **kwargs):
"""Return metadata for entity identified by ``path`` under the parent project.
:param FigsharePath path: entity whose metadata will be returned
:rtype: FigshareFileMetadata obj or list of Metadata objs
"""
if path.is_root:
path.is_public = False
contents = await asyncio.gather(*[
# TODO: collections may need to use each['url'] for correct URN
# Use _get_url_super ? figshare API needs to get fixed first.
self._get_article_metadata(str(each['id']), path.is_public)
for each in await self._get_all_articles()
])
return [each for each in contents if each]
if not path.parts[-1].identifier:
raise exceptions.NotFoundError(str(path))
if len(path.parts) > 3:
raise exceptions.NotFoundError(str(path))
article_response = await self.make_request(
'GET',
self.build_url(path.is_public, *self.root_path_parts,
'articles', path.parts[1].identifier),
expects=(200, 404),
)
if article_response.status == 404:
raise exceptions.NotFoundError(str(path))
article_json = await article_response.json()
if len(path.parts) == 2:
if article_json['defined_type'] not in settings.FOLDER_TYPES:
raise exceptions.NotFoundError(str(path))
contents = []
for file in article_json['files']:
contents.append(metadata.FigshareFileMetadata(article_json, raw_file=file))
return contents
elif len(path.parts) == 3:
for file in article_json['files']:
if file['id'] == int(path.parts[2].identifier):
return metadata.FigshareFileMetadata(article_json, raw_file=file)
raise exceptions.NotFoundError(path.path)
else:
raise exceptions.NotFoundError('{} is not a valid path.'.format(path))
async def _get_article_metadata(self, article_id, is_public: bool):
"""Return Figshare*Metadata object for given article_id. Returns a FolderMetadata object
for filesets, a FileMetadat object for other article types, and ``None`` if the article
is not a fileset and has no files attached.
Defined separately to allow for taking advantage of ``asyncio.gather``.
:param str article_id: id of article whose metadata is requested
:param bool is_public: ``True`` if article is accessed through public URN
"""
response = await self.make_request(
'GET',
self.build_url(is_public, *self.root_path_parts, 'articles', article_id),
expects=(200, ),
)
article_json = await response.json()
if article_json['defined_type'] in settings.FOLDER_TYPES:
return metadata.FigshareFolderMetadata(article_json)
elif article_json['files']:
return metadata.FigshareFileMetadata(article_json)
return None # article without attached file
async def _delete_container_contents(self):
"""Delete all articles within this Project or Collection."""
# TODO: Needs logic for skipping public articles in collections
articles = await self._get_all_articles()
for article in articles:
delete_article_response = await self.make_request(
'DELETE',
self.build_url(False, *self.root_path_parts, 'articles', str(article['id'])),
expects=(204, ),
)
await delete_article_response.release()
async def _get_all_articles(self):
"""Get all articles under a project or collection. This endpoint is paginated and does not
provide limit metadata, so we keep querying until we receive an empty array response.
See https://docs.figshare.com/api/#searching-filtering-and-pagination for details.
:return: list of article json objects
:rtype: `list`
"""
all_articles, keep_going, page = [], True, 1
while keep_going:
resp = await self.make_request(
'GET',
self.build_url(False, *self.root_path_parts, 'articles'),
params={'page': str(page), 'page_size': str(settings.MAX_PAGE_SIZE)},
expects=(200, ),
)
articles = await resp.json()
all_articles.extend(articles)
page += 1
keep_going = len(articles) > 0
return all_articles
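    # Pagination sketch (illustrative only; ``fetch`` is a hypothetical stand-in for the
    # figshare request above): the loop keeps asking for pages until an empty list comes
    # back, so it always issues one extra request after the last non-empty page.
    #
    #     results, page = [], 1
    #     while True:
    #         batch = fetch(page)
    #         if not batch:
    #             break
    #         results.extend(batch)
    #         page += 1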
async def _create_article(self, data):
"""Create an article placeholder with the properties given in ``data``. Returns the id of
the new article. See https://docs.figshare.com/api/articles/#create-a-new-article for
valid properties.
:param dict data: properties to set for new article
:return: the id of the newly created article
:rtype: `str`
"""
resp = await self.make_request(
'POST',
self.build_url(False, *self.root_path_parts, 'articles'),
data=data,
expects=(201, ),
throws=exceptions.CreateFolderError,
)
articles_json = await resp.json()
article_id = articles_json['location'].rsplit('/', 1)[1]
return article_id
class FigshareArticleProvider(BaseFigshareProvider):
def __init__(self, auth, credentials, settings, child=False):
super().__init__(auth, credentials, settings)
async def validate_v1_path(self, path, **kwargs):
"""Take a string path from the url and attempt to map it to an entity within this article.
If the entity is found, returns a FigsharePath object with the entity identifiers included.
Otherwise throws a 404 Not Found. Will also assert that the entity type inferred from the
path matches the type of the entity at that url.
:param str path: entity path from the v1 API
:rtype FigsharePath:
"""
if path == '/':
return FigsharePath('/', _ids=('', ), folder=True, is_public=False)
path_parts = self._path_split(path)
if len(path_parts) != 2:
raise exceptions.InvalidPathError('{} is not a valid Figshare path.'.format(path))
file_id = path_parts[1]
resp = await self.make_request(
'GET',
self.build_url(False, *self.root_path_parts, 'files', file_id),
expects=(200, ),
)
file_json = await resp.json()
return FigsharePath('/' + file_json['name'], _ids=('', file_id), folder=False,
is_public=False)
async def validate_path(self, path, **kwargs):
"""Take a string path from the url and attempt to map it to an entity within this article.
If the entity is found, returns a FigsharePath object with the entity identifiers included.
Otherwise returns a FigsharePath with empty identifiers.
:param str path: identifier path URN as passed through the v0 API
:rtype FigsharePath:
Quirks:
* v0 may pass an identifier_path whose last part is a name and not an identifier, in the
case of file/folder creation calls.
* validate_path validates parent and returns a FigsharePath as accurately as possible.
"""
if path == '/':
return FigsharePath('/', _ids=('', ), folder=True, is_public=False)
path_parts = self._path_split(path)
if len(path_parts) != 2:
raise exceptions.InvalidPathError('{} is not a valid Figshare path.'.format(path))
file_id = path_parts[1]
resp = await self.make_request(
'GET',
self.build_url(False, *self.root_path_parts, 'files', file_id),
expects=(200, 404, ),
)
if resp.status == 200:
file_json = await resp.json()
file_name = file_json['name']
return FigsharePath('/' + file_name, _ids=('', file_id), folder=False, is_public=False)
# catch for create file in article root
await resp.release()
return FigsharePath('/' + file_id, _ids=('', ''), folder=False, is_public=False)
async def revalidate_path(self, parent_path, child_name, folder: bool):
"""Attempt to get child's id and return FigsharePath of child.
``revalidate_path`` is used to check for the existance of a child_name/folder
within the parent. Returning a FigsharePath of child. Child will have _id
if conflicting child_name/folder exists otherwise _id will be ''.
:param FigsharePath parent_path: Path of parent
:param str child_name: Name of child
:param bool folder: ``True`` if child is folder
Code notes:
Due to the fact that figshare allows duplicate titles/names for
articles/files, revalidate_path can not be relied on to always return
the correct id of an existing child_name. will return the first id that
matches the folder and child_name arguments or '' if no match.
"""
parent_is_folder = False
urn_parts = self.root_path_parts
if not parent_path.is_root:
if folder:
raise exceptions.NotFoundError(
'{} is not a valid parent path of folder={}. Folders can only exist at the '
'root level.'.format(parent_path.identifier_path, str(folder)))
else:
urn_parts = (*urn_parts, (parent_path.identifier))
list_children_response = await self.make_request(
'GET',
self.build_url(False, *urn_parts),
expects=(200, ),
)
child_id = ''
article_json = await list_children_response.json()
for file in article_json['files']:
if file['name'] == child_name:
child_id = str(file['id'])
break
return parent_path.child(child_name, _id=child_id, folder=folder,
parent_is_folder=parent_is_folder)
async def upload(self, stream, path, conflict='replace', **kwargs):
"""Upload a file to provider root or to an article whose defined_type is
configured to represent a folder.
:param asyncio.StreamReader stream: stream to upload
:param FigsharePath path: FigsharePath to upload the file to.
:param dict \*\*kwargs: Will be passed to returned metadata object
"""
path, exists = await self.handle_name_conflict(path, conflict=conflict)
if not path.parent.is_root:
parent_resp = await self.make_request(
'GET',
self.build_url(False, *self.root_path_parts, 'articles', path.parent.identifier),
expects=(200, ),
)
parent_json = await parent_resp.json()
            if parent_json['defined_type'] not in settings.FOLDER_TYPES:
del path._parts[1]
file_id = await self._upload_file(self.container_id, path.name, stream)
# Build new file path and return metadata
path = FigsharePath('/' + file_id, _ids=('', file_id), folder=False, is_public=False)
return (await self.metadata(path, **kwargs)), True
async def create_folder(self, path, **kwargs):
raise exceptions.CreateFolderError('Cannot create folders within articles.', code=400)
async def delete(self, path, confirm_delete=0, **kwargs):
"""Delete the file at ``path``. If ``path`` is ``/`` and ``confirm_delete`` is ``1``, then
delete all of the files within the article, but not the article itself.
:param FigsharePath path: Path to be deleted
:param int confirm_delete: Must be 1 to confirm root folder delete
:rtype: None
:raises: :class:`waterbutler.core.exceptions.NotFoundError`
:raises: :class:`waterbutler.core.exceptions.DeleteError`
Quirks:
* If the FigsharePath given is for the provider root path, then the contents of the
provider root path will be deleted, but not the provider root itself.
"""
if path.is_root:
if confirm_delete == 1:
return await self._delete_container_contents()
raise exceptions.DeleteError(
'confirm_delete=1 is required for deleting root provider folder',
code=400
)
await self._delete_file(path.parts[-1]._id)
async def metadata(self, path, **kwargs):
"""Return metadata for entity identified by ``path``. May be the containing article or
a file in a fileset article.
:param FigsharePath path: entity whose metadata will be returned
:rtype FigshareFileMetadata obj or list of Metadata objs:
"""
article = await self._get_article(not path.is_public)
if path.is_root: # list files in article
contents = []
for file in article['files']:
contents.append(metadata.FigshareFileMetadata(article, raw_file=file))
return contents
elif len(path.parts) == 2: # metadata for a particular file
for file in article['files']:
if str(file['id']) == path.parts[1].identifier:
return metadata.FigshareFileMetadata(article, raw_file=file)
            # Invalid file id: no file in this article matched the path. The original tail of
            # this method was truncated in the source; the raise below is reconstructed by
            # analogy with the project provider's metadata() earlier in this file.
            raise exceptions.NotFoundError(str(path))
            elif option[0] == '--use-old-getter-setter':
self.UseOldGetterSetter = 1
elif option[0] in ('-u', '--user-methods'):
UserMethodsPath = option[1]
elif option[0] == '--no-process-includes':
self.processIncludes = 0
elif option[0] == "--silence":
self.outputText = False
elif option[0] == "--namespacedef":
self.namespacedef = option[1]
elif option[0] == '--external-encoding':
self.ExternalEncoding = option[1]
elif option[0] in ('-q', '--no-questions'):
self.NoQuestions = True
elif option[0] == '--version':
showVersion = True
elif option[0] == '--member-specs':
MemberSpecs = option[1]
if MemberSpecs not in ('list', 'dict', ):
raise RuntimeError('Option --member-specs must be "list" or "dict".')
elif option[0] in ('-l', '--generated-language'):
self.genLang = option[1]
if self.genLang not in ('py', 'c++'):
raise RuntimeError('Option --generated-language must be "py" or "c++".')
elif option[0] in ('-g', '--generator-category'):
self.genCategory = option[1]
if self.genCategory not in ('type',
'service',
'ifmap-frontend',
'ifmap-backend',
'device-api',
'java-api',
'golang-api',
'contrail-json-schema',
'json-schema'):
raise RuntimeError('Option --generator-category must be "type", service", "ifmap-frontend", "ifmap-backend", "device-api", "java-api", "golang-api", "contrail-json-schema" or "json-schema".')
if showVersion:
print('generateDS.py version %s' % VERSION)
sys.exit(0)
def countChildren(self, element, count):
count += len(element.getChildren())
base = element.getBase()
if base and base in self.ElementDict:
parent = self.ElementDict[base]
count = self.countChildren(parent, count)
return count
def getParentName(self, element):
base = element.getBase()
rBase = element.getRestrictionBaseObj()
parentName = None
parentObj = None
if base and base in self.ElementDict:
parentObj = self.ElementDict[base]
parentName = self.cleanupName(parentObj.getName())
elif rBase:
base = element.getRestrictionBase()
parentObj = self.ElementDict[base]
parentName = self.cleanupName(parentObj.getName())
return parentName, parentObj
def makeFile(self, outFileName, outAppend = False):
outFile = None
if ((not self.Force) and os.path.exists(outFileName)
and not outAppend):
if self.NoQuestions:
sys.stderr.write('File %s exists. Change output file or use -f (force).\n' % outFileName)
sys.exit(1)
else:
                reply = input('File %s exists. Overwrite? (y/n): ' % outFileName)
                if reply == 'y':
                    outFile = open(outFileName, 'w')
        else:
            if (outAppend):
                outFile = open(outFileName, 'a')
            else:
                outFile = open(outFileName, 'w')
return outFile
def mapName(self, oldName):
newName = oldName
if self.NameTable:
if oldName in self.NameTable:
newName = self.NameTable[oldName]
return newName
def cleanupName(self, oldName):
newName = oldName.replace(':', '_')
newName = newName.replace('-', '_')
newName = newName.replace('.', '_')
return newName
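    # Illustrative sketch (input assumed): cleanupName turns XML-ish identifiers into
    # valid Python/C++ identifiers by replacing ':', '-' and '.' with underscores.
    #
    #     >>> self.cleanupName("ns:my-type.v1")
    #     'ns_my_type_v1'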
def make_gs_name(self, oldName):
if self.UseOldGetterSetter:
newName = oldName.capitalize()
else:
newName = '_%s' % oldName
return newName
def is_builtin_simple_type(self, type_val):
if type_val in self.StringType or \
type_val == self.TokenType or \
type_val == self.DateTimeType or \
type_val == self.TimeType or \
type_val == self.DateType or \
type_val in self.IntegerType or \
type_val == self.DecimalType or \
type_val == self.PositiveIntegerType or \
type_val == self.NonPositiveIntegerType or \
type_val == self.NegativeIntegerType or \
type_val == self.NonNegativeIntegerType or \
type_val == self.BooleanType or \
type_val == self.FloatType or \
type_val == self.DoubleType or \
type_val in self.OtherSimpleTypes:
return True
else:
return False
def set_type_constants(self, nameSpace):
self.CurrentNamespacePrefix = nameSpace
self.AttributeGroupType = nameSpace + 'attributeGroup'
self.AttributeType = nameSpace + 'attribute'
self.BooleanType = nameSpace + 'boolean'
self.ChoiceType = nameSpace + 'choice'
self.SimpleContentType = nameSpace + 'simpleContent'
self.ComplexContentType = nameSpace + 'complexContent'
self.ComplexTypeType = nameSpace + 'complexType'
self.GroupType = nameSpace + 'group'
self.SimpleTypeType = nameSpace + 'simpleType'
self.RestrictionType = nameSpace + 'restriction'
self.WhiteSpaceType = nameSpace + 'whiteSpace'
self.AnyAttributeType = nameSpace + 'anyAttribute'
self.DateTimeType = nameSpace + 'dateTime'
self.TimeType = nameSpace + 'time'
self.DateType = nameSpace + 'date'
self.IntegerType = (nameSpace + 'integer',
nameSpace + 'unsignedShort',
nameSpace + 'unsignedLong',
nameSpace + 'unsignedInt',
nameSpace + 'unsignedByte',
nameSpace + 'byte',
nameSpace + 'short',
nameSpace + 'long',
nameSpace + 'int',
)
self.DecimalType = nameSpace + 'decimal'
self.PositiveIntegerType = nameSpace + 'positiveInteger'
self.NegativeIntegerType = nameSpace + 'negativeInteger'
self.NonPositiveIntegerType = nameSpace + 'nonPositiveInteger'
self.NonNegativeIntegerType = nameSpace + 'nonNegativeInteger'
self.DoubleType = nameSpace + 'double'
self.ElementType = nameSpace + 'element'
self.ExtensionType = nameSpace + 'extension'
self.FloatType = nameSpace + 'float'
self.IDREFSType = nameSpace + 'IDREFS'
self.IDREFType = nameSpace + 'IDREF'
self.IDType = nameSpace + 'ID'
self.IDTypes = (self.IDREFSType, self.IDREFType, self.IDType, )
self.SchemaType = nameSpace + 'schema'
self.SequenceType = nameSpace + 'sequence'
self.StringType = (nameSpace + 'string',
nameSpace + 'duration',
nameSpace + 'anyURI',
nameSpace + 'base64Binary',
nameSpace + 'hexBinary',
nameSpace + 'normalizedString',
nameSpace + 'NMTOKEN',
nameSpace + 'ID',
nameSpace + 'Name',
nameSpace + 'language',
)
self.TokenType = nameSpace + 'token'
self.NameType = nameSpace + 'Name'
self.NCNameType = nameSpace + 'NCName'
self.QNameType = nameSpace + 'QName'
self.NameTypes = (self.NameType, self.NCNameType, self.QNameType, )
self.ListType = nameSpace + 'list'
self.EnumerationType = nameSpace + 'enumeration'
self.MinInclusiveType = nameSpace + 'minInclusive'
self.MaxInclusiveType = nameSpace + 'maxInclusive'
self.UnionType = nameSpace + 'union'
self.AnnotationType = nameSpace + 'annotation'
self.DocumentationType = nameSpace + 'documentation'
self.AnyType = nameSpace + 'any'
self.OtherSimpleTypes = (
nameSpace + 'ENTITIES',
nameSpace + 'ENTITY',
nameSpace + 'ID',
nameSpace + 'IDREF',
nameSpace + 'IDREFS',
nameSpace + 'NCName',
nameSpace + 'NMTOKEN',
nameSpace + 'NMTOKENS',
nameSpace + 'NOTATION',
nameSpace + 'Name',
nameSpace + 'QName',
nameSpace + 'anyURI',
nameSpace + 'base64Binary',
nameSpace + 'hexBinary',
nameSpace + 'boolean',
nameSpace + 'byte',
nameSpace + 'date',
nameSpace + 'dateTime',
nameSpace + 'time',
nameSpace + 'decimal',
nameSpace + 'double',
nameSpace + 'duration',
nameSpace + 'float',
nameSpace + 'gDay',
nameSpace + 'gMonth',
nameSpace + 'gMonthDay',
nameSpace + 'gYear',
nameSpace + 'gYearMonth',
nameSpace + 'int',
nameSpace + 'integer',
nameSpace + 'language',
nameSpace + 'long',
nameSpace + 'negativeInteger',
nameSpace + 'nonNegativeInteger',
nameSpace + 'nonPositiveInteger',
nameSpace + 'normalizedString',
nameSpace + 'positiveInteger',
nameSpace + 'short',
nameSpace + 'string',
nameSpace + 'time',
nameSpace + 'token',
nameSpace + 'unsignedByte',
nameSpace + 'unsignedInt',
nameSpace + 'unsignedLong',
nameSpace + 'unsignedShort',
nameSpace + 'anySimpleType',
)
self.SchemaToPythonTypeMap = {
self.BooleanType : 'bool',
self.DecimalType : 'float',
self.DoubleType : 'float',
self.FloatType : 'float',
self.NegativeIntegerType : 'int',
self.NonNegativeIntegerType : 'int',
self.NonPositiveIntegerType : 'int',
self.PositiveIntegerType : 'int',
self.DateTimeType.lower() : 'str',
self.TimeType.lower() : 'str',
self.DateType.lower() : 'str'
}
self.SchemaToPythonTypeMap.update(dict((x, 'int') for x in self.IntegerType))
self.SchemaToPythonTypeMap.update(dict((x.lower(), 'int') for x in self.IntegerType))
self.SchemaToPythonTypeMap.update(dict((x, 'str') for x in self.StringType))
self.SchemaToCppTypeMap = {
self.BooleanType : 'bool',
self.DecimalType : 'float',
self.DoubleType : 'float',
self.FloatType : 'float',
self.NegativeIntegerType : 'int',
self.NonNegativeIntegerType : 'int',
self.NonPositiveIntegerType : 'int',
self.PositiveIntegerType : 'int',
self.StringType : 'string',
}
self.SchemaToCppTypeMap.update(dict((x, 'int') for x in self.IntegerType))
self.SchemaToCppTypeMap.update(dict((x, 'string') for x in self.StringType))
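    # Illustrative sketch (assumes set_type_constants was called with nameSpace='xs:'):
    # the lookup tables built above map qualified XSD type names to target-language types.
    #
    #     >>> self.SchemaToPythonTypeMap['xs:boolean']
    #     'bool'
    #     >>> self.SchemaToPythonTypeMap['xs:unsignedInt']
    #     'int'
    #     >>> self.SchemaToCppTypeMap['xs:string']
    #     'string'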
def init_with_args(self):
global TEMPLATE_SUBCLASS_FOOTER
self.XsdNameSpace = self.nameSpace
self.Namespacedef = self.namespacedef
self.set_type_constants(self.nameSpace)
if self.behaviorFilename and not self.subclassFilename:
err_msg(USAGE_TEXT)
err_msg('\n*** Error. Option -b requires -s\n')
if self.xschemaFileName is None:
if len(self.args) != 1:
usage()
else:
self.xschemaFileName = self.args[0]
silent = not self.outputText
self.TEMPLATE_MAIN = fixSilence(self.TEMPLATE_MAIN, silent)
TEMPLATE_SUBCLASS_FOOTER = fixSilence(TEMPLATE_SUBCLASS_FOOTER, silent)
self._load_config()
if self.genCategory == 'type':
self._Generator = TypeGenerator(self)
elif self.genCategory == 'service':
self._Generator = ServiceGenerator(self)
elif (self.genCategory == 'ifmap-backend' or
self.genCategory == 'ifmap-frontend' or
self.genCategory == 'device-api' or
self.genCategory == 'java-api' or
self.genCategory == 'golang-api' or
self.genCategory == 'contrail-json-schema' or
self.genCategory == 'json-schema'):
self._Generator = IFMapGenerator(self, self.genCategory)
self._Generator.setLanguage(self.genLang)
def _load_config(self):
try:
#print '1. updating NameTable'
import generateds_config
NameTable.update(generateds_config.NameTable)
#print '2. updating NameTable'
except ImportError as exp:
pass
def parseAndGenerate(self):
self.DelayedElements = []
self.DelayedElements_subclass = []
self.AlreadyGenerated = []
self.AlreadyGenerated_subclass = []
if self.UserMethodsPath:
# UserMethodsModule = __import__(UserMethodsPath)
path_list = self.UserMethodsPath.split('.')
mod_name = path_list[-1]
mod_path = os.sep.join(path_list[:-1])
module_spec = imp.find_module(mod_name, [mod_path, ])
self.UserMethodsModule = imp.load_module(mod_name, *module_spec)
## parser = saxexts.make_parser("xml.sax.drivers2.drv_pyexpat")
parser = make_parser()
dh = XschemaHandler(self)
## parser.setDocumentHandler(dh)
parser.setContentHandler(dh)
infile = self.getInfile()
parser.parse(infile)
# on MacOS parser.parse closes infile
if infile.closed:
infile = self.getInfile()
root = dh.getRoot()
root.annotate()
## print '-' * 60
## root.show(sys.stdout, 0)
## print '-' * 60
#debug_show_elements(root)
infile.seek(0)
self._Generator.generate(root, infile, self.outFilename)
def getInfile(self):
if self.xschemaFileName == '-':
infile = sys.stdin
else:
infile = open(self.xschemaFileName, 'r')
if self.processIncludes:
import process_includes
outfile = io.BytesIO()
process_includes.process_include_files(infile, outfile,
inpath=self.xschemaFileName)
outfile.seek(0)
infile = outfile
return infile
def showLevel(outfile, level):
for idx in range(level):
outfile.write(' ')
class XschemaElementBase(object):
def __init__(self):
pass
class SimpleTypeElement(XschemaElementBase):
def __init__(self, name):
XschemaElementBase.__init__(self)
self.name = name
self.base = None
self.collapseWhiteSpace = 0
# Attribute definitions for the current attributeGroup, if there is one.
self.attributeGroup = None
        # Attribute definitions for the current element.
self.attributeDefs = {}
self.complexType = 0
# Enumeration values for the current element.
self.values = list()
# The other simple types this is a union of.
self.unionOf = list()
self.simpleType = 0
self.listType = 0
self.documentation = ''
self.default = None
self.restrictionAttrs = None
def setName(self, name): self.name = name
def getName(self): return self.name
def setBase(self, base): self.base = base
def getBase(self): return self.base
def getDefault(self): return self.default
# tests/test_editor_api.py
import os
import json
from django.test import TestCase
import libs.track as track
import TrackApp.models as models
import tests.testing_utils as testing_utils
class EditorTestUtils(TestCase):
def create_user(self,
username='default_user',
password='<PASSWORD>',
email='<EMAIL>'):
user = testing_utils.create_user(username=username,
password=password,
email=email)
self.user, self.username, self.password = user, username, password
return user, username, password
def login(self):
self.client.login(username=self.username,
password=self.password)
def create_session(self):
"""
Get request to the /editor endpoint to generate track in session
"""
response = self.client.get('/editor/')
session = self.client.session
return response, session
def get_sample_file(self, filename='simple_numbers.gpx'):
"""
Get a file to be used as input
"""
return os.path.join(self.test_path, 'samples', filename)
def compare_tracks(self, session_track, reference_track):
"""
        Comparison of tracks is encapsulated in this function. It is particularly
        sensitive for segment_names, since a suffix may be added to the loaded
        file name in /media.
"""
for k in session_track:
if k == 'segment_names':
for s, r in zip(session_track[k], reference_track[k]):
self.assertIn(os.path.splitext(r)[0], s)
else:
self.assertEqual(session_track[k], reference_track[k])
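# Illustrative sketch (file names are assumptions): why compare_tracks matches segment
# names with assertIn rather than assertEqual. When a file is saved to /media a random
# suffix may be appended, so the stored name only needs to contain the original stem.
#
#     reference: 'simple_numbers.gpx'            -> stem 'simple_numbers'
#     session:   'simple_numbers_1a2b3c4d.gpx'   -> contains the stem, so the check passes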
class EditorTest(EditorTestUtils):
"""
Test the editor API functions from views.
"""
def setUp(self):
self.test_path = os.path.dirname(__file__)
self.user, self.username, self.password = self.create_user()
self.login()
def test_create_session(self):
"""
Check that empty session is created
"""
response, session = self.create_session()
self.assertEqual(response.status_code, 200)
self.assertIn('json_track', session.keys())
self.assertIn('index_db', session.keys())
def test_add_gpx(self):
"""
Add one gpx file and check it is included in session
"""
self.create_session()
# Add file
sample_file = self.get_sample_file()
with open(sample_file, 'r') as f:
self.client.post('/editor/', {'document': f})
session_track = json.loads(self.client.session['json_track'])
# Create expected output
obj_track = track.Track()
obj_track.add_gpx(sample_file)
reference_track = json.loads(obj_track.to_json())
self.compare_tracks(session_track, reference_track)
self.assertIsNone(self.client.session['index_db']) # not saved session
def test_load_session(self):
"""
Create a session with one track, save it and load it
"""
self.create_session()
sample_file = self.get_sample_file()
with open(sample_file, 'r') as f:
self.client.post('/editor/', {'document': f})
self.client.post('/editor/save_session')
        # Add another file so that the last active session is not
        # the same as the loaded one
with open(sample_file, 'r') as f:
self.client.post('/editor/', {'document': f})
# Load track
response = self.client.get(
f'/editor/{self.client.session["index_db"]}')
session_track = json.loads(self.client.session['json_track'])
# Create expected output
obj_track = track.Track()
obj_track.add_gpx(sample_file)
reference_track = json.loads(obj_track.to_json())
self.compare_tracks(session_track, reference_track)
self.assertEqual(response.status_code, 200)
self.assertIsNotNone(self.client.session['index_db'])
class GetSummaryTest(EditorTestUtils):
"""
Test the get_summary view of the editor api
"""
def setUp(self):
self.test_path = os.path.dirname(__file__)
self.user, self.username, self.password = self.create_user()
self.login()
def test_get_summary(self):
"""
Call the get_summary endpoint to check the return JSON
"""
self.create_session()
sample_file = self.get_sample_file()
with open(sample_file, 'r') as f:
self.client.post('/editor/', {'document': f})
response = self.client.get('/editor/get_summary')
summary = json.loads(response.content)['summary']
self.assertEqual(summary[list(summary.keys())[0]],
{'distance': '445.2 km',
'uphill': '+20 m',
'downhill': '-20 m'})
self.assertEqual(summary['total'],
{'distance': '445.2 km',
'uphill': '+20 m',
'downhill': '-20 m'})
self.assertEqual(response.status_code, 200)
def test_get_summary_no_track(self):
"""
        Try to get the summary of a non-existing session
"""
response = self.client.get('/editor/get_summary')
self.assertEqual(response.status_code, 520)
def test_get_summary_wrong_request(self):
"""
Use post request instead of get and check response
"""
response = self.client.post('/editor/get_summary')
self.assertEqual(response.status_code, 405)
class SaveSessionTest(EditorTestUtils):
"""
Test the save_session view of the editor api
"""
def setUp(self):
self.test_path = os.path.dirname(__file__)
self.user, self.username, self.password = self.create_user()
self.login()
def test_save_session(self):
"""
Create a session with one track and save it
"""
self.create_session()
# Add file
sample_file = self.get_sample_file()
with open(sample_file, 'r') as f:
self.client.post('/editor/', {'document': f})
session_track = json.loads(self.client.session['json_track'])
# Save session
response = self.client.post('/editor/save_session')
# Get reference data
saved_track = models.Track.objects.\
get(id=self.client.session['index_db'])
saved_track = json.loads(saved_track.track)
self.assertEqual(session_track, saved_track)
self.assertEqual(response.status_code, 201)
self.assertIsNotNone(self.client.session['index_db'])
def test_save_add_save(self):
"""
Create and save session with one gpx file. Add a new one, save and
check that the db record is properly updated.
"""
self.create_session()
sample_file = self.get_sample_file()
with open(sample_file, 'r') as f:
self.client.post('/editor/', {'document': f})
self.client.post('/editor/save_session')
with open(sample_file, 'r') as f:
self.client.post('/editor/', {'document': f})
self.client.post('/editor/save_session')
# Load track
response = self.client.get(
f'/editor/{self.client.session["index_db"]}')
session_track = json.loads(self.client.session['json_track'])
# Create expected output
obj_track = track.Track()
obj_track.add_gpx(sample_file)
obj_track.add_gpx(sample_file)
reference_track = json.loads(obj_track.to_json())
self.compare_tracks(session_track, reference_track)
self.assertEqual(response.status_code, 200)
self.assertIsNotNone(self.client.session['index_db'])
def test_save_remove_save(self):
"""
        Create and save a session with five gpx files. Remove some, save and check
        that the db record is properly updated.
"""
self.create_session()
# Load files and save session
for i in range(1, 6):
sample_file = self.get_sample_file(f'island_{i}.gpx')
with open(sample_file, 'r') as f:
self.client.post('/editor/', {'document': f})
self.client.post(
'/editor/rename_segment',
json.dumps({'index': i - 1,
'new_name': os.path.basename(sample_file)}),
content_type='application/json')
self.client.post('/editor/rename_session',
json.dumps({'new_name': 'test_save_remove_save'}),
content_type='application/json')
self.client.post('/editor/save_session')
# Remove segments and save
self.client.post('/editor/remove_segment/2')
self.client.post('/editor/remove_segment/4')
self.client.post('/editor/save_session')
# Load db record
track_db = json.loads(
models.Track.objects.get(id=self.client.session['index_db']).track)
segments_names = track_db['segment_names']
self.assertEqual(set(track_db['segment']), {1, 3, 5})
self.assertRegex(segments_names[0], 'island_1.*.gpx')
self.assertIsNone(segments_names[1])
self.assertRegex(segments_names[2], 'island_3.*.gpx')
self.assertIsNone(segments_names[3])
self.assertRegex(segments_names[4], 'island_5.*.gpx')
def test_save_rename_save(self):
"""
        Create and save a session, rename it, save again and check that the db record
        is properly updated.
"""
self.create_session()
self.client.post('/editor/save_session')
self.client.post('/editor/rename_session/test_save_rename_save')
self.client.post('/editor/save_session')
# Load db record
record = models.Track.objects.get(id=self.client.session['index_db'])
self.assertEqual(record.title, 'test_save_rename_save')
self.assertEqual(json.loads(record.track)['title'],
'test_save_rename_save')
def test_save_session_wrong_request(self):
"""
Use get request instead of post and check response
"""
self.create_session()
response = self.client.get('/editor/save_session')
self.assertEqual(response.status_code, 405)
def test_save_session_no_track(self):
"""
Try to save a non existing session
"""
response = self.client.post('/editor/save_session')
self.assertEqual(response.status_code, 520)
class RemoveSessionTest(EditorTestUtils):
"""
Test the remove_session view of the editor api
"""
def setUp(self):
self.test_path = os.path.dirname(__file__)
self.user, self.username, self.password = self.create_user()
self.login()
def test_remove_session(self):
"""
Create a session, save and remove it from db
"""
self.create_session()
self.client.post('/editor/save_session')
before = models.Track.objects.\
filter(id=self.client.session['index_db']).count()
response = self.client.post(
f'/editor/remove_session/{self.client.session["index_db"]}')
after = models.Track.objects.\
filter(id=self.client.session['index_db']).count()
self.assertEqual(response.status_code, 201)
self.assertEqual(before, 1)
self.assertEqual(after, 0)
def test_remove_session_no_track(self):
"""
        Try to remove a non-existing session
"""
response = self.client.post('/editor/remove_session/25')
self.assertEqual(response.status_code, 520)
def test_remove_session_wrong_request(self):
"""
Use get request instead of post and check response
"""
response = self.client.get('/editor/remove_session/25')
self.assertEqual(response.status_code, 405)
class RenameSessionTest(EditorTestUtils):
"""
Test the rename_session view of the editor api
"""
def setUp(self):
self.test_path = os.path.dirname(__file__)
self.user, self.username, self.password = self.create_user()
self.login()
def test_rename_session(self):
"""
Create a session, rename and save
"""
self.create_session()
self.client.post('/editor/save_session')
response = self.client.post(
'/editor/rename_session/test_rename_session')
self.client.post('/editor/save_session')
track_db = models.Track.objects.get(id=self.client.session['index_db'])
self.assertEqual(response.status_code, 201)
self.assertEqual(track_db.title, 'test_rename_session')
def test_rename_session_no_track(self):
"""
Try to rename a non existing session
"""
response = self.client.post(
'/editor/rename_session/test_rename_session_no_track')
self.assertEqual(response.status_code, 520)
def test_rename_session_wrong_request(self):
"""
Use get request instead of post and check response
"""
response = self.client.get('/editor/rename_session/new_name')
self.assertEqual(response.status_code, 405)
def test_rename_session_invalid_endpoint(self):
"""
Do not provide new_name in request
"""
response = self.client.post('/editor/rename_session/')
self.assertEqual(response.status_code, 404)
class DownloadSessionTest(EditorTestUtils):
"""
Test the download_session view of the editor api
"""
def setUp(self):
self.test_path = os.path.dirname(__file__)
self.user, self.username, self.password = self.create_user()
self.login()
def test_download_session(self):
"""
Load a gpx and download the session
"""
self.create_session()
sample_file = self.get_sample_file()
with open(sample_file, 'r') as f:
self.client.post('/editor/', {'document': f})
self.client.post('/editor/rename_session/test_download_session')
response = self.client.post('/editor/download_session')
resp_json = json.loads(response.content)
self.assertRegex(resp_json['url'],
'/media/test_download_session_.{8}.gpx')
self.assertRegex(resp_json['filename'],
'test_download_session_.{8}.gpx')
self.assertEqual(os.path.basename(resp_json['url']),
resp_json['filename'])
self.assertEqual(response.status_code, 200)
def test_download_session_wrong_request(self):
"""
Use get request instead of post and check response
"""
self.create_session()
sample_file = self.get_sample_file()
with open(sample_file, 'r') as f:
self.client.post('/editor/', {'document': f})
response = self.client.get('/editor/download_session')
self.assertEqual(response.status_code, 405)
def test_download_session_no_session(self):
"""
Test download session with no available session
"""
response = self.client.post('/editor/download_session')
self.assertEqual(response.status_code, 520)
def test_download_session_no_track(self):
"""
Test download session with no available track
"""
self.create_session()
response = self.client.post('/editor/download_session')
self.assertEqual(response.status_code, 200)
class GetSegmentsLinksTest(EditorTestUtils):
"""
Test the get_segments_links view of the editor api
"""
def setUp(self):
self.test_path = os.path.dirname(__file__)
self.user, self.username, self.password = self.create_user()
self.login()
def test_get_segments_links(self):
"""
        Test getting the links between the loaded segments
"""
self.create_session()
for file in ['simple_numbers.gpx', 'simple_numbers_down.gpx',
'simple_numbers_left.gpx', 'simple_numbers_up.gpx']:
sample_file = self.get_sample_file(file)
with open(sample_file, 'r') as f:
self.client.post('/editor/', {'document': f})
response = self.client.get('/editor/get_segments_links')
resp_json = json.loads(response.content)
links = eval(resp_json['links'])
self.assertEqual(links, [[[1.0, 5.0], [1.0, 6.0]],
[[-3.0, 6.0], [-3.0, 5.0]],
[[-3.0, 1.0], [-3.0, 0.0]]])
self.assertEqual(response.status_code, 200)
def test_get_segments_links_no_track(self):
"""
Try to get segments with no available track
"""
response = self.client.get('/editor/get_segments_links')
self.assertEqual(response.status_code, 520)
def test_get_segments_links_wrong_request(self):
"""
Send post instead of get
"""
response = self.client.post('/editor/get_segments_links')
self.assertEqual(response.status_code, 405)
class ReverseSegmentTest(EditorTestUtils):
"""
Test the reverse segment view of the editor api
"""
def setUp(self):
self.test_path = os.path.dirname(__file__)
self.user, self.username, self.password = self.create_user()
self.login()
def test_reverse_segment(self):
"""
Test reverse segments
"""
self.create_session()
for file in ['simple_numbers.gpx', 'simple_numbers_down.gpx',
'simple_numbers_left.gpx']:
sample_file = self.get_sample_file(file)
with open(sample_file, 'r') as f:
self.client.post('/editor/', {'document': f})
response_1 = self.client.post('/editor/reverse_segment/1')
response_2 = self.client.post('/editor/reverse_segment/2')
response_3 = self.client.post('/editor/reverse_segment/3')
json_track = json.loads(self.client.session['json_track'])
simple_numbers = {'lat': [1] * 5, 'lon': list(range(1, 6))}
simple_numbers_down = {'lat': list(range(1, -4, -1)), 'lon': [6] * 5}
simple_numbers_left = {'lat': [-3] * 5, 'lon': list(range(5, 0, -1))}
self.assertEqual(response_1.status_code, 200)
self.assertEqual(response_2.status_code, 200)
self.assertEqual(response_3.status_code, 200)
self.assertEqual(json_track['lat'],
simple_numbers['lat'][::-1] +
simple_numbers_down['lat'][::-1] +
simple_numbers_left['lat'][::-1])
self.assertEqual(json_track['lon'],
simple_numbers['lon'][::-1] +
simple_numbers_down['lon'][::-1] +
simple_numbers_left['lon'][::-1])
def test_reverse_segment_get_no_track(self):
"""
        Try to reverse a segment with no available track
"""
response = self.client.post('/editor/reverse_segment/1')
self.assertEqual(response.status_code, 520)
def test_reverse_segment_get_no_index(self):
"""
        Do not provide a segment index in the request
        """
# Repository: h3dema/deepwifi
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Environment implementation (abstract class) that represents the experiment using video.
This class implements the basic functions to control the APs,
but it does not implement the QoE computation.
"""
import time
import logging
import pickle
from abc import abstractmethod
import socket
import numpy as np
import http.client
import urllib.parse
from Environment.interface_env import Interface_Env
from Environment.common import kill_aps
from Environment.common import change_channel_hostapd
from Environment.common import start_hostapd
def decode_txpower(t):
""" convert the data in info['txpower'] which is, for example, '15.00 dBm' into 15.0
@return: the value of the tx power
@rtype: float
"""
r = float(t.split()[0].strip())
return r
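# Illustrative sketch (driver output assumed): decode_txpower strips the unit reported
# by the wireless driver and keeps only the numeric value.
#
#     >>> decode_txpower('15.00 dBm')
#     15.0
#     >>> decode_txpower('7.50 dBm')
#     7.5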
class Generic_AP(Interface_Env):
NUM_CHANNELS = 11
NUM_TXPOWER_LEVELS = 15
    DEFAULT_C = 0.4  # used in the Hossfeld reward
def __init__(self,
aps, # List[AP_Config]
model_filename, # filename that contains the trained model
mac_mapping={}, # {'hostname':'mac'}
log_level=logging.DEBUG,
log_name='AP Controller',
wait_for_states=10,
execute_action=False,
):
"""
initialize the environment
@param aps: list of aps controlled in the experiment
@param model_filename: name of the file that contains the trained model
@type model_filename: str
@param mac_mapping: a dictionary that maps the hostname to its mac address
@param execute_action: if True send the selected actions to the devices
"""
super().__init__(LOG_NAME=log_name, log_level=log_level)
self.aps = aps
# load model from json
self.mos_model = self.get_model(model_filename=model_filename)
# num_states is inf because there are continuous dimensions
self.num_states = None
self.dim_states = 20 # (None, 20)
self.num_actions = self.NUM_CHANNELS * self.NUM_TXPOWER_LEVELS
self.station_data = dict()
# used to inform command_ap the mapping between the station name and its MACs
self.mac_mapping = mac_mapping
self.wait_for_states = wait_for_states
self.execute_action = execute_action
self.last_channnel = [1 for _ in range(len(aps))]
def command_ap(self, server, port, iface, cmd, extra_params=None):
"""
@return: returns true if receive the response,
also returns the data or an empty dict (if error)
@rtype bool, dict
"""
conn = http.client.HTTPConnection(server, port)
params = {'iface': iface}
if extra_params is not None:
params.update(extra_params)
q = urllib.parse.urlencode(params)
url = "{}?{}".format(cmd, q)
try:
conn.request(method='GET', url=url)
except (ConnectionRefusedError, http.client.RemoteDisconnected, socket.gaierror):
return False, {} # Got an error
resp = conn.getresponse()
self.log.info("cmd: {} @ {} status:{}".format(cmd, server, resp.status))
try:
data = pickle.loads(resp.read())
except (EOFError, pickle.UnpicklingError):
data = {}
conn.close()
return resp.status == 200, data
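    # Illustrative usage sketch (hostname, port and interface are assumptions, not real
    # devices): command_ap returns whether the HTTP call succeeded plus the unpickled
    # payload, so callers can do e.g.
    #
    #     ok, info = env.command_ap('ap1.local', 8080, 'wlan0', '/get_info')
    #     if ok:
    #         print(info.get('channel'), info.get('txpower'))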
def restart_aps(self, run_id):
""" this is done because our ap sometimes crashes. the hostapd continues to run, but does not provide a channel
"""
aps_to_change = []
chans = []
for ap, channel in zip(self.aps, self.last_channnel):
_, data = self.command_ap(ap.name, ap.port, ap.iface, '/get_info')
ch = data.get('channel', -1)
if ch != -1:
continue
aps_to_change.append(ap)
            chans.append(channel)  # re-apply the last known channel for this AP
if len(aps_to_change) == 0:
# nothing to do
return
# alter the hostapd.conf file
change_channel_hostapd(aps_to_change, chans)
# restart the hostapd
kill_aps(aps_to_change)
# start hostapd
        start_hostapd(aps_to_change, [run_id for _ in range(len(aps_to_change))])
def valid_actions(self, state=None):
""" return a list with all valid actions for a specific state,
if state == None, return all possible states
@param state: current state
@return: list(int)
"""
# TODO: check for valid actions when states is not None
valid = list(range(self.num_actions)) # now we always return all actions
return valid
def one_hot(self, channel):
""" code the channel using one-hot encoding
@param channel:
@type channel: int
@return: the channel hot encoded
@rtype: list(int)
"""
assert channel > 0 and channel <= self.NUM_CHANNELS, "Wrong channel = {}".format(channel)
cs = [0 for i in range(self.NUM_CHANNELS)]
cs[channel - 1] = 1
self.log.debug("one-hot {} = {}".format(channel, cs))
return cs
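    # Illustrative sketch: with NUM_CHANNELS == 11, channel 3 becomes
    #
    #     >>> self.one_hot(3)
    #     [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0]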
def get_states(self):
""" get the states, one for each AP
the state contains:
- ( #stations, ch1, ch2, ch3, ch4, ch5, ch6, ch7, ch8, ch9, ch10, ch11,
tx_power, #num_neighbors, ch_noise_max, perc_phy_busy_time,
sta_signal_avg,
rec_bitrate_min, tx_byte_avg, rx_byte_avg )
@return: return the value that represent the state of all APs. Returns None if an error occurs.
"""
known_macs = set([ap.mac for ap in self.aps])
try:
states = []
for ap in self.aps:
self.log.info("Data from {} @ {}".format(ap.name, ap.iface))
_, info = self.command_ap(ap.name, ap.port, ap.iface, '/get_info')
self.log.info("Info: {}".format(info))
ch = int(info['channel'])
self.log.info("Channel: {}".format(ch))
_, stations = self.command_ap(ap.name, ap.port, ap.iface, '/get_stations')
self.log.info("Stations: {}".format(stations))
num_stations = len(stations) # number of stations now
self.log.info("n: {}".format(num_stations))
# check #num_neighbors
_, scan = self.command_ap(ap.name, ap.port, ap.iface, '/get_scan_mac')
self.log.info("Scan: {}".format(scan))
macs = set([k for k in scan]) # the dictionary key is the mac of the detected AP
num_neighbors = len(macs.intersection(known_macs))
self.log.info("num_neighbors: {}".format(num_neighbors))
_, survey = self.command_ap(ap.name, ap.port, ap.iface, '/get_survey')
self.log.info("survey: {}".format(survey))
chann_in_use = [v for v in survey if survey[v].get('in use', False)][0] # we need only the channel in use
self.log.info("survey (in use): {}".format(chann_in_use))
survey_in_use = survey[chann_in_use]
ch_noise_max = survey_in_use['noise']
perc_phy_busy_time = (survey_in_use['channel busy time'] + survey_in_use['channel receive time'] + survey_in_use['channel transmit time']) \
/ survey_in_use['channel active time']
# obtain the state: one state per AP, so consolidate
signal_avg = np.average([stations[s]['signal avg'] for s in stations])
rx_bitrate = np.average([stations[s]['rx bitrate'] for s in stations])
# detrend tx_bytes and rx_bytes
tx_bytes = 0
rx_bytes = 0
for k in stations:
if k not in self.station_data:
self.station_data[k] = dict()
self.station_data[k]['tx bytes'] = stations[k]['tx bytes']
self.station_data[k]['rx bytes'] = stations[k]['rx bytes']
tx_bytes = stations[k]['tx bytes'] - self.station_data[k]['tx bytes']
rx_bytes = stations[k]['rx bytes'] - self.station_data[k]['rx bytes']
# save to use in the next round
self.station_data[k]['tx bytes'] = stations[k]['tx bytes']
self.station_data[k]['rx bytes'] = stations[k]['rx bytes']
# save the AP's state
state = [num_stations] + \
self.one_hot(ch) + \
[decode_txpower(info['txpower']),
num_neighbors, # num_neighbors
ch_noise_max,
perc_phy_busy_time,
signal_avg,
rx_bitrate,
tx_bytes,
rx_bytes,
]
if np.any(np.isnan(state)):
# some reading got nan == error
states = None
break
states.append(state) # get the final state for the AP
except (KeyError, ValueError, IndexError):
# IndexError: can occur in chann_in_use
# KeyError: can occur in ch, survey_in_use, ch_noise_max, perc_phy_busy_time
states = None # trigger an Error
self.log.info("States: {}".format(states))
return states
def encode_action(self, txpower, channel):
"""
        @param txpower: tx power level (1 to 15 dBm)
        @param channel: the 2.4 GHz channel number (1 to 11)
        @return: an integer that encodes the (txpower, channel) pair as a single action
"""
assert channel > 0 and txpower > 0
action = (channel - 1) * self.NUM_TXPOWER_LEVELS + (txpower - 1)
return action
def decode_action(self, action):
"""
@param action: an integer that represents the action
@return: decoded values of txpower (1 to 15 dBm) and channel (1 to 11)
"""
channel = action // self.NUM_TXPOWER_LEVELS + 1
txpower = action % self.NUM_TXPOWER_LEVELS + 1
return txpower, channel
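    # Illustrative sketch: encode_action and decode_action are inverses over the
    # 11 * 15 = 165 discrete actions (indices 0..164).
    #
    #     >>> self.encode_action(txpower=5, channel=3)   # (3 - 1) * 15 + (5 - 1)
    #     34
    #     >>> self.decode_action(34)
    #     (5, 3)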
def setup_device(self, ap, txpower, channel):
""" change the tx power and the ap's channel
@param ap: the ap
@param txpower: tx power (from 1 to 15 dBm)
@param channel: the 2.4GHz channel number (1 to 11)
"""
assert txpower in range(1, 16)
assert channel in range(1, 12)
_, data = self.command_ap(ap.name, ap.port, ap.iface, '/get_info')
ch = data.get('channel', -1)
if ch not in [-1, channel]:
# send command to change channel, if the channel is different
self.log.info("last_channnel {} ==> new channel {}".format(ch, channel))
self.command_ap(ap.name, ap.port, ap.iface,
'/set_channel', extra_params={'new_channel': channel})
else:
return False
self.command_ap(ap.name, ap.port, ap.iface,
'/set_power', extra_params={'new_power': txpower})
self.log.info("setup_device ** ap {} txpower {} channel {}".format(ap.name, txpower, channel))
return True
def make_step(self, actions, retries=5):
"""send commands to aps
        @param actions: a list of integers, one action to be taken per AP
        @type actions: list(int)
        @param retries: number of times this function tries to get the next_state from the devices; if unsuccessful, next_state is returned as None
        @type retries: int
        @return: next_state: a list of values that represents the next state
        @return: reward: a real number (reward feedback); contains np.nan if an error occurs
@rtype: list(int), float
"""
assert retries > 0, "At least one try"
self.log.info("make_step ** actions {} - type {}".format(actions, type(actions)))
if self.execute_action:
# make the move defined in action
i = 0
for ap, action in zip(self.aps, actions):
# decode the number into the actual set of commands
# send the commands to the ap
txpower, channel = self.decode_action(action)
self.setup_device(ap, txpower, channel)
self.last_channnel[i] = channel
i += 1
else:
# use this to just grab the data from a execution without the interference of the algorithm
self.log.info("******************")
self.log.info("******************")
self.log.info("** NO STEP DONE **")
self.log.info("******************")
self.log.info("******************")
# check the new state
i = 0
        while i < retries:
            # The body of this retry loop was truncated in the source; based on the
            # docstring above it should keep calling self.get_states() until a valid
            # next_state is obtained or the retry budget is exhausted.
            ...
# Repository: UBC-MDS/mindthegap
from dash import Dash, html, dcc, Input, Output
import numpy as np
import pandas as pd
import altair as alt
from vega_datasets import data
import dash_bootstrap_components as dbc
import os
app = Dash(
__name__, title="Mindthegap Dashboard", external_stylesheets=[dbc.themes.BOOTSTRAP]
)
server = app.server
# read in gapminder and continent data
current_dir = os.path.abspath(os.path.dirname(__file__))
country_ids = pd.read_csv(os.path.join(current_dir, "../data/country_ids.csv"))
gap = pd.read_csv(os.path.join(current_dir, "../data/gapminder.csv"))
gap = gap.merge(country_ids, how="outer", on=["country"])
gap["log_income"] = gap["income"].apply(np.log)
# dictionary to generate dynamic metrics in altair
metrics = {
"life_expectancy": "Life Expectancy",
"child_mortality": "Child Mortality",
"pop_density": "Population Density",
}
############################## CONTROL PANEL FILTERS ##############################
FILTER_STYLE = {"background-color": "#f8f9fa", "width": "18rem", "height": "100%"}
filter_panel = dbc.Card(
dbc.Col(
[
html.Br(),
html.Br(),
html.Br(),
# control panel title
html.H2("Control Panel", className="text-center"),
html.Br(),
html.Br(),
# metric radio button
dbc.Row(
[
html.H5("1. Metric", className="text-left"),
dbc.RadioItems(
id="metric",
value="life_expectancy",
labelStyle={"display": "block"},
options=[{"label": v, "value": k} for k, v in metrics.items()],
),
]
),
html.Br(),
html.Br(),
# continent drop down
dbc.Row(
[
html.H5("2. Continent", className="text-left"),
dcc.Dropdown(
id="region",
options=[
{"label": reg, "value": reg}
for reg in gap["region"].dropna().unique()
],
value=None,
),
]
),
html.Br(),
html.Br(),
# sub-region drop down
dbc.Row(
[
html.H5("3. Sub Continent", className="text-left"),
dcc.Dropdown(id="sub_region", value=None),
]
),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
dbc.Row(
[
html.H5("Data Source", className="text-left"),
dcc.Markdown("""Dataset for visualization of this dashbaord can be downloaded from [here](https://github.com/UBC-MDS/mindthegap/blob/main/data/gapminder.csv)
"""
),
]
),
html.Br(),
html.Br(),
html.P("Note: If a plot is empty, this means that there is no data based on your selections.")
],
),
style=FILTER_STYLE,
body=True,
)
############################## PLOT OBJECTS #######################################
boxplot = html.Iframe(
id="boxplot",
style={"border-width": "0", "width": "100%", "min-height": "400px"},
)
bubblechart = html.Iframe(
id="bubblechart",
style={"border-width": "0", "width": "100%", "height": "400px"},
)
barchart = html.Iframe(
id="barchart",
style={
"border-width": "0",
"width": "100%",
"height": "400px",
},
)
worldmap = html.Iframe(
id="worldmap",
style={"border-width": "4px", "width": "100%", "min-height": "400px"}
)
############################## DASHBOARD LAYOUT ###################################
app.layout = dbc.Container(
[
# title
html.Div(
style={"textAlign": "center", "color": "black", "font-size": "26px"},
children=[
html.H1("Mindthegap Dashboard"),
],
),
html.Br(),
dbc.Row(
[
# control panel
dbc.Col(filter_panel, md=5, lg=3, sm=3),
dbc.Col(
[
dbc.Row(
[
dbc.Col(
[
html.H5("Select Year", className="text-left"),
dcc.Slider(
min=1970,
max=2010,
step=5,
value=2010,
id="yr",
marks={
str(i): {
"label": str(i),
"style": {"color": "black"},
}
for i in range(1970, 2015, 5)
},
),
],
md=10, lg=10
),
dbc.Row(),
html.Br(),
dbc.Row([
html.H5("World Map view by Metric", style={"width": "fit-content"}),
dbc.Col(
[
dbc.Button(
id="map_tooltip",
color="secondary",
children=html.Strong("?"),
size="sm",
outline=True,
),
dbc.Tooltip(
"Choose metric from the control panel, drag and select the year to view the change of metric in the world using the slide bar. Select a continent to view a zoomed version of continent. You can hover over the plot for more details",
target="map_tooltip",
placement="bottom",
),
] )
], style={"padding": "3vh 0"}),
dbc.Col([
dbc.Card(
dbc.Col(
dbc.Row(
[dbc.Col(
[
worldmap
],
md=10, lg=12,
),
]
),
),
style={"border":"0px"}),
html.Br(),
html.Br(),
dbc.Row(
[
dbc.Col(
[
dbc.Row([
html.H5("Top/Bottom countries by Metric", style={"width": "fit-content"}),
dbc.Col(
[
dbc.Button(
id="bar_tooltip",
color="secondary",
children=html.Strong("?"),
size="sm",
outline=True,
),
dbc.Tooltip(
"Choose metric from the control panel, drag and select the year using the slide bar. Select a continent and/or sub-continent to view the top/bottom countries for that metric",
target="bar_tooltip",
placement="bottom",
),
] )
], style={"padding": "3vh 0"}),
dbc.Card([
dbc.Card(
html.Div(
[
# dbc.Label("Choose Top/Bottom by country"),
dbc.RadioItems(
options=[
{"label": "Top Countries", "value": "Top"},
{"label": "Bottom Countries", "value": "Bottom"},
],
value="Top",
id="radio",
inline=True,
style={"align":"center"},
labelStyle={"align":"center"}
),
]
),
style={"height":"43px"}
),
html.Div(barchart)
])
],
md=6,lg=6
),
dbc.Col(
[
dbc.Row([
html.H5("GDP/Income by Metric", style={"width": "fit-content"}),
dbc.Col(
[
dbc.Button(
id="tab_tooltip",
color="secondary",
children=html.Strong("?"),
size="sm",
outline=True,
),
dbc.Tooltip(
"Choose metric from the control panel, drag and select the year using the slide bar. Select a continent and/or sub continent to view the changes in GDP w.r.t to population size and the changes in income by the parameters selected. You can hover over regions on the map",
target="tab_tooltip",
placement="bottom",
),
] )
], style={"padding": "3vh 0"}),
dbc.Card([
html.Div(
dbc.Tabs(
id="tabs",
active_tab="gdp",
children=[
dbc.Tab(
label="GDP",
tab_id="gdp",
),
dbc.Tab(
label="Income",
tab_id="income",
),
],
),
),
html.Div(id="tab-content")
])
],
md=6, lg=6),
]
),
],
md=10, lg=10
),
]
)
]
),
]
),
],
fluid=True,
)
############################## HELPER FUNCTIONS ###################################
@app.callback(Output("tab-content", "children"), Input("tabs", "active_tab"))
def render_graph(tabs):
if tabs == "gdp":
return html.Div([bubblechart])
elif tabs == "income":
return html.Div([boxplot])
def filter_data(region, sub_region, country, yr):
"""
Filter data based on region, sub region and country selection
Parameters
--------
region: string
Selection from the Region filter
sub_region: string
Selection from Sub Region filter
country: string
Selection from Country filter
yr: string
Selection from Year
Returns
--------
data
dataset that has been filtered on region, sub region and country selection
Example
--------
> filter_data("Asia", "Western Asia", "Yemen", 2015)
"""
# Filter by region, sub-region, country
if country:
data = gap.query(f"country == '{country}'")
elif sub_region:
data = gap.query(f"sub_region == '{sub_region}'")
elif region:
data = gap.query(f"region == '{region}'")
else:
data = gap
# Filter by year
if yr:
data = data.query(f"year == {yr}")
return data
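# Illustrative sketch (not wired into the app; the toy data below is hypothetical):
# the chained .query calls in filter_data narrow the frame one condition at a time.
def _filter_data_example():
    toy = pd.DataFrame({
        "region": ["Asia", "Asia", "Europe"],
        "sub_region": ["Western Asia", "Eastern Asia", "Northern Europe"],
        "country": ["Yemen", "Japan", "Norway"],
        "year": [2010, 2010, 2010],
    })
    # Equivalent to filter_data("Asia", None, None, 2010) applied to the toy frame
    return toy.query("region == 'Asia'").query("year == 2010")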
@app.callback(
Output("sub_region", "options"),
Input("region", "value"),
)
def get_sub_region(region):
"""Get a sub region value(s) based on a region value in gapminder
Parameters
----------
region : string
The region to get subregions for
Returns
-------
options
Dict of subregion label/values
"""
if region is None:
options = [
{"label": sub_region, "value": sub_region}
for sub_region in gap["sub_region"].dropna().unique()
]
else:
sub_regions = list(gap[gap["region"] == region]["sub_region"].unique())
options = []
for sr in sub_regions:
options.append({"label": sr, "value": sr})
return options
@app.callback(
Output("country", "options"),
Input("region", "value"),
Input("sub_region", "value"),
)
def get_country(region, sub_region):
"""Get a sub region value(s) based on a region value in gapminder
Parameters
----------
region : string
The region to get subregions for
Returns
-------
options
Dict of subregion label/values
"""
options = [
{"label": cntry, "value": cntry}
for cntry in gap["country"].dropna().unique()
]
return options
############################## PLOTTING FUNCTIONS #################################
@app.callback(
Output("worldmap", "srcDoc"),
Input("metric", "value"),
Input("region", "value"),
Input("yr", "value"),
)
def plot_world_map(metric, region, yr):
"""
    Create world heatmap for the statistic of interest based on the selected year filter.
    Parameters
    --------
    metric: string
        Selection from statistic of interest filter
    region: string
        Selection from the Continent filter (None shows the whole world)
    yr: integer
        Year for which the data is displayed, from Year filter
Returns
--------
chart
World heatmap for statistic of interest based on year filter
Example
--------
> plot_world_map("child_mortality", "Asia", 2015)
"""
world = data.world_110m()
world_map = alt.topo_feature(data.world_110m.url, "countries")
alt.data_transformers.disable_max_rows()
df = filter_data(region, None, None, yr)
if region is None:
chart = (
alt.Chart(world_map, title=f"{metrics[metric]} by country for year {yr}")
.mark_geoshape(stroke="black")
.transform_lookup(
lookup="id",
from_=alt.LookupData(df, key="id", fields=["country", metric]),
)
.encode(
tooltip=["country:O", metric + ":Q"],
color=alt.Color(metric + ":Q", title=metrics[metric]),
)
.properties(width=900, height=350)
)
else:
scl = None
trans = None
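        # Hand-tuned view parameters: `scale` zooms the naturalEarth1 projection
        # and `translate` re-centers it on the selected continent.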
if region == "Europe":
scl = 800
trans = [150, 1010]
elif region == "Asia":
scl = 500
trans = [-100, 500]
elif region == "Africa":
scl = 500
trans = [400, 300]
elif region == "Americas":
scl = 300
trans = [1000, 350]
elif region == "Oceania":
scl = 500
trans = [-400, 0]
chart = (
alt.Chart(world_map, title=f"{metrics[metric]} by country for year {yr}")
.mark_geoshape(stroke="black")
.transform_lookup(
lookup="id",
from_=alt.LookupData(df, key="id", fields=["country", metric]),
)
.encode(
tooltip=["country:O", metric + ":Q"],
color=alt.Color(metric + ":Q", title=metrics[metric]),
)
.project(type="naturalEarth1", scale=scl, translate=trans)
.properties(width=900, height=350)
)
return chart.to_html()
@app.callback(
Output("boxplot", "srcDoc"),
Input("metric", "value"),
Input("region", "value"),
Input("sub_region", "value"),
Input("yr", "value"),
)
def plot_box_plot(metric, region, sub_region, yr):
"""
    Create box plot of the statistic of interest for income groups, based on selected filters
Parameters
--------
metric: string
Selection from statistic of interest filter
region: string
Selection from the region filter
sub_region: string
Selection from sub region filter
yr: integer
Year for which the data is displayed, from Year filter
Returns
--------
chart
        Box plot showing statistic of interest for income groups,
in specific region, subregion and year
Example
--------
> plot_box_plot("child_mortality", "Asia", "Western Asia", 2015)
"""
alt.data_transformers.disable_max_rows()
# filter by region, sub-region & year
data = filter_data(region, sub_region, None, yr)
data = data[data["income_group"].notnull()]
chart = (
alt.Chart(
data,
title=f"{metrics[metric]} by Income Group for year {yr}",
)
.mark_boxplot(size=50)
.encode(
alt.X("income_group", sort="-x", title="Income Group"),
alt.Y(metric, title=metrics[metric], scale=alt.Scale(zero=False)),
color=alt.Color(
"income_group",
sort=alt.EncodingSortField("income_group", order="descending"),
title="Income Group",
),
tooltip=("name:O", "child_mortality:Q"),
)
.configure_axis(labelFontSize=12, titleFontSize=14)
.configure_legend(labelFontSize=12)
.properties(width=450, height=300)
.configure_legend(gradientLength=900, gradientThickness=400)
)
return chart.to_html()
@app.callback(
Output("bubblechart", "srcDoc"),
Input("metric", "value"),
Input("region", "value"),
Input("sub_region", "value"),
Input("yr", "value"),
)
def plot_bubble_chart(metric, region, sub_region, yr):
"""
Create bubble chart for statsitic of interested based on selected | |
0x0000408f, 0x08b80000, 0xc800204788b80000, b"c800204788b80000", b"C", b"T"),
(b"mt", 25, 16528, 0x00004090, 0x08e80000, 0xc800204808e80000, b"c800204808e80000", b"t", b"c"),
(b"MT", 25, 19870, 0x00004d9e, 0x0d636362, 0xc80026cf0d636362, b"c80026cf0d636362", b"T", b"ACGTACGTAC"),
(b"MT", 25, 19871, 0x00004d9f, 0x508d8d8e, 0xc80026cfd08d8d8e, b"c80026cfd08d8d8e", b"ACGTACGTAC", b"T"),
])
class TestFunctions(TestCase):
@classmethod
def setUpClass(cls):
global npvk
try:
npvk = pyvk.VariantKey(
os.path.realpath(os.path.dirname(os.path.realpath(__file__)) + "/../../c/test/data/genoref.bin"),
os.path.realpath(os.path.dirname(os.path.realpath(__file__)) + "/../../c/test/data/nrvk.10.bin"),
os.path.realpath(os.path.dirname(os.path.realpath(__file__)) + "/../../c/test/data/rsvk.10.bin"),
os.path.realpath(os.path.dirname(os.path.realpath(__file__)) + "/../../c/test/data/vkrs.10.bin"))
except Exception as err:
assert False, "Unable to initialize the class: {0}".format(err)
@classmethod
def tearDownClass(cls):
global npvk
npvk.close()
def test_encode_chrom(self):
data = np.array([
["", "NA", "XY", "1X", "1Y", "1M", "1MT", "chr", " 1", "1 "],
["1", "01", "001", "0001", "chr1", "CHR1", "chr01", "CHR01", "chr001", "CHR001"],
["2", "02", "002", "0002", "chr2", "CHR2", "chr02", "CHR02", "chr002", "CHR002"],
["3", "03", "003", "0003", "chr3", "CHR3", "chr03", "CHR03", "chr003", "CHR003"],
["4", "04", "004", "0004", "chr4", "CHR4", "chr04", "CHR04", "chr004", "CHR004"],
["5", "05", "005", "0005", "chr5", "CHR5", "chr05", "CHR05", "chr005", "CHR005"],
["6", "06", "006", "0006", "chr6", "CHR6", "chr06", "CHR06", "chr006", "CHR006"],
["7", "07", "007", "0007", "chr7", "CHR7", "chr07", "CHR07", "chr007", "CHR007"],
["8", "08", "008", "0008", "chr8", "CHR8", "chr08", "CHR08", "chr008", "CHR008"],
["9", "09", "009", "0009", "chr9", "CHR9", "chr09", "CHR09", "chr009", "CHR009"],
["10", "010", "0010", "00010", "chr10", "CHR10", "chr010", "CHR010", "chr0010", "CHR0010"],
["11", "011", "0011", "00011", "chr11", "CHR11", "chr011", "CHR011", "chr0011", "CHR0011"],
["12", "012", "0012", "00012", "chr12", "CHR12", "chr012", "CHR012", "chr0012", "CHR0012"],
["13", "013", "0013", "00013", "chr13", "CHR13", "chr013", "CHR013", "chr0013", "CHR0013"],
["14", "014", "0014", "00014", "chr14", "CHR14", "chr014", "CHR014", "chr0014", "CHR0014"],
["15", "015", "0015", "00015", "chr15", "CHR15", "chr015", "CHR015", "chr0015", "CHR0015"],
["16", "016", "0016", "00016", "chr16", "CHR16", "chr016", "CHR016", "chr0016", "CHR0016"],
["17", "017", "0017", "00017", "chr17", "CHR17", "chr017", "CHR017", "chr0017", "CHR0017"],
["18", "018", "0018", "00018", "chr18", "CHR18", "chr018", "CHR018", "chr0018", "CHR0018"],
["19", "019", "0019", "00019", "chr19", "CHR19", "chr019", "CHR019", "chr0019", "CHR0019"],
["20", "020", "0020", "00020", "chr20", "CHR20", "chr020", "CHR020", "chr0020", "CHR0020"],
["21", "021", "0021", "00021", "chr21", "CHR21", "chr021", "CHR021", "chr0021", "CHR0021"],
["22", "022", "0022", "00022", "chr22", "CHR22", "chr022", "CHR022", "chr0022", "CHR0022"],
["X", "x", "chrX", "chrx", "CHRX", "CHRx", "X", "X", "X", "X"],
["Y", "y", "chrY", "chry", "CHRY", "CHRy", "Y", "Y", "Y", "Y"],
["M", "m", "MT", "mt", "chrM", "chrm", "chrMT", "chrmt", "CHRMt", "CHRmT"],
])
for i in range(0, 26):
e = np.repeat(i, 10)
c = npvk.encode_chrom(data[i])
np.testing.assert_array_equal(c, e)
chrom = npvk.encode_chrom(b"WRONG")
self.assertEqual(chrom, 0)
def test_encode_chrom_input_type(self):
self.assertEqual(npvk.encode_chrom(b"chr01"), npvk.encode_chrom("chr01"))
def test_decode_chrom(self):
code = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25]
e = np.array([b"NA", b"1", b"2", b"3", b"4", b"5", b"6", b"7", b"8", b"9", b"10", b"11", b"12", b"13", b"14", b"15", b"16", b"17", b"18", b"19", b"20", b"21", b"22", b"X", b"Y", b"MT"], dtype='|S2')
c = npvk.decode_chrom(code)
np.testing.assert_array_equal(c, e)
def test_encode_refalt_input_type(self):
self.assertEqual(npvk.encode_refalt(b"AC", b"GT"), npvk.encode_refalt("AC", "GT"))
self.assertEqual(npvk.encode_refalt(b"ACGTACGT", b"GTACGTAC"), npvk.encode_refalt("ACGTACGT", "GTACGTAC"))
def test_encode_refalt(self):
base = [b"A", b"C", b"N", b"GT", b"ACG", b"ACGTa", b"ACGTac", b"ACGTacg", b"ACGTacgt", b"ACGTACGTAC", b"ACGTacgtACGT"]
e = np.array([
142606336, 142606336, 143130624, 144703488, 726812453, 330097465,
152436736, 282591232, 159580160, 411828224, 176381952, 680361984,
184771072, 814581760, 193159936, 948800512, 201548640, 1083018624,
218325858, 1351454088, 1057675291, 1369359907, 145227776, 145227776,
1375594857, 1513805153, 154533888, 282722304, 161677312, 411860992,
178479104, 680364032, 186868224, 814582272, 195257088, 948800640,
203645792, 1083018656, 220423010, 1351454090, 1398628389, 1358766431,
1969243823, 1969243823, 971081273, 1771898065, 776945419, 1002621441,
1754088851, 1261827981, 1010241683, 1792303827, 1458585093, 1889528781,
273848339, 1684605733, 1654748801, 1190621367, 785406155, 2000763481,
291340288, 291340288, 299417600, 420306944, 316200960, 688756224,
324589696, 822971776, 332978368, 957189472, 341367000, 1091407320,
636483075, 1776551723, 1695405603, 1483277935, 428617728, 428617728,
445396480, 697139968, 453785120, 831359168, 462173744, 965577776,
470562358, 1099795852, 191755869, 1887943745, 1415691159, 303875507,
713917280, 713917280, 722305890, 848136408, 723621645, 1783965747,
508782503, 458890457, 581191723, 141156817, 1430489581, 628679603,
1225091505, 1225091505, 1071275181, 1218465497, 1052062435, 681734883,
1735823055, 1173846073, 528456553, 1747209651, 859706021, 859706021,
1597496643, 1378980139, 1362028185, 1471164791, 1267233551, 573285289,
284858523, 284858523, 1987610005, 378861315, 644679011, 912925615,
1451695617, 1451695617, 1610695323, 2076346979, 976842551, 976842551,
], dtype=np.uint32)
iref = []
ialt = []
for i in range(0, 11):
for j in range(i, 11):
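                # Append both orderings -- (base[ri], base[rj]) and (base[rj], base[ri]) --
                # so every pair of alleles is encoded in both REF/ALT directions.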
ri = i
rj = j
for r in range(0, 2):
iref.append(base[ri])
ialt.append(base[rj])
tmp = ri
ri = rj
rj = tmp
h = npvk.encode_refalt(iref, ialt)
np.testing.assert_array_equal(h, e)
def test_decode_refalt(self):
d = np.array([
142606336, 142606336, 143130624, 144703488, 726812453, 330097465,
152436736, 282591232, 159580160, 411828224, 176381952, 680361984,
184771072, 814581760, 193159936, 948800512, 201548640, 1083018624,
218325858, 1351454088, 1057675291, 1369359907, 145227776, 145227776,
1375594857, 1513805153, 154533888, 282722304, 161677312, 411860992,
178479104, 680364032, 186868224, 814582272, 195257088, 948800640,
203645792, 1083018656, 220423010, 1351454090, 1398628389, 1358766431,
1969243823, 1969243823, 971081273, 1771898065, 776945419, 1002621441,
1754088851, 1261827981, 1010241683, 1792303827, 1458585093, 1889528781,
273848339, 1684605733, 1654748801, 1190621367, 785406155, 2000763481,
291340288, 291340288, 299417600, 420306944, 316200960, 688756224,
324589696, 822971776, 332978368, 957189472, 341367000, 1091407320,
636483075, 1776551723, 1695405603, 1483277935, 428617728, 428617728,
445396480, 697139968, 453785120, 831359168, 462173744, 965577776,
470562358, 1099795852, 191755869, 1887943745, 1415691159, 303875507,
713917280, 713917280, 722305890, 848136408, 723621645, 1783965747,
508782503, 458890457, 581191723, 141156817, 1430489581, 628679603,
1225091505, 1225091505, 1071275181, 1218465497, 1052062435, 681734883,
1735823055, 1173846073, 528456553, 1747209651, 859706021, 859706021,
1597496643, 1378980139, 1362028185, 1471164791, 1267233551, 573285289,
284858523, 284858523, 1987610005, 378861315, 644679011, 912925615,
1451695617, 1451695617, 1610695323, 2076346979, 976842551, 976842551,
], dtype=np.uint32)
eref = np.array([b'A', b'A', b'A', b'C', b'', b'', b'A', b'GT', b'A', b'ACG', b'A', b'ACGTA', b'A',
b'ACGTAC', b'A', b'ACGTACG', b'A', b'ACGTACGT', b'A', b'ACGTACGTAC', b'', b'',
b'C', b'C', b'', b'', b'C', b'GT', b'C', b'ACG', b'C', b'ACGTA', b'C', b'ACGTAC',
b'C', b'ACGTACG', b'C', b'ACGTACGT', b'C', b'ACGTACGTAC', b'', b'', b'', b'', b'',
b'', b'', b'', b'', b'', b'', b'', b'', b'', b'', b'', b'', b'', b'', b'', b'GT', b'GT',
b'GT', b'ACG', b'GT', b'ACGTA', b'GT', b'ACGTAC', b'GT', b'ACGTACG', b'GT',
b'ACGTACGT', b'', b'', b'', b'', b'ACG', b'ACG', b'ACG', b'ACGTA', b'ACG',
b'ACGTAC', b'ACG', b'ACGTACG', b'ACG', b'ACGTACGT', b'', b'', b'', b'', b'ACGTA',
b'ACGTA', b'ACGTA', b'ACGTAC', b'', b'', b'', b'', b'', b'', b'', b'', b'', b'', b'',
b'', b'', b'', b'', b'', b'', b'', b'', b'', b'', b'', b'', b'', b'', b'', b'', b'', b'',
b'', b'', b'', b'', b'', b'', b'', b'', b''], dtype=np.string_)
ealt = np.array([b'A', b'A', b'C', b'A', b'', b'', b'GT', b'A', b'ACG', b'A', b'ACGTA', b'A',
b'ACGTAC', b'A', b'ACGTACG', b'A', b'ACGTACGT', b'A', b'ACGTACGTAC', b'A', b'',
b'', b'C', b'C', b'', b'', b'GT', b'C', b'ACG', b'C', b'ACGTA', b'C', b'ACGTAC', b'C',
b'ACGTACG', b'C', b'ACGTACGT', b'C', b'ACGTACGTAC', b'C', b'', b'', b'', b'', b'',
b'', b'', b'', b'', b'', b'', b'', b'', b'', b'', b'', b'', b'', b'', b'', b'GT', b'GT',
b'ACG', b'GT', b'ACGTA', b'GT', b'ACGTAC', b'GT', b'ACGTACG', b'GT', b'ACGTACGT',
b'GT', b'', b'', b'', b'', b'ACG', b'ACG', b'ACGTA', b'ACG', b'ACGTAC', b'ACG',
b'ACGTACG', b'ACG', b'ACGTACGT', b'ACG', b'', b'', b'', b'', b'ACGTA', b'ACGTA',
b'ACGTAC', b'ACGTA', b'', b'', b'', b'', b'', b'', b'', b'', b'', b'', b'', b'', b'',
b'', b'', b'', b'', b'', b'', b'', b'', b'', b'', b'', b'', b'', b'', b'', b'', b'', b'',
b'', b'', b'', b'', b'', b'', b''], dtype=np.string_)
esizeref = np.array([1, 1, 1, 1, 0, 0, 1, 2, 1, 3, 1, 5, 1, 6, 1, 7, 1, 8, 1, 10, 0, 0, 1, 1,
0, 0, 1, 2, 1, 3, 1, 5, 1, 6, 1, 7, 1, 8, 1, 10, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 3, 2, 5, 2, 6, 2, 7, 2, 8,
0, 0, 0, 0, 3, 3, 3, 5, 3, 6, 3, 7, 3, 8, 0, 0, 0, 0, 5, 5, 5, 6, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=np.uint8)
esizealt = np.array([1, 1, 1, 1, 0, 0, 2, 1, 3, 1, 5, 1, 6, 1, 7, 1, 8, 1, 10, 1, 0, 0, 1, 1,
0, 0, 2, 1, 3, 1, 5, 1, 6, 1, 7, 1, 8, 1, 10, 1, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 3, 2, 5, 2, 6, 2, 7, 2, 8, 2,
0, 0, 0, 0, 3, 3, 5, 3, 6, 3, 7, 3, 8, 3, 0, 0, 0, 0, 5, 5, 6, 5, 0, 0,
                         0, 0, 0, 0,
# File: dfxapiclient/simpleclient.py
import asyncio
import copy
import json
import os
import uuid
from .measurements import Measurement
from .measurements_pb2 import SubscribeResultsRequest
from .organizations import Organization
from .users import User
from .websocketHelper import WebsocketHandler
class SimpleClient():
"""The DFX API SimpleClient simplifies the process of using the DFX API,
by providing the following set of core API functionalities:
Registering a device, creating a user, user login, creating a measurement,
subscribing to results, adding measurement data and retrieving results
In subsequent updates, more DFX API endpoints will be added.
For more information on the DFX API, please see https://dfxapiversion10.docs.apiary.io
"""
def __init__(self,
license_key: str,
study_id: str,
email: str,
                 password: str,
server: str = "prod",
device_name: str = "DFX desktop",
firstname: str = None,
lastname: str = None,
phonenum: str = None,
gender: str = None,
dateofbirth: str = None,
height: str = None,
weight: str = None,
config_file: str = None,
add_method: str = "REST",
measurement_mode: str = "DISCRETE",
chunk_length: float = 15,
video_length: float = 60):
"""[summary]
Arguments:
license_key {str} -- DFX API License
study_id {str} -- Study ID
email {str} -- Email address
password {str} -- Password
Keyword Arguments:
server {str} -- Server to use (default: {"prod"})
device_name {str} -- Device name (default: {"DFX desktop"})
firstname {str} -- First name (default: {None})
lastname {str} -- Last name (default: {None})
phonenum {str} -- Phone number (default: {None})
gender {str} -- Gender (default: {None})
dateofbirth {str} -- Date of birth (default: {None})
height {str} -- Height (cm) (default: {None})
weight {str} -- Weight (kg) (default: {None})
config_file {str} -- Path to save local configuration (default: {None})
add_method {str} -- Chunk add backend used Websocket or REST (default: {"REST"})
measurement_mode {str} -- Measurement mode (only DISCRETE supported for now) (default: {"DISCRETE"})
chunk_length {float} -- Chunk length in seconds (default: {15})
video_length {float} -- Video length in seconds (default: {60})
"""
# License key and study ID needs to be provided by the admin
self.license_key = license_key
self.study_id = study_id
self.device_name = device_name
self.server = server.lower()
self.conn_method = add_method.lower()
self.video_length = video_length
self.chunk_length = chunk_length
self.measurement_mode = measurement_mode.upper()
self.config_file = config_file
self.chunks = None
self.device_token = ''
self.device_id = ''
self.user_id = ''
self.user_token = ''
self.measurement_id = ''
self.received_data = asyncio.Queue(30) # Queue for storing results
self.__valid_servers = {}
self.__measurement_modes = {}
self.__get_urls()
self.__measurement_mode()
self.user = User(self.server_url, firstname, lastname, email, password, gender, dateofbirth, height, weight)
self.organization = Organization(license_key, self.server_url)
# Some boolean variables (flags) and floats (time in seconds) for
# asynchronous signalling purposes.
self.addData_done = True # Can only close websocket after all tasks are done
self.subscribe_done = True
self.sub_cycle_complete = True # Can only create measurement after subscribe is done for previous one
self.complete = False
self.subscribe_poll = 0.2 # Time values for signalling and polling
self.subscribe_signal = 0.5
self.__setup() # Register license, create user and login user
self.ws_obj = WebsocketHandler(self.user_token, self.websocket_url)
self.measurement = Measurement(self.study_id,
self.server_url,
self.ws_obj,
self.num_chunks,
self.max_chunks,
mode=self.measurement_mode,
token=self.user_token)
self.received_data = self.measurement.received_data
def __get_urls(self):
"""`Get the REST, websocket, or gRPC urls.
Raises:
KeyError: if server key was not in list
"""
self.__valid_servers = {
"qa": {
"server_url": "https://qa.api.deepaffex.ai:9443",
"websocket_url": "wss://qa.api.deepaffex.ai:9080"
},
"dev": {
"server_url": "https://dev.api.deepaffex.ai:9443",
"websocket_url": "wss://dev.api.deepaffex.ai:9080"
},
"demo": {
"server_url": "https://demo.api.deepaffex.ai:9443",
"websocket_url": "wss://demo.api.deepaffex.ai:9080"
},
"prod": {
"server_url": "https://api.deepaffex.ai:9443",
"websocket_url": "wss://api.deepaffex.ai:9080"
},
"prod-cn": {
"server_url": "https://api.deepaffex.cn:9443",
"websocket_url": "wss://api.deepaffex.cn:9080"
},
"demo-cn": {
"server_url": "https://demo.api.deepaffex.cn:9443",
"websocket_url": "wss://demo.api.deepaffex.cn:9080"
}
}
try:
self.server_url = self.__valid_servers[self.server]["server_url"]
self.websocket_url = self.__valid_servers[self.server]["websocket_url"]
except KeyError:
raise KeyError("Invalid server ID given")
def __measurement_mode(self):
"""Setup the measurement mode selected by the user.
Determines the maximum number of chunks for each measurement etc.
Raises:
KeyError: if unknown mode was passed
"""
self.__measurement_modes = {"DISCRETE": 120, "BATCH": 1200, "VIDEO": 1200, "STREAMING": 1200}
try:
max_len = self.__measurement_modes[self.measurement_mode]
except KeyError:
raise KeyError("Invalid measurement mode given")
self.num_chunks = int(self.video_length / self.chunk_length)
self.max_chunks = int(max_len / self.chunk_length)
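        # Worked example: with the defaults (video_length=60, chunk_length=15, DISCRETE
        # mode capped at 120 s), num_chunks = 60 / 15 = 4 and max_chunks = 120 / 15 = 8.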
def __record(self, data={}):
"""Record and cache all important parameters in a config file
For this method, if the `data` parameter is not passed in, the new
values are directly overwritten into the config file.
If `data` is passed in, it creates a copy. The handling for
recycling previous values are now implemented in `__setup()` below.
Keyword Arguments:
            data {dict} -- The data to record (default: {{}})
"""
# Create a config file
if not self.config_file:
self.config_file = "./default.config"
# Create empty config json file if not there
if not os.path.isfile(self.config_file):
with open(self.config_file, 'w') as f:
d = json.dumps({})
f.write(d)
# This structure ensures that for different servers, there can exist
# multiple licenses (`license_key`), which contains one `device_token`
# each and multiple users (identified by `user_email`), each with its
# own `user_token`.
# Overwrite values with current values
if not data or data == {}:
with open(self.config_file, 'r') as f:
data = json.load(f)
data[self.server] = {}
if self.license_key != '':
data[self.server][self.license_key] = {}
if self.device_token != '':
data[self.server][self.license_key]["device_token"] = self.device_token
if self.user.email != '':
data[self.server][self.license_key][self.user.email] = {}
if self.user_token != '':
data[self.server][self.license_key][self.user.email]["user_token"] = self.user_token
else:
data = data
# Clean up the remaining values (i.e. get rid of it if it's empty)
copied = copy.deepcopy(data)
for server in copied.keys():
if server not in self.__valid_servers.keys():
data.pop(server, None)
for key in copied[server].keys():
data[server].pop('', None)
data[server].pop(' ', None)
if copied[server][key] == {}:
data[server].pop(key, None)
for k in copied[server][key].keys():
if k != "device_token":
data[server][key].pop('', None)
data[server][key].pop(' ', None)
if copied[server][key][k] == {} or copied[server][key][k] == "":
data[server][key].pop(k, None)
with open(self.config_file, 'w') as f:
d = json.dumps(data)
f.write(d)
def __setup(self):
"""Performs the activities necessary for setting up the client.
Register license, create user, and authentication / login.
Recycling and saving the values in configuration file.
Raises:
PermissionError: if server error due to permissions.
"""
# Create empty config json file if not there.
if not self.config_file:
self.config_file = "./default.config"
if not os.path.isfile(self.config_file):
with open(self.config_file, 'w') as f:
json.dump({}, f)
# Recycle and replace values in the config file
with open(self.config_file, 'r') as json_file:
data = json.load(json_file)
# Records the `server`, `license_key`, and `user_email` if they don't exist.
# Server
if (self.server not in data.keys() or data[self.server] == {}):
data[self.server] = {}
# License key
if (self.license_key not in data[self.server].keys() or data[self.server][self.license_key] == {}):
data[self.server][self.license_key] = {}
# User email
if self.user.email not in data[self.server][self.license_key].keys():
data[self.server][self.license_key][self.user.email] = {}
# Next, if a `device_token` doesn't exist for this server and
# license, call the `Organization.registerLicense()` endpoint to
# obtain a device token. On the other hand, if the device token
# already exists, it takes the existing token to prevent
# redundantly registering the same license.
# Device token
if ("device_token" not in data[self.server][self.license_key].keys()
or data[self.server][self.license_key]["device_token"] == ''):
out = self.organization.registerLicense(self.device_name)
if 'Token' not in out:
self.__record(data=data) # Save current state
raise PermissionError(
"Registration error. Make sure your license key is valid for the selected server.")
self.device_token = out['Token']
data[self.server][self.license_key]["device_token"] = self.device_token
elif (self.device_token == '' and data[self.server][self.license_key]["device_token"] != ''):
self.device_token = data[self.server][self.license_key]["device_token"]
# Next, if a `user_token` does not exist for the current user on
# this license and server, it tries to log in (`User.login()`) the
# user first using the device token. If cannot be logged in, it
# needs to create a new user (`User.create()`) before logging in.
# The user information and credentials are already handled in the
# `User` class, so it only needs to pass in the `device_token`.
# User token
if ("user_token" not in data[self.server][self.license_key][self.user.email].keys()
or data[self.server][self.license_key][self.user.email]["user_token"] == ''):
res = self.user.login(self.device_token)
if res == "INVALID_USER":
res = self.user.create(self.device_token)
if res == 'INTERNAL_ERROR':
self.__record(data=data)
raise PermissionError("Cannot create new user. Check your license permissions.")
self.user_id = res
res = self.user.login(self.device_token)
elif res == "INVALID_PASSWORD":
self.__record(data=data)
raise PermissionError("Incorrect login password.")
self.user_token = self.user.user_token
if self.user_token != '':
data[self.server][self.license_key][self.user.email]["user_token"] = self.user_token
else:
self.user_token = data[self.server][self.license_key][self.user.email]["user_token"]
self.user.user_token = self.user_token
# Record updated data into the config file.
self.__record(data=data)
def create_new_measurement(self) -> str:
"""Create a new measurement by calling to the `create` endpoint under
`Measurement`.
Returns:
str -- Measurement ID
"""
try:
self.measurement.create()
except ValueError:
# Handling if | |
# Repository: sail-sg/mugs
# File: src/vision_transformer.py
# Copyright 2022 Garena Online Private Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
ViT backbones, including ViT-small, ViT-base, ViT-large
Mostly copy-paste from timm library.
https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
"""
import math
import warnings
from functools import partial
import torch
import torch.nn as nn
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
# Cut & paste from PyTorch official master until it's in a few official releases - RW
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn(
"mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2,
)
with torch.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
lower = norm_cdf((a - mean) / std)
upper = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * lower - 1, 2 * upper - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.0))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0):
# type: (torch.tensor, float, float, float, float) -> torch.tensor
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
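# Usage sketch: initialise a parameter tensor in-place from N(0, 0.02) truncated to
# [-2, 2] (the same scheme applied to pos_embed and cls_token below).
def _trunc_normal_example():
    w = torch.empty(3, 768)
    trunc_normal_(w, std=0.02)
    return w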
def drop_path(x, drop_prob: float = 0.0, training: bool = False):
"""
Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
if drop_prob == 0.0 or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (
x.ndim - 1
) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
random_tensor.floor_() # binarize
output = x.div(keep_prob) * random_tensor
return output
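# Stochastic-depth sketch: with drop_prob=0.25 each sample's residual branch is
# zeroed with probability 0.25 and the survivors are rescaled by 1/keep_prob, so
# the expected output is unchanged.
def _drop_path_example():
    x = torch.ones(8, 4)                                # dummy activations [B x C]
    return drop_path(x, drop_prob=0.25, training=True)  # rows are all 0 or all 1/0.75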
class DropPath(nn.Module):
"""
Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
class Mlp(nn.Module):
"""
MLP module in ViT
"""
def __init__(
self,
in_features,
hidden_features=None,
out_features=None,
act_layer=nn.GELU,
drop=0.0,
):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
"""
Attention module in ViT
"""
def __init__(
self,
dim,
num_heads=8,
qkv_bias=False,
qk_scale=None,
attn_drop=0.0,
proj_drop=0.0,
):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads)
reshaped_qkv = qkv.permute(2, 0, 3, 1, 4)
q, k, v = reshaped_qkv[0], reshaped_qkv[1], reshaped_qkv[2]
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x, attn
class Block(nn.Module):
"""
ViT block, including Attention, MLP, etc.
"""
def __init__(
self,
dim,
num_heads,
mlp_ratio=4.0,
qkv_bias=False,
qk_scale=None,
drop=0.0,
attn_drop=0.0,
drop_path=0.0,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
attn_drop=attn_drop,
proj_drop=drop,
)
self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(
in_features=dim,
hidden_features=mlp_hidden_dim,
act_layer=act_layer,
drop=drop,
)
def forward(self, x, return_attention=False):
y, attn = self.attn(self.norm1(x))
if return_attention:
return attn
x = x + self.drop_path(y)
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class PatchEmbed(nn.Module):
"""Image to Patch Embedding"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
num_patches = (img_size // patch_size) * (img_size // patch_size)
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(
in_chans, embed_dim, kernel_size=patch_size, stride=patch_size
)
def forward(self, x):
x = self.proj(x).flatten(2).transpose(1, 2)
return x
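# Shape sketch: with the defaults (img_size=224, patch_size=16, embed_dim=768) the
# projection yields 14 * 14 = 196 patch tokens, i.e. an output of shape [B, 196, 768].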
class VisionTransformer(nn.Module):
"""Vision Transformer"""
def __init__(
self,
img_size=[224, 224],
patch_size=16,
in_chans=3,
num_classes=0,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4.0,
qkv_bias=False,
qk_scale=None,
drop_rate=0.0,
attn_drop_rate=0.0,
drop_path_rate=0.0,
norm_layer=nn.LayerNorm,
num_relation_blocks=0,
**kwargs
):
super().__init__()
self.num_features = self.embed_dim = embed_dim
self.patch_size = patch_size
self.num_classes = num_classes
self.depth = depth
self.patch_embed = PatchEmbed(
img_size=img_size[0],
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim,
)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
trunc_normal_(self.pos_embed, std=0.02)
self.pos_drop = nn.Dropout(p=drop_rate)
dpr = [
x.item() for x in torch.linspace(0, drop_path_rate, depth)
] # stochastic depth decay rule
self.blocks = nn.ModuleList(
[
Block(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
drop=drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[i],
norm_layer=norm_layer,
)
for i in range(depth)
]
)
self.norm = norm_layer(embed_dim)
# Classifier head
self.head = (
nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
)
self.num_relation_blocks = num_relation_blocks
if num_relation_blocks > 0:
self.relation_blocks = nn.ModuleList(
[
Block(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
drop=drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[i],
norm_layer=norm_layer,
)
for i in range(int(num_relation_blocks))
]
)
trunc_normal_(self.cls_token, std=0.02)
self.apply(self._init_weights)
def add_pos_emb_for_cls_token(self):
pe_cls_token = torch.zeros([1, 1, self.embed_dim], dtype=torch.float32)
self.pos_embed = nn.Parameter(torch.cat([pe_cls_token, self.pos_embed], dim=1))
self.pos_embed.requires_grad = False
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=0.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def interpolate_pos_encoding(self, x, w, h):
npatch = x.shape[1] - 1
N = self.pos_embed.shape[1] - 1
if npatch == N and w == h:
return self.pos_embed
class_pos_embed = self.pos_embed[:, 0]
patch_pos_embed = self.pos_embed[:, 1:]
dim = x.shape[-1]
w0 = w // self.patch_embed.patch_size
h0 = h // self.patch_embed.patch_size
# we add a small number to avoid floating point error in the interpolation
# see discussion at https://github.com/facebookresearch/dino/issues/8
w0, h0 = w0 + 0.1, h0 + 0.1
patch_pos_embed = nn.functional.interpolate(
patch_pos_embed.reshape(
1, int(math.sqrt(N)), int(math.sqrt(N)), dim
).permute(0, 3, 1, 2),
scale_factor=(w0 / math.sqrt(N), h0 / math.sqrt(N)),
mode="bicubic",
)
assert (
int(w0) == patch_pos_embed.shape[-2]
and int(h0) == patch_pos_embed.shape[-1]
)
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)
def prepare_tokens(self, x):
B, nc, w, h = x.shape
x = self.patch_embed(x) # patch linear embedding
# add the [CLS] token to the embed patch tokens
cls_tokens = self.cls_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
# add positional encoding to each token
x = x + self.interpolate_pos_encoding(x, w, h)
return self.pos_drop(x)
def forward(self, x, return_all=False, local_group_memory_inputs=None, **kwargs):
x = self.prepare_tokens(x)
for blk in self.blocks:
x = blk(x)
if self.num_relation_blocks > 0:
mem = local_group_memory_inputs.get("mem")
if mem is not None:
m, _ = mem(x.mean(1))
rx = torch.cat((x.mean(1).unsqueeze(1), m), dim=1)
else:
rx = x
for i, blk in enumerate(self.relation_blocks):
rx = blk(rx)
relation_out = self.norm(rx[:, 0])
x = self.norm(x)
if self.num_classes > 0:
return self.head(x[:, 0])
if return_all:
return x, relation_out
else:
return x[:, 0], relation_out
def forward_knn(self, x):
x = self.prepare_tokens(x)
for blk in self.blocks:
x = blk(x)
x = self.norm(x)
return x[:, 0]
def get_last_selfattention(self, x):
x = self.prepare_tokens(x)
for i, blk in enumerate(self.blocks):
if i < len(self.blocks) - 1:
x = blk(x)
else:
# return attention of the last block
return blk(x, return_attention=True)
def get_intermediate_layers(self, x, n=1):
x = self.prepare_tokens(x)
# we return the output tokens from the `n` last blocks
output = []
for i, blk in enumerate(self.blocks):
x = blk(x)
if len(self.blocks) - i <= n:
output.append(self.norm(x))
return output
def get_num_layers(self):
return self.depth
@torch.jit.ignore
def no_weight_decay(self):
return {"pos_embed", "cls_token"}
def vit_tiny(patch_size=16, **kwargs):
model = VisionTransformer(
patch_size=patch_size,
embed_dim=192,
depth=12,
num_heads=3,
mlp_ratio=4,
qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
**kwargs
)
return model
def vit_small(patch_size=16, **kwargs):
    # Standard ViT-S/16 settings (embed_dim=384, 6 heads), following vit_tiny above.
    model = VisionTransformer(
        patch_size=patch_size, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4,
        qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    return model
"""
Tests of Tax-Calculator using puf.csv input.
Note that the puf.csv file that is required to run this program has
been constructed by the Tax-Calculator development team by merging
information from the most recent publicly available IRS SOI PUF file
and from the Census CPS file for the corresponding year. If you have
acquired from IRS the most recent SOI PUF file and want to execute
this program, contact the Tax-Calculator development team to discuss
your options.
Read Tax-Calculator/TESTING.md for details.
"""
# CODING-STYLE CHECKS:
# pycodestyle test_pufcsv.py
# pylint --disable=locally-disabled test_pufcsv.py
import os
import json
import pytest
import numpy as np
import pandas as pd
# pylint: disable=import-error
from taxcalc import Policy, Records, Calculator
START_YEAR = 2017
@pytest.mark.requires_pufcsv
def test_agg(tests_path, puf_fullsample):
"""
Test Tax-Calculator aggregate taxes with no policy reform using
the full-sample puf.csv and a small sub-sample of puf.csv
"""
# pylint: disable=too-many-locals,too-many-statements
nyrs = 10
# create a baseline Policy object with current-law policy parameters
baseline_policy = Policy()
# create a Records object (rec) containing all puf.csv input records
recs = Records(data=puf_fullsample)
# create a Calculator object using baseline policy and puf records
calc = Calculator(policy=baseline_policy, records=recs)
calc.advance_to_year(START_YEAR)
calc_start_year = calc.current_year
# create aggregate diagnostic table (adt) as a Pandas DataFrame object
adt = calc.diagnostic_table(nyrs).round(1) # column labels are int
taxes_fullsample = adt.loc["Combined Liability ($b)"]
# compare actual DataFrame, adt, with the expected DataFrame, edt
aggres_path = os.path.join(tests_path, 'pufcsv_agg_expect.csv')
edt = pd.read_csv(aggres_path, index_col=False) # column labels are str
edt.drop('Unnamed: 0', axis='columns', inplace=True)
assert len(adt.columns.values) == len(edt.columns.values)
diffs = False
for icol in adt.columns.values:
if not np.allclose(adt[icol], edt[str(icol)]):
diffs = True
if diffs:
new_filename = '{}{}'.format(aggres_path[:-10], 'actual.csv')
adt.to_csv(new_filename, float_format='%.1f')
msg = 'PUFCSV AGG RESULTS DIFFER FOR FULL-SAMPLE\n'
msg += '-------------------------------------------------\n'
msg += '--- NEW RESULTS IN pufcsv_agg_actual.csv FILE ---\n'
msg += '--- if new OK, copy pufcsv_agg_actual.csv to ---\n'
msg += '--- pufcsv_agg_expect.csv ---\n'
msg += '--- and rerun test. ---\n'
msg += '--- (both are in taxcalc/tests) ---\n'
msg += '-------------------------------------------------\n'
raise ValueError(msg)
# create aggregate diagnostic table using unweighted sub-sample of records
fullsample = puf_fullsample
rn_seed = 2222 # to ensure sub-sample is always the same
subfrac = 0.05 # sub-sample fraction
subsample = fullsample.sample(frac=subfrac, random_state=rn_seed)
recs_subsample = Records(data=subsample)
calc_subsample = Calculator(policy=baseline_policy, records=recs_subsample)
calc_subsample.advance_to_year(START_YEAR)
adt_subsample = calc_subsample.diagnostic_table(nyrs)
# compare combined tax liability from full and sub samples for each year
taxes_subsample = adt_subsample.loc["Combined Liability ($b)"]
msg = ''
for cyr in range(calc_start_year, calc_start_year + nyrs):
reltol = 0.01 # maximum allowed relative difference in tax liability
if not np.allclose(taxes_subsample[cyr], taxes_fullsample[cyr],
atol=0.0, rtol=reltol):
reldiff = (taxes_subsample[cyr] / taxes_fullsample[cyr]) - 1.
line1 = '\nPUFCSV AGG SUB-vs-FULL RESULTS DIFFER IN {}'
line2 = '\n when subfrac={:.3f}, rtol={:.4f}, seed={}'
line3 = '\n with sub={:.3f}, full={:.3f}, rdiff={:.4f}'
msg += line1.format(cyr)
msg += line2.format(subfrac, reltol, rn_seed)
msg += line3.format(taxes_subsample[cyr],
taxes_fullsample[cyr],
reldiff)
if msg:
raise ValueError(msg)
MTR_TAX_YEAR = 2013
MTR_NEG_DIFF = False # set True to subtract (rather than add) small amount
# specify payrolltax mtr histogram bin boundaries (or edges):
PTAX_MTR_BIN_EDGES = [0.0, 0.02, 0.04, 0.06, 0.08,
0.10, 0.12, 0.14, 0.16, 0.18, 1.0]
# the bin boundaries above are arbitrary, so users
# may want to experiment with alternative boundaries
# specify incometax mtr histogram bin boundaries (or edges):
ITAX_MTR_BIN_EDGES = [-1.0, -0.30, -0.20, -0.10, 0.0,
0.10, 0.20, 0.30, 0.40, 0.50, 1.0]
# the bin boundaries above are arbitrary, so users
# may want to experiment with alternative boundaries
def mtr_bin_counts(mtr_data, bin_edges, recid):
"""
Compute mtr histogram bin counts and return results as a string.
"""
res = ''
(bincount, _) = np.histogram(mtr_data.round(decimals=4), bins=bin_edges)
sum_bincount = np.sum(bincount)
res += '{} :'.format(sum_bincount)
for idx in range(len(bin_edges) - 1):
res += ' {:6d}'.format(bincount[idx])
res += '\n'
if sum_bincount < mtr_data.size:
res += 'WARNING: sum of bin counts is too low\n'
recinfo = ' mtr={:.2f} for recid={}\n'
mtr_min = mtr_data.min()
mtr_max = mtr_data.max()
bin_min = min(bin_edges)
bin_max = max(bin_edges)
if mtr_min < bin_min:
res += ' min(mtr)={:.2f}\n'.format(mtr_min)
for idx in range(mtr_data.size):
if mtr_data[idx] < bin_min:
res += recinfo.format(mtr_data[idx], recid[idx])
if mtr_max > bin_max:
res += ' max(mtr)={:.2f}\n'.format(mtr_max)
for idx in range(mtr_data.size):
if mtr_data[idx] > bin_max:
res += recinfo.format(mtr_data[idx], recid[idx])
return res
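# Illustrative sketch (not a test): binning a handful of hypothetical payroll-tax
# MTR values with the edges defined above.
def _mtr_bin_counts_example():
    mtrs = np.array([0.0, 0.0765, 0.0765, 0.153, 0.20])
    recids = np.arange(1, mtrs.size + 1)
    return mtr_bin_counts(mtrs, PTAX_MTR_BIN_EDGES, recids)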
def nonsmall_diffs(linelist1, linelist2, small=0.0):
"""
Return True if line lists differ significantly; otherwise return False.
Significant numerical difference means one or more numbers differ (between
linelist1 and linelist2) by more than the specified small amount.
"""
# embedded function used only in nonsmall_diffs function
def isfloat(value):
"""
Return True if value can be cast to float; otherwise return False.
"""
try:
float(value)
return True
except ValueError:
return False
# begin nonsmall_diffs logic
assert isinstance(linelist1, list)
assert isinstance(linelist2, list)
if len(linelist1) != len(linelist2):
return True
assert 0.0 <= small <= 1.0
epsilon = 1e-6
smallamt = small + epsilon
for line1, line2 in zip(linelist1, linelist2):
if line1 == line2:
continue
else:
tokens1 = line1.replace(',', '').split()
tokens2 = line2.replace(',', '').split()
for tok1, tok2 in zip(tokens1, tokens2):
tok1_isfloat = isfloat(tok1)
tok2_isfloat = isfloat(tok2)
if tok1_isfloat and tok2_isfloat:
if abs(float(tok1) - float(tok2)) <= smallamt:
continue
else:
return True
elif not tok1_isfloat and not tok2_isfloat:
if tok1 == tok2:
continue
else:
return True
else:
return True
return False
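# Illustrative sketch (not a test): a numeric drift larger than `small` counts as a
# significant difference, a smaller one does not.
def _nonsmall_diffs_example():
    actual = ['Combined 100.0\n']
    expected = ['Combined 100.3\n']
    assert nonsmall_diffs(actual, expected, small=0.1)      # 0.3 > 0.1 -> significant
    assert not nonsmall_diffs(actual, expected, small=0.5)  # 0.3 <= 0.5 -> ignored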
@pytest.mark.requires_pufcsv
def test_mtr(tests_path, puf_path):
"""
Test Tax-Calculator marginal tax rates with no policy reform using puf.csv
Compute histograms for each marginal tax rate income type using
sample input from the puf.csv file and writing output to a string,
which is then compared for differences with EXPECTED_MTR_RESULTS.
"""
# pylint: disable=too-many-locals,too-many-statements
assert len(PTAX_MTR_BIN_EDGES) == len(ITAX_MTR_BIN_EDGES)
# construct actual results string, res
res = ''
if MTR_NEG_DIFF:
res += 'MTR computed using NEGATIVE finite_diff '
else:
res += 'MTR computed using POSITIVE finite_diff '
res += 'for tax year {}\n'.format(MTR_TAX_YEAR)
# create a Policy object (clp) containing current-law policy parameters
clp = Policy()
clp.set_year(MTR_TAX_YEAR)
# create a Records object (puf) containing puf.csv input records
puf = Records(data=puf_path)
recid = puf.RECID # pylint: disable=no-member
# create a Calculator object using clp policy and puf records
calc = Calculator(policy=clp, records=puf)
res += '{} = {}\n'.format('Total number of data records', puf.array_length)
res += 'PTAX mtr histogram bin edges:\n'
res += ' {}\n'.format(PTAX_MTR_BIN_EDGES)
res += 'ITAX mtr histogram bin edges:\n'
res += ' {}\n'.format(ITAX_MTR_BIN_EDGES)
variable_header = 'PTAX and ITAX mtr histogram bin counts for'
# compute marginal tax rate (mtr) histograms for each mtr variable
for var_str in Calculator.MTR_VALID_VARIABLES:
zero_out = (var_str == 'e01400')
(mtr_ptax, mtr_itax, _) = calc.mtr(variable_str=var_str,
negative_finite_diff=MTR_NEG_DIFF,
zero_out_calculated_vars=zero_out,
wrt_full_compensation=False)
if zero_out:
# check that calculated variables are consistent
assert np.allclose((calc.array('iitax') +
calc.array('payrolltax')),
calc.array('combined'))
assert np.allclose((calc.array('ptax_was') +
calc.array('setax') +
calc.array('ptax_amc')),
calc.array('payrolltax'))
assert np.allclose(calc.array('c21060') - calc.array('c21040'),
calc.array('c04470'))
assert np.allclose(calc.array('taxbc') + calc.array('c09600'),
calc.array('c05800'))
assert np.allclose((calc.array('c05800') +
calc.array('othertaxes') -
calc.array('c07100')),
calc.array('c09200'))
assert np.allclose(calc.array('c09200') - calc.array('refund'),
calc.array('iitax'))
if var_str == 'e00200s':
# only MARS==2 filing units have valid MTR values
mtr_ptax = mtr_ptax[calc.array('MARS') == 2]
mtr_itax = mtr_itax[calc.array('MARS') == 2]
res += '{} {}:\n'.format(variable_header, var_str)
res += mtr_bin_counts(mtr_ptax, PTAX_MTR_BIN_EDGES, recid)
res += mtr_bin_counts(mtr_itax, ITAX_MTR_BIN_EDGES, recid)
# check for differences between actual and expected results
mtrres_path = os.path.join(tests_path, 'pufcsv_mtr_expect.txt')
with open(mtrres_path, 'r') as expected_file:
txt = expected_file.read()
expected_results = txt.rstrip('\n\t ') + '\n' # cleanup end of file txt
if nonsmall_diffs(res.splitlines(True), expected_results.splitlines(True)):
new_filename = '{}{}'.format(mtrres_path[:-10], 'actual.txt')
with open(new_filename, 'w') as new_file:
new_file.write(res)
msg = 'PUFCSV MTR RESULTS DIFFER\n'
msg += '-------------------------------------------------\n'
msg += '--- NEW RESULTS IN pufcsv_mtr_actual.txt FILE ---\n'
msg += '--- if new OK, copy pufcsv_mtr_actual.txt to ---\n'
msg += '--- pufcsv_mtr_expect.txt ---\n'
msg += '--- and rerun test. ---\n'
msg += '-------------------------------------------------\n'
raise ValueError(msg)
@pytest.mark.requires_pufcsv
def test_mtr_pt_active(puf_subsample):
"""
Test whether including wages in active income causes
MTRs on e00900p and e26270 to be less than -1 (i.e., -100%)
"""
# pylint: disable=too-many-locals
rec = Records(data=puf_subsample)
reform_year = 2018
# create current-law Calculator object, calc1
pol1 = Policy()
calc1 = Calculator(policy=pol1, records=rec)
calc1.advance_to_year(reform_year)
calc1.calc_all()
mtr1_e00900p = calc1.mtr('e00900p')[2]
mtr1_e26270 = calc1.mtr('e26270')[2]
assert min(mtr1_e00900p) > -1
assert min(mtr1_e26270) > -1
# change PT rates, calc2
reform2 = {'PT_rt7': {reform_year: 0.35}}
pol2 = Policy()
pol2.implement_reform(reform2)
calc2 = Calculator(policy=pol2, records=rec)
calc2.advance_to_year(reform_year)
calc2.calc_all()
mtr2_e00900p = calc2.mtr('e00900p')[2]
mtr2_e26270 = calc2.mtr('e26270')[2]
assert min(mtr2_e00900p) > -1
assert min(mtr2_e26270) > -1
# change PT_wages_active_income
reform3 = | |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from typing import Any, Dict, Optional, Tuple
import gym
import torch
from nle import nethack
from minihack.agent.common.models.embed import GlyphEmbedding
from minihack.agent.common.models.transformer import TransformerEncoder
from omegaconf import DictConfig
from ray.rllib.models import ModelCatalog
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.utils.annotations import override
from torch import nn
from torch.nn import functional as F
NUM_GLYPHS = nethack.MAX_GLYPH
NUM_FEATURES = nethack.BLSTATS_SHAPE[0]
PAD_CHAR = 0
NUM_CHARS = 128
class RLLibGlyphEmbedding(GlyphEmbedding):
def glyphs_to_idgroup(self, glyphs):
B, H, W = glyphs.shape
ids_groups = self.id_pairs_table.index_select(
0, glyphs.contiguous().view(-1).long()
)
ids = ids_groups.select(1, 0).view(B, H, W).long()
groups = ids_groups.select(1, 1).view(B, H, W).long()
return (ids, groups)
def prepare_input(self, inputs):
"""Take the inputs to the network as dictionary and return a namedtuple
of the input/index tensors to be embedded (GlyphTuple)"""
embeddable_data = {}
# Only flatten the data we want
for key, value in inputs.items():
if key in self.embeddings:
# -- [ B x ...] -> [ B' x ... ]
# embeddable_data[key] = torch.flatten(value, 0, 1).long()
embeddable_data[key] = value.long()
# add our group id and subgroup id if we want them
if self.requires_id_pairs_table:
ids, groups = self.glyphs_to_idgroup(inputs["glyphs"])
embeddable_data["groups"] = groups
embeddable_data["subgroup_ids"] = ids
# convert embeddable_data to a named tuple
return self.GlyphTuple(**embeddable_data)
class NetHackNet(nn.Module):
AgentOutput = collections.namedtuple(
"AgentOutput",
"action policy_logits baseline chosen_option teacher_logits pot_sm",
)
def __init__(self):
super(NetHackNet, self).__init__()
self.register_buffer("reward_sum", torch.zeros(()))
self.register_buffer("reward_m2", torch.zeros(()))
self.register_buffer("reward_count", torch.zeros(()).fill_(1e-8))
def forward(self, inputs, core_state):
raise NotImplementedError
def initial_state(self, batch_size=1):
return ()
def prepare_input(self, inputs):
# -- [B x H x W]
glyphs = inputs["glyphs"]
# -- [B x F]
features = inputs["blstats"]
B, *_ = glyphs.shape
return glyphs, features
def embed_state(self, inputs):
raise NotImplementedError
@torch.no_grad()
def update_running_moments(self, reward_batch):
"""Maintains a running mean of reward."""
new_count = len(reward_batch)
new_sum = torch.sum(reward_batch)
new_mean = new_sum / new_count
curr_mean = self.reward_sum / self.reward_count
new_m2 = torch.sum((reward_batch - new_mean) ** 2) + (
(self.reward_count * new_count)
/ (self.reward_count + new_count)
* (new_mean - curr_mean) ** 2
)
self.reward_count += new_count
self.reward_sum += new_sum
self.reward_m2 += new_m2
@torch.no_grad()
def get_running_std(self):
"""Returns standard deviation of the running mean of the reward."""
return torch.sqrt(self.reward_m2 / self.reward_count)
class Crop(nn.Module):
def __init__(self, height, width, height_target, width_target):
super(Crop, self).__init__()
self.width = width
self.height = height
self.width_target = width_target
self.height_target = height_target
width_grid = self._step_to_range(
2 / (self.width - 1), self.width_target
)[None, :].expand(self.height_target, -1)
height_grid = self._step_to_range(
2 / (self.height - 1), height_target
)[:, None].expand(-1, self.width_target)
# "clone" necessary, https://github.com/pytorch/pytorch/issues/34880
self.register_buffer("width_grid", width_grid.clone())
self.register_buffer("height_grid", height_grid.clone())
def _step_to_range(self, step, num_steps):
return torch.tensor(
[step * (i - num_steps // 2) for i in range(num_steps)]
)
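    # e.g. _step_to_range(2 / 78, 9) gives 9 offsets centred on 0, spanning roughly
    # [-0.103, 0.103] in the normalised [-1, 1] grid coordinates used by F.grid_sample.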
def forward(self, inputs, coordinates):
"""Calculates centered crop around given x,y coordinates.
Args:
inputs [B x H x W] or [B x H x W x C]
coordinates [B x 2] x,y coordinates
Returns:
[B x H' x W'] inputs cropped and centered around x,y coordinates.
"""
assert inputs.shape[1] == self.height, "expected %d but found %d" % (
self.height,
inputs.shape[1],
)
assert inputs.shape[2] == self.width, "expected %d but found %d" % (
self.width,
inputs.shape[2],
)
permute_results = False
if inputs.dim() == 3:
inputs = inputs.unsqueeze(1)
else:
permute_results = True
inputs = inputs.permute(0, 2, 3, 1)
inputs = inputs.float()
x = coordinates[:, 0]
y = coordinates[:, 1]
x_shift = 2 / (self.width - 1) * (x.float() - self.width // 2)
y_shift = 2 / (self.height - 1) * (y.float() - self.height // 2)
grid = torch.stack(
[
self.width_grid[None, :, :] + x_shift[:, None, None],
self.height_grid[None, :, :] + y_shift[:, None, None],
],
dim=3,
)
crop = (
torch.round(F.grid_sample(inputs, grid, align_corners=True))
.squeeze(1)
.long()
)
if permute_results:
# [B x C x H x W] -> [B x H x W x C]
crop = crop.permute(0, 2, 3, 1)
return crop
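# Minimal usage sketch (illustrative; the sizes and coordinates below are made up):
# crop a [B x 21 x 79] glyph map to a 9x9 window centred on per-sample (x, y) positions.
def _example_crop():
    crop = Crop(height=21, width=79, height_target=9, width_target=9)
    glyphs = torch.zeros(4, 21, 79)         # [B x H x W] dummy glyph map
    coords = torch.tensor([[40, 10]] * 4)   # [B x 2] x, y positions
    return crop(glyphs, coords)             # -> [4 x 9 x 9]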
class Flatten(nn.Module):
def forward(self, input):
return input.view(input.size(0), -1)
class BaseNet(NetHackNet):
def __init__(self, processed_observation_shape, flags: DictConfig):
super(BaseNet, self).__init__()
self.observation_space = processed_observation_shape.original_space
self.H = self.observation_space["glyphs"].shape[0]
self.W = self.observation_space["glyphs"].shape[1]
self.k_dim = flags.embedding_dim
self.h_dim = flags.hidden_dim
self.crop_model = flags.crop_model
self.crop_dim = flags.crop_dim
self.num_features = NUM_FEATURES
self.crop = Crop(self.H, self.W, self.crop_dim, self.crop_dim)
self.glyph_type = flags.glyph_type
self.glyph_embedding = RLLibGlyphEmbedding(
flags.glyph_type,
flags.embedding_dim,
None,
flags.use_index_select,
)
K = flags.embedding_dim # number of input filters
F = 3 # filter dimensions
S = 1 # stride
P = 1 # padding
M = 16 # number of intermediate filters
self.Y = 8 # number of output filters
L = flags.layers # number of convnet layers
in_channels = [K] + [M] * (L - 1)
out_channels = [M] * (L - 1) + [self.Y]
def interleave(xs, ys):
return [val for pair in zip(xs, ys) for val in pair]
conv_extract = [
nn.Conv2d(
in_channels=in_channels[i],
out_channels=out_channels[i],
kernel_size=(F, F),
stride=S,
padding=P,
)
for i in range(L)
]
self.extract_representation = nn.Sequential(
*interleave(conv_extract, [nn.ELU()] * len(conv_extract))
)
if self.crop_model == "transformer":
self.extract_crop_representation = TransformerEncoder(
K,
N=L,
heads=8,
height=self.crop_dim,
width=self.crop_dim,
device=None,
)
elif self.crop_model == "cnn":
conv_extract_crop = [
nn.Conv2d(
in_channels=in_channels[i],
out_channels=out_channels[i],
kernel_size=(F, F),
stride=S,
padding=P,
)
for i in range(L)
]
self.extract_crop_representation = nn.Sequential(
*interleave(conv_extract_crop, [nn.ELU()] * len(conv_extract))
)
# MESSAGING MODEL
if "msg" not in flags:
self.msg_model = "none"
else:
self.msg_model = flags.msg.model
self.msg_hdim = flags.msg.hidden_dim
self.msg_edim = flags.msg.embedding_dim
if self.msg_model in ("gru", "lstm", "lt_cnn"):
# character-based embeddings
self.char_lt = nn.Embedding(
NUM_CHARS, self.msg_edim, padding_idx=PAD_CHAR
)
else:
# forward will set up one-hot inputs for the cnn, no lt needed
pass
if self.msg_model.endswith("cnn"):
# from Zhang et al, 2016
# Character-level Convolutional Networks for Text Classification
# https://arxiv.org/abs/1509.01626
if self.msg_model == "cnn":
# inputs will be one-hot vectors, as done in paper
self.conv1 = nn.Conv1d(NUM_CHARS, self.msg_hdim, kernel_size=7)
elif self.msg_model == "lt_cnn":
# replace one-hot inputs with learned embeddings
self.conv1 = nn.Conv1d(
self.msg_edim, self.msg_hdim, kernel_size=7
)
else:
raise NotImplementedError("msg.model == %s", flags.msg.model)
# remaining convolutions, relus, pools, and a small FC network
self.conv2_6_fc = nn.Sequential(
nn.ReLU(),
nn.MaxPool1d(kernel_size=3, stride=3),
# conv2
nn.Conv1d(self.msg_hdim, self.msg_hdim, kernel_size=7),
nn.ReLU(),
nn.MaxPool1d(kernel_size=3, stride=3),
# conv3
nn.Conv1d(self.msg_hdim, self.msg_hdim, kernel_size=3),
nn.ReLU(),
# conv4
nn.Conv1d(self.msg_hdim, self.msg_hdim, kernel_size=3),
nn.ReLU(),
# conv5
nn.Conv1d(self.msg_hdim, self.msg_hdim, kernel_size=3),
nn.ReLU(),
# conv6
nn.Conv1d(self.msg_hdim, self.msg_hdim, kernel_size=3),
nn.ReLU(),
nn.MaxPool1d(kernel_size=3, stride=3),
# fc receives -- [ B x h_dim x 5 ]
Flatten(),
nn.Linear(5 * self.msg_hdim, 2 * self.msg_hdim),
nn.ReLU(),
nn.Linear(2 * self.msg_hdim, self.msg_hdim),
) # final output -- [ B x h_dim ]
elif self.msg_model in ("gru", "lstm"):
def rnn(flag):
return nn.LSTM if flag == "lstm" else nn.GRU
self.char_rnn = rnn(self.msg_model)(
self.msg_edim,
self.msg_hdim // 2,
batch_first=True,
bidirectional=True,
)
elif self.msg_model != "none":
raise NotImplementedError("msg.model == %s", flags.msg.model)
self.embed_features = nn.Sequential(
nn.Linear(self.num_features, self.k_dim),
nn.ReLU(),
nn.Linear(self.k_dim, self.k_dim),
nn.ReLU(),
)
self.equalize_input_dim = flags.equalize_input_dim
if not self.equalize_input_dim:
# just added up the output dimensions of the input featurizers
# feature / status dim
out_dim = self.k_dim
# CNN over full glyph map
out_dim += self.H * self.W * self.Y
if self.crop_model == "transformer":
out_dim += self.crop_dim ** 2 * K
elif self.crop_model == "cnn":
out_dim += self.crop_dim ** 2 * self.Y
# messaging model
if self.msg_model != "none":
out_dim += self.msg_hdim
else:
# otherwise, project them all to h_dim
NUM_INPUTS = 4 if self.msg_model != "none" else 3
project_hdim = flags.equalize_factor * self.h_dim
out_dim = project_hdim * NUM_INPUTS
# set up linear layers for projections
self.project_feature_dim = nn.Linear(self.k_dim, project_hdim)
self.project_glyph_dim = nn.Linear(
self.H * self.W * self.Y, project_hdim
)
c__2 = self.crop_dim ** 2
if self.crop_model == "transformer":
self.project_crop_dim = nn.Linear(c__2 * K, project_hdim)
elif self.crop_model == "cnn":
self.project_crop_dim = nn.Linear(c__2 * self.Y, project_hdim)
if self.msg_model != "none":
self.project_msg_dim = nn.Linear(self.msg_hdim, project_hdim)
self.fc = nn.Sequential(
nn.Linear(out_dim, self.h_dim),
nn.ReLU(),
nn.Linear(self.h_dim, self.h_dim),
nn.ReLU(),
)
def prepare_input(self, inputs):
# -- [B x H x W]
B, H, W = inputs["glyphs"].shape
# take our chosen
)
projection_tensor = self.projection_layer(inputs=Z)
print_obj(
"\n" + func_name, "projection_tensor", projection_tensor
)
# Reshape projection into "image".
# shape = (
# cur_batch_size,
# projection_height,
# projection_width,
# projection_depth
# )
projection_tensor_reshaped = tf.reshape(
tensor=projection_tensor,
shape=[-1] + params["{}_projection_dims".format(self.kind)],
name="{}_projection_reshaped".format(self.name)
)
print_obj(
func_name,
"projection_tensor_reshaped",
projection_tensor_reshaped
)
# shape = (
# cur_batch_size,
# projection_height,
# projection_width,
# projection_depth
# )
projection_tensor_leaky = tf.nn.leaky_relu(
features=projection_tensor_reshaped,
alpha=params["{}_leaky_relu_alpha".format(self.kind)],
name="{}_projection_tensor_reshaped_leaky_relu".format(self.kind)
)
print_obj(
func_name, "projection_tensor_leaky", projection_tensor_leaky
)
# shape = (
# cur_batch_size,
# projection_height,
# projection_width,
# projection_depth
# )
pixel_norm_output = self.use_pixel_norm(
X=projection_tensor_leaky,
params=params,
epsilon=params["pixel_norm_epsilon"]
)
print_obj(func_name, "pixel_norm_output", pixel_norm_output)
return pixel_norm_output
def fused_conv2d_pixel_norm(self, input_image, conv2d_layer, params):
"""Fused `Conv2D` layer and pixel norm operation.
Args:
input_image: tensor, input image of rank 4.
conv2d_layer: `Conv2D` layer.
params: dict, user passed parameters.
Returns:
New image tensor of rank 4.
"""
func_name = "fused_conv2d_pixel_norm"
conv_output = conv2d_layer(inputs=input_image)
print_obj("\n" + func_name, "conv_output", conv_output)
conv_output_leaky = tf.nn.leaky_relu(
features=conv_output,
alpha=params["{}_leaky_relu_alpha".format(self.kind)],
name="{}_fused_conv2d_pixel_norm_leaky_relu".format(self.kind)
)
print_obj(func_name, "conv_output_leaky", conv_output_leaky)
pixel_norm_output = self.use_pixel_norm(
X=conv_output_leaky,
params=params,
epsilon=params["pixel_norm_epsilon"]
)
print_obj(func_name, "pixel_norm_output", pixel_norm_output)
return pixel_norm_output
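# `use_pixel_norm` is defined elsewhere in this class. In the progressive-GAN
# formulation it normalizes each pixel's feature vector; a hedged sketch of that
# operation (an assumption about the helper, not its actual implementation):
#
#   def pixel_norm_sketch(X, epsilon=1e-8):
#       return X * tf.math.rsqrt(
#           tf.reduce_mean(tf.square(X), axis=-1, keepdims=True) + epsilon
#       )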
def upsample_vec_to_img_image(self, image, orig_img_size, block_idx):
"""Upsamples vec_to_img image.
Args:
image: tensor, image created by vec_to_img conv block.
orig_img_size: list, the height and width dimensions of the
original image before any growth.
block_idx: int, index of the current vec_to_img growth block.
Returns:
Upsampled image tensor.
"""
func_name = "upsample_{}_image".format(self.kind)
# Upsample from s X s to 2s X 2s image.
upsampled_image = tf.image.resize(
images=image,
size=tf.convert_to_tensor(
value=orig_img_size,
dtype=tf.int32,
name="{}_upsample_{}_image_orig_img_size".format(
self.name, self.kind
)
) * 2 ** block_idx,
method="nearest",
name="{}_growth_upsampled_image_{}_{}x{}_{}x{}".format(
self.name,
block_idx,
orig_img_size[0] * 2 ** (block_idx - 1),
orig_img_size[1] * 2 ** (block_idx - 1),
orig_img_size[0] * 2 ** block_idx,
orig_img_size[1] * 2 ** block_idx
)
)
print_obj("\n" + func_name, "upsampled_image", upsampled_image)
return upsampled_image
def create_base_vec_to_img_network(self, Z, params):
"""Creates base vec_to_img network.
Args:
Z: tensor, latent vectors of shape [cur_batch_size, latent_size].
params: dict, user passed parameters.
Returns:
Final network block conv tensor.
"""
func_name = "create_base_{}_network".format(self.kind)
print_obj("\n" + func_name, "Z", Z)
with tf.variable_scope(name_or_scope=self.name, reuse=tf.AUTO_REUSE):
# Project latent noise vectors into image.
projection = self.use_vec_to_img_projection_layer(
Z=Z, params=params
)
print_obj(func_name, "projection", projection)
# Only need the first block and toRGB conv layer for base network.
block_layers = self.conv_layer_blocks[0]
to_rgb_conv_layer = self.to_rgb_conv_layers[0]
# Pass inputs through layer chain.
block_conv = projection
for i in range(0, len(block_layers)):
block_conv = self.fused_conv2d_pixel_norm(
input_image=block_conv,
conv2d_layer=block_layers[i],
params=params
)
print_obj(func_name, "block_conv_{}".format(i), block_conv)
# Convert convolution to RGB image.
to_rgb_conv = to_rgb_conv_layer(inputs=block_conv)
print_obj(func_name, "to_rgb_conv", to_rgb_conv)
return to_rgb_conv
def create_growth_transition_vec_to_img_network(
self, Z, orig_img_size, alpha_var, params, trans_idx):
"""Creates growth transition vec_to_img network.
Args:
Z: tensor, latent vectors of shape [cur_batch_size, latent_size].
orig_img_size: list, the height and width dimensions of the
original image before any growth.
alpha_var: variable, alpha for weighted sum of fade-in of layers.
params: dict, user passed parameters.
trans_idx: int, index of current growth transition.
Returns:
Weighted sum tensor of growing and shrinking network paths.
"""
func_name = "create_growth_transition_{}_network".format(self.kind)
print_obj("\nEntered {}".format(func_name), "trans_idx", trans_idx)
print_obj(func_name, "Z", Z)
with tf.variable_scope(name_or_scope=self.name, reuse=tf.AUTO_REUSE):
# Project latent noise vectors into image.
projection = self.use_vec_to_img_projection_layer(
Z=Z, params=params
)
print_obj(func_name, "projection", projection)
# Permanent blocks.
permanent_blocks = self.conv_layer_blocks[0:trans_idx + 1]
# Base block doesn't need any upsampling so handle differently.
base_block_conv_layers = permanent_blocks[0]
# Pass inputs through layer chain.
block_conv = projection
for i in range(0, len(base_block_conv_layers)):
block_conv = self.fused_conv2d_pixel_norm(
input_image=block_conv,
conv2d_layer=base_block_conv_layers[i],
params=params
)
print_obj(
func_name,
"base_block_conv_{}_{}".format(trans_idx, i),
block_conv
)
# Growth blocks require first prev conv layer's image upsampled.
for i in range(1, len(permanent_blocks)):
# Upsample previous block's image.
block_conv = self.upsample_vec_to_img_image(
image=block_conv,
orig_img_size=orig_img_size,
block_idx=i
)
print_obj(
func_name,
"upsample_vec_to_img_image_block_conv_{}_{}".format(
trans_idx, i
),
block_conv
)
block_conv_layers = permanent_blocks[i]
for j in range(0, len(block_conv_layers)):
block_conv = self.fused_conv2d_pixel_norm(
input_image=block_conv,
conv2d_layer=block_conv_layers[j],
params=params
)
print_obj(
func_name,
"block_conv_{}_{}_{}".format(trans_idx, i, j),
block_conv
)
# Upsample most recent block conv image for both side chains.
upsampled_block_conv = self.upsample_vec_to_img_image(
image=block_conv,
orig_img_size=orig_img_size,
block_idx=len(permanent_blocks)
)
print_obj(
func_name,
"upsampled_block_conv_{}".format(trans_idx),
upsampled_block_conv
)
# Growing side chain.
growing_block_layers = self.conv_layer_blocks[trans_idx + 1]
growing_to_rgb_conv_layer = self.to_rgb_conv_layers[trans_idx + 1]
# Pass inputs through layer chain.
block_conv = upsampled_block_conv
for i in range(0, len(growing_block_layers)):
block_conv = self.fused_conv2d_pixel_norm(
input_image=block_conv,
conv2d_layer=growing_block_layers[i],
params=params
)
print_obj(
func_name,
"growing_block_conv_{}_{}".format(trans_idx, i),
block_conv
)
growing_to_rgb_conv = growing_to_rgb_conv_layer(inputs=block_conv)
print_obj(
func_name,
"growing_to_rgb_conv_{}".format(trans_idx),
growing_to_rgb_conv
)
# Shrinking side chain.
shrinking_to_rgb_conv_layer = self.to_rgb_conv_layers[trans_idx]
# Pass inputs through layer chain.
shrinking_to_rgb_conv = shrinking_to_rgb_conv_layer(
inputs=upsampled_block_conv
)
print_obj(
func_name,
"shrinking_to_rgb_conv_{}".format(trans_idx),
shrinking_to_rgb_conv
)
# Weighted sum.
weighted_sum = tf.add(
x=growing_to_rgb_conv * alpha_var,
y=shrinking_to_rgb_conv * (1.0 - alpha_var),
name="growth_transition_weighted_sum_{}".format(trans_idx)
)
print_obj(
func_name,
"weighted_sum_{}".format(trans_idx),
weighted_sum
)
return weighted_sum
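# The weighted sum above implements the progressive-growing fade-in: alpha_var
# ramps from 0 to 1 while the new, higher-resolution block is blended in. A hedged
# sketch of how alpha might be scheduled outside this class (names are illustrative):
#
#   # alpha = min(1.0, steps_in_transition / total_transition_steps)
#   # sess.run(tf.assign(alpha_var, alpha))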
def create_growth_stable_vec_to_img_network(
self, Z, orig_img_size, params, trans_idx):
"""Creates final vec_to_img network.
Args:
Z: tensor, latent vectors of shape [cur_batch_size, latent_size].
orig_img_size: list, the height and width dimensions of the
original image before any growth.
params: dict, user passed parameters.
trans_idx: int, index of current growth transition.
Returns:
Final network block conv tensor.
"""
func_name = "create_growth_stable_{}_network".format(self.kind)
print_obj("\n" + func_name, "Z", Z)
with tf.variable_scope(name_or_scope=self.name, reuse=tf.AUTO_REUSE):
# Project latent noise vectors into image.
projection = self.use_vec_to_img_projection_layer(
Z=Z, params=params
)
print_obj(func_name, "projection", projection)
# Permanent blocks.
permanent_blocks = self.conv_layer_blocks[0:trans_idx + 2]
# Base block doesn't need any upsampling so handle differently.
base_block_conv_layers = permanent_blocks[0]
# Pass inputs through layer chain.
block_conv = projection
for i in range(len(base_block_conv_layers)):
block_conv = self.fused_conv2d_pixel_norm(
input_image=block_conv,
conv2d_layer=base_block_conv_layers[i],
params=params
)
print_obj(
func_name, "base_block_conv_{}".format(i), block_conv
)
# Growth blocks require first prev conv layer's image upsampled.
for i in range(1, len(permanent_blocks)):
# Upsample previous block's image.
block_conv = self.upsample_vec_to_img_image(
image=block_conv,
orig_img_size=orig_img_size,
block_idx=i
)
print_obj(
func_name,
"upsample_vec_to_img_image_block_conv_{}".format(i),
block_conv
)
# Get layers from ith permanent block.
block_conv_layers = permanent_blocks[i]
# Loop through `Conv2D` layers now of permanent block.
for j in range(len(block_conv_layers)):
block_conv = self.fused_conv2d_pixel_norm(
input_image=block_conv,
conv2d_layer=block_conv_layers[j],
params=params
)
print_obj(
func_name,
"block_conv_{}_{}".format(i, j),
block_conv
)
# Get transition index toRGB conv layer.
to_rgb_conv_layer = self.to_rgb_conv_layers[trans_idx + 1]
# Pass inputs through layer chain.
to_rgb_conv = to_rgb_conv_layer(inputs=block_conv)
print_obj(func_name, "to_rgb_conv", to_rgb_conv)
return to_rgb_conv
##########################################################################
##########################################################################
##########################################################################
def unknown_switch_case_vec_to_img_outputs(
self, Z, orig_img_size, alpha_var, params, growth_index):
"""Uses switch case to use the correct network to generate images.
Args:
Z: tensor, latent vectors of shape [cur_batch_size, latent_size].
orig_img_size: list, the height and width dimensions of the
original image before any growth.
alpha_var: variable, alpha for weighted sum of fade-in of layers.
params: dict, user passed parameters.
growth_index: tensor, current growth stage.
Returns:
Generated image output tensor.
"""
func_name = "unknown_switch_case_{}_outputs".format(self.kind)
# Switch to case based on number of steps for gen outputs.
generated_outputs = tf.switch_case(
branch_index=growth_index,
branch_fns=[
# 4x4
lambda: self.create_base_vec_to_img_network(
Z=Z, params=params
),
# 8x8
lambda: self.create_growth_transition_vec_to_img_network(
Z=Z,
orig_img_size=orig_img_size,
alpha_var=alpha_var,
params=params,
trans_idx=min(0, len(params["conv_num_filters"]) - 2)
),
# 8x8
lambda: self.create_growth_stable_vec_to_img_network(
Z=Z,
orig_img_size=orig_img_size,
params=params,
trans_idx=min(0, len(params["conv_num_filters"]) - 2)
),
# 16x16
lambda: self.create_growth_transition_vec_to_img_network(
Z=Z,
orig_img_size=orig_img_size,
alpha_var=alpha_var,
params=params,
trans_idx=min(1, len(params["conv_num_filters"]) - 2)
),
# 16x16
lambda: self.create_growth_stable_vec_to_img_network(
Z=Z,
orig_img_size=orig_img_size,
params=params,
trans_idx=min(1, len(params["conv_num_filters"]) - 2)
),
# 32x32
lambda: self.create_growth_transition_vec_to_img_network(
Z=Z,
orig_img_size=orig_img_size,
alpha_var=alpha_var,
params=params,
trans_idx=min(2, len(params["conv_num_filters"]) - 2)
),
# 32x32
lambda: self.create_growth_stable_vec_to_img_network(
Z=Z,
orig_img_size=orig_img_size,
params=params,
trans_idx=min(2, len(params["conv_num_filters"]) - 2)
),
# 64x64
lambda: self.create_growth_transition_vec_to_img_network(
Z=Z,
orig_img_size=orig_img_size,
alpha_var=alpha_var,
params=params,
trans_idx=min(3, len(params["conv_num_filters"]) - 2)
),
# 64x64
lambda: self.create_growth_stable_vec_to_img_network(
Z=Z,
orig_img_size=orig_img_size,
params=params,
trans_idx=min(3, len(params["conv_num_filters"]) - 2)
),
# 128x128
lambda: self.create_growth_transition_vec_to_img_network(
Z=Z,
orig_img_size=orig_img_size,
alpha_var=alpha_var,
params=params,
trans_idx=min(4, len(params["conv_num_filters"]) - 2)
),
# 128x128
lambda: self.create_growth_stable_vec_to_img_network(
Z=Z,
orig_img_size=orig_img_size,
params=params,
trans_idx=min(4, len(params["conv_num_filters"]) - 2)
),
# 256x256
lambda: self.create_growth_transition_vec_to_img_network(
Z=Z,
orig_img_size=orig_img_size,
alpha_var=alpha_var,
params=params,
trans_idx=min(5, len(params["conv_num_filters"]) - 2)
),
# 256x256
lambda: self.create_growth_stable_vec_to_img_network(
Z=Z,
orig_img_size=orig_img_size,
params=params,
trans_idx=min(5, len(params["conv_num_filters"]) - 2)
),
# 512x512
lambda: self.create_growth_transition_vec_to_img_network(
Z=Z,
orig_img_size=orig_img_size,
alpha_var=alpha_var,
params=params,
trans_idx=min(6, len(params["conv_num_filters"]) - 2)
),
# 512x512
lambda: self.create_growth_stable_vec_to_img_network(
Z=Z,
orig_img_size=orig_img_size,
params=params,
trans_idx=min(6, len(params["conv_num_filters"]) - 2)
),
# 1024x1024
lambda: self.create_growth_transition_vec_to_img_network(
Z=Z,
orig_img_size=orig_img_size,
alpha_var=alpha_var,
params=params,
trans_idx=min(7, len(params["conv_num_filters"]) - 2)
),
# 1024x1024
lambda: self.create_growth_stable_vec_to_img_network(
Z=Z,
orig_img_size=orig_img_size,
params=params,
trans_idx=min(7, len(params["conv_num_filters"]) - 2)
)
],
name="{}_switch_case_generated_outputs".format(self.name)
)
print_obj(func_name, "generated_outputs", generated_outputs)
return generated_outputs
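# Each pair of branches above corresponds to one resolution: a fade-in transition
# stage followed by a stable stage. A hedged sketch of deriving growth_index from a
# global step, assuming the schedule lives in params (names are illustrative only):
#
#   # growth_index = tf.cast(
#   #     tf.minimum(global_step // params["num_steps_until_growth"],
#   #                2 * (len(params["conv_num_filters"]) - 1)),
#   #     tf.int32)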
def known_switch_case_vec_to_img_outputs(
self, Z, orig_img_size, alpha_var, params):
"""Uses switch case to use the correct network to generate images.
Args:
= timeSeriesPosition
else:
logEvent("ERROR! Wavetools.py: Location vector for timeSeries must have three-components",level=0)
sys.exit(1)
# Mean water level
self.mwl = mwl
# Wave direction
self.waveDir = setDirVector(waveDir)
# Gravity
self.g = np.array(g)
# Derived variables
# Gravity magnitude
self.gAbs = sqrt(sum(g * g))
# Definition of gravity direction
self.vDir = setVertDir(g)
dirCheck(self.waveDir,self.vDir)
#Reading time series
if(arrayData):
tdata = seriesArray
else:
filetype = timeSeriesFile[-4:]
fid = open(timeSeriesFile,"r")
if (filetype !=".txt") and (filetype != ".csv"):
logEvent("ERROR! Wavetools.py: File %s must be given in .txt or .csv format" % (timeSeriesFile),level=0)
sys.exit(1)
elif (filetype == ".csv"):
tdata = np.loadtxt(fid,skiprows=skiprows,delimiter=",")
else:
tdata = np.loadtxt(fid,skiprows=skiprows)
fid.close()
#Checks for tseries file
# Only 2 columns: time & eta
ncols = len(tdata[0,:])
if ncols != 2:
logEvent("ERROR! Wavetools.py: Timeseries file (%s) must have only two columns [time, eta]" % (timeSeriesFile),level=0)
sys.exit(1)
time_temp = tdata[:,0]
self.dt = old_div((time_temp[-1]-time_temp[0]),(len(time_temp)-1))
# If necessary, perform interpolation
doInterp = False
for i in range(1,len(time_temp)):
dt_temp = time_temp[i]-time_temp[i-1]
#check if time is at first column
if time_temp[i]<=time_temp[i-1]:
logEvent("ERROR! WaveTools.py: Found not consistent time entry between %s and %s row in %s file. Time variable must be always at the first column of the file and increasing monotonically" %(i-1,i,timeSeriesFile) )
sys.exit(1)
#check if sampling rate is constant
if dt_temp!=self.dt:
doInterp = True
if(doInterp):
logEvent("INFO WaveTools.py: Not constant sampling rate found, proceeding to signal interpolation to a constant sampling rate",level=0)
self.time = np.linspace(time_temp[0],time_temp[-1],len(time_temp))
self.etaS = np.interp(self.time,time_temp,tdata[:,1])
else:
self.time = time_temp
self.etaS = tdata[:,1]
self.t0 = self.time[0]
# Remove mean level from raw data
self.etaS -= np.mean(self.etaS)
# Filter out first 2.5 % and last 2.5% to make the signal periodic
self.etaS *= costap(len(self.time),cutoff=cutoffTotal)
# clear tdata from memory
del tdata
# Calculate time length
self.tlength = (self.time[-1]-self.time[0])
# Matrix initialisation
self.windows_handover = []
self.windows_rec = []
self.Twindow = 10.
# Direct decomposition of the time series for using at reconstruct_direct
if (self.rec_direct):
Nf = self.N
nfft=len(self.time)
logEvent("INFO: WaveTools.py: performing a direct series decomposition")
decomp = decompose_tseries(self.time,self.etaS,self.dt)
self.ai = decomp[1]
ipeak = np.where(self.ai == max(self.ai))[0][0]
imax = min(ipeak + old_div(Nf,2),len(self.ai))
imin = max(0,ipeak - old_div(Nf,2))
self.ai = self.ai[imin:imax]
self.omega = decomp[0][imin:imax]
self.phi = - decomp[2][imin:imax]
self.ki = dispersion(self.omega,self.depth,g=self.gAbs)
self.Nf = imax - imin
self.setup = decomp[3]
self.kDir = np.zeros((len(self.ki),3),"d")
for ii in range(len(self.ki)):
self.kDir[ii,:] = self.ki[ii]*self.waveDir[:]
for ij in range(self.Nf):
self.omega_c[ij] = self.omega[ij]
self.ki_c[ij] =self.ki[ij]
self.tanh_c[ij] = np.tanh(self.ki[ij]*self.depth)
self.ai_c[ij] = self.ai[ij]
self.phi_c[ij] = self.phi[ij]
for kk in range(3):
self.kDir_c[3*ij+kk] = self.kDir[ij,kk]
self.kDir_ = self.kDir_c
self.omega_ = self.omega_c
self.ki_ =self.ki_c
self.ai_ = self.ai_c
self.tanh_ = self.tanh_c
self.phi_ = self.phi_c
# Spectral windowing
else:
if (window_params is None):
logEvent("ERROR! WaveTools.py: Set parameters for spectral windowing. Argument window_params must be a dictionary")
sys.exit(1)
try:
self.Nwaves = window_params["Nwaves"]
except:
logEvent("ERROR! WaveTools.py: Dictionary key 'Nwaves' (waves per window) not found in window_params dictionary")
sys.exit(1)
try:
self.Tm = window_params["Tm"]
except:
logEvent("ERROR! WaveTools.py: Dictionary key 'Tm' (mean or characteristic wave period) not found in window_params dictionary")
sys.exit(1)
try:
windowName = window_params["Window"]
except:
logEvent("ERROR! WaveTools.py: Dictionary key 'Window' (window function type) not found in window_params dictionary")
sys.exit(1)
if(self.Nwaves > 0.5*self.tlength / self.Tm):
logEvent("ERROR! WaveTools.py: Reconstruction is expected to have two windows or more. Plese reduce the number of waves per window or switch to direct decomposition )")
sys.exit(1)
validWindows = [costap, tophat]
wind_filt = loadExistingFunction(windowName, validWindows)
logEvent("INFO WaveTools.py: performing series decomposition with spectral windows")
# Portion of overlap, compared to window time
try:
self.overlap = window_params["Overlap"]
except:
self.overlap = 0.7
logEvent("INFO WaveTools.py: Overlap entry in window_params dictionary not found. Setting default value of 0.7 (70% of the window length)")
try:
self.cutoff = window_params["Cutoff"]
except:
self.cutoff= 0.1
logEvent("INFO WaveTools.py: Cutoff entry in window_params dictionary not found. Setting default value of 0.1 (1/10 of the window length)")
# Portion of window filtered with the Costap filter
# Setting the handover time, either at the middle of the overlap or just after the filter
self.handover = max(1.1 *self.cutoff, old_div(self.overlap, 2.))
if (self.handover > 0.9 * self.overlap):
logEvent("ERROR! Wavetools.py: Window handover is not optimal as the cutoff is too close to the overlap. Decrease cutoff or increase overlap")
sys.exit(1)
self.Twindow = self.Tm * self.Nwaves # setting the window duration (approx.). Twindow = Tmean * Nwaves
self.Toverlap = self.overlap * self.Twindow
self.Nwindows = int( old_div((self.tlength - self.Twindow ), (self.Twindow - self.Toverlap)) ) + 1 #Getting the actual number of windows (N-1) * (Twindow - Toverlap) + Twindow = total time
self.Twindow = old_div(self.tlength,(1. + (1. - self.overlap)*(self.Nwindows-1))) # Correct Twindow and Toverlap for duration and integer number of windows
self.Toverlap = self.overlap*self.Twindow
logEvent("INFO: Wavetools.py: Correcting window duration for matching the exact time range of the series. Window duration correspond to %s waves approx." %(old_div(self.Twindow, self.Tm)) )
diff = (self.Nwindows-1.)*(self.Twindow -self.Toverlap)+self.Twindow - self.tlength
logEvent("INFO: Wavetools.py: Checking duration of windowed time series: %s per cent difference from original duration" %(100*diff) )
logEvent("INFO: Wavetools.py: Using %s windows for reconstruction with %s sec duration and %s per cent overlap" %(self.Nwindows, self.Twindow,100*self.overlap ))
# Setting where each window starts and ends
for jj in range(self.Nwindows):
span = np.zeros(2,"d")
tfirst = self.time[0] + self.Twindow
tlast = self.time[-1] - self.Twindow
if jj == 0:
ispan1 = 0
ispan2 = np.where(self.time> tfirst)[0][0]
elif jj == self.Nwindows-1:
ispan1 = np.where(self.time > tlast)[0][0]
ispan2 = len(self.time)-1
else:
tstart = self.time[ispan2] - self.Toverlap
ispan1 = np.where(self.time > tstart)[0][0]
ispan2 = np.where(self.time > tstart + self.Twindow )[0][0]
span[0] = ispan1
span[1] = ispan2
# Storing time series in windows and handover times
self.windows_handover.append( self.time[ispan2] - self.handover*self.Twindow )
self.windows_rec.append(np.array(list(zip(self.time[ispan1:ispan2],self.etaS[ispan1:ispan2]))))
# Decomposing windows to frequency domain
self.decompose_window = []
# style = "k-"
# ii = 0
for wind in self.windows_rec:
nfft=len(wind[:,0])
wind[:,1] *=wind_filt(nfft,cutoff = self.cutoff)
decomp = decompose_tseries(wind[:,0],wind[:,1],self.dt)
self.N = min(self.N, len(decomp[0]))
Nftemp = self.N
ipeak = np.where(decomp[1] == max(decomp[1]))[0][0]
imax = min(ipeak + old_div(Nftemp,2),len(decomp[1]))
imin = max(0,ipeak - old_div(Nftemp,2))
self.Nf = imax-imin
if (self.Nf < self.N):
if imin == 0:
imax = imax + (self.N - self.Nf)
else:
imin = imin - (self.N - self.Nf)
self.Nf = self.N
decomp[1] = decomp[1][imin:imax]
decomp[0] = decomp[0][imin:imax]
decomp[2] = -decomp[2][imin:imax]
ki = dispersion(decomp[0],self.depth,g=self.gAbs)
kDir = np.zeros((len(ki),3),"d")
Tlag = np.zeros(ki.shape,)
for ii in range(len(ki)):
kDir[ii,:] = ki[ii]*self.waveDir[:]
Tlag[ii] = old_div(sum(Lgen[:]*kDir[ii,:]),decomp[0][ii])
self.Tlag = max(Tlag)
if self.Tlag > (old_div(self.Toverlap,2.) - self.cutoff*self.Twindow):
logEvent("ERROR!: WaveTools.py: Relaxation zone lenght does not allow for spatial coherency in the windows method.Please a) increase number of waves per window or b) increase overlap or c) decrease lenght of the relaxation zone")
sys.exit(1)
decomp.append(kDir)
decomp.append(ki)
self.decompose_window.append(decomp)
#c++ declarations
for ii in range(len(self.windows_handover)):
self.whand_c[ii] = self.windows_handover[ii]
self.T0[ii] = self.windows_rec[ii][0,0]
self.whand_ = self.whand_c
self.T0_ = self.T0
for ii in range(self.Nwindows):
for jj in range(self.N):
ij = ii*self.N + jj
if(jj <len(self.decompose_window[ii][0])):
self.omega_c[ij] = self.decompose_window[ii][0][jj]
self.ki_c[ij] = self.decompose_window[ii][5][jj]
self.tanh_c[ij] = np.tanh(self.ki_c[ij]*self.depth)
self.ai_c[ij] = self.decompose_window[ii][1][jj]
self.phi_c[ij] =self.decompose_window[ii][2][jj]
for kk in range(3):
self.kDir_c[3*ij+kk] = self.decompose_window[ii][4][jj,kk]
else:
self.omega_c[ij] =1.
self.ki_c[ij] = 1.
self.tanh_c[ij] = 1.
self.ai_c[ij] = 0.
self.phi_c[ij] =0.
for kk in range(3):
self.kDir_c[3*ij+kk] = 1.
self.kDir_ = self.kDir_c
self.omega_ = self.omega_c
self.ki_ =self.ki_c
self.ai_ = self.ai_c
self.tanh_ = self.tanh_c
self.phi_ = self.phi_c
self.Nall = self.Nf*self.Nwindows
for ii in range(3):
self.x0_c[ii] = self.x0[ii]
self.waveDir_c[ii] = self.waveDir[ii]
self.vDir_c[ii] = self.vDir[ii]
self.x0_ = self.x0_c
self.waveDir_ = self.waveDir_c
self.vDir_ = self.vDir_c
if(self.rec_direct):
self.eta = self.etaDirect
self.u = self.uDirect
self._cpp_eta = self._cpp_etaDirect
self._cpp_u = self._cpp_uDirect
else:
self.eta = self.etaWindow
self.u = self.uWindow
self._cpp_eta = self._cpp_etaWindow
self._cpp_u = self._cpp_uWindow
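# Worked example of the windowing arithmetic above (illustrative numbers only):
# with Tm = 2 s, Nwaves = 8, overlap = 0.7 and a 200 s record, Twindow = 16 s and
# Toverlap = 11.2 s, so Nwindows = int((200 - 16) / (16 - 11.2)) + 1 = 39; Twindow is
# then corrected to 200 / (1 + 0.3 * 38) ≈ 16.13 s so the windows tile the record exactly.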
def windOut(self):
return {"TWindow":self.Twindow,"TOverlap":self.Toverlap,"Tlag":self.Tlag, "rec_direct":self.rec_direct}
def _cpp_etaDirect(self,x,t):
return __cpp_etaDirect(x,self.x0_,t,self.kDir_,self.omega_,self.phi_,self.ai_,self.Nf, self.fast)
def _cpp_uDirect(self,U,x,t):
__cpp_uDirect(U,x,self.x0_,t,self.kDir_,self.ki_,self.omega_,self.phi_,self.ai_,self.mwl,self.depth,self.Nf,self.waveDir_, self.vDir_, self.tanh_, self.gAbs, self.fast)
def etaDirect(self, x, t):
"""Calculates free surface elevation(Timeseries class-direct method
Parameters
----------
x : numpy.ndarray
Position vector
t : float
Time variable
Returns
--------
float
Free-surface elevation as a float
"""
cython.declare(xx=cython.double[3])
xx[0] = x[0]
xx[1] = x[1]
xx[2] = x[2]
return self._cpp_etaDirect(xx,t)
def uDirect(self, x, t):
"""Calculates wave velocity vector (Timeseries class-direct method)
Parameters
----------
x : numpy.ndarray
Position vector
t : float
Time variable
Returns
--------
numpy.ndarray
Velocity vector as 1D array
"""
cython.declare(xx=cython.double[3])
cython.declare(cppU=cython.double[3])
for ii in range(3):
b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
let a = b in a + b +
<reponame>ntaagarwal/indoorInverse
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import random
import PIL
import torch
import torchvision
from torchvision.transforms import functional as F
from torchvision.transforms.transforms import Lambda
from torch.nn.functional import interpolate, pad
# from torchvision.transforms import RandomCrop
from torch import Tensor
from typing import Tuple, List, Optional
import numpy as np
from utils.utils_misc import checkEqual1
import numbers
def _get_image_size(img):
if F._is_pil_image(img):
return img.size
elif isinstance(img, torch.Tensor) and img.dim() > 2:
return img.shape[-2:][::-1]
else:
raise TypeError("Unexpected type {}".format(type(img)))
class Compose(object):
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, image, target=None):
if target is not None:
for t in self.transforms:
image, target = t(image, target)
return image, target
else:
for t in self.transforms:
image = t(image)
return image
def __repr__(self):
format_string = self.__class__.__name__ + "("
for t in self.transforms:
format_string += "\n"
format_string += " {0}".format(t)
format_string += "\n)"
return format_string
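# A minimal usage sketch of composing these transforms (parameter values are
# illustrative assumptions, not defaults taken from this repository):
#
#   transform = Compose([
#       Resize(min_size=240, max_size=320, interpolation='BILINEAR'),
#       RandomCrop(H_cropto=224, W_cropto=224),
#   ])
#   image_out, target_out = transform(pil_image, target_tensor)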
class Resize(object):
def __init__(self, min_size, max_size, interpolation='BILINEAR'):
# interpolation: 'BILINEAR', 'NEAREST', ...
if not isinstance(min_size, (list, tuple)):
min_size = (min_size,)
self.min_size = min_size
self.max_size = max_size
assert interpolation in ['BILINEAR', 'NEAREST'], 'interpolation option not supported!'
if interpolation=='BILINEAR':
self.PIL_interpolation = PIL.Image.BILINEAR # https://pytorch.org/docs/stable/torchvision/transforms.html#torchvision.transforms.Resize
self.F_interpolation = 'bilinear'
elif interpolation=='NEAREST':
self.PIL_interpolation = PIL.Image.NEAREST
self.F_interpolation = 'nearest'
# modified from torchvision to add support for max size
def get_size(self, image_size):
w, h = image_size
size = random.choice(self.min_size)
max_size = self.max_size
if max_size is not None:
min_original_size = float(min((w, h)))
max_original_size = float(max((w, h)))
if max_original_size / min_original_size * size > max_size:
size = int(round(max_size * min_original_size / max_original_size))
if (w <= h and w == size) or (h <= w and h == size):
return (h, w)
if w < h:
ow = size
oh = int(size * h / w)
else:
oh = size
ow = int(size * w / h)
return (oh, ow)
def __call__(self, images, target=None):
if isinstance(images, list):
size = self.get_size(images[0].size)
image_size = images[0].size
assert checkEqual1([X.size for X in images]), 'sizes of an image list should all equal!'
images = [F.resize(X, size, interpolation=self.PIL_interpolation) for X in images]
else:
size = self.get_size(images.size)
image_size = images.size
images = F.resize(images, size, interpolation=self.PIL_interpolation)
if target is None:
return images
# target = target.resize((image.size[0], image.size[1], -1))
target.unsqueeze_(0)
target = interpolate(target, size=size, mode=self.F_interpolation) # resize target to match the resized images; https://pytorch.org/docs/stable/nn.functional.html#torch.nn.functional.interpolate
target.squeeze_(0)
return images, target
# class RandomCrop(object):
# def __init__(self, min_size, max_size):
# if not isinstance(min_size, (list, tuple)):
# min_size = (min_size,)
# self.min_size = min_size
# self.max_size = max_size
# # modified from torchvision to add support for max size
# def get_size(self, image_size):
# w, h = image_size
# size = random.choice(self.min_size)
# max_size = self.max_size
# if max_size is not None:
# min_original_size = float(min((w, h)))
# max_original_size = float(max((w, h)))
# if max_original_size / min_original_size * size > max_size:
# size = int(round(max_size * min_original_size / max_original_size))
# if (w <= h and w == size) or (h <= w and h == size):
# return (h, w)
# if w < h:
# ow = size
# oh = int(size * h / w)
# else:
# oh = size
# ow = int(size * w / h)
# return (oh, ow)
# @staticmethod
# def get_params(img: Tensor, output_size: Tuple[int, int]) -> Tuple[int, int, int, int]:
# """Get parameters for ``crop`` for a random crop.
# Taken from https://pytorch.org/docs/stable/_modules/torchvision/transforms/transforms.html#RandomCrop
# Args:
# img (PIL Image or Tensor): Image to be cropped.
# output_size (tuple): Expected output size of the crop.
# Returns:
# tuple: params (i, j, h, w) to be passed to ``crop`` for random crop.
# """
# w, h = _get_image_size(img)
# th, tw = output_size
# if w == tw and h == th:
# return 0, 0, h, w
# i = torch.randint(0, h - th + 1, size=(1, )).item()
# j = torch.randint(0, w - tw + 1, size=(1, )).item()
# return i, j, th, tw
# def __call__(self, image, target=None):
# crop_size = self.get_size(image.size) # (h, w)
# assert image.size[0] > crop_size[1] # im_W > crop_W
# assert image.size[1] > crop_size[0] # im_H > crop_H
# i, j, h, w = self.get_params(image, crop_size)
# # image = F.resize(image, size, interpolation=self.PIL_interpolation)
# image = F.crop(image, i, j, h, w)
# if target is None:
# return image
# # target = target.resize((image.size[0], image.size[1], -1))
# # target.unsqueeze_(0)
# # print('--0', target.shape)
# # target = F.crop(image, i, j, h, w) # ONLY FOR PYTORCH>=1.6.0
# # print('--1', target.shape)
# # target.squeeze_(0)
# return image, target[..., i:i+h, j:j+w]
class RandomCrop(object):
def __init__(self, H_cropto, W_cropto):
# if not isinstance(min_size, (list, tuple)):
# min_size = (min_size,)
self.H_cropto = H_cropto
self.W_cropto = W_cropto
# # modified from torchvision to add support for max size
# def get_size(self, image_size):
# w, h = image_size
# size = random.choice(self.min_size)
# max_size = self.max_size
# if max_size is not None:
# min_original_size = float(min((w, h)))
# max_original_size = float(max((w, h)))
# if max_original_size / min_original_size * size > max_size:
# size = int(round(max_size * min_original_size / max_original_size))
# if (w <= h and w == size) or (h <= w and h == size):
# return (h, w)
# if w < h:
# ow = size
# oh = int(size * h / w)
# else:
# oh = size
# ow = int(size * w / h)
# return (oh, ow)
@staticmethod
def get_params(img: Tensor, output_size: Tuple[int, int]) -> Tuple[int, int, int, int]:
"""Get parameters for ``crop`` for a random crop.
Taken from https://pytorch.org/docs/stable/_modules/torchvision/transforms/transforms.html#RandomCrop
Args:
img (PIL Image or Tensor): Image to be cropped.
output_size (tuple): Expected output size of the crop.
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for random crop.
"""
w, h = _get_image_size(img)
th, tw = output_size
if w == tw and h == th:
return 0, 0, h, w
i = torch.randint(0, h - th + 1, size=(1, )).item()
j = torch.randint(0, w - tw + 1, size=(1, )).item()
return i, j, th, tw
def __call__(self, images, target=None):
# crop_size = self.get_size(image.size) # (h, w)
crop_size = (self.H_cropto, self.W_cropto)
if isinstance(images, list):
image_size = images[0].size
assert checkEqual1([X.size for X in images]), 'sizes of an image list should all equal!'
sample_image = images[0]
else:
image_size = images.size
sample_image = images
assert image_size[0] >= crop_size[1] # im_W >= crop_W
assert image_size[1] >= crop_size[0] # im_H >= crop_H
i, j, h, w = self.get_params(sample_image, crop_size)
if isinstance(images, list):
images = [F.crop(X, i, j, h, w) for X in images]
else:
images = F.crop(images, i, j, h, w)
if target is None:
return images
# target = target.resize((image.size[0], image.size[1], -1))
# target.unsqueeze_(0)
# print('--0', target.shape)
# target = F.crop(image, i, j, h, w) # ONLY FOR PYTORCH>=1.6.0
# print('--1', target.shape)
# target.squeeze_(0)
return images, target[..., i:i+h, j:j+w]
class CenterCrop(object):
def __init__(self, min_size, max_size):
if not isinstance(min_size, (list, tuple)):
min_size = (min_size,)
self.min_size = min_size
self.max_size = max_size
# modified from torchvision to add support for max size
def get_size(self, image_size):
w, h = image_size
size = random.choice(self.min_size)
max_size = self.max_size
if max_size is not None:
min_original_size = float(min((w, h)))
max_original_size = float(max((w, h)))
if max_original_size / min_original_size * size > max_size:
size = int(round(max_size * min_original_size / max_original_size))
if (w <= h and w == size) or (h <= w and h == size):
return (h, w)
if w < h:
ow = size
oh = int(size * h / w)
else:
oh = size
ow = int(size * w / h)
return (oh, ow)
def __call__(self, images, target=None):
if isinstance(images, list):
image_size = images[0].size
assert checkEqual1([X.size for X in images]), 'sizes of an image list should all equal!'
sample_image = images[0]
else:
image_size = images.size
sample_image = images
crop_size = self.get_size(sample_image.size) # (h, w)
assert image_size[0] > crop_size[1] # im_W > crop_W
assert image_size[1] > crop_size[0] # im_H > crop_H
# image = F.resize(image, size, interpolation=self.PIL_interpolation)
if isinstance(images, list):
images = [F.center_crop(X, crop_size) for X in images]
else:
images = F.center_crop(images, crop_size)
if target is None:
return images
image_height = sample_image.size[1]
image_width = sample_image.size[0]
crop_top = int(round((image_height - crop_size[0]) / 2.))
crop_left = int(round((image_width - crop_size[1]) / 2.))
return images, target[..., crop_top:crop_top+crop_size[0], crop_left:crop_left+crop_size[1]]
class Pad(object):
def __init__(self, H_padto, W_padto):
# if not isinstance(min_size, (list, tuple)):
# min_size = (min_size,)
self.H_padto = H_padto
self.W_padto = W_padto
def __call__(self, images, target=None):
if
<filename>http_auth/passive_datapool.py
# Data pool with passive reclaim
# A part of the httpauth-lib derived from DOORMEN controller.
# (c) 2018 National Institute of Advanced Industrial Science and Technology.
import weakref
import threading
import collections
import types
import sys
wr = weakref.ref
wr_None = lambda: None # pseudo weak reference to None or an expired object
c_true = lambda x: True
__all__ = ('DataPool', 'PooledDataMixin', 'PooledDataBase')
IN_POOL = [True]
DEAD = [False]
DEBUG = False
class _Handle:
__slots__ = ('watch_target', 'datapool', 'refback', 'finalizer')
def __init__(self, watch_target, datapool, refback, finalizer):
self.watch_target = watch_target
self.datapool = datapool
self.refback = refback
self.finalizer = finalizer
class _Rref(wr):
__slots__ = ('o', '__weakref__')
self_wr = None
def __new__(klass, ref, o):
def _cb(arg):
self = self_wr()
if self and self.o:
o = self.o
self.o = None
DataPool._reclaim(o)
self = weakref.ref.__new__(klass, ref, _cb)
self_wr = weakref.ref(self)
return self
def __init__(self, ref, o):
super().__init__(ref)
self.o = o
def __del__(self):
if self.o:
o = self.o
self.o = None
DataPool._reclaim_dead(o)
def detach(self):
# intentionally coincide name with weakref.finalize
self.o = None
_identity = lambda x: x
_ignore = lambda x: None
class DataPool:
"""A data pool which will reclaim unused data in a passive manner.
One or more data items can be put into the pool and leased out one-by-one
using `get()`.
Each leased data is internally associated to a referent object
`ref`, provided as an argument to `get`. If the referent is
discarded from memory, the data will be automatically recraimed
and returned to the pool. The data can also be returned to pool
by calling `DataPool.return_to_pool(data)`.
The referent object `ref` must be alive while the data is used.
Other methods in this class must not be called on the data when
the corresponding referent is lost. It is OK to lose both ref and
data at the same time, however.
This class is useful when some resource is used with callbacks for
some another library, and the usual `with` or `try-finally`
pattern cannot be used to reclaim a resource in case of abnormal
exits.
Methods regarding managed data are static methods: these are
called like `DataPool.method(data, ...)`.
The leased data MUST NOT have a permanent strong reference to the
referent: such a circular dependency would eliminate the possibility of
returning the object to the pool and cause memory leaks (such garbage
cannot be collected by the cycle-detecting GC). Having a weak
reference is fine, or DataPool.get_referent(data) will serve as a
replacement.
Alternative approach (with a side effect) can be enabled with the
`gc_recovery` hook parameter. See more description in the bottom
of the source code for more details and possible workarounds.
The pool is thread safe, and the leased data is also safe with
background GC behavior. However, methods working on leased data
must not be called concurrently on a single data item, otherwise the
behavior is undefined.
"""
def __init__(self, *, factory=None, type=None, gc_recovery=None):
"""Create a data pool.
Optional Parameters:
`factory`: generator function for new data, used when the
pool cannot serve request by existing data.
`type`: limit pooled data to subtype of the given type.
`gc_recovery`: hook for rescue from cyclic data condition.
See documentation in the bottom of the source code for
details.
"""
self._queue = collections.deque()
self._lock = threading.Lock()
self._factory = factory
self._type = type
self._gc_recovery = (_identity if gc_recovery == True else
_ignore if gc_recovery == False else
gc_recovery)
self._reclaimed = []
# Public APIs
def get(self, ref, pred=c_true, factory=None):
"""Retrieve one of stored data from pool.
Parameters:
`ref` (mandatory): the referent object to be tracked. It
must be weak-referencible. If `ref` object vanishes,
the retrieved data will automatically be returned to
this pool.
`pred` (optional): predicate function to choose data to be
retrieved.
`factory` (optional): function returning new data when
existing data is not available. If it is not
supplied for both `get()` and `DataPool()`, `get()`
will return None. See `put()` for requirements on
the data returned from factory.
"""
factory = factory or self._factory
if len(self._reclaimed) > 0:
while True:
# intentionally racing with producer _reclaim_dead():
# Both list.append and list.pop are atomic.
# Use of self._lock will cause deadlock inside GC.
l = []
try:
l.append(self._reclaimed.pop())
except IndexError:
break
for i in reversed(l):
self._append_to_queue(i)
with self._lock:
d = None
n = len(self._queue)
for _ in range(n):
d = self._queue.popleft()
if pred(d):
break
self._queue.append(d)
else:
d = None
if factory and not d:
d = factory()
if d:
self._setup_lease(d, ref)
if DEBUG:
print("data {} leased from pool {}".format(d, self))
return d
def put(self, data):
"""Store a new data to pool.
If stored data have slots restriction, it must either inherit
from `PooledDataBase` or have slots shown in
`DataPool.required_slot_names`.
"""
if (self._type and not isinstance(data, self._type)):
raise ValueError("Datapool accepts only {!s} but put {!s}".
format(self._type, type(data)))
self._check_cleanness(data)
self._append_to_queue(data)
if DEBUG:
print("data {} put to pool {}".format(d, self))
def put_and_use(self, data, ref):
"""Register data to be used with the given pool.
The data is already `leased out`: it can be used in the
current context.
It is mostly equivalent to put-get pair, but `put_and_use`
ensures that the same data is always returned.
The data has the same restriction as put.
"""
if (self._type and not isinstance(data, self._type)):
raise ValueError("Datapool accepts only {} but put {}".
format(self._type, type(data)))
self._check_cleanness(data)
self._setup_lease(data, ref)
if DEBUG:
print("data {} put_and_use for pool {}".format(data, self))
return data
@staticmethod
def replace_data(old, new):
"""Replace the data `old`, associated to some DataPool, with
the `new` data.
The old data must have been retrieved from some pool. The data `new`
will be returned to pool instead of `old` in future.
It is almost equivalent to `remove_from_pool(old)` followed by
`put_and_use(new, ...)`, but inheriting associated pool and
referent object from `old`.
"""
handle, ref, pool = DataPool._check_alive_leased_data(old)
# inhibit finalizer
assert(ref)
assert(handle.refback[0] is old)
# holding ref alive is important!
# BEGIN CRITICAL SECTION regarding to finalizer
old.__handle = DEAD
handle.refback[0] = new
new.__handle = handle
# END CRITICAL SECTION
if not ref or not old:
raise AssertionError(str(ref)+str(old))
# for doubly sure ref is not optimized out in any way in future
return new
@staticmethod
def update_referent(d, ref):
"""Update referent object of d to ref.
Both old and new referents must be alive at this moment.
"""
handle, old_ref, pool = DataPool._check_alive_leased_data(d)
# inhibit old finalizer
assert(old_ref)
assert(ref)
assert(handle.watch_target() == old_ref)
handle.finalizer.detach()
pool._setup_lease(d, ref, forced=True)
DataPool._clear_handle_content(handle, finalizer_detach=False)
@staticmethod
def return_to_pool(d):
"""Return the data `d` immediately to the associated DataPool.
"""
handle, ref, pool = DataPool._check_alive_leased_data(d)
# inhibit finalizer
if DEBUG:
print("data {} returned to pool {}".format(d, pool))
DataPool._clear_handle_content(handle)
d.__handle = IN_POOL
if pool:
pool._append_to_queue(d)
finished = return_to_pool
@staticmethod
def remove_from_pool(d):
"""Declare `d` should not be returned to pool.
the data d must not be returned to pool already."""
handle, ref, pool = DataPool._check_alive_leased_data(d)
# inhibit finalizer
DataPool._clear_handle_content(handle)
d.__handle = DEAD
kill = remove_from_pool
@staticmethod
def get_referent(d):
"""Get a (strong) referece to the referent object
currently associated to the argument.
"""
handle, ref, pool = DataPool._check_alive_leased_data(d)
return ref
# debug method
def __len__(self):
return len(self._queue) + len(self._reclaimed)
def __bool__(self):
return True
def _dump(self):
l = [*iter(self._queue), *iter(self._reclaimed)]
return("DataPool({!r})".format(l))
def _debug_peeklist(self):
l = [*iter(self._queue), *iter(self._reclaimed)]
return l
# internal methods
@staticmethod
def _reclaim(refback):
# called either from finalizer or as weakref callback
if sys.is_finalizing():
# meaningless to return objects to pools upon exit
return
d = refback[0]
if not d: return
handle = d.__handle
if type(handle) is not _Handle:
return
assert(d.__handle.watch_target() == None)
pool = handle.datapool()
DataPool._clear_handle_content(handle, finalizer_detach=False)
d.__handle = IN_POOL
if pool:
pool._append_to_queue(d)
@staticmethod
def _reclaim_dead(refback):
# be careful: data is dead and we're in GC!
if sys.is_finalizing():
return
d = refback[0]
if not d: return
handle = d.__handle
if type(handle) is not _Handle: return
#assert(d.__handle.watch_target() == None)
# d is dead: if watch_target is live, it have lost a reference to d.
# It means that d is safe to be returned to pool.
pool = handle.datapool()
DataPool._clear_handle_content(handle, finalizer_detach=False)
d.__handle = IN_POOL
if not pool:
return
if pool._gc_recovery:
new_d = pool._gc_recovery(d)
if new_d:
pool._check_cleanness(new_d, in_pool_ok=True)
# -*- coding: utf-8 -*-
"""
"""
import logging
import pkg_resources
import time
from mingus.containers import Note, NoteContainer, Bar, Track
from mingus.midi.fluidsynth import FluidSynthSequencer
from mingus.midi import pyfluidsynth as globalfs
from typing import Union
from pathlib import Path
from .keyboard import PianoKeyboard, PianoKey
from .utils import (
note_to_string,
note_container_to_note_string_list,
bar_to_note_string_list,
track_to_note_string_list,
)
DEFAULT_SOUND_FONTS = Path(pkg_resources.resource_filename("pypiano", "/sound_fonts/FluidR3_GM.sf2"))
# Valid audio driver are taken from docstring of mingus.midi.fluidsynth.FluidSynthSequencer.start_audio_output() method
# https://github.com/bspaans/python-mingus/blob/f131620eb7353bcfbf1303b24b951a95cad2ac20/mingus/midi/fluidsynth.py#L57
VALID_AUDIO_DRIVERS = (
None,
"alsa",
"oss",
"jack",
"portaudio",
"sndmgr",
"coreaudio",
"Direct Sound",
"dsound",
"pulseaudio",
)
# See a list of General Midi instruments here https://en.wikipedia.org/wiki/General_MIDI. Pianos are in section one
DEFAULT_INSTRUMENTS = {
"Acoustic Grand Piano": 0,
"Bright Acoustic Piano": 1,
"Electric Grand Piano": 2,
"Honky-tonk Piano": 3,
"Electric Piano 1": 4,
"Electric Piano 2": 5,
"Harpsichord": 6,
"Clavi": 7,
}
# Initialize module logger
logger = logging.getLogger("pypiano")
logger.addHandler(logging.NullHandler())
class Piano(object):
"""Class representing a Piano with 88 keys based on mingus
Class to programmatically play piano via audio output or record music to a wav file. Abstraction layer on top of
mingus.midi.fluidsynth.FluidSynthSequencer.
Attributes
sound_fonts_path: Optional string or Path object pointing to a *.sf2 files. PyPiano ships sound fonts by default
audio_driver: Optional argument specifying the audio driver to use. The following audio drivers can be used:
(None, "alsa", "oss", "jack", "portaudio", "sndmgr", "coreaudio","Direct Sound", "dsound", "pulseaudio").
Not all drivers will be available for every platform
instrument: Optional argument to set the instrument that should be used. If default sound fonts are used you can
choose one of the following piano sounds:
("Acoustic Grand Piano", "Bright Acoustic Piano", "Electric Grand Piano", "Honky-tonk Piano",
"Electric Piano 1", "Electric Piano 2", "Harpsichord", "Clavi"). If different sound fonts are provided
you should also pass an integer with the instrument number
"""
def __init__(
self,
sound_fonts_path: Union[str, Path] = DEFAULT_SOUND_FONTS,
audio_driver: Union[str, None] = None,
instrument: Union[str, int] = "Acoustic Grand Piano",
) -> None:
self.__fluid_synth_sequencer = FluidSynthSequencer()
self._sound_fonts_path = Path(sound_fonts_path)
# Set variable to track if sound fonts are loaded
self._sound_fonts_loaded = False
self.load_sound_fonts(self._sound_fonts_path)
# Audio output is lazily loaded when self.play method is called the first time without recording
self._current_audio_driver = audio_driver
# Set a variable to track if audio output is currently active
self._audio_driver_is_active = False
# Set instrument
self.instrument = instrument
self.load_instrument(self.instrument)
# Initialize a piano keyboard
self.keyboard = PianoKeyboard()
def load_sound_fonts(self, sound_fonts_path: Union[str, Path]) -> None:
"""Load sound fonts from a given path"""
logger.debug("Attempting to load sound fonts from {file}".format(file=sound_fonts_path))
if self._sound_fonts_loaded:
self._unload_sound_fonts()
if not self.__fluid_synth_sequencer.load_sound_font(str(sound_fonts_path)):
raise Exception("Could not load sound fonts from {file}".format(file=sound_fonts_path))
self._sound_fonts_loaded = True
self._sound_fonts_path = Path(sound_fonts_path)
logger.debug("Successfully initialized sound fonts from {file_path}".format(file_path=sound_fonts_path))
def _unload_sound_fonts(self) -> None:
"""Unload a given sound font file
Safely unload current sound font file. Method controls if a sound font file is already loaded via
self._sound_fonts_loaded.
"""
logger.debug("Unloading current active sound fonts from file: {0}".format(self._sound_fonts_path))
if self._sound_fonts_loaded:
self.__fluid_synth_sequencer.fs.sfunload(self.__fluid_synth_sequencer.sfid)
self._sound_fonts_loaded = False
self._sound_fonts_path = None
else:
logger.debug("No active sound fonts")
def _start_audio_output(self) -> None:
"""Private method to start audio output
This method, in conjunction with self._stop_audio_output, should be used to safely start and stop audio output,
for example when there is a switch between audio output and recording audio to a file (check the docstring of
self._stop_audio_output for more details on why this is necessary). It is a thin wrapper around
mingus.midi.fluidsynth.FluidSynthSequencer.start_audio_output().
"""
logger.debug("Starting audio output using driver: {driver}".format(driver=self._current_audio_driver))
# The driver validation below is actually already done by the low-level method and is included here again for transparency
if self._current_audio_driver not in VALID_AUDIO_DRIVERS:
raise ValueError(
"{driver} is not a valid audio driver. Must be one of: {allowed_drivers}".format(
driver=self._current_audio_driver,
allowed_drivers=VALID_AUDIO_DRIVERS,
)
)
if not self._audio_driver_is_active:
self.__fluid_synth_sequencer.start_audio_output(self._current_audio_driver)
# It seems to be necessary to reset the program after starting audio output
# mingus.midi.pyfluidsynth.program_reset() is calling fluidsynth fluid_synth_program_reset()
# https://www.fluidsynth.org/api/group__midi__messages.html#ga8a0e442b5013876affc685b88a6e3f49
self.__fluid_synth_sequencer.fs.program_reset()
self._audio_driver_is_active = True
else:
logger.debug("Audio output seems to be already active")
def _stop_audio_output(self) -> None:
"""Private method to stop audio output
Method is used to safely stop audio output via deleting an active audio driver, for example if there
is a switch between audio output and recording. This method should be used in conjunction with
self._start_audio_output(). It is a thin wrapper around the mingus.midi.pyfluidsynth.delete_fluid_audio_driver
and ensures that mingus.midi.pyfluidsynth.delete_fluid_audio_driver is not called twice because this seems to
result in segmentation fault:
[1] 4059 segmentation fault python3
Tracking is done by checking and setting the self._audio_driver_is_active attribute. This method basically
replaces mingus.midi.pyfluidsynth.delete() (which is itself essentially a wrapper around
mingus.midi.pyfluidsynth.delete_fluid_audio_driver), because the delete method from the mingus package does not
seem safe to use: it results in a crash if it is called after an audio driver was already deleted and no driver
is currently active. The mingus.midi.pyfluidsynth.delete method does try to avoid that scenario by checking its
audio_driver attribute for None, but once an audio driver has been initialized that attribute is apparently never
set back to None, so it cannot be relied upon to tell whether audio output is active.
I am not sure whether this approach is ideal or has side effects, but it seems to work so far and enables
switching between recording to a file and playing audio output without initializing a new object.
"""
if self._audio_driver_is_active:
globalfs.delete_fluid_audio_driver(self.__fluid_synth_sequencer.fs.audio_driver)
# It seems to be necessary to reset the program after deleting the audio driver
# mingus.midi.pyfluidsynth.program_reset() is calling fluidsynth fluid_synth_program_reset()
# https://www.fluidsynth.org/api/group__midi__messages.html#ga8a0e442b5013876affc685b88a6e3f49
self.__fluid_synth_sequencer.fs.program_reset()
self._audio_driver_is_active = False
else:
logger.debug("Audio output seems to be already inactive")
def load_instrument(self, instrument: Union[str, int]) -> None:
"""Method to change the piano instrument
Load an instrument that should be used for playing or recording music. If PyPiano default sound fonts are used
you can choose one of the following instruments:
("Acoustic Grand Piano", "Bright Acoustic Piano", "Electric Grand Piano", "Honky-tonk Piano",
"Electric Piano 1", "Electric Piano 2", "Harpsichord", "Clavi")
Args
instrument: String with the name of the instrument to be used for default sound fonts. If different sound
fonts are used an integer with the instrument number should be provided.
"""
logger.info("Setting instrument: {0}".format(instrument))
# If default sound fonts are used, check if the provided instrument string is contained in the valid
# instruments. If different sound fonts are provided, checks are disabled
if self._sound_fonts_path == DEFAULT_SOUND_FONTS:
if isinstance(instrument, int):
raise TypeError("When using default sound fonts you must pass a string for instrument parameter")
if instrument not in tuple(DEFAULT_INSTRUMENTS.keys()):
raise ValueError(
"Unknown instrument parameter. Instrument must be one of: {instrument}".format(
instrument=tuple(DEFAULT_INSTRUMENTS.keys())
)
)
self.__fluid_synth_sequencer.set_instrument(channel=1, instr=DEFAULT_INSTRUMENTS[instrument], bank=0)
self.instrument = instrument
else:
if isinstance(instrument, str):
raise TypeError("When using non default sound fonts you must pass an integer for instrument parameter")
self.__fluid_synth_sequencer.set_instrument(channel=1, instr=instrument, bank=0)
self.instrument = instrument
def play(
self,
music_container: Union[str, int, Note, NoteContainer, Bar, Track, PianoKey],
recording_file: Union[str, None] = None,
record_seconds: int = 4,
) -> None:
"""Function to play a provided music container and control recording settings
Central user facing method of Piano class to play or record a given music container. Handles setting
up audio output or recording to audio file and handles switching between playing audio and recording to wav
file.
Args
music_container: A music container such as Notes, NoteContainers, etc. describing a piece of music
recording_file: Path to a wav file where audio should be saved to. If passed music_container will be
recorded
record_seconds: The duration of recording in seconds
"""
# Check a given music container for invalid notes. See docstring of self._lint_music_container for more details
self._lint_music_container(music_container)
if recording_file is None:
logger.info("Playing music container: {music_container} via audio".format(music_container=music_container))
self._start_audio_output()
self._play_music_container(music_container)
else:
logger.info(
"Recording music container: {music_container} to file {recording_file}".format(
music_container=music_container, recording_file=recording_file
)
)
self._stop_audio_output()
self.__fluid_synth_sequencer.start_recording(recording_file)
self._play_music_container(music_container)
WAV_SAMPLE_FREQUENCY = 44100
samples = globalfs.raw_audio_string(
self.__fluid_synth_sequencer.fs.get_samples(int(record_seconds * WAV_SAMPLE_FREQUENCY))
)
self.__fluid_synth_sequencer.wav.writeframes(bytes(samples))
self.__fluid_synth_sequencer.wav.close()
# It seems we have to delete the wav attribute after recording in order to enable switching between
# audio output and recording for all music containers. The
# mingus.midi.fluidsynth.FluidSynthSequencer.play_Bar and
# mingus.midi.fluidsynth.FluidSynthSequencer.play_Track use the
# mingus.midi.fluidsynth.FluidSynthSequencer.sleep methods internally which is for some reason also | |
# Copyright 2020 The TensorFlow Ranking Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines ranking losses as TF ops.
The losses here are used to learn TF ranking models. They work with listwise
Tensors only.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow_ranking.python import losses_impl
from tensorflow_ranking.python import utils
class RankingLossKey(object):
"""Ranking loss key strings."""
# Names for the ranking based loss functions.
PAIRWISE_HINGE_LOSS = 'pairwise_hinge_loss'
PAIRWISE_LOGISTIC_LOSS = 'pairwise_logistic_loss'
PAIRWISE_SOFT_ZERO_ONE_LOSS = 'pairwise_soft_zero_one_loss'
SOFTMAX_LOSS = 'softmax_loss'
SIGMOID_CROSS_ENTROPY_LOSS = 'sigmoid_cross_entropy_loss'
MEAN_SQUARED_LOSS = 'mean_squared_loss'
LIST_MLE_LOSS = 'list_mle_loss'
APPROX_NDCG_LOSS = 'approx_ndcg_loss'
APPROX_MRR_LOSS = 'approx_mrr_loss'
GUMBEL_APPROX_NDCG_LOSS = 'gumbel_approx_ndcg_loss'
NEURAL_SORT_CROSS_ENTROPY_LOSS = 'neural_sort_cross_entropy_loss'
GUMBEL_NEURAL_SORT_CROSS_ENTROPY_LOSS = 'gumbel_neural_sort_cross_entropy_loss'
def make_loss_fn(loss_keys,
loss_weights=None,
weights_feature_name=None,
lambda_weight=None,
reduction=tf.compat.v1.losses.Reduction.SUM_BY_NONZERO_WEIGHTS,
name=None,
extra_args=None):
"""Makes a loss function using a single loss or multiple losses.
Args:
loss_keys: A string or list of strings representing loss keys defined in
`RankingLossKey`. Listed loss functions will be combined in a weighted
manner, with weights specified by `loss_weights`. If `loss_weights` is
None, default weight of 1 will be used.
loss_weights: List of weights, same length as `loss_keys`. Used when merging
losses to calculate the weighted sum of losses. If `None`, all losses are
weighted equally with weight being 1.
weights_feature_name: A string specifying the name of the weights feature in
`features` dict.
lambda_weight: A `_LambdaWeight` object created by factory methods like
`create_ndcg_lambda_weight()`.
reduction: One of `tf.losses.Reduction` except `NONE`. Describes how to
reduce training loss over batch.
name: A string used as the name for this loss.
extra_args: A string-keyed dictionary that contains any other loss-specific
arguments.
Returns:
A function _loss_fn(). See `_loss_fn()` for its signature.
Raises:
ValueError: If `reduction` is invalid.
ValueError: If `loss_keys` is None or empty.
ValueError: If `loss_keys` and `loss_weights` have different sizes.
"""
if (reduction not in tf.compat.v1.losses.Reduction.all() or
reduction == tf.compat.v1.losses.Reduction.NONE):
raise ValueError('Invalid reduction: {}'.format(reduction))
if not loss_keys:
raise ValueError('loss_keys cannot be None or empty.')
if not isinstance(loss_keys, list):
loss_keys = [loss_keys]
if loss_weights:
if len(loss_keys) != len(loss_weights):
raise ValueError('loss_keys and loss_weights must have the same size.')
def _loss_fn(labels, logits, features):
"""Computes a single loss or weighted combination of losses.
Args:
labels: A `Tensor` of the same shape as `logits` representing relevance.
logits: A `Tensor` with shape [batch_size, list_size]. Each value is the
ranking score of the corresponding item.
features: Dict of Tensors of shape [batch_size, list_size, ...] for
per-example features and shape [batch_size, ...] for non-example context
features.
Returns:
An op for a single loss or weighted combination of multiple losses.
Raises:
ValueError: If `loss_keys` is invalid.
"""
weights = None
if weights_feature_name:
weights = tf.convert_to_tensor(value=features[weights_feature_name])
# Convert weights to a 2-D Tensor.
weights = utils.reshape_to_2d(weights)
gbl_labels, gbl_logits, gbl_weights = losses_impl.gumbel_softmax_sample(
labels, logits, weights)
loss_kwargs = {
'labels': labels,
'logits': logits,
'weights': weights,
'reduction': reduction,
'name': name,
}
gbl_loss_kwargs = {
'labels': gbl_labels,
'logits': gbl_logits,
'weights': gbl_weights,
'reduction': reduction,
'name': name,
}
if extra_args is not None:
loss_kwargs.update(extra_args)
gbl_loss_kwargs.update(extra_args)
loss_kwargs_with_lambda_weight = loss_kwargs.copy()
loss_kwargs_with_lambda_weight['lambda_weight'] = lambda_weight
key_to_fn = {
RankingLossKey.PAIRWISE_HINGE_LOSS:
(_pairwise_hinge_loss, loss_kwargs_with_lambda_weight),
RankingLossKey.PAIRWISE_LOGISTIC_LOSS:
(_pairwise_logistic_loss, loss_kwargs_with_lambda_weight),
RankingLossKey.PAIRWISE_SOFT_ZERO_ONE_LOSS:
(_pairwise_soft_zero_one_loss, loss_kwargs_with_lambda_weight),
RankingLossKey.SOFTMAX_LOSS:
(_softmax_loss, loss_kwargs_with_lambda_weight),
RankingLossKey.SIGMOID_CROSS_ENTROPY_LOSS:
(_sigmoid_cross_entropy_loss, loss_kwargs),
RankingLossKey.MEAN_SQUARED_LOSS: (_mean_squared_loss, loss_kwargs),
RankingLossKey.LIST_MLE_LOSS:
(_list_mle_loss, loss_kwargs_with_lambda_weight),
RankingLossKey.APPROX_NDCG_LOSS: (_approx_ndcg_loss, loss_kwargs),
RankingLossKey.APPROX_MRR_LOSS: (_approx_mrr_loss, loss_kwargs),
RankingLossKey.GUMBEL_APPROX_NDCG_LOSS:
(_approx_ndcg_loss, gbl_loss_kwargs),
RankingLossKey.NEURAL_SORT_CROSS_ENTROPY_LOSS:
(_neural_sort_cross_entropy_loss, loss_kwargs),
RankingLossKey.GUMBEL_NEURAL_SORT_CROSS_ENTROPY_LOSS:
(_neural_sort_cross_entropy_loss, gbl_loss_kwargs),
}
# Obtain the list of loss ops.
loss_ops = []
for loss_key in loss_keys:
if loss_key not in key_to_fn:
raise ValueError('Invalid loss_key: {}.'.format(loss_key))
loss_fn, kwargs = key_to_fn[loss_key]
loss_ops.append(loss_fn(**kwargs))
# Compute weighted combination of losses.
if loss_weights:
weighted_losses = []
for loss_op, loss_weight in zip(loss_ops, loss_weights):
weighted_losses.append(tf.multiply(loss_op, loss_weight))
else:
weighted_losses = loss_ops
return tf.add_n(weighted_losses)
return _loss_fn
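# A minimal usage sketch of `make_loss_fn` (the tensor values below are
# hypothetical and only illustrate the documented [batch_size, list_size]
# shapes; this helper is not part of the public API).
def _example_make_loss_fn():
  """Combines two ranking losses with weights 1.0 and 0.5 on toy data."""
  loss_fn = make_loss_fn(
      [RankingLossKey.SOFTMAX_LOSS, RankingLossKey.PAIRWISE_LOGISTIC_LOSS],
      loss_weights=[1.0, 0.5])
  labels = tf.constant([[1.0, 0.0, 2.0]])
  logits = tf.constant([[0.3, -0.1, 1.2]])
  # `features` can be empty here because no `weights_feature_name` was given.
  return loss_fn(labels, logits, features={})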
def make_loss_metric_fn(loss_key,
weights_feature_name=None,
lambda_weight=None,
name=None):
"""Factory method to create a metric based on a loss.
Args:
loss_key: A key in `RankingLossKey`.
weights_feature_name: A `string` specifying the name of the weights feature
in `features` dict.
lambda_weight: A `_LambdaWeight` object.
name: A `string` used as the name for this metric.
Returns:
A metric fn with the following Args:
* `labels`: A `Tensor` of the same shape as `predictions` representing
graded relevance.
* `predictions`: A `Tensor` with shape [batch_size, list_size]. Each value
is the ranking score of the corresponding example.
* `features`: A dict of `Tensor`s that contains all features.
"""
metric_dict = {
RankingLossKey.PAIRWISE_HINGE_LOSS:
losses_impl.PairwiseHingeLoss(name, lambda_weight=lambda_weight),
RankingLossKey.PAIRWISE_LOGISTIC_LOSS:
losses_impl.PairwiseLogisticLoss(name, lambda_weight=lambda_weight),
RankingLossKey.PAIRWISE_SOFT_ZERO_ONE_LOSS:
losses_impl.PairwiseSoftZeroOneLoss(
name, lambda_weight=lambda_weight),
RankingLossKey.SOFTMAX_LOSS:
losses_impl.SoftmaxLoss(name, lambda_weight=lambda_weight),
RankingLossKey.SIGMOID_CROSS_ENTROPY_LOSS:
losses_impl.SigmoidCrossEntropyLoss(name),
RankingLossKey.MEAN_SQUARED_LOSS:
losses_impl.MeanSquaredLoss(name),
RankingLossKey.LIST_MLE_LOSS:
losses_impl.ListMLELoss(name, lambda_weight=lambda_weight),
RankingLossKey.APPROX_NDCG_LOSS:
losses_impl.ApproxNDCGLoss(name),
RankingLossKey.APPROX_MRR_LOSS:
losses_impl.ApproxMRRLoss(name),
RankingLossKey.GUMBEL_APPROX_NDCG_LOSS:
losses_impl.ApproxNDCGLoss(name),
RankingLossKey.NEURAL_SORT_CROSS_ENTROPY_LOSS:
losses_impl.NeuralSortCrossEntropyLoss(name),
RankingLossKey.GUMBEL_NEURAL_SORT_CROSS_ENTROPY_LOSS:
losses_impl.NeuralSortCrossEntropyLoss(name),
}
def _get_weights(features):
"""Get weights tensor from features and reshape it to 2-D if necessary."""
weights = None
if weights_feature_name:
weights = tf.convert_to_tensor(value=features[weights_feature_name])
# Convert weights to a 2-D Tensor.
weights = utils.reshape_to_2d(weights)
return weights
def metric_fn(labels, predictions, features):
"""Defines the metric fn."""
weights = _get_weights(features)
loss = metric_dict.get(loss_key, None)
if loss is None:
raise ValueError('loss_key {} not supported.'.format(loss_key))
return loss.eval_metric(labels, predictions, weights)
return metric_fn
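# A minimal sketch of how the factory above is typically used (wiring it into
# an estimator's eval metrics is an assumption, not shown in this file):
#
#   softmax_loss_metric = make_loss_metric_fn(
#       RankingLossKey.SOFTMAX_LOSS, name='metric/softmax_loss')
#   # softmax_loss_metric(labels, predictions, features) then returns an eval
#   # metric for the softmax loss on the given batch.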
def create_ndcg_lambda_weight(topn=None, smooth_fraction=0.):
"""Creates _LambdaWeight for NDCG metric."""
return losses_impl.DCGLambdaWeight(
topn,
gain_fn=lambda labels: tf.pow(2.0, labels) - 1.,
rank_discount_fn=lambda rank: 1. / tf.math.log1p(rank),
normalized=True,
smooth_fraction=smooth_fraction)
def create_reciprocal_rank_lambda_weight(topn=None, smooth_fraction=0.):
"""Creates _LambdaWeight for MRR-like metric."""
return losses_impl.DCGLambdaWeight(
topn,
gain_fn=lambda labels: labels,
rank_discount_fn=lambda rank: 1. / rank,
normalized=True,
smooth_fraction=smooth_fraction)
def create_p_list_mle_lambda_weight(list_size):
"""Creates _LambdaWeight based on Position-Aware ListMLE paper.
Produces a weight based on the formulation presented in the
"Position-Aware ListMLE" paper (Lan et al.) and available using
create_p_list_mle_lambda_weight() factory function above.
Args:
list_size: Size of the input list.
Returns:
A _LambdaWeight for Position-Aware ListMLE.
"""
return losses_impl.ListMLELambdaWeight(
rank_discount_fn=lambda rank: tf.pow(2., list_size - rank) - 1.)
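# For example (a sketch), one of the factory functions above can be plugged
# into `make_loss_fn` via its `lambda_weight` argument:
#
#   loss_fn = make_loss_fn(
#       RankingLossKey.PAIRWISE_LOGISTIC_LOSS,
#       lambda_weight=create_ndcg_lambda_weight(topn=10))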
def _pairwise_hinge_loss(
labels,
logits,
weights=None,
lambda_weight=None,
reduction=tf.compat.v1.losses.Reduction.SUM_BY_NONZERO_WEIGHTS,
name=None):
"""Computes the pairwise hinge loss for a list.
The hinge loss is defined as Hinge(l_i > l_j) = max(0, 1 - (s_i - s_j)). So a
correctly ordered pair has 0 loss if (s_i - s_j >= 1). Otherwise the loss
increases linearly with 1 - (s_i - s_j). When the list_size is 2, this
reduces to the standard hinge loss.
Args:
labels: A `Tensor` of the same shape as `logits` representing graded
relevance.
logits: A `Tensor` with shape [batch_size, list_size]. Each value is the
ranking score of the corresponding item.
weights: A scalar, a `Tensor` with shape [batch_size, 1] for list-wise
weights, or a `Tensor` with shape [batch_size, list_size] for item-wise
weights.
lambda_weight: A `_LambdaWeight` object.
reduction: One of `tf.losses.Reduction` except `NONE`. Describes how to
reduce training loss over batch.
name: A string used as the name for this loss.
Returns:
An op for the pairwise hinge loss.
"""
loss = losses_impl.PairwiseHingeLoss(name, lambda_weight)
with tf.compat.v1.name_scope(loss.name, 'pairwise_hinge_loss',
(labels, logits, weights)):
return loss.compute(labels, logits, weights, reduction)
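# Worked example of the formula above (illustrative numbers): for a pair with
# l_i > l_j and scores s_i = 0.9, s_j = 0.5, the margin is s_i - s_j = 0.4 < 1,
# so the pair contributes max(0, 1 - 0.4) = 0.6 to the loss; once
# s_i - s_j >= 1 the contribution drops to 0.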
def _pairwise_logistic_loss(
labels,
logits,
weights=None,
lambda_weight=None,
reduction=tf.compat.v1.losses.Reduction.SUM_BY_NONZERO_WEIGHTS,
name=None):
"""Computes the pairwise logistic loss for a list.
The preference probability of each pair is computed as the sigmoid function:
P(l_i > l_j) = 1 / (1 + exp(s_j - s_i)), and the logistic loss is
-log(P(l_i > l_j)) = log(1 + exp(s_j - s_i)) if l_i > l_j and 0 otherwise.
Args:
labels: A `Tensor` of the same shape as `logits` representing graded
relevance.
logits: A `Tensor` with shape [batch_size, list_size]. Each value is the
ranking score of the corresponding item.
weights: A scalar, a `Tensor` with shape [batch_size, 1] for list-wise
weights, or a `Tensor` with shape [batch_size, list_size] for item-wise
weights.
lambda_weight: A `_LambdaWeight` object.
reduction: One of `tf.losses.Reduction` except `NONE`. Describes how to
reduce training loss over batch.
name: A string used as the name for this loss.
Returns:
An op for the pairwise logistic loss.
"""
loss = losses_impl.PairwiseLogisticLoss(name, lambda_weight)
with tf.compat.v1.name_scope(loss.name, 'pairwise_logistic_loss',
(labels, logits, weights)):
return loss.compute(labels, logits, weights, reduction)
def _pairwise_soft_zero_one_loss(
labels,
logits,
weights=None,
lambda_weight=None,
reduction=tf.compat.v1.losses.Reduction.SUM_BY_NONZERO_WEIGHTS,
name=None):
"""Computes the pairwise soft zero-one loss.
Note this is different from sigmoid cross entropy in that soft | |
# SPDX-License-Identifier: BSD-3-Clause
# Copyright Contributors to the OpenColorIO Project.
class Config:
"""
Config
"""
def __init__(self):
pass
def CreateFromEnv(self):
"""
CreateFromEnv()
Create a :py:class:`PyOpenColorIO.Config` object using the environment variable.
:returns: Config object
"""
pass
def CreateFromFile(self, filename):
"""
CreateFromFile(filename)
Create a :py:class:`PyOpenColorIO.Config` object using the information in a file.
:param filename: name of file
:type filename: string
:return: Config object
"""
pass
def CreateFromStream(self, istream):
pass
def isEditable(self):
"""
isEditable()
Returns whether Config is editable.
The configurations returned from
:py:func:`PyOpenColorIO.GetCurrentConfig` are not editable, and if
you want to edit them you can use
:py:meth:`PyOpenColorIO.Config.createEditableCopy`.
If you attempt to call any of the set functions on a non-editable
Config, an exception will be thrown.
:return: state of :py:class:`PyOpenColorIO.Config`'s editability
:rtype: bool
"""
pass
def createEditableCopy(self):
"""
createEditableCopy()
Returns an editable copy of :py:class:`PyOpenColorIO.Config`.
:return: editable copy of :py:class:`PyOpenColorIO.Config`
:rtype: Config object
"""
pass
def sanityCheck(self):
"""
sanityCheck()
This will throw an exception if :py:class:`PyOpenColorIO.Config` is
malformed. The most common error occurs when references are made to
colorspaces that do not exist.
"""
pass
def getDescription(self):
"""
getDescription()
Returns the stored description of :py:class:`PyOpenColorIO.Config`.
:return: stored description of :py:class:`PyOpenColorIO.Config`
:rtype: string
"""
pass
def setDescription(self, desc):
"""
setDescription(desc)
Sets the description of :py:class:`PyOpenColorIO.Config`.
:param desc: description of :py:class:`PyOpenColorIO.Config`
:type desc: string
"""
pass
def serialize(self):
"""
serialize()
Returns the string representation of :py:class:`PyOpenColorIO.Config`
in YAML text form. This is typically stored on disk in a file with the
.ocio extension.
:return: :py:class:`PyOpenColorIO.Config` in YAML text form
:rtype: string
"""
pass
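#     A typical round-trip with this API might look like the following sketch
#     (assumes a valid "config.ocio" file on disk):
#
#         import PyOpenColorIO as OCIO
#
#         config = OCIO.Config.CreateFromFile("config.ocio")
#         config.sanityCheck()                  # raises if the config is malformed
#         editable = config.createEditableCopy()
#         editable.setDescription("tweaked copy")
#         with open("tweaked.ocio", "w") as f:
#             f.write(editable.serialize())     # YAML text form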
def getCacheID(self, pycontext=None):
"""
getCacheID([, pycontext])
This will produce a hash of all the colorspace definitions, etc.
All external references, such as files used in FileTransforms, etc.,
will be incorporated into the cacheID. While the contents of the files
are not read, the file system is queried for relevant information
(mtime, inode) so that the :py:class:`PyOpenColorIO.Config`'s cacheID
will change when the underlying LUTs are updated.
If a context is not provided, the current Context will be used. If a
null context is provided, file references will not be taken into
account (this is essentially a hash of :py:meth:`PyOpenColorIO.Config.serialize`).
:param pycontext: optional
:type pycontext: object
:return: hash of :py:class:`PyOpenColorIO.Config`
:rtype: string
"""
pass
def getCurrentContext(self):
"""
getCurrentContext()
Return the current context, which is essentially a record of all
the environment variables that are available for use in file path
lookups.
:return: context
:rtype: pycontext
"""
pass
def addEnvironmentVar(self, name, defaultValue):
"""
"""
pass
def getNumEnvironmentVars(self):
"""
"""
pass
def getEnvironmentVarNameByIndex(self, index):
"""
"""
pass
def getEnvironmentVarDefault(self, name):
"""
"""
pass
def getEnvironmentVarDefaults(self):
"""
"""
pass
def clearEnvironmentVars(self):
"""
"""
pass
def getSearchPath(self):
"""
getSearchPath()
Returns the search path as a concatenated string (see
getSearchPathByIndex for a more platform agnostic function).
:return: search path
:rtype: string
"""
pass
def setSearchPath(self, searchpath):
"""
setSearchPath(path)
Sets the search path as a concatenated string (see addSearchPath
for a more platform agnostic function).
:param path: the search path
:type path: string
"""
pass
def getNumSearchPaths(self):
"""
getNumSearchPaths()
Return the number of search paths.
:return: the number of search paths
:rtype: int
"""
pass
def getSearchPathByIndex(self, index):
"""
getSearchPathByIndex(index)
Return one of the search paths.
:param index: the index of the search path
:type index: int
:return: the search path at index
:rtype: string
"""
pass
def clearSearchPaths(self):
"""
clearSearchPaths()
Clear the search path list.
"""
pass
def addSearchPath(self, path):
"""
addSearchPath(path)
Add a search path to the end of the list.
:param path: the search path to add
:type path: string
"""
pass
def getWorkingDir(self):
"""
getWorkingDir()
Returns the working directory.
:return: the working directory
:rtype: string
"""
pass
def setWorkingDir(self, dirname):
"""
setWorkingDir(path)
Sets the working directory.
:param path: the working directory
:type path: string
"""
pass
def getNumColorSpaces(self):
pass
def getColorSpaceNameByIndex(self, index):
pass
def getColorSpaces(self):
"""
getColorSpaces()
Returns all the ColorSpaces defined in :py:class:`PyOpenColorIO.Config`.
:return: ColorSpaces in :py:class:`PyOpenColorIO.Config`
:rtype: tuple
"""
pass
def getColorSpace(self, name):
"""
getColorSpace(name)
Returns the data for the specified color space in :py:class:`PyOpenColorIO.Config`.
This will return null if the specified name is not found.
:param name: name of color space
:type name: string
:return: data for specified color space
:rtype: pyColorSpace object
"""
pass
def getIndexForColorSpace(self, name):
pass
def addColorSpace(self, colorSpace):
"""
addColorSpace(pyColorSpace)
Add a specified color space to :py:class:`PyOpenColorIO.Config`.
:param pyColorSpace: color space
:type pyColorSpace: object
.. note::
If another color space is already registered with the same name,
this will overwrite it.
"""
pass
def clearColorSpaces(self):
"""
clearColorSpaces()
Clear the color spaces in :py:class:`PyOpenColorIO.Config`.
"""
pass
def parseColorSpaceFromString(self, str):
"""
parseColorSpaceFromString(str)
Parses out the color space from a string.
Given the specified string, gets the longest, right-most color space substring.
* If strict parsing is enabled, and no color space is found, return an empty string.
* If strict parsing is disabled, return the default role, if defined.
* If the default role is not defined, return an empty string.
:param str: ColorSpace data
:type str: string
:return: parsed data
:rtype: string
"""
pass
def isStrictParsingEnabled(self):
pass
def setStrictParsingEnabled(self, enable):
pass
def setRole(self, role, csname):
"""
setRole(role, csname)
Set a role's ColorSpace.
Setting the colorSpaceName to a null string unsets it.
:param role: role whose ColorSpace will be set
:type role: string
:param csname: name of ColorSpace
:type csname: string
"""
pass
def getNumRoles(self):
pass
def hasRole(self, role):
pass
def getRoleName(self, index):
pass
def getDefaultDisplay(self):
"""
getDefaultDisplay()
Returns the default display set in :py:class:`PyOpenColorIO.Config`.
:return: default display
:rtype: string
"""
pass
def getNumDisplays(self):
pass
def getDisplay(self, index):
pass
def getDisplays(self):
"""
getDisplays()
Returns all the displays defined in :py:class:`PyOpenColorIO.Config`.
:return: displays in :py:class:`PyOpenColorIO.Config`
:rtype: list of strings
"""
pass
def getDefaultView(self, display):
"""
getDefaultView(display)
Returns the default view of :py:class:`PyOpenColorIO.Config`.
:param display: the display to look up the default view for
:type display: string
:return: view
:rtype: string
"""
pass
def getNumViews(self, display):
pass
def getView(self, display, index):
pass
def getViews(self, display):
"""
getViews(display)
Returns all the views defined in :py:class:`PyOpenColorIO.Config`.
:param display: the display whose views should be returned
:type display: string
:return: views in :py:class:`PyOpenColorIO.Config`.
:rtype: list of strings
"""
pass
def getDisplayColorSpaceName(self, display, view):
"""
getDisplayColorSpaceName(display, view)
Returns the ColorSpace name corresponding to the display and view
combination in :py:class:`PyOpenColorIO.Config`.
:param display: display
:type display: string
:param view: view
:type view: string
:return: display color space name
:rtype: string
"""
pass
def getDisplayLooks(self, display, view):
"""
getDisplayLooks(display, view)
Returns the looks corresponding to the display and view combination in
:py:class:`PyOpenColorIO.Config`.
:param display: display
:type display: string
:param view: view
:type view: string
:return: looks
:rtype: string
"""
pass
def addDisplay(self, display, view, csname, looks=None):
"""
addDisplay(display, view, colorSpaceName[, looks])
NEEDS WORK
:param display:
:type display: string
:param view:
:type view: string
:param colorSpaceName:
:type colorSpaceName: string
:param looks: optional
:type looks: string
"""
pass
def clearDisplays(self):
"""
clearDisplays()
"""
pass
def setActiveDisplays(self, displays):
"""
setActiveDisplays(displays)
Sets the active displays in :py:class:`PyOpenColorIO.Config`.
:param displays: active displays
:type displays: string
"""
pass
def getActiveDisplays(self):
"""
getActiveDisplays()
Returns the active displays in :py:class:`PyOpenColorIO.Config`.
:return: active displays
:rtype: string
"""
pass
def setActiveViews(self, views):
"""
setActiveViews(views)
Sets the active views in :py:class:`PyOpenColorIO.Config`.
:param views: active views
:type views: string
"""
pass
def getActiveViews(self):
"""
getActiveViews()
Returns the active views in :py:class:`PyOpenColorIO.Config`.
:return: active views
:rtype: string
"""
pass
def getDefaultLumaCoefs(self):
"""
getDefaultLumaCoefs()
Returns the default luma coefficients in | |
= enemy_formation
enemy = enemies_not_dead_already[0] if len(enemies_not_dead_already) > 0 else None
blinding_stun_chance, disorienting_stun_chance, plague_poison_dmg = None, None, None
if 'blinding_gas' in hero.skills:
blinding_stun_level = hero.skills['blinding_gas']
blinding_stun_chance = AttackSkills['blinding_gas'][2][blinding_stun_level]
if 'disorienting_blast' in hero.skills:
disorienting_stun_level = hero.skills['disorienting_blast']
disorienting_stun_chance = AttackSkills['disorienting_blast'][2][disorienting_stun_level]
if 'plague_grenade' in hero.skills:
plague_skill_level = hero.skills['plague_grenade']
plague_poison_dmg = AttackSkills['plague_grenade'][2][plague_skill_level]
# use battlefield medicine to heal target in danger of dying or with large blight/bleed
if 'battlefield_medicine' in hero.skills and (hero.rank == 3 or hero.rank == 4):
target = next((ally for ally in party if ally.currentHp == 0 and ally.effectiveHp < 0), None)
if target is None:
target = next((ally for ally in party if ally.percentHp <= 30 and ally.damageOnNextTurn > 0), None)
if target is None:
target = next((ally for ally in party if ally.damageOverTime >= 20), None)
if target is not None:
attack = 'battlefield_medicine'
# stall if only one weak enemy left and need to heal or stress heal
if (len(enemies_not_dead_already) == 0
or (len(enemies_not_dead_already) == 1
and (stall or (enemy.canBeKilledIn1Hit
and any(ally is not hero and not ally.already_moved and not ally.stunned
and (((ally.rank == 1 or ally.rank == 2) and (ally.heroClass in FrontLineClasses))
or (ally.heroClass not in BackLineClasses and ally.rank != 4))
for ally in party))) and current_round < 8)) \
and ((any(ally.percentHp < 80 for ally in party) and any(ally.healer for ally in party))
or (any(ally.stress > 0 for ally in party) and any(ally.stressHealer for ally in party))):
if enemy is not None and not enemy.stunned and 'disorienting_blast' in hero.skills \
and hero.rank in AttackSkills['disorienting_blast'][0] \
and disorienting_stun_chance - enemy.stunResist >= 50 \
and any(rank in AttackSkills['disorienting_blast'][1] for rank in enemy.rank):
attack = 'disorienting_blast'
target = enemy
elif enemy is not None and not enemy.stunned and 'blinding_gas' in hero.skills \
and hero.rank in AttackSkills['blinding_gas'][0] \
and blinding_stun_chance - enemy.stunResist >= 50 \
and any(rank in AttackSkills['blinding_gas'][1] for rank in enemy.rank) and hero.blinding_gas_count < 3:
attack = 'blinding_gas'
target = enemy
elif enemy is None or enemy.threat < 4 or (enemy.stunned and enemy.canBeKilledIn1Hit):
attack, target = plague_doctor_stall_helper(hero, party)
if target is None and hero.rank == 1:
if 'incision' in hero.skills:
if party[1].heroClass not in BackLineClasses:
attack = 'swap'
elif party[1].heroClass in BackLineClasses:
attack = 'incision'
elif target is None and hero.rank == 2:
enemy_in_rank2 = len(enemy_formation) > 1 or (len(enemy_formation) == 1 and 2 in enemy_formation[0].rank)
if 'disorienting_blast' in hero.skills:
stun_targets = [enemy for enemy in enemies_not_dead_already
if (2 in enemy.rank or 3 in enemy.rank or 4 in enemy.rank)
and (disorienting_stun_chance - enemy.stunResist >= 50 and not enemy.stunned)]
if len(stun_targets) > 0:
attack = 'disorienting_blast'
potential_targets = stun_targets
stun_chance = disorienting_stun_chance
elif party[2].heroClass == 'highwayman' or party[2].heroClass == 'shield_breaker' \
or (party[2].heroClass == 'crusader' and enemy_in_rank2):
attack = 'noxious_blast'
else:
attack = 'swap' if party[2].heroClass not in BackLineClasses else None
elif target is None and (hero.rank == 3 or hero.rank == 4):
rank3_enemy = next((enemy for enemy in enemy_formation if 3 in enemy.rank), None)
rank4_enemy = next((enemy for enemy in enemy_formation if 4 in enemy.rank and 3 not in enemy.rank), None)
# two enemies on ranks 3 & 4 and at least 1 very high threat or skeleton archer
if rank3_enemy is not None and rank4_enemy is not None:
back_rank_targets = [rank3_enemy, rank4_enemy]
priority_target = next((enemy for enemy in back_rank_targets
if enemy.threat >= 7
and not enemy.alreadyGoingToDie and not enemy.alreadyMoved), None)
if priority_target is None:
priority_target = next((enemy for enemy in back_rank_targets
if enemy.threat >= 7 and not enemy.alreadyGoingToDie), None)
if priority_target is None:
priority_target = next((enemy for enemy in back_rank_targets
if enemy.name == 'Bone Arbalist'
and not enemy.alreadyGoingToDie and not enemy.alreadyMoved), None)
if priority_target is None:
priority_target = next((enemy for enemy in back_rank_targets
if enemy.name == 'Bone Arbalist' and not enemy.alreadyGoingToDie), None)
if priority_target is not None and not rank3_enemy.alreadyGoingToDie and not rank4_enemy.alreadyGoingToDie:
# can stun the priority target with blinding gas and possibly another
if 'blinding_gas' in hero.skills and not rank3_enemy.stunned and not rank4_enemy.stunned \
and hero.blinding_gas_count < 3 and (blinding_stun_chance - priority_target.stunResist >= 35) \
and any(blinding_stun_chance - enemy.stunResist >= 50 for enemy in back_rank_targets):
attack = 'blinding_gas'
stun_chance = blinding_stun_chance
target = priority_target
# can kill or stun a very high threat enemy that hasn't moved (>= 7)
if attack is None and len(very_high_threat_not_moved) > 0:
attack, stun_chance, potential_targets = plague_doctor_attack_helper(hero, very_high_threat_not_moved)
# can kill or stun a very high threat enemy (>= 7)
if attack is None and len(very_high_threat_enemies) > 0:
attack, stun_chance, potential_targets = plague_doctor_attack_helper(hero, very_high_threat_enemies)
# can pull or stun a skeleton archer on rank 3 or 4
if attack is None:
skeleton_archer = next((enemy for enemy in enemies_not_dead_already if enemy.name == 'Bone Arbalist'
and (3 in enemy.rank or 4 in enemy.rank) and not enemy.alreadyMoved), None)
if skeleton_archer is None:
skeleton_archer = next((enemy for enemy in enemies_not_dead_already if enemy.name == 'Bone Arbalist'
and (3 in enemy.rank or 4 in enemy.rank)), None)
if skeleton_archer is not None:
if 'disorienting_blast' in hero.skills:
attack = 'disorienting_blast'
stun_chance = disorienting_stun_chance
target = skeleton_archer
elif 'blinding_gas' in hero.skills and blinding_stun_chance - skeleton_archer.stunResist > 50 \
and not skeleton_archer.stunned and hero.blinding_gas_count < 3:
attack = 'blinding_gas'
stun_chance = blinding_stun_chance
# two enemies on ranks 3 & 4
if attack is None and rank3_enemy is not None and rank4_enemy is not None \
and not rank3_enemy.alreadyGoingToDie and not rank4_enemy.alreadyGoingToDie:
# can stun two enemies with blinding gas
if 'blinding_gas' in hero.skills and not rank3_enemy.stunned and not rank4_enemy.stunned \
and hero.blinding_gas_count < 3 and (blinding_stun_chance - rank3_enemy.stunResist >= 50
or blinding_stun_chance - rank4_enemy.stunResist >= 50):
attack = 'blinding_gas'
stun_chance = blinding_stun_chance
potential_targets = [rank3_enemy, rank4_enemy]
# can kill one of them
elif 'plague_grenade' in hero.skills and (rank3_enemy.effectiveHp <= plague_poison_dmg + 1
or rank4_enemy.effectiveHp <= plague_poison_dmg + 1):
attack = 'plague_grenade'
# can kill or stun a high threat enemy that hasn't moved (>= 4)
if attack is None and len(high_threat_not_moved) > 0:
attack, stun_chance, potential_targets = plague_doctor_attack_helper(hero, high_threat_not_moved)
# can kill or stun a high threat enemy (>= 4)
if attack is None and len(high_threat_enemies) > 0:
attack, stun_chance, potential_targets = plague_doctor_attack_helper(hero, high_threat_enemies)
# can kill or stun enemy
if attack is None and len(enemies_not_dead_already) > 0:
attack, stun_chance, potential_targets = plague_doctor_attack_helper(hero, enemies_not_dead_already)
# swap if beneficial
if attack is None and hero.rank == 3 and party[3].heroClass != 'vestal':
attack = 'swap'
# cure bleed/blight
if attack is None and 'battlefield_medicine' in hero.skills and (hero.rank == 3 or hero.rank == 4) \
and len(high_threat_enemies) == 0 and any(ally.percentHp < 90 for ally in party):
heal_targets = list(hero for hero in party if hero.bleedAmount > 0 or hero.blightAmount > 0)
if len(heal_targets) > 0:
heal_targets.sort(key=lambda k: k.damageOverTime, reverse=True)
target = heal_targets[0]
attack = 'battlefield_medicine'
if attack == 'swap':
target = -1 if target is None else target
swap_hero(hero, swap_distance=target, party_order=UpdatedPartyOrder, debug=Debug)
elif attack == 'blinding_gas' or attack == 'disorienting_blast':
find_target_and_stun(hero, potential_targets, attack, stun_chance, UpdatedPartyOrder, target)
elif attack == 'battlefield_medicine' or attack == 'emboldening_vapours':
heal_target(hero, target, attack, Debug)
else:
# Find target and attack enemy
if attack is not None:
list_of_attacks.insert(0, attack)
find_target_and_attack(raid_info, potential_targets, hero, party, list_of_attacks, UpdatedPartyOrder)
# heal, buff, or swap if the hero can't hit enemies with any attacks
if not attack_completed and attack != 'swap' and attack != 'blinding_gas' and attack != 'disorienting_blast' \
and attack != 'battlefield_medicine' and attack != 'emboldening_vapours':
attack, target = plague_doctor_stall_helper(hero, party_sorted_by_rank)
if attack == 'swap':
swap_hero(hero, swap_distance=target, party_order=UpdatedPartyOrder, debug=Debug)
elif attack == 'blinding_gas' or attack == 'disorienting_blast':
find_target_and_stun(hero, potential_targets, attack, stun_chance, UpdatedPartyOrder, target)
elif attack == 'battlefield_medicine' or attack == 'emboldening_vapours':
heal_target(hero, target, attack, Debug)
def plague_doctor_stall_helper(hero, party_sorted_by_rank):
if 'battlefield_medicine' in hero.skills and (hero.rank == 3 or hero.rank == 4):
attack = 'battlefield_medicine'
party_sorted_by_rank.sort(key=lambda k: k.damageOverTime, reverse=True)
if party_sorted_by_rank[0].damageOverTime <= 2:
party_sorted_by_rank.sort(key=lambda k: k.percentHp)
return attack, party_sorted_by_rank[0]
elif 'emboldening_vapours' in hero.skills and hero.emboldening_vapours_count < 2:
attack = 'emboldening_vapours'
target = next(hero for hero in party_sorted_by_rank if hero.heroClass == 'vestal')
| |
B1, B2]
idxs = [ (-3, 1, 2),
(-4, 2, 3),
(-1, 1, 4),
(-2, 4, 3) ]
rholoc = scon(to_contract, idxs).reshape((d**2, d**2))
return rholoc
# def DMRG_superblock(left, right, hL, h, hR):
# theta = twositecontract(left, right, h)
# theta = thetaleft(theta, hL)
# theta = thetaright(theta, hR)
# return theta
"""
2--left-right--4
|__|
|U |
----
| |
1 3
"""
def twositecontract(left, right, U=None):
if U is None:
return np.dot(left, right)
else:
to_contract = (left, right, U)
idxs = [ (2, -2, 1),
(3, 1, -4),
(-1, -3, 2, 3)]
return scon(to_contract, idxs)
"""
2--th--4
|__|
|U |
----
| |
1 3
"""
def twositecontracttheta(theta, U):
to_contract = (theta, U)
idxs = [ (1, -2, 3, -4),
(-1, 1, -3, 3) ]
return scon(to_contract, idxs)
"""
--theta--4
|__| |
|hL| 3
----
| |
2 1
"""
def thetaleft(theta, hL):
to_contract = (theta, hL)
idxs = [ (2, 1, -3, -4),
(-2, -1, 1, 2) ]
try:
ans = scon(to_contract, idxs)
except ValueError:
errstr = "Theta shape: " + str(theta.shape) + "\n"
errstr += "hL shape: " + str(hL.shape) + "\n"
raise ValueError(errstr)
return ans
"""
2--theta
| |__|
1 |hR|
----
| |
3 4
"""
def thetaright(theta, hR):
to_contract = (theta, hR)
idxs = [ (-1, -2, 1, 2),
(-3, -4, 1, 2) ]
return scon(to_contract, idxs)
"""
A--3 4
|_| |
|H| |
--- |
| | |
A*--1 2
"""
def DMRG_hL1(A, Adag, hL):
d = A.shape[0]
I = np.eye(d, dtype=A.dtype)
to_contract = (A, Adag, hL, I)
A_idxs = (4, 3, -3)
Adag_idxs = (2, 1, -1)
h_idxs = (1, 2, 3, 4)
I_idxs = (-2, -4)
answer = scon(to_contract, (A_idxs, Adag_idxs, h_idxs, I_idxs))
return answer
"""
--A--3 4
| |____|
| |_H__|
| | |
--A*-1 2
"""
def DMRG_hL2(A, Adag, h):
to_contract = (A, Adag, h)
A_idxs = (2, 1, -3)
Adag_idxs = (3, 1, -1)
h_idxs = (3, -2, 2, -4)
answer = scon(to_contract, (A_idxs, Adag_idxs, h_idxs))
return answer
def DMRG_hL(A, hL, h, Adag=None):
if Adag is None:
Adag = np.conj(A)
term1 = DMRG_hL1(A, Adag, hL)
term2 = DMRG_hL2(A, Adag, h)
answer = term1 + term2
return answer
"""
3 4-B-----
| |____|
| |_H__|
| | |
1 2-B*---|
"""
def DMRG_hR1(B, Bdag, hR):
d = B.shape[0]
I = np.eye(d, dtype=B.dtype)
to_contract = (B, Bdag, hR, I)
B_idxs = (4, -4, 2)
Bdag_idxs = (3, -2, 1)
h_idxs = (3, 1, 4, 2)
I_idxs = (-1, -3)
answer = scon(to_contract, (B_idxs, Bdag_idxs, h_idxs, I_idxs))
return answer
"""
3 4-B-
|____| |
|_H__| |
| | |
1 2-B*
"""
def DMRG_hR2(B, Bdag, h):
to_contract = (B, Bdag, h)
B_idxs = (3, -4, 1)
Bdag_idxs = (2, -2, 1)
h_idxs = (-1, 2, -3, 3)
answer = scon(to_contract, (B_idxs, Bdag_idxs, h_idxs))
return answer
def DMRG_hR(B, hR, h, Bdag=None):
if Bdag is None:
Bdag = np.conj(B)
term1 = DMRG_hR1(B, Bdag, hR)
term2 = DMRG_hR2(B, Bdag, h)
answer = term1 + term2
return answer
# LR = np.dot(left, right)
# if U is None:
# return LR
# d, chiL, d, chiR = LR.shape
# U.shape = (d,d,d**2)
# #U = U.reshape((d,d,d**2))
# LR = LR.transpose((1,0,2,3))
# #LR.shape = (chiL, d**2, chiR)
# LR = LR.reshape((chiL, d**2, chiR))
# ans = np.dot(U, LR).reshape((d, d, chiL, chiR))
# ans = ans.transpose((0,2,1,3))
# return ans
# if U is None:
# else:
# ans = np.einsum('jbi, kid, jkac', left, right, U)
#return ans
def H_times_psi(H, left, right, HL=None, HR=None):
"""
Serves as matvec for the DMRG eigensolver.
"""
d = H.shape[0]
Hmat = H.reshape((d*d, d*d))
Hnorm = npla.norm(Hmat)
Hshift = (Hmat - Hnorm*np.eye(d*d)).reshape((d,d,d,d))
if HL is not None:
left = leftmult(HL, left)
if HR is not None:
right = rightmult(right, HR)
answer = twositecontract(left, right, U=Hshift)
return answer
def DMRGeigs(left, right, H, HL=None, HR=None):
"""
Eigensolver for DMRG.
"""
Hshift = Hbig - Hnorm * np.eye(d1*chi1*d2*chi2)
Es, Vs = sp.sparse.linalg.eigs(Hshift, v0=thvec, k=1)
print(Es)
E0 = Es[0] + Hnorm
thnew = Vs[:, 0]
thnew = thnew / npla.norm(thnew)
thnew = thnew.reshape((d1, chi1, d2, chi2))
return (E0, thnew)
def svd(th, minlam=1E-13, maxchi=None, normalize=True, fixed=False):
"""Computes the singular value decomposition of the input matrix
'th', which should be the (chi*d x chi*d) result of a contraction
between two nodes (and, possibly, a two-site operator).
"""
d1, chi1, d2, chi2 = th.shape
newshp = (d1*chi1, d2*chi2)
#th.shape = newshp
# thview = th.view()
# thview.shape = newshp
th = th.reshape(newshp) #note - this does a copy and should be optimized
try:
U, s, V = npla.svd(th, full_matrices=False, compute_uv=True)
except npla.LinAlgError:
print("Divide-and-conquer SVD failed. Trying gesvd...")
U, s, V = spla.svd(th, full_matrices=False, overwrite_a=False,
check_finite=True, lapack_driver='gesvd')
#print(s)
if maxchi is None or maxchi > s.size:
maxchi = s.size
truncidx = maxchi
S = s[:truncidx]
if minlam is not None and S[-1] < minlam:
toosmallidx = maxchi - np.searchsorted(S[::-1], minlam)
if toosmallidx == 0:
print("Warning: very small singular values: ", s)
truncidx = 1
#raise ValueError("Could not truncate because there were no singular "
# "values greater than the minimum ", minlam, ". s was: ", s)
if fixed:
S[toosmallidx:] = minlam
else:
truncidx = toosmallidx
S = S[:truncidx]
X = U[:, :truncidx]
X = X.reshape((d1, chi1, truncidx))
Y = V[:truncidx, :]
Y = Y.reshape((truncidx, d2, chi2))
Y = Y.transpose((1,0,2))
truncerror = npla.norm(s[truncidx:])
#truncerror = np.sqrt(np.sum(s2[truncidx:]))
#print(S)
if normalize:
S /= npla.norm(S)
#print(S)
#print("**")
#print(S[:-1])
return X, Y, S, truncidx, truncerror
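# A minimal sketch of how the truncated SVD above is typically applied to a
# two-site theta tensor (random data; shapes are illustrative only):
def _example_svd_truncation():
    d, chiL, chiR = 2, 4, 4
    theta = np.random.rand(d, chiL, d, chiR)
    X, Y, S, chi, err = svd(theta, maxchi=3, normalize=True)
    # X has shape (d, chiL, chi), Y has shape (d, chi, chiR), S holds the
    # chi <= 3 retained singular values (normalized to unit norm), and err is
    # the norm of the discarded singular values.
    return X, Y, S, chi, err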
def truncateandreshape(U, s, V, truncidx, d, chi1, chi2):
"""
U, s, V are the output of an SVD. s will be truncated to 'truncidx',
and U and V will be similarly truncated so that UsV remains well-defined.
U and Y will then be reshaped into gamma tensors; this involves transposing
Y.
This function turns out to be surprisingly expensive for some reason.
"""
X = U[:, :truncidx]
X = X.reshape((d, chi1, truncidx))
#V = np.conj(V)
Y = V[:truncidx, :]
Y = Y.reshape((truncidx, d, chi2))
Y = Y.transpose((1,0,2))
#S = s[:truncidx]
truncerror = npla.norm(s[truncidx:])
# if truncerror > 1E-8:
# print("Warning - large truncation error: ", truncerror)
return X, s[:truncidx], Y, truncerror
def newsvd(th, err=1E-12, minlam=1E-13, maxchi=None, normalize=True):
"""Computes the singular value decomposition of the input matrix
'th', which should be the (chi*d x chi*d) result of a contraction
between two nodes (and, possibly, a two-site operator).
"""
d1, chi1, d2, chi2 = th.shape
newshp = (d1*chi1, d2*chi2)
th = th.reshape(newshp)
if maxchi is None:
maxchi = max(newshp)
# NOTE: `svd` here appears to refer to an external truncated-SVD object (e.g.
# an sklearn TruncatedSVD instance) exposing fit_transform, singular_values_
# and components_; as written the name is shadowed by the svd() function
# defined above, so it would need to be renamed or passed in for newsvd to run.
US = svd.fit_transform(th)
S = svd.singular_values_
V = svd.components_
# We still need to truncate any poorly conditioned values
toosmall = S < minlam
if toosmall[0]:
raise ValueError("No nonzero singular values!")
truncidx = np.argmax(toosmall)
if truncidx == 0:
truncidx = len(S)
#reshape the dimension which is shared with S
XS = US[:, :truncidx]#.reshape((d1, chi1, truncidx))
s = S[:truncidx]
X = XS*(1./s)#.reshape((d1,chi1,truncidx))
X = X.reshape((d1,chi1,truncidx))
Y = V[:truncidx, :].reshape((truncidx, d2, chi2))
Y = np.transpose(Y, (1,0,2))
error = 1.0-np.sum(svd.explained_variance_ratio_)
if error > err:
print("Warning - large error: ", error)
if normalize:
s /= npla.norm(s)
return X, Y, s, truncidx
def makechis(d, N, maxchi):
"""Create the vector of chis for the chain.
This is a length-N+1 list of exponents of d. The exponents are of
the form
[0, 1, 2, 1, 0] (if N+1 is odd)
[0, 1, 2, 2, 1, 0] (if N+1 is even)
Any numbers in the above exceeding ln(maxchi) / ln(d) are replaced
with maxchi.
"""
last = N
maxexp = int(np.log(maxchi) // np.log(d))
exp = list(range(0, (last+1)//2))
reverse = exp[::-1]
if last % 2 != 1:
exp = exp + [last//2]
exp = exp + reverse
for i in range(0, len(exp)):
if exp[i] > maxexp:
exp[i] = maxexp
chis = np.power(d, exp, dtype=int)
return chis
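# A quick worked example of the profile produced by makechis: for d=2, N=6 and
# a maxchi large enough not to clip anything,
#
#     makechis(2, 6, 100)  ->  array([1, 2, 4, 8, 4, 2, 1])
#
# i.e. the exponents ramp up from the edges and back down, and with a smaller
# maxchi the middle entries are capped so they never exceed maxchi.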
def randommps(d, n, maxchi, minlam=1E-13):
chis = makechis(d, n, maxchi)
lams = []
for i in range(len(chis)):
lamshape = (chis[i])
thislam = utils.random_rng(lamshape, 0., 1.)
thislam /= npla.norm(thislam)
| |
or array_like): Slippage in percentage of price.
Will broadcast. Note that 0.01 = 1%.
min_size (float or array_like): Minimum size for an order to be accepted.
Will broadcast.
max_size (float or array_like): Maximum size for an order.
Will broadcast.
Will be partially filled if exceeded. You might not be able to properly close
the position if accumulation is enabled and `max_size` is too low.
reject_prob (float or array_like): Order rejection probability.
Will broadcast.
lock_cash (bool or array_like): Whether to lock cash when shorting.
Will broadcast.
allow_partial (bool or array_like): Whether to allow partial fills.
Will broadcast.
Does not apply when size is `np.inf`.
raise_reject (bool or array_like): Whether to raise an exception if order gets rejected.
Will broadcast.
log (bool or array_like): Whether to log orders.
Will broadcast.
accumulate (bool or array_like): Whether to accumulate signals.
Will broadcast.
Behaves similarly to `Portfolio.from_orders`.
conflict_mode (ConflictMode or array_like): See `vectorbt.portfolio.enums.ConflictMode`.
Will broadcast.
close_first (bool or array_like): Whether to close the position first before reversal.
Will broadcast.
Otherwise reverses the position with a single order and within the same tick.
Takes only effect under `Direction.All`. Requires a second signal to enter
the opposite position. This allows to define parameters such as `fixed_fees` for long
and short positions separately.
val_price (array_like of float): Asset valuation price.
See `val_price` in `Portfolio.from_orders`.
init_cash (InitCashMode, float or array_like of float): Initial capital.
See `init_cash` in `Portfolio.from_orders`.
cash_sharing (bool): Whether to share cash within the same group.
See `cash_sharing` in `Portfolio.from_orders`.
call_seq (CallSeqType or array_like of int): Default sequence of calls per row and group.
See `call_seq` in `Portfolio.from_orders`.
update_value (bool): Whether to update group value after each filled order.
max_orders (int): Size of the order records array.
See `max_orders` in `Portfolio.from_orders`.
max_logs (int): Size of the log records array.
See `max_logs` in `Portfolio.from_orders`.
seed (int): Seed to be set for both `call_seq` and at the beginning of the simulation.
group_by (any): Group columns. See `vectorbt.base.column_grouper.ColumnGrouper`.
broadcast_kwargs (dict): Keyword arguments passed to `vectorbt.base.reshape_fns.broadcast`.
wrapper_kwargs (dict): Keyword arguments passed to `vectorbt.base.array_wrapper.ArrayWrapper`.
freq (any): Index frequency in case `close.index` is not datetime-like.
**kwargs: Keyword arguments passed to the `__init__` method.
All broadcastable arguments will broadcast using `vectorbt.base.reshape_fns.broadcast`
but keep original shape to utilize flexible indexing and to save memory.
For defaults, see `portfolio` in `vectorbt._settings.settings`.
!!! hint
If you generated signals using close price, don't forget to shift your signals by one tick
forward, for example, with `signals.vbt.fshift(1)`. In general, make sure to use a price
that comes after the signal.
Also see notes and hints for `Portfolio.from_orders`.
## Example
Entry opens long, exit closes long:
```python-repl
>>> import pandas as pd
>>> import vectorbt as vbt
>>> close = pd.Series([1, 2, 3, 4, 5])
>>> entries = pd.Series([True, True, True, False, False])
>>> exits = pd.Series([False, False, True, True, True])
>>> portfolio = vbt.Portfolio.from_signals(
... close, entries, exits, size=1., direction='longonly')
>>> portfolio.asset_flow()
0 1.0
1 0.0
2 0.0
3 -1.0
4 0.0
dtype: float64
```
Entry opens short, exit closes short:
```python-repl
>>> portfolio = vbt.Portfolio.from_signals(
... close, entries, exits, size=1., direction='shortonly')
>>> portfolio.asset_flow()
0 -1.0
1 0.0
2 0.0
3 1.0
4 0.0
dtype: float64
```
Reversal within one tick. Entry opens long and closes short, exit closes long and opens short:
```python-repl
>>> portfolio = vbt.Portfolio.from_signals(
... close, entries, exits, size=1., direction='all')
>>> portfolio.asset_flow()
0 1.0
1 0.0
2 0.0
3 -2.0
4 0.0
dtype: float64
```
Reversal within two ticks. First signal closes position, second signal opens the opposite one:
```python-repl
>>> portfolio = vbt.Portfolio.from_signals(
... close, entries, exits, size=1., direction='all',
... close_first=True)
>>> portfolio.asset_flow()
0 1.0
1 0.0
2 0.0
3 -1.0
4 -1.0
dtype: float64
```
If entry and exit, chooses exit:
```python-repl
>>> portfolio = vbt.Portfolio.from_signals(
... close, entries, exits, size=1., direction='all',
... close_first=True, conflict_mode='exit')
>>> portfolio.asset_flow()
0 1.0
1 0.0
2 -1.0
3 -1.0
4 0.0
dtype: float64
```
Entry means long order, exit means short order (acts similar to `from_orders`):
```python-repl
>>> portfolio = vbt.Portfolio.from_signals(
... close, entries, exits, size=1., direction='all',
... accumulate=True)
>>> portfolio.asset_flow()
0 1.0
1 1.0
2 0.0
3 -1.0
4 -1.0
dtype: float64
```
Testing multiple parameters (via broadcasting):
```python-repl
>>> from vectorbt.portfolio.enums import Direction
>>> portfolio = vbt.Portfolio.from_signals(
... close, entries, exits, direction=[list(Direction)],
... broadcast_kwargs=dict(columns_from=Direction._fields))
>>> portfolio.asset_flow()
Long Short All
0 100.0 -100.0 100.0
1 0.0 0.0 0.0
2 0.0 0.0 0.0
3 -100.0 50.0 -200.0
4 0.0 0.0 0.0
```
Specifying information in a more granular way thanks to broadcasting.
Reverse the first long position by first closing it, and all other immediately:
```python-repl
>>> entries = pd.Series([True, False, False, True, False])
>>> exits = pd.Series([False, True, True, False, True])
>>> close_first = pd.Series([False, True, False, False, False])
>>> portfolio = vbt.Portfolio.from_signals(
... close, entries, exits, size=1., direction='all',
... close_first=close_first)
>>> portfolio.asset_flow()
0 1.0
1 -1.0
2 -1.0
3 2.0
4 -2.0
dtype: float64
```
Combine multiple exit conditions. Exit early if the price hits some threshold before an actual exit:
```python-repl
>>> close = pd.Series([10, 11, 12, 13, 14, 15])
>>> entries = pd.Series([True, True, True, False, False, False])
>>> exits = pd.Series([False, False, False, True, True, True])
>>> # 1. Remove adjacent entries and exits
>>> # since stop condition refers only to the first signal
>>> entries, exits = entries.vbt.signals.clean(exits)
>>> entries
0 True
1 False
2 False
3 False
4 False
5 False
dtype: bool
>>> exits
0 False
1 False
2 False
3 True
4 False
5 False
dtype: bool
>>> # 2. Find stop exits
>>> stop_exits = entries.vbt.signals.generate_stop_exits(close, 0.1)
>>> stop_exits
0 False
1 True
2 False
3 False
4 False
5 False
dtype: bool
>>> # 3. Combine exits
>>> exits = exits | stop_exits
>>> exits
0 False
1 True
2 False
3 True
4 False
5 False
dtype: bool
>>> # 4. Pick the first exit after each entry
>>> exits = exits.vbt.signals.first(reset_by=entries, allow_gaps=True)
>>> exits
0 False
1 True
2 False
3 False
4 False
5 False
dtype: bool
>>> # 5. Simulate portfolio
>>> portfolio = vbt.Portfolio.from_signals(close, entries, exits)
>>> portfolio.asset_flow()
0 10.0
1 -10.0
2 0.0
3 0.0
4 0.0
5 0.0
dtype: float64
```
!!! note
By cleaning signals, you lose information. Moreover, this automatically assumes
that each entry/exit signal succeeds (= order gets filled). Use this with caution,
and consider rewriting your strategy with `Portfolio.from_order_func`, which is a
preferred way of defining complex logic in vectorbt.
"""
# Get defaults
from vectorbt._settings import settings
portfolio_cfg = settings['portfolio']
if size is None:
size = portfolio_cfg['size']
if size_type is None:
size_type = portfolio_cfg['signal_size_type']
size_type = prepare_enum_value(SizeType, size_type)
if direction is None:
direction = portfolio_cfg['signal_direction']
direction = prepare_enum_value(Direction, direction)
if price is None:
price = close
if fees is None:
fees = portfolio_cfg['fees']
if fixed_fees is None:
fixed_fees = portfolio_cfg['fixed_fees']
if slippage is None:
slippage = portfolio_cfg['slippage']
if min_size is None:
min_size = portfolio_cfg['min_size']
if max_size is None:
max_size = portfolio_cfg['max_size']
if reject_prob is None:
reject_prob = portfolio_cfg['reject_prob']
if lock_cash is None:
lock_cash = portfolio_cfg['lock_cash']
if allow_partial is None:
allow_partial = portfolio_cfg['allow_partial']
if raise_reject is None:
raise_reject = portfolio_cfg['raise_reject']
if log is None:
log = portfolio_cfg['log']
if accumulate is None:
accumulate = portfolio_cfg['accumulate']
if conflict_mode is None:
conflict_mode = portfolio_cfg['conflict_mode']
conflict_mode = prepare_enum_value(ConflictMode, conflict_mode)
if close_first is None:
close_first = portfolio_cfg['close_first']
if val_price is None:
val_price = price
if init_cash is None:
init_cash = portfolio_cfg['init_cash']
init_cash = prepare_enum_value(InitCashMode, init_cash)
if isinstance(init_cash, int) and init_cash in InitCashMode:
init_cash_mode = init_cash
init_cash = np.inf
else:
init_cash_mode = None
if cash_sharing is None:
cash_sharing = portfolio_cfg['cash_sharing']
if call_seq is None:
call_seq = portfolio_cfg['call_seq']
call_seq = prepare_enum_value(CallSeqType, call_seq)
auto_call_seq = False
if isinstance(call_seq, int):
if call_seq == CallSeqType.Auto:
call_seq = CallSeqType.Default
auto_call_seq = True
if update_value is None:
update_value = portfolio_cfg['update_value']
if seed is None:
seed = | |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import xlrd  # read Excel workbooks
import xlwt  # write Excel workbooks
import math
import datetime
import os
def GetDesktopPath():
return os.path.join(os.path.expanduser("~"), 'Desktop')
deskPath = GetDesktopPath()
cdate = datetime.datetime.now().strftime('%Y-%m-%d')
filepath = deskPath + '\\' +cdate
isExists=os.path.exists(filepath)
if not isExists:
os.makedirs(filepath)
workbook = xlrd.open_workbook(r''+deskPath+'\\shop.xlsx')
excels = workbook.sheet_by_index(0)
tables = {}
tnames = {}
key = ''
for rows in excels._cell_values:
if rows != excels._cell_values[0]:
if rows[0] == '':
tables[key].append(rows)
else:
key = rows[1]
tnames.setdefault(key, rows[0])
tables.setdefault(key, [rows])
i = 0
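# Expected column layout of shop.xlsx, inferred from how row[i] is consumed in the
# generators below (this mapping is an assumption based on the code, not documentation
# that ships with the spreadsheet):
#   row[0]  table display name        row[1]  table key / class name
#   row[2]  field comment             row[3]  field name
#   row[4]  field type                row[5]  field length
#   row[6]  primary-key flag ('y')    row[7]  identity flag ('y')
#   row[8]  index flag ('y')          row[9]  not-null flag ('y')
#   row[10] unique flag ('y')         row[11] default value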
def entity(rows, key):
sb = []
sb.append('using SqlSugar;'+'\n')
sb.append('using System;'+'\n')
sb.append(''+'\n')
sb.append('namespace Entity.Api'+'\n')
sb.append('{'+'\n')
sb.append(' /// <summary>'+'\n')
sb.append(' /// ' + tnames[key]+'表\n')
sb.append(' /// </summary>'+'\n')
sb.append(' public class ' + key + '\n')
sb.append(' {'+'\n')
for row in rows:
sb.append(' /// <summary>'+'\n')
sb.append(' /// ' + row[2]+'\n')
sb.append(' /// </summary>'+'\n')
if row[6] == 'y' and row[7] == 'y':
sb.append(' [SugarColumn(IsPrimaryKey = true, IsIdentity = true)]'+'\n')
elif row[6] == 'y':
sb.append(' [SugarColumn(IsPrimaryKey = true)]'+'\n')
elif row[7] == 'y':
sb.append(' [SugarColumn(IsIdentity = true)]'+'\n')
if row[11] != '':
if str(row[4]) == 'varchar':
sb.append(' public string ' + str(row[3]) + ' { get; set; } = ' + str(row[11]) + ';'+'\n')
elif str(row[4]) == 'datetime':
sb.append(' public DateTime ' + str(row[3]) + ' { get; set; } = ' + str(row[11]) + ';'+'\n')
elif str(row[4]) == 'text':
sb.append(' public string ' + str(row[3]) + ' { get; set; } = ' + str(row[11]) + ';'+'\n')
elif str(row[4]) == 'int':
sb.append(' public int ' + str(row[3]) + ' { get; set; } = ' + str(int(row[11])) + ';'+'\n')
else:
sb.append(' public ' + str(row[4]) + ' ' + str(row[3]) + ' { get; set; } = ' + str(row[11]) + ';'+'\n')
else:
if str(row[4]) == 'varchar':
sb.append(' public string ' + str(row[3]) + ' { get; set; }'+'\n')
elif str(row[4]) == 'datetime':
sb.append(' public DateTime ' + str(row[3]) + ' { get; set; }'+'\n')
elif str(row[4]) == 'text':
sb.append(' public string ' + str(row[3]) + ' { get; set; }'+'\n')
elif str(row[4]) == 'int':
sb.append(' public int ' + str(row[3]) + ' { get; set; }'+'\n')
else:
sb.append(' public ' + str(row[4]) + ' ' + str(row[3]) + ' { get; set; }'+'\n')
sb.append(''+'\n')
sb.append(' #region 扩展字段'+'\n')
sb.append(''+'\n')
sb.append(' //[SugarColumn(IsIgnore = true)]'+'\n')
sb.append(' //public string AddTimeStr { get; set; }'+'\n')
sb.append(''+'\n')
sb.append(' #endregion 扩展字段'+'\n')
sb.append(''+'\n')
sb.append(' }'+'\n')
sb.append('}'+'\n')
fout = open(filepath + '/'+key+'.cs', "w", encoding='utf-8')
fout.writelines(sb)
fout.close()
def bll(rows, key):
sb = []
sb.append('using Entity.Api;'+'\n')
sb.append('using System.Collections.Generic;'+'\n')
sb.append(''+'\n')
sb.append('namespace BLL.Api'+'\n')
sb.append('{'+'\n')
sb.append(' /// <summary>'+'\n')
sb.append(' /// ' + tnames[key]+'表BLL\n')
sb.append(' /// </summary>'+'\n')
sb.append(' public class ' + key + 'BLL: BaseSqlSugar<' + key + '>\n')
sb.append(' {'+'\n')
sb.append(' private string _cacheKey = "'+key+'_{0}";'+'\n')
sb.append(' #region 单例'+'\n')
sb.append(' private static ' + key + 'BLL _singleton;'+'\n')
sb.append(' private static readonly object LockObject = new object();'+'\n')
sb.append(' '+'\n')
sb.append(' private ' + key + 'BLL()'+'\n')
sb.append(' {'+'\n')
sb.append(' '+'\n')
sb.append(' }'+'\n')
sb.append(' '+'\n')
sb.append(' public static ' + key + 'BLL Singleton'+'\n')
sb.append(' {'+'\n')
sb.append(' get'+'\n')
sb.append(' {'+'\n')
sb.append(' if (_singleton == null)'+'\n')
sb.append(' {'+'\n')
sb.append(' lock (LockObject)'+'\n')
sb.append(' {'+'\n')
sb.append(' if (_singleton == null)'+'\n')
sb.append(' {'+'\n')
sb.append(' _singleton = new ' + key + 'BLL();'+'\n')
sb.append(' }'+'\n')
sb.append(' }'+'\n')
sb.append(' }'+'\n')
sb.append(' return _singleton;'+'\n')
sb.append(' }'+'\n')
sb.append(' }'+'\n')
sb.append(' #endregion 单例'+'\n')
sb.append(' '+'\n')
sb.append(' /// <summary>'+'\n')
sb.append(' /// 获取' + tnames[key] +'数据'+'\n')
sb.append(' /// </summary>'+'\n')
sb.append(' /// <param name="cityInfoId">同城ID</param>'+'\n')
sb.append(' /// <param name="pageIndex"></param>'+'\n')
sb.append(' /// <param name="pageSize"></param>'+'\n')
sb.append(' /// <returns></returns>'+'\n')
sb.append(' public List<' + key + '> Get' + key + 'List(int pageIndex = 1, int pageSize = 10)'+'\n')
sb.append(' {'+'\n')
sb.append(' var where = $"Status<>-4";'+'\n')
sb.append(' var totalCount = 0;'+'\n')
sb.append(' return GetList(where, ref totalCount, pageIndex, pageSize);'+'\n')
sb.append(' }'+'\n')
sb.append(' '+'\n')
sb.append(' /// <summary>'+'\n')
sb.append(' /// 修改' + tnames[key] +'状态'+'\n')
sb.append(' /// </summary>'+'\n')
sb.append(' /// <param name="cityInfoId">同城ID</param>'+'\n')
sb.append(' /// <param name="id"></param>'+'\n')
sb.append(' /// <param name="status">状态</param>'+'\n')
sb.append(' /// <returns></returns>'+'\n')
sb.append(' public bool UpdateStatus(int id, int status)'+'\n')
sb.append(' {'+'\n')
sb.append(' var model = GetModel(id);'+'\n')
sb.append(' if (model != null)'+'\n')
sb.append(' {'+'\n')
sb.append(' model.Status = status;'+'\n')
sb.append(' return Update(model, new string[] { "Status" });'+'\n')
sb.append(' }'+'\n')
sb.append(' return false;'+'\n')
sb.append(' }'+'\n')
sb.append(''+'\n')
sb.append(''+'\n')
sb.append(' }'+'\n')
sb.append('}'+'\n')
fout = open(filepath + '/'+key+'BLL.cs', "w", encoding='utf-8')
fout.writelines(sb)
fout.close()
def database(rows):
sb = []
sb.append('CREATE TABLE `' + key + '` ('+'\n')
for row in rows:
if row[4] == 'int' or row[4] == 'varchar':
if row[4] == 'int' and row[7] == 'y':
sb.append(' `' + row[3] + '` ' + row[4] + '(' + str(int(row[5])) + ') ' + (
'NOT NULL' if row[9] == 'y' else '') + ' ' + ('AUTO_INCREMENT' if row[7] == 'y' else '')+' COMMENT \''+ row[2] +'\',' + '\n')
else:
sb.append(' `' + row[3] + '` ' + row[4] + '(' + str(int(row[5])) + ') ' + (
                    'NOT NULL' if row[9] == 'y' else '') + ' COMMENT \''+ row[2] +'\',' + '\n')
else:
sb.append(' `' + row[3] + '` ' + row[4] + ' ' +
                      ('NOT NULL' if row[9] == 'y' else '') + ' COMMENT \''+ row[2] +'\','+'\n')
pkeycol = list(filter(lambda x: x[6] == 'y', rows))
indexcol = list(filter(lambda x: x[8] == 'y', rows))
ukeycol = list(filter(lambda x: x[10] == 'y', rows))
if pkeycol != None and len(pkeycol) > 0:
if len(indexcol) > 0:
sb.append(' PRIMARY KEY (`' + pkeycol[0][3] + '`),'+'\n')
else:
sb.append(' PRIMARY KEY (`' + pkeycol[0][3] + '`)'+'\n')
else:
        print(rows[0][0] + ' PRIMARY KEY null')
ikey = []
for ic in indexcol:
ikey.append(' `' + ic[3] + '`')
ukey = []
for uc in ukeycol:
ukey.append(' `' + uc[3] + '`')
    if ukeycol is not None and len(ukeycol) > 0:
sb.append(' UNIQUE KEY `Key_Unique` (' + str(ukey).replace('[', '').replace(']', '').replace("'", '')+') USING BTREE,'+'\n')
sb.append(' KEY `Key_Index` (' + str(ikey).replace('[', '').replace(']', '').replace("'", '')+')'+'\n')
sb.append(') ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;'+'\n')
fout = open(filepath + '/'+key+'-database.txt', "w", encoding='utf-8')
fout.writelines(sb)
fout.close()
def htmllist(rows, key):
sb = []
sb.append('<template>'+'\n')
    sb.append('    <div :style="{padding:\'15px\'}">'+'\n')
sb.append(' <div style="margin:20px 0 10px;">'+'\n')
sb.append(' <el-button type="primary" @click="editClick(0)">新增'+ tnames[key] +'</el-button>'+'\n')
sb.append(' </div>'+'\n')
sb.append(' <div>'+'\n')
sb.append(' <el-table :data="list" border style="width: 100%">'+'\n')
for row in rows:
sb.append(' <el-table-column prop="' + str(row[3]) + '" label="' + str(row[2]) + '" width="120"></el-table-column>'+'\n')
sb.append(' <el-table-column fixed="right" label="操作" width="100">'+'\n')
sb.append(' <template slot-scope="scope">'+'\n')
sb.append(' <el-button type="text" size="small" @click="editClick(1,scope.row)">编辑</el-button>'+'\n')
sb.append(' <el-button type="text" size="small" @click="delService(scope.row.Id)">删除</el-button>'+'\n')
sb.append(' </template>'+'\n')
sb.append(' </el-table-column>'+'\n')
sb.append(' </el-table>'+'\n')
sb.append(' <el-pagination'+'\n')
sb.append(' layout="prev, pager, next"'+'\n')
sb.append(' :current-page="query.pageIndex"'+'\n')
sb.append(' :page-size="query.pageSize"'+'\n')
sb.append(' :total="count"'+'\n')
sb.append(' ></el-pagination>'+'\n')
sb.append(' <el-dialog title="添加修改'+ tnames[key] +'" :visible.sync="dialogVisible" width="30%" center>'+'\n')
sb.append(' <div class="div_box">'+'\n')
sb.append(' <el-form ref="elform" :model="editModel" label-width="120px" @submit.native.prevent>'+'\n')
for row in rows:
sb.append(' <el-form-item label="' + str(row[2]) + '">'+'\n')
sb.append(' <el-input v-model="editModel.' + str(row[3]) + '"></el-input>'+'\n')
sb.append(' </el-form-item>'+'\n')
sb.append(' </el-form>'+'\n')
sb.append(' </div>'+'\n')
sb.append(' <span slot="footer" class="dialog-footer">'+'\n')
sb.append(' <el-button @click="dialogVisible = false">取 消</el-button>'+'\n')
sb.append(' <el-button type="primary" @click="saveModel()">确 定</el-button>'+'\n')
sb.append(' </span>'+'\n')
sb.append(' </el-dialog>'+'\n')
sb.append(' </div>'+'\n')
sb.append(' </div>'+'\n')
sb.append('</template>'+'\n')
sb.append(''+'\n')
sb.append('<script>'+'\n')
sb.append('export default {'+'\n')
sb.append(' components: {},'+'\n')
sb.append(' props: {},'+'\n')
sb.append(' data() {'+'\n')
sb.append(' return {'+'\n')
sb.append(' query: {'+'\n')
sb.append(' pageIndex: 1,'+'\n')
sb.append(' pageSize: 10,'+'\n')
sb.append(' },'+'\n')
sb.append(' list: [],'+'\n')
sb.append(' count: 0,'+'\n')
sb.append(' dialogVisible: false,'+'\n')
sb.append(' editModel: {'+'\n')
for row in rows:
sb.append(' ' + str(row[3]) + ': "",'+'\n')
sb.append(' },'+'\n')
sb.append(' typeList: ['+'\n')
sb.append(' { Id: 0, Name: "网页类型" },'+'\n')
sb.append(' { Id: 1, Name: "接口类型" },'+'\n')
sb.append(' ],'+'\n')
sb.append(' };'+'\n')
sb.append(' },'+'\n')
sb.append(' watch: {},'+'\n')
sb.append(' computed: {},'+'\n')
sb.append(' methods: {'+'\n')
sb.append(' loadData() {'+'\n')
sb.append(' var self = this;'+'\n')
sb.append(' this.$HttpGet("/Admin/Get' + key + 'List", self.query)'+'\n')
sb.append(' .then(function (response) {'+'\n')
sb.append(' if (response.data.code === 1) {'+'\n')
sb.append(' self.list = response.data.obj.list;'+'\n')
sb.append(' self.count = response.data.obj.count;'+'\n')
sb.append(' } else {'+'\n')
sb.append(' self.$message(response.data.msg);'+'\n')
sb.append(' }'+'\n')
sb.append(' })'+'\n')
sb.append(' .catch(function (error) {'+'\n')
sb.append(' console.log(error);'+'\n')
sb.append(' });'+'\n')
sb.append(' },'+'\n')
sb.append(' saveModel() {'+'\n')
sb.append(' var self = this;'+'\n')
sb.append(' this.$HttpPostJson("/Admin/Save' + key + '", this.editModel)'+'\n')
sb.append(' .then(function (response) {'+'\n')
sb.append(' if (response.data.code == 1) {'+'\n')
sb.append(' self.$message(response.data.msg);'+'\n')
sb.append(' self.loadData();'+'\n')
sb.append(' } else {'+'\n')
sb.append(' self.$message(response.data.msg);'+'\n')
sb.append(' }'+'\n')
sb.append(' })'+'\n')
sb.append(' .catch(function (error) {'+'\n')
sb.append(' console.log(error);'+'\n')
sb.append(' });'+'\n')
sb.append(' self.dialogVisible = false;'+'\n')
sb.append(' },'+'\n')
sb.append(' editClick(type, row) {'+'\n')
sb.append(' if (type == | |
"""
This file contains classes and functions for representing, solving, and simulating
agents who must allocate their resources among consumption, saving in a risk-free
asset (with a low return), and saving in a risky asset (with higher average return).
"""
import numpy as np
from copy import deepcopy
from HARK import MetricObject, NullFunc # Basic HARK features
from HARK.ConsumptionSaving.ConsIndShockModel import (
utility, # CRRA utility function
utility_inv, # Inverse CRRA utility function
utilityP, # CRRA marginal utility function
utilityP_inv, # Inverse CRRA marginal utility function
init_lifecycle,
)
from HARK.ConsumptionSaving.ConsRiskyAssetModel import (
RiskyAssetConsumerType,
risky_asset_parms,
init_risky_asset,
)
from HARK.distribution import calc_expectation
from HARK.interpolation import (
LinearInterp, # Piecewise linear interpolation
BilinearInterp, # 2D interpolator
TrilinearInterp, # 3D interpolator
ConstantFunction, # Interpolator-like class that returns constant value
IdentityFunction, # Interpolator-like class that returns one of its arguments
ValueFuncCRRA,
MargValueFuncCRRA,
DiscreteInterp,
)
from HARK.utilities import make_grid_exp_mult
class RiskyContribConsumerType(RiskyAssetConsumerType):
"""
A consumer type with idiosyncratic shocks to permanent and transitory income,
who can save in both a risk-free and a risky asset but faces frictions to
moving funds between them. The agent can only consume out of his risk-free
asset.
The frictions are:
- A proportional tax on funds moved from the risky to the risk-free
asset.
- A stochastic inability to move funds between his accounts.
To partially avoid the second friction, the agent can commit to have a
fraction of his labor income, which is usually deposited in his risk-free
account, diverted to his risky account. He can change this fraction
only in periods where he is able to move funds between accounts.
"""
time_inv_ = deepcopy(RiskyAssetConsumerType.time_inv_)
time_inv_ = time_inv_ + ["DiscreteShareBool"]
# The new state variables (over those in ConsIndShock) are:
# - nMrm: start-of-period risky resources.
# - mNrmTilde: post-rebalancing risk-free resources.
# - nNrmTilde: post-rebalancing risky resources.
# - Share: income-deduction share.
# For details, see
# https://github.com/Mv77/RiskyContrib/blob/main/RiskyContrib.pdf
state_vars = RiskyAssetConsumerType.state_vars + [
"gNrm",
"nNrm",
"mNrmTilde",
"nNrmTilde",
"Share",
]
shock_vars_ = RiskyAssetConsumerType.shock_vars_
def __init__(self, verbose=False, quiet=False, **kwds):
params = init_risky_contrib.copy()
params.update(kwds)
kwds = params
# Initialize a basic consumer type
RiskyAssetConsumerType.__init__(self, verbose=verbose, quiet=quiet, **kwds)
# The model is solved and simulated spliting each of the agent's
# decisions into its own "stage". The stages in chronological order
# are
# - Reb: asset-rebalancing stage.
# - Sha: definition of the income contribution share.
# - Cns: consumption stage.
self.stages = ["Reb", "Sha", "Cns"]
# Each stage has its own states and controls, and its methods
# to find them.
self.get_states = {
"Reb": self.get_states_Reb,
"Sha": self.get_states_Sha,
"Cns": self.get_states_Cns,
}
self.get_controls = {
"Reb": self.get_controls_Reb,
"Sha": self.get_controls_Sha,
"Cns": self.get_controls_Cns,
}
# Set the solver for the portfolio model, and update various constructed attributes
self.solve_one_period = solveRiskyContrib
self.update()
def pre_solve(self):
self.update_solution_terminal()
def update(self):
RiskyAssetConsumerType.update(self)
self.update_share_grid()
self.update_dfrac_grid()
self.update_nNrm_grid()
self.update_mNrm_grid()
self.update_tau()
def update_solution_terminal(self):
"""
Solves the terminal period. The solution is trivial.
Cns: agent will consume all of his liquid resources.
Sha: irrelevant as there is no "next" period.
Reb: agent will shift all of his resources to the risk-free asset.
Parameters
----------
None
Returns
-------
None
"""
# Construct the terminal solution backwards.
# Start with the consumption stage. All liquid resources are consumed.
cFunc_term = IdentityFunction(i_dim=0, n_dims=3)
vFunc_Cns_term = ValueFuncCRRA(cFunc_term, CRRA=self.CRRA)
# Marginal values
dvdmFunc_Cns_term = MargValueFuncCRRA(cFunc_term, CRRA=self.CRRA)
dvdnFunc_Cns_term = ConstantFunction(0.0)
dvdsFunc_Cns_term = ConstantFunction(0.0)
Cns_stage_sol = RiskyContribCnsSolution(
# Consumption stage
vFunc=vFunc_Cns_term,
cFunc=cFunc_term,
dvdmFunc=dvdmFunc_Cns_term,
dvdnFunc=dvdnFunc_Cns_term,
dvdsFunc=dvdsFunc_Cns_term,
)
# Share stage
# It's irrelevant because there is no future period. Set share to 0.
# Create a dummy 2-d consumption function to get value function and marginal
c2d = IdentityFunction(i_dim=0, n_dims=2)
Sha_stage_sol = RiskyContribShaSolution(
# Adjust
vFunc_Adj=ValueFuncCRRA(c2d, CRRA=self.CRRA),
ShareFunc_Adj=ConstantFunction(0.0),
dvdmFunc_Adj=MargValueFuncCRRA(c2d, CRRA=self.CRRA),
dvdnFunc_Adj=ConstantFunction(0.0),
# Fixed
vFunc_Fxd=vFunc_Cns_term,
ShareFunc_Fxd=IdentityFunction(i_dim=2, n_dims=3),
dvdmFunc_Fxd=dvdmFunc_Cns_term,
dvdnFunc_Fxd=dvdnFunc_Cns_term,
dvdsFunc_Fxd=dvdsFunc_Cns_term,
)
# Rebalancing stage
# Adjusting agent:
# Withdraw everything from the pension fund and consume everything
dfracFunc_Adj_term = ConstantFunction(-1.0)
# Find the withdrawal penalty. If it is time-varying, assume it takes
# the same value as in the last non-terminal period
if type(self.tau) is list:
tau = self.tau[-1]
else:
tau = self.tau
# Value and marginal value function of the adjusting agent
vFunc_Reb_Adj_term = ValueFuncCRRA(lambda m, n: m + n / (1 + tau), self.CRRA)
dvdmFunc_Reb_Adj_term = MargValueFuncCRRA(
lambda m, n: m + n / (1 + tau), self.CRRA
)
# A marginal unit of n will be withdrawn and put into m. Then consumed.
dvdnFunc_Reb_Adj_term = lambda m, n: dvdmFunc_Reb_Adj_term(m, n) / (1 + tau)
Reb_stage_sol = RiskyContribRebSolution(
# Rebalancing stage
vFunc_Adj=vFunc_Reb_Adj_term,
dfracFunc_Adj=dfracFunc_Adj_term,
dvdmFunc_Adj=dvdmFunc_Reb_Adj_term,
dvdnFunc_Adj=dvdnFunc_Reb_Adj_term,
# Adjusting stage
vFunc_Fxd=vFunc_Cns_term,
dfracFunc_Fxd=ConstantFunction(0.0),
dvdmFunc_Fxd=dvdmFunc_Cns_term,
dvdnFunc_Fxd=dvdnFunc_Cns_term,
dvdsFunc_Fxd=dvdsFunc_Cns_term,
)
# Construct the terminal period solution
self.solution_terminal = RiskyContribSolution(
Reb_stage_sol, Sha_stage_sol, Cns_stage_sol
)
def update_tau(self):
"""
Checks that the tax rate on risky-to-risk-free flows has the appropriate
        length and adds it to time_(in)vary.
Returns
-------
None.
"""
if type(self.tau) is list and (len(self.tau) == self.T_cycle):
self.add_to_time_vary("tau")
elif type(self.tau) is list:
raise AttributeError(
"If tau is time-varying, it must have length of T_cycle!"
)
else:
self.add_to_time_inv("tau")
def update_share_grid(self):
"""
Creates grid for the income contribution share.
Parameters
----------
None
Returns
-------
None
"""
self.ShareGrid = np.linspace(0.0, self.ShareMax, self.ShareCount)
self.add_to_time_inv("ShareGrid")
def update_dfrac_grid(self):
"""
Creates grid for the rebalancing flow between assets. This flow is
normalized as a ratio.
- If d > 0, d*mNrm flows towards the risky asset.
- If d < 0, d*nNrm (pre-tax) flows towards the risk-free asset.
Returns
-------
None.
"""
self.dfracGrid = np.linspace(0, 1, self.dCount)
self.add_to_time_inv("dfracGrid")
def update_nNrm_grid(self):
"""
        Updates the agent's illiquid assets grid by constructing a
multi-exponentially spaced grid of nNrm values.
Parameters
----------
None
Returns
-------
None.
"""
# Extract parameters
nNrmMin = self.nNrmMin
nNrmMax = self.nNrmMax
nNrmCount = self.nNrmCount
exp_nest = self.nNrmNestFac
# Create grid
nNrmGrid = make_grid_exp_mult(
ming=nNrmMin, maxg=nNrmMax, ng=nNrmCount, timestonest=exp_nest
)
# Assign and set it as time invariant
self.nNrmGrid = nNrmGrid
self.add_to_time_inv("nNrmGrid")
def update_mNrm_grid(self):
"""
Updates the agent's liquid assets exogenous grid by constructing a
multi-exponentially spaced grid of mNrm values.
Parameters
----------
None
Returns
-------
None.
"""
# Extract parameters
mNrmMin = self.mNrmMin
mNrmMax = self.mNrmMax
mNrmCount = self.mNrmCount
exp_nest = self.mNrmNestFac
# Create grid
mNrmGrid = make_grid_exp_mult(
ming=mNrmMin, maxg=mNrmMax, ng=mNrmCount, timestonest=exp_nest
)
# Assign and set it as time invariant
self.mNrmGrid = mNrmGrid
self.add_to_time_inv("mNrmGrid")
def initialize_sim(self):
"""
Initialize the state of simulation attributes.
Parameters
----------
None
Returns
-------
None
"""
RiskyAssetConsumerType.initialize_sim(self)
self.state_now["Share"] = np.zeros(self.AgentCount)
def sim_birth(self, which_agents):
"""
Create new agents to replace ones who have recently died; takes draws of
initial aNrm and pLvl, as in ConsIndShockModel, then sets Share, Adjust
and post-rebalancing risky asset nNrmTilde to zero as initial values.
Parameters
----------
which_agents : np.array
Boolean array of size AgentCount indicating which agents should be "born".
Returns
-------
None
"""
RiskyAssetConsumerType.sim_birth(self, which_agents)
self.state_now["Share"][which_agents] = 0.0
self.state_now["nNrmTilde"][which_agents] = 0.0
def sim_one_period(self):
"""
Simulates one period for this type.
Has to be re-defined instead of using AgentType.sim_one_period() because
of the "stages" structure.
Parameters
----------
None
Returns
-------
None
"""
if not hasattr(self, "solution"):
raise Exception(
"Model instance does not have a solution stored. To simulate, it is necessary"
" to run the `solve()` method of the class first."
)
# Mortality adjusts the agent population
self.get_mortality() # Replace some agents with "newborns"
# Make state_now into state_prev, clearing state_now
for var in self.state_now:
self.state_prev[var] = self.state_now[var]
if isinstance(self.state_now[var], np.ndarray):
self.state_now[var] = np.empty(self.AgentCount)
else:
# Probably an aggregate variable. It may be getting set by the Market.
pass
if self.read_shocks: # If shock histories have been pre-specified, use those
self.read_shocks_from_history()
else: # Otherwise, draw shocks as usual according to subclass-specific method
self.get_shocks()
# Sequentially get states and controls of every stage
for s in self.stages:
self.get_states[s]()
self.get_controls[s]()
self.get_post_states()
# Advance time for all agents
self.t_age = self.t_age + 1 # Age all consumers by one period
self.t_cycle = self.t_cycle + 1 # Age all consumers within their cycle
self.t_cycle[
self.t_cycle == self.T_cycle
] = 0 # Resetting to zero for those who have reached | |
+ toGuess[6:]
if word[1] != "Z" and word[1] != "z" and word[2] != "Z" and word[2] != "z" and word[3] != "Z" and word[3] != "z" and word[4] != "Z" and word[4] != "z" and word[5] != "Z" and word[5] != "z" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "z" + ", "
if numberOfErrors == 0 :
print("\t___________")
print("\t| |")
print("\t|")
print("\t|")
print("\t|")
print("\t|")
print("\t|")
if numberOfErrors == 1 :
print("\t___________")
print("\t| |")
print("\t| O")
print("\t|")
print("\t|")
print("\t|")
print("\t|")
if numberOfErrors == 2 :
print("\t___________")
print("\t| |")
print("\t| O")
print("\t| |")
print("\t|")
print("\t|")
print("\t|")
print("\n\tWord: " + toGuess)
print("\tMisses: " + wrongChars)
if "_" in toGuess and not loser :
guessChar = ""
while not guessChar.isalpha() :
guessChar = input("\n---------------------------------\nEnter your letter: ")
_ = os.system('cls' if os.name=='nt' else 'clear')
if guessChar == "A" or guessChar == "a" :
if word[1] == "A" or word[1] == "a" :
toGuess = toGuess[:1] + "a" + toGuess[2:]
if word[2] == "A" or word[2] == "a" :
toGuess = toGuess[:2] + "a" + toGuess[3:]
if word[3] == "A" or word[3] == "a" :
toGuess = toGuess[:3] + "a" + toGuess[4:]
if word[4] == "A" or word[4] == "a" :
toGuess = toGuess[:4] + "a" + toGuess[5:]
if word[5] == "A" or word[5] == "a" :
toGuess = toGuess[:5] + "a" + toGuess[6:]
if word[1] != "A" and word[1] != "a" and word[2] != "A" and word[2] != "a" and word[3] != "A" and word[3] != "a" and word[4] != "A" and word[4] != "a" and word[5] != "A" and word[5] != "a" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "a" + ", "
if guessChar == "B" or guessChar == "b" :
if word[1] == "B" or word[1] == "b" :
toGuess = toGuess[:1] + "b" + toGuess[2:]
if word[2] == "B" or word[2] == "b" :
toGuess = toGuess[:2] + "b" + toGuess[3:]
if word[3] == "B" or word[3] == "b" :
toGuess = toGuess[:3] + "b" + toGuess[4:]
if word[4] == "B" or word[4] == "b" :
toGuess = toGuess[:4] + "b" + toGuess[5:]
if word[5] == "B" or word[5] == "b" :
toGuess = toGuess[:5] + "b" + toGuess[6:]
if word[1] != "B" and word[1] != "b" and word[2] != "B" and word[2] != "b" and word[3] != "B" and word[3] != "b" and word[4] != "B" and word[4] != "b" and word[5] != "B" and word[5] != "b" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "b" + ", "
if guessChar == "C" or guessChar == "c" :
if word[1] == "C" or word[1] == "c" :
toGuess = toGuess[:1] + "c" + toGuess[2:]
if word[2] == "C" or word[2] == "c" :
toGuess = toGuess[:2] + "c" + toGuess[3:]
if word[3] == "C" or word[3] == "c" :
toGuess = toGuess[:3] + "c" + toGuess[4:]
if word[4] == "C" or word[4] == "c" :
toGuess = toGuess[:4] + "c" + toGuess[5:]
if word[5] == "C" or word[5] == "c" :
toGuess = toGuess[:5] + "c" + toGuess[6:]
if word[1] != "C" and word[1] != "c" and word[2] != "C" and word[2] != "c" and word[3] != "C" and word[3] != "c" and word[4] != "C" and word[4] != "c" and word[5] != "C" and word[5] != "c" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "c" + ", "
if guessChar == "D" or guessChar == "d" :
if word[1] == "D" or word[1] == "d" :
toGuess = toGuess[:1] + "d" + toGuess[2:]
if word[2] == "D" or word[2] == "d" :
toGuess = toGuess[:2] + "d" + toGuess[3:]
if word[3] == "D" or word[3] == "d" :
toGuess = toGuess[:3] + "d" + toGuess[4:]
if word[4] == "D" or word[4] == "d" :
toGuess = toGuess[:4] + "d" + toGuess[5:]
if word[5] == "D" or word[5] == "d" :
toGuess = toGuess[:5] + "d" + toGuess[6:]
if word[1] != "D" and word[1] != "d" and word[2] != "D" and word[2] != "d" and word[3] != "D" and word[3] != "d" and word[4] != "D" and word[4] != "d" and word[5] != "D" and word[5] != "d" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "d" + ", "
if guessChar == "E" or guessChar == "e" :
if word[1] == "E" or word[1] == "e" :
toGuess = toGuess[:1] + "e" + toGuess[2:]
if word[2] == "E" or word[2] == "e" :
toGuess = toGuess[:2] + "e" + toGuess[3:]
if word[3] == "E" or word[3] == "e" :
toGuess = toGuess[:3] + "e" + toGuess[4:]
if word[4] == "E" or word[4] == "e" :
toGuess = toGuess[:4] + "e" + toGuess[5:]
if word[5] == "E" or word[5] == "e" :
toGuess = toGuess[:5] + "e" + toGuess[6:]
if word[1] != "E" and word[1] != "e" and word[2] != "E" and word[2] != "e" and word[3] != "E" and word[3] != "e" and word[4] != "E" and word[4] != "e" and word[5] != "E" and word[5] != "e" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "e" + ", "
if guessChar == "F" or guessChar == "f" :
if word[1] == "F" or word[1] == "f" :
toGuess = toGuess[:1] + "f" + toGuess[2:]
if word[2] == "F" or word[2] == "f" :
toGuess = toGuess[:2] + "f" + toGuess[3:]
if word[3] == "F" or word[3] == "f" :
toGuess = toGuess[:3] + "f" + toGuess[4:]
if word[4] == "F" or word[4] == "f" :
toGuess = toGuess[:4] + "f" + toGuess[5:]
if word[5] == "F" or word[5] == "f" :
toGuess = toGuess[:5] + "f" + toGuess[6:]
if word[1] != "F" and word[1] != "f" and word[2] != "F" and word[2] != "f" and word[3] != "F" and word[3] != "f" and word[4] != "F" and word[4] != "f" and word[5] != "F" and word[5] != "f" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "f" + ", "
if guessChar == "G" or guessChar == "g" :
if word[1] == "G" or word[1] == "g" :
toGuess = toGuess[:1] + "g" + toGuess[2:]
if word[2] == "G" or word[2] == "g" :
toGuess = toGuess[:2] + "g" + toGuess[3:]
if word[3] == "G" or word[3] == "g" :
toGuess = toGuess[:3] + "g" + toGuess[4:]
if word[4] == "G" or word[4] == "g" :
toGuess = toGuess[:4] + "g" + toGuess[5:]
if word[5] == "G" or word[5] == "g" :
toGuess = toGuess[:5] + "g" + toGuess[6:]
if word[1] != "G" and word[1] != "g" and word[2] != "G" and word[2] != "g" and word[3] != "G" and word[3] != "g" and word[4] != "G" and word[4] != "g" and word[5] != "G" and word[5] != "g" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "g" + ", "
if guessChar == "H" or guessChar == "h" :
if word[1] == "H" or word[1] == "h" :
toGuess = toGuess[:1] + "h" + toGuess[2:]
if word[2] == "H" or word[2] == "h" :
toGuess = toGuess[:2] + "h" + toGuess[3:]
if word[3] == "H" or word[3] == "h" :
toGuess = toGuess[:3] + "h" + toGuess[4:]
if word[4] == "H" or word[4] == "h" :
toGuess = toGuess[:4] + "h" + toGuess[5:]
if word[5] == "H" or word[5] == "h" :
toGuess = toGuess[:5] + "h" + toGuess[6:]
if word[1] != "H" and word[1] != "h" and word[2] != "H" and word[2] != "h" and word[3] != "H" and word[3] != "h" and word[4] != "H" and word[4] != "h" and word[5] != "H" and word[5] != "h" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "h" + ", "
if guessChar == "I" or guessChar == "i" :
if word[1] == "I" or word[1] == "i" :
toGuess = toGuess[:1] + "i" + toGuess[2:]
if word[2] == "I" or word[2] == "i" :
toGuess = toGuess[:2] + "i" + toGuess[3:]
if word[3] == "I" or word[3] == "i" :
toGuess = toGuess[:3] + "i" + toGuess[4:]
if word[4] == "I" or word[4] == "i" :
toGuess = toGuess[:4] + "i" + toGuess[5:]
if word[5] == "I" or word[5] == "i" :
toGuess = toGuess[:5] + "i" + toGuess[6:]
if word[1] != "I" and word[1] != "i" and word[2] != "I" and word[2] != "i" and word[3] != "I" and word[3] != "i" and word[4] != "I" and word[4] != "i" and word[5] != "I" and word[5] != "i" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "i" + ", "
if guessChar == "J" or guessChar == "j" :
if word[1] == "J" or word[1] == "j" :
toGuess = toGuess[:1] + | |
## Automatically adapted for numpy.oldnumeric Jul 23, 2007 by
#############################################################################
#
# Author: <NAME>
#
# Copyright: <NAME> TSRI 2000
#
#############################################################################
#
# $Header: /opt/cvs/DejaVu2/povray3.py,v 1.1.1.1 2014/06/19 19:41:03 sanner Exp $
#
# $Id: povray3.py,v 1.1.1.1 2014/06/19 19:41:03 sanner Exp $
#
"""
Povray module: driver to generate PovRay scenes from a DejaVu2 scene.
The driver currently handles: camera parameters (background color, perspective parameters, position), light sources, and Spheres, IndexedPolygons, IndexedPolylines and Cylinders geometries.
The projection is still somewhat approximate; there is still a translation of 10 units in the camera placement that is not explained.
"""
from opengltk.OpenGL import GL
#from opengltk.extent.utillib import glTriangleNormals
from geomutils.geomalgorithms import TriangleNormals
from DejaVu2 import viewerConst
import numpy.oldnumeric as Numeric
class PovRay:
"""Driver for Povray v3.x
"""
def __init__(self, includes = []):
"""Create a PovRay file generator class for a given camera"""
self.camera = None
self.entries = ["""
//
// Povray scene file written by Pmv version 0.1
//
// POV-Ray can be retrieved at: http://www.povray.org
// N.Guex, 1995-1999
//
"""]
for i in includes:
self.entries.append('#include "%s"\n'%i)
def write(self, filename):
"""void <- write(filename) dumps povray scene description"""
self.filename = filename
f = open(filename, 'w')
for entry in self.entries:
f.write(entry)
f.close()
def clear(self):
"""void <- clear() empties povray scene description"""
self.entries = []
def coord3(self, x, y=None, z=None):
"""string <- coord3(x, y=None, z=None) returns a povray vector string
this function inverts the z coordinates.
It can be called with a sequence of 3 or with 3 values.
"""
if y is None:
return ' <%f, %f, %f> '% (x[0],x[1],-x[2])
else:
return ' <%f, %f, %f> '% (x, y, -z)
def vec3(self, x, y=None, z=None):
"""string <- vec3(x, y=None, z=None) returns a povray vector string
this function does not invert the z coordinates.
It can be called with a sequence of 3 or with 3 values.
"""
if y is None:
return ' <%.2f, %.2f, %.2f> '% (x[0],x[1],x[2])
else:
return ' <%.2f, %.2f, %.2f> '% (x, y, z)
def vec4(self, x, y=None, z=None, t=None):
"""string <- vec4(x, y=None, z=None, t=None) returns a povray 4-vector
this function does not invert the z coordinates.
It can be called with a sequence of 3 and a y-value or with 4 values.
"""
if z is None:
return ' <%.2f, %.2f, %.2f, %.2f> '% (x[0],x[1],x[2],y)
else:
return ' <%.2f, %.2f, %.2f, %.2f> '% (x, y, z, t)
def matrix(self, mat):
"""string <- matrix(mat) returns a 4x4 matrix as a povray matrix"""
str = 'matrix < %f, %f, %f, %f, %f, %f, %f, %f, %f, %f, %f, %f >\n' % (
mat[0][0], mat[0][1], mat[0][2],
mat[1][0], mat[1][1], mat[1][2],
mat[2][0], mat[2][1], mat[2][2],
mat[0][3], mat[1][3], mat[2][3] )
return str
def addCamera(self, camera, scaleLight=2.0):
"""void <- addCamera(camera) adds the given camera to the povray scene
handles: camera background color
                 projection matrix (only perspective currently)
light sources
"""
# doesn't handle camera transformation yet
str = 'background { color rgb ' + self.vec3(camera.backgroundColor)
str = str + '}\n\n'
self.entries.append( str )
str = 'camera { perspective'
# I can't remember why I have to move 10 more than direction vector
# but it seems familiar
str = str + '\n location' + self.coord3(camera.lookFrom[0],
camera.lookFrom[1],
camera.lookFrom[2]+10)
str = str + '\n look_at' + self.coord3(camera.lookAt)
str = str + '\n angle %f'% ( camera.fovy+10.0, )
str = str + '\n up <0,1,0> // required for 1/1 aspect ratio p 277'
str = str + '\n right <%d/%d,0,0>'%(camera.width,camera.height)
str = str + '\n}\n\n'
self.entries.append( str )
for l in camera.viewer.lights:
if l.enabled: self.addLight(l, scaleLight)
def addLight(self, light, scale=2.0):
"""void <- addLight(light) add a light source to the povray scene
"""
# doesn't handle light transformation yet
d = light.direction
str = 'light_source {' + self.coord3(d[0], d[1], d[2])
# str = str + 'color rgb' + self.vec3(light.diffuse) + '}\n\n'
str = str + 'color rgb ' + self.vec3(scale*light.diffuse[0],
scale*light.diffuse[1],
scale*light.diffuse[2])
str += 'parallel '
str = str + '}\n\n'
self.entries.append( str )
def addTexture(self, texture, col):
"""void <- addTexture(texture, col) Add texture to an object.
        texture is a dictionary of texture properties,
        col is used as the pigment if 'pigment' is not in texture
"""
str = ' texture {\n'
        if 'pigment' not in texture.keys():
str = str + ' pigment { color rgb %s }\n' %self.vec3( col )
elif texture['pigment']=='':
str = str + ' pigment { color rgb %s }\n' %self.vec3( col )
else:
str = str + ' pigment { %s }\n' % texture['pigment']
for k,v in texture.items():
if k=='pigment': continue
if v is None:
str = str + ' %s\n' % (k,)
else:
str = str + ' %s { %s }\n' % (k,v)
str = str + ' }\n'
self.entries.append( str )
def endShape(self):
self.entries.append( '}\n' );
def addGeoms(self, geometries,
texture = {'finish':'specular 1 roughness 0.001 ambient 0.3'},
bondRad = 0.15):
"""void <- addGeoms(geometries, texture) adds a list of geometries to
the povray scene
only visible geometries are added
"""
for g in geometries:
if g.visible and len(g.vertexSet):
self.entries.append('// geometry %s\n//\n'%g.name)
self.addGeom(g, texture, bondRad)
def addGeom(self, geom,
texture = {'finish':'specular 1 roughness 0.001 ambient 0.3'},
bondRad = 0.15):
"""void <- addGeom(geometry, texture) adds a geometries to the povray
scene
Spheres, IndexedPolygons and IndexedPolylines are handled
"""
from DejaVu2.Spheres import Spheres
from DejaVu2.IndexedPolygons import IndexedPolygons
from DejaVu2.IndexedPolylines import IndexedPolylines
from DejaVu2.Cylinders import Cylinders
if isinstance(geom, Spheres):
self.entries.append('// Object %s\n//\n' % geom.name)
self.addSpheres(geom, texture)
elif isinstance(geom, IndexedPolygons) and len(geom.faceSet):
self.entries.append('// Object %s\n//\n' % geom.name)
self.addIndexedPolgygons(geom, texture)
elif isinstance(geom, IndexedPolylines) and len(geom.faceSet):
self.entries.append('// Object %s\n//\n' % geom.name)
self.addIndexedPolylines(geom, texture, bondRad)
elif isinstance(geom, Cylinders) and len(geom.faceSet):
self.entries.append('// Object %s\n//\n' % geom.name)
self.addCylinders(geom, texture)
else:
            print 'WARNING: geometry of class %s is not yet supported' % geom.__class__
def addIndexedPolylines(self, geom, texture, bondRad):
"""void <- addIndexedPolylines(geom, texture)
"""
mat = geom.GetMatrix()
v = geom.vertexSet.vertices*mat
c = geom.materials[GL.GL_FRONT].prop[1]
lines = geom.faceSet.faces.array
for i in xrange(len(v)):
if len(c)==len(v): col = c[i]
else: col = c[0]
self.entries.append('sphere{%s,%f\n' % (self.coord3(v[i]),
bondRad) )
self.addTexture( texture, col )
self.endShape()
for j in xrange(len(lines)):
l = lines[j]
if len(c) == len(v):
col1 = c[l[0]]
col2 = c[l[1]]
else: col1 = col2 = c[0]
if Numeric.sum(col1-col2) < 0.0001:
p2 = v[l[1]]
oneCyl = 1
else:
p2 = (v[l[1]]+v[l[0]])/2.
oneCyl = 0
self.entries.append('cylinder{%s,%s, %f open\n' % \
(self.coord3(v[l[0]]),
self.coord3(p2),
bondRad) )
self.addTexture( texture, col1 )
self.endShape()
if not oneCyl:
self.entries.append('cylinder{%s, %s, %f open\n' % \
(self.coord3(p2),
self.coord3(v[l[1]]),
bondRad))
self.addTexture( texture, col2 )
self.endShape()
def addIndexedPolgygons(self, geom, texture):
"""void <- addIndexedPolgygons(geom, texture)
"""
mat = geom.GetMatrix()
vt = geom.vertexSet.vertices*mat
# FIXME need to add instance
# FIXME need to handle flat shading
# add vertices
lines = ["mesh2 {\n\tvertex_vectors {\n\t\t%d,\n"%len(vt)]
for v in vt:
lines.append("\t\t%s,\n"%self.coord3(v))
lines.append("\t}\n")
# add normals
normals = geom.getVNormals()
lines.append("\tnormal_vectors {\n\t\t%d,\n"%len(normals))
for n in normals:
lines.append("\t\t%s,\n"%(self.coord3(n)))
lines.append("\t}\n")
# handle colors
colors = geom.materials[GL.GL_FRONT].prop[1]
colBinding = 'Overall'
faces = geom.getFaces()
if len(colors)==len(vt):
colBinding = 'Per Vertex'
elif len(colors)==len(faces):
colBinding = 'Per Face'
print len(colors), len(faces), len(vt), colBinding
if colBinding!='Overall':
lines.append("\ttexture_list {\n\t\t%d,\n"%len(colors))
for c in colors:
lines.append(
"\t\ttexture { pigment { color rgb<%6.3f, %6.3f, %6.3f> }\n"% tuple(c[:3]))
lines.append("\t\t\tfinish { specular 1 roughness 0.001 ambient 0.3 } }\n")
lines.append("\t}\n")
# add faces
lines.append("\tface_indices {\n\t\t%d,\n"%len(faces))
faceNumberLine = len(lines)-1
nbfaces = 0
if colBinding=='Overall':
for t in faces:
for ti in range(len(t)-2):
lines.append("\t\t<%d,%d,%d>\n"%(t[0], t[ti+1], t[ti+2]))
nbfaces += 1
elif colBinding=='Per Face':
for fn,t in enumerate(faces):
for ti in range(len(t)-2):
lines.append("\t\t<%d,%d,%d>,%d\n"%(
t[0], t[ti+1], t[ti+2], fn))
nbfaces += 1
elif colBinding=='Per Vertex':
for t in faces:
for ti in range(len(t)-2):
lines.append("\t\t<%d,%d,%d>,%d,%d,%d\n"%(
t[0], t[ti+1], t[ti+2], t[0], t[ti+1], t[ti+2]))
nbfaces += 1
lines.append("\t}\n")
lines[faceNumberLine] = "\tface_indices {\n\t\t%d,\n"%nbfaces
if colBinding=='Overall':
lines.append("\tpigment { color rgb<%6.3f, %6.3f, %6.3f> }\n"% tuple(colors[0][:3]))
lines.append("\t\tfinish { specular 1 roughness 0.001 ambient 0.3 }\n")
lines.append("}\n")
self.entries.extend(lines)
## mat = geom.GetMatrix()
## v = geom.vertexSet.vertices*mat
## #v = geom.vertexSet.vertices.array
## c = geom.materials[GL.GL_FRONT].prop[1]
## n = geom.normals
## tri = geom.faceSet.faces.array
## fn = TriangleNormals( v, tri, 'PER_FACE')
## colBinding = 0 #Overall
## if not color:
## if len(c)==len(v): colBinding = 1 # Per Vertex
## elif len(c)==len(tri): colBinding = 2 # Per Face
## else:
## col = c[0] # Overall
## else:
## col = | |
from __future__ import print_function, division, absolute_import
from llvm.core import Type, Constant
import llvm.core as lc
import llvm.ee as le
from llvm import LLVMException
from numba.config import PYVERSION
import numba.ctypes_support as ctypes
from numba import types, utils, cgutils, _helperlib, assume
_PyNone = ctypes.c_ssize_t(id(None))
class NativeError(RuntimeError):
pass
@utils.runonce
def fix_python_api():
"""
Execute once to install special symbols into the LLVM symbol table
"""
le.dylib_add_symbol("Py_None", ctypes.addressof(_PyNone))
le.dylib_add_symbol("numba_native_error", id(NativeError))
# Add C helper functions
c_helpers = _helperlib.c_helpers
for py_name in c_helpers:
c_name = "numba_" + py_name
c_address = c_helpers[py_name]
le.dylib_add_symbol(c_name, c_address)
# Add all built-in exception classes
for obj in utils.builtins.__dict__.values():
if isinstance(obj, type) and issubclass(obj, BaseException):
le.dylib_add_symbol("PyExc_%s" % (obj.__name__), id(obj))
class PythonAPI(object):
"""
Code generation facilities to call into the CPython C API (and related
helpers).
"""
def __init__(self, context, builder):
"""
Note: Maybe called multiple times when lowering a function
"""
fix_python_api()
self.context = context
self.builder = builder
self.module = builder.basic_block.function.module
# Initialize types
self.pyobj = self.context.get_argument_type(types.pyobject)
self.voidptr = Type.pointer(Type.int(8))
self.long = Type.int(ctypes.sizeof(ctypes.c_long) * 8)
self.ulonglong = Type.int(ctypes.sizeof(ctypes.c_ulonglong) * 8)
self.longlong = self.ulonglong
self.double = Type.double()
self.py_ssize_t = self.context.get_value_type(types.intp)
self.cstring = Type.pointer(Type.int(8))
self.gil_state = Type.int(_helperlib.py_gil_state_size * 8)
# ------ Python API -----
#
# Basic object API
#
def incref(self, obj):
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="Py_IncRef")
self.builder.call(fn, [obj])
def decref(self, obj):
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="Py_DecRef")
self.builder.call(fn, [obj])
#
# Argument unpacking
#
def parse_tuple_and_keywords(self, args, kws, fmt, keywords, *objs):
charptr = Type.pointer(Type.int(8))
charptrary = Type.pointer(charptr)
argtypes = [self.pyobj, self.pyobj, charptr, charptrary]
fnty = Type.function(Type.int(), argtypes, var_arg=True)
fn = self._get_function(fnty, name="PyArg_ParseTupleAndKeywords")
return self.builder.call(fn, [args, kws, fmt, keywords] + list(objs))
def parse_tuple(self, args, fmt, *objs):
charptr = Type.pointer(Type.int(8))
argtypes = [self.pyobj, charptr]
fnty = Type.function(Type.int(), argtypes, var_arg=True)
fn = self._get_function(fnty, name="PyArg_ParseTuple")
return self.builder.call(fn, [args, fmt] + list(objs))
#
# Exception handling
#
def err_occurred(self):
fnty = Type.function(self.pyobj, ())
fn = self._get_function(fnty, name="PyErr_Occurred")
return self.builder.call(fn, ())
def err_clear(self):
fnty = Type.function(Type.void(), ())
fn = self._get_function(fnty, name="PyErr_Clear")
return self.builder.call(fn, ())
def err_set_string(self, exctype, msg):
fnty = Type.function(Type.void(), [self.pyobj, self.cstring])
fn = self._get_function(fnty, name="PyErr_SetString")
if isinstance(exctype, str):
exctype = self.get_c_object(exctype)
if isinstance(msg, str):
msg = self.context.insert_const_string(self.module, msg)
return self.builder.call(fn, (exctype, msg))
def err_set_object(self, exctype, excval):
fnty = Type.function(Type.void(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyErr_SetObject")
return self.builder.call(fn, (exctype, excval))
def raise_native_error(self, msg):
cstr = self.context.insert_const_string(self.module, msg)
self.err_set_string(self.native_error_type, cstr)
def raise_exception(self, exctype, excval):
# XXX This produces non-reusable bitcode: the pointer's value
# is specific to this process execution.
exctypeaddr = self.context.get_constant(types.intp, id(exctype))
excvaladdr = self.context.get_constant(types.intp, id(excval))
self.err_set_object(exctypeaddr.inttoptr(self.pyobj),
excvaladdr.inttoptr(self.pyobj))
def get_c_object(self, name):
"""
Get a Python object through its C-accessible *name*.
(e.g. "PyExc_ValueError").
"""
try:
gv = self.module.get_global_variable_named(name)
except LLVMException:
gv = self.module.add_global_variable(self.pyobj.pointee, name)
return gv
@property
def native_error_type(self):
return self.get_c_object("numba_native_error")
def raise_missing_global_error(self, name):
msg = "global name '%s' is not defined" % name
cstr = self.context.insert_const_string(self.module, msg)
self.err_set_string("PyExc_NameError", cstr)
def raise_missing_name_error(self, name):
msg = "name '%s' is not defined" % name
cstr = self.context.insert_const_string(self.module, msg)
self.err_set_string("PyExc_NameError", cstr)
#
# Concrete dict API
#
def dict_getitem_string(self, dic, name):
"""Returns a borrowed reference
"""
fnty = Type.function(self.pyobj, [self.pyobj, self.cstring])
fn = self._get_function(fnty, name="PyDict_GetItemString")
cstr = self.context.insert_const_string(self.module, name)
return self.builder.call(fn, [dic, cstr])
def dict_new(self, presize=0):
if presize == 0:
fnty = Type.function(self.pyobj, ())
fn = self._get_function(fnty, name="PyDict_New")
return self.builder.call(fn, ())
else:
fnty = Type.function(self.pyobj, [self.py_ssize_t])
fn = self._get_function(fnty, name="_PyDict_NewPresized")
return self.builder.call(fn,
[Constant.int(self.py_ssize_t, presize)])
def dict_setitem(self, dictobj, nameobj, valobj):
fnty = Type.function(Type.int(), (self.pyobj, self.pyobj,
self.pyobj))
fn = self._get_function(fnty, name="PyDict_SetItem")
return self.builder.call(fn, (dictobj, nameobj, valobj))
def dict_setitem_string(self, dictobj, name, valobj):
fnty = Type.function(Type.int(), (self.pyobj, self.cstring,
self.pyobj))
fn = self._get_function(fnty, name="PyDict_SetItemString")
cstr = self.context.insert_const_string(self.module, name)
return self.builder.call(fn, (dictobj, cstr, valobj))
def dict_pack(self, keyvalues):
"""
Args
-----
keyvalues: iterable of (str, llvm.Value of PyObject*)
"""
dictobj = self.dict_new()
not_null = cgutils.is_not_null(self.builder, dictobj)
with cgutils.if_likely(self.builder, not_null):
for k, v in keyvalues:
self.dict_setitem_string(dictobj, k, v)
return dictobj
#
# Concrete number APIs
#
def float_from_double(self, fval):
fnty = Type.function(self.pyobj, [self.double])
fn = self._get_function(fnty, name="PyFloat_FromDouble")
return self.builder.call(fn, [fval])
def number_as_ssize_t(self, numobj):
fnty = Type.function(self.py_ssize_t, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_AsSsize_t")
return self.builder.call(fn, [numobj])
def number_long(self, numobj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_Long")
return self.builder.call(fn, [numobj])
def long_as_ulonglong(self, numobj):
fnty = Type.function(self.ulonglong, [self.pyobj])
fn = self._get_function(fnty, name="PyLong_AsUnsignedLongLong")
return self.builder.call(fn, [numobj])
def long_as_longlong(self, numobj):
fnty = Type.function(self.ulonglong, [self.pyobj])
fn = self._get_function(fnty, name="PyLong_AsLongLong")
return self.builder.call(fn, [numobj])
def _long_from_native_int(self, ival, func_name, native_int_type,
signed):
fnty = Type.function(self.pyobj, [native_int_type])
fn = self._get_function(fnty, name=func_name)
resptr = cgutils.alloca_once(self.builder, self.pyobj)
if PYVERSION < (3, 0):
# Under Python 2, we try to return a PyInt object whenever
# the given number fits in a C long.
pyint_fnty = Type.function(self.pyobj, [self.long])
pyint_fn = self._get_function(pyint_fnty, name="PyInt_FromLong")
long_max = Constant.int(native_int_type, _helperlib.long_max)
if signed:
long_min = Constant.int(native_int_type, _helperlib.long_min)
use_pyint = self.builder.and_(
self.builder.icmp(lc.ICMP_SGE, ival, long_min),
self.builder.icmp(lc.ICMP_SLE, ival, long_max),
)
else:
use_pyint = self.builder.icmp(lc.ICMP_ULE, ival, long_max)
with cgutils.ifelse(self.builder, use_pyint) as (then, otherwise):
with then:
downcast_ival = self.builder.trunc(ival, self.long)
res = self.builder.call(pyint_fn, [downcast_ival])
self.builder.store(res, resptr)
with otherwise:
res = self.builder.call(fn, [ival])
self.builder.store(res, resptr)
else:
fn = self._get_function(fnty, name=func_name)
self.builder.store(self.builder.call(fn, [ival]), resptr)
return self.builder.load(resptr)
def long_from_long(self, ival):
if PYVERSION < (3, 0):
func_name = "PyInt_FromLong"
else:
func_name = "PyLong_FromLong"
fnty = Type.function(self.pyobj, [self.long])
fn = self._get_function(fnty, name=func_name)
return self.builder.call(fn, [ival])
def long_from_ssize_t(self, ival):
return self._long_from_native_int(ival, "PyLong_FromSsize_t",
self.py_ssize_t, signed=True)
def long_from_longlong(self, ival):
return self._long_from_native_int(ival, "PyLong_FromLongLong",
self.longlong, signed=True)
def long_from_ulonglong(self, ival):
return self._long_from_native_int(ival, "PyLong_FromUnsignedLongLong",
self.ulonglong, signed=False)
def _get_number_operator(self, name):
fnty = Type.function(self.pyobj, [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyNumber_%s" % name)
return fn
def _call_number_operator(self, name, lhs, rhs, inplace=False):
if inplace:
name = "InPlace" + name
fn = self._get_number_operator(name)
return self.builder.call(fn, [lhs, rhs])
def number_add(self, lhs, rhs, inplace=False):
return self._call_number_operator("Add", lhs, rhs, inplace=inplace)
def number_subtract(self, lhs, rhs, inplace=False):
return self._call_number_operator("Subtract", lhs, rhs, inplace=inplace)
def number_multiply(self, lhs, rhs, inplace=False):
return self._call_number_operator("Multiply", lhs, rhs, inplace=inplace)
def number_divide(self, lhs, rhs, inplace=False):
assert PYVERSION < (3, 0)
return self._call_number_operator("Divide", lhs, rhs, inplace=inplace)
def number_truedivide(self, lhs, rhs, inplace=False):
return self._call_number_operator("TrueDivide", lhs, rhs, inplace=inplace)
def number_floordivide(self, lhs, rhs, inplace=False):
return self._call_number_operator("FloorDivide", lhs, rhs, inplace=inplace)
def number_remainder(self, lhs, rhs, inplace=False):
return self._call_number_operator("Remainder", lhs, rhs, inplace=inplace)
def number_lshift(self, lhs, rhs, inplace=False):
return self._call_number_operator("Lshift", lhs, rhs, inplace=inplace)
def number_rshift(self, lhs, rhs, inplace=False):
return self._call_number_operator("Rshift", lhs, rhs, inplace=inplace)
def number_and(self, lhs, rhs, inplace=False):
return self._call_number_operator("And", lhs, rhs, inplace=inplace)
def number_or(self, lhs, rhs, inplace=False):
return self._call_number_operator("Or", lhs, rhs, inplace=inplace)
def number_xor(self, lhs, rhs, inplace=False):
return self._call_number_operator("Xor", lhs, rhs, inplace=inplace)
def number_power(self, lhs, rhs, inplace=False):
fnty = Type.function(self.pyobj, [self.pyobj] * 3)
fname = "PyNumber_InPlacePower" if inplace else "PyNumber_Power"
fn = self._get_function(fnty, fname)
return self.builder.call(fn, [lhs, rhs, self.borrow_none()])
def number_negative(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_Negative")
return self.builder.call(fn, (obj,))
def number_positive(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_Positive")
return self.builder.call(fn, (obj,))
def number_float(self, val):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_Float")
return self.builder.call(fn, [val])
def number_invert(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_Invert")
return self.builder.call(fn, (obj,))
def float_as_double(self, fobj):
fnty = Type.function(self.double, [self.pyobj])
fn = self._get_function(fnty, name="PyFloat_AsDouble")
return self.builder.call(fn, [fobj])
def bool_from_bool(self, bval):
"""
Get a Python bool from a LLVM boolean.
"""
longval = self.builder.zext(bval, self.long)
return self.bool_from_long(longval)
def bool_from_long(self, ival):
fnty = Type.function(self.pyobj, [self.long])
fn = self._get_function(fnty, name="PyBool_FromLong")
return self.builder.call(fn, [ival])
def complex_from_doubles(self, realval, imagval):
fnty = Type.function(self.pyobj, [Type.double(), Type.double()])
fn = self._get_function(fnty, name="PyComplex_FromDoubles")
return self.builder.call(fn, [realval, imagval])
def complex_real_as_double(self, cobj):
fnty = Type.function(Type.double(), [self.pyobj])
fn = self._get_function(fnty, name="PyComplex_RealAsDouble")
return self.builder.call(fn, [cobj])
def complex_imag_as_double(self, cobj):
fnty = Type.function(Type.double(), [self.pyobj])
fn = self._get_function(fnty, name="PyComplex_ImagAsDouble")
return self.builder.call(fn, [cobj])
#
# List and sequence APIs
#
def sequence_getslice(self, obj, start, stop):
fnty = Type.function(self.pyobj, [self.pyobj, self.py_ssize_t,
self.py_ssize_t])
fn = self._get_function(fnty, name="PySequence_GetSlice")
return self.builder.call(fn, (obj, start, stop))
def sequence_tuple(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PySequence_Tuple")
return self.builder.call(fn, [obj])
def list_new(self, szval):
fnty = Type.function(self.pyobj, [self.py_ssize_t])
fn = self._get_function(fnty, name="PyList_New")
return self.builder.call(fn, [szval])
def list_setitem(self, seq, idx, val):
"""
Warning: Steals reference to ``val``
"""
fnty = Type.function(Type.int(), [self.pyobj, self.py_ssize_t,
self.pyobj])
fn = self._get_function(fnty, name="PyList_SetItem")
return self.builder.call(fn, [seq, idx, val])
def list_getitem(self, lst, idx):
"""
Returns a borrowed reference.
"""
fnty = Type.function(self.pyobj, [self.pyobj, self.py_ssize_t])
fn = | |
4).permute(0, 1, 4, 2, 3).contiguous()
sample_feats = sample_feats.view((n, -1, h, w))
return sample_feats
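    # forward() decodes the box parameters sequentially: classification first,
    # then orientation (theta), then size (wlh), then center offset (xyz).
    # After each regression step, features are re-sampled at locations implied
    # by the partial box estimate (via F.grid_sample) and concatenated into the
    # input of the next step, so later heads are conditioned on earlier ones.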
def forward(self, x):
ret = {}
center_feat = self.center_conv(x)
center_map = self.center_head(center_feat)
corner_feat = self.corner_conv(x)
corner_map = self.corner_head(corner_feat)
fg_feat = self.fg_conv(x)
fg_map = self.fg_head(fg_feat)
fusion_feat = self.fusion_module(
orig_feat=x,
center_feat=torch.cat([center_feat, center_map], dim=1),
corner_feat=torch.cat([corner_feat, corner_map], dim=1),
foreground_feat=torch.cat([fg_feat, fg_map], dim=1)
)
final_feat = torch.cat([x, fusion_feat], dim=1)
ret['center_map'] = center_map
ret['corner_map'] = corner_map
ret['foreground_map'] = fg_map
ret['final_feat'] = final_feat
context = {}
pred_feats = self.cls_conv(final_feat)
pred_logits = self.cls_head(pred_feats)
theta_feat = torch.cat([final_feat, pred_feats], dim=1)
theta_feat = self.theta_conv(theta_feat)
theta = self.theta_head(theta_feat)
theta = torch.clamp(theta, min=-2, max=2)
theta_feat = self.get_theta_sampled_feat(theta, theta_feat, context)
wlh_feat = torch.cat([final_feat, theta_feat], dim=1)
wlh_feat = self.wlh_conv(wlh_feat)
wlh = self.wlh_head(wlh_feat)
wlh = torch.clamp(wlh, min=-2, max=3)
wlh_feat = self.get_wlh_sampled_feat(wlh, wlh_feat, context)
xyz_feat = torch.cat([final_feat, wlh_feat], dim=1)
xyz_feat = self.xyz_conv(xyz_feat)
xyz = self.xyz_head(xyz_feat)
xyz = torch.clamp(xyz, min=-4, max=4)
ret['pred_logits'] = pred_logits
ret['pred_boxes'] = torch.cat([xyz, wlh, theta], dim=1)
return ret
class OneNetSeqFusionHeadCLSSTC(nn.Module):
def __init__(self, in_channels, heads, **kwargs):
super().__init__(**kwargs)
ks = heads['kernel_size']
num_cls = heads['num_classes']
head_ch = heads['head_channels']
init_bias = heads['init_bias']
self.register_buffer('template_box', torch.tensor(heads['template_box']))
self.pc_range = heads['pc_range']
self.voxel_size = heads['voxel_size']
self.register_buffer('xy_offset', heads['offset_grid'])
self.naive = heads.get('naive', False)
# heatmap prediction head
self.center_conv = Sequential(
nn.Conv2d(in_channels, head_ch, kernel_size=3, padding=1, bias=True),
nn.BatchNorm2d(head_ch),
nn.ReLU(inplace=True)
)
self.center_head = nn.Conv2d(head_ch, num_cls, kernel_size=3, stride=1, padding=1, bias=True)
# corner_map prediction head
self.corner_conv = Sequential(
nn.Conv2d(in_channels, head_ch, kernel_size=3, padding=1, bias=True),
nn.BatchNorm2d(head_ch),
nn.ReLU(inplace=True)
)
self.corner_head = nn.Conv2d(head_ch, num_cls * 4, kernel_size=3, stride=1, padding=1, bias=True)
self.fg_conv = Sequential(
nn.Conv2d(in_channels, head_ch, kernel_size=3, padding=1, bias=True),
nn.ReLU(inplace=True),
)
self.fg_head = nn.Conv2d(head_ch, num_cls, kernel_size=3, stride=1, padding=1, bias=True)
self.fusion_module = DCNV2Fusion(head_ch, num_cls, in_channels, head_ch, naive=self.naive)
self.cls_conv = nn.Sequential(
nn.Conv2d(in_channels + (1 * head_ch), head_ch, kernel_size=ks, stride=1, padding=ks // 2),
nn.BatchNorm2d(head_ch),
nn.ReLU(inplace=True),
)
self.cls_head = nn.Conv2d(head_ch, num_cls, kernel_size=ks, stride=1, padding=ks // 2)
self.wlh_conv = nn.Sequential(
nn.Conv2d(in_channels + (2 * head_ch), head_ch, kernel_size=ks, stride=1, padding=ks // 2),
nn.BatchNorm2d(head_ch),
nn.ReLU(inplace=True),
)
self.wlh_head = nn.Conv2d(head_ch, 3, kernel_size=ks, stride=1, padding=ks // 2)
self.theta_conv = nn.Sequential(
nn.Conv2d(in_channels + (5 * head_ch), head_ch, kernel_size=ks, stride=1, padding=ks // 2),
nn.BatchNorm2d(head_ch),
nn.ReLU(inplace=True),
)
self.theta_head = nn.Conv2d(head_ch, 2, kernel_size=ks, stride=1, padding=ks // 2)
self.xyz_conv = nn.Sequential(
nn.Conv2d(in_channels + (5 * head_ch), head_ch, kernel_size=ks, stride=1, padding=ks // 2),
nn.BatchNorm2d(head_ch),
nn.ReLU(inplace=True),
)
self.xyz_head = nn.Conv2d(head_ch, 3, kernel_size=ks, stride=1, padding=ks // 2)
self._reset_parameters()
self.cls_head.bias.data.fill_(init_bias)
self.fg_head.bias.data.fill_(init_bias)
self.corner_head.bias.data.fill_(init_bias)
self.center_head.bias.data.fill_(init_bias)
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def to_grid_coord(self, global_x, global_y):
xmin, ymin, _, xmax, ymax, _ = self.pc_range
x_v, y_v, _ = self.voxel_size
xall = xmax - xmin - x_v
yall = ymax - ymin - y_v
grid_x = (global_x - (xmin + (x_v / 2))) / xall * 2 - 1
grid_y = (global_y - (ymin + (y_v / 2))) / yall * 2 - 1
return grid_x.contiguous(), grid_y.contiguous()
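# Note: this maps metric coordinates onto the [-1, 1] range expected by
# F.grid_sample: the centre of the first voxel (xmin + x_v / 2) maps to -1
# and the centre of the last voxel (xmax - x_v / 2) maps to +1, since the
# usable span is (xmax - xmin - x_v).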
def get_center_sampled_feat(self, xy, xy_feat, context):
n, _, h, w = xy.shape
raw_xy = xy[:, :2] + self.xy_offset # n, 2, h, w
grid_x, grid_y = self.to_grid_coord(raw_xy[:, 0], raw_xy[:, 1])
sample_grids = torch.stack([grid_x, grid_y], dim=-1)
sample_feats = F.grid_sample(xy_feat, sample_grids)
return sample_feats
def get_theta_sampled_feat(self, theta, theta_feat, context):
n, _, h, w = theta.shape
wlh = context['wlh']
sint, cost = theta[:, 1, :, :].contiguous(), theta[:, 0, :, :].contiguous()
theta = torch.atan2(sint, cost).view(-1)  # n, h, w -> n*h*w
tmp_xy = self.xy_offset.permute(0, 2, 3, 1).repeat(n, 1, 1, 1).contiguous().view(n * h * w, 2)
context['theta'] = theta
sample_point = box_utils.box_to_surface_bev(wlh, theta, tmp_xy)
sample_point = sample_point.view(n, h, w, 4, 2)
grid_x, grid_y = self.to_grid_coord(sample_point[..., 0], sample_point[..., 1])
sample_grids = torch.stack([grid_x, grid_y], dim=-1).view(n, h, w * 4, 2) # n, 2, 4, h, w
sample_feats = F.grid_sample(theta_feat, sample_grids).view(n, -1, h, w, 4).permute(0, 1, 4, 2, 3).contiguous()
sample_feats = sample_feats.view((n, -1, h, w))
return sample_feats
def get_wlh_sampled_feat(self, wlh, wlh_feat, context):
n, _, h, w = wlh.shape
tmp_xy = self.xy_offset.permute(0, 2, 3, 1).repeat(n, 1, 1, 1).contiguous().view(n * h * w, 2)
wlh = torch.exp(wlh).permute(0, 2, 3, 1).contiguous().view(n * h * w, -1)
context['wlh'] = wlh
# wlh = torch.clamp(wlh, min=0, max=9.4)
sample_point = box_utils.box_to_surface_bev(wlh, torch.zeros_like(wlh[:, 0]), tmp_xy)
sample_point = sample_point.view(n, h, w, 4, 2)
grid_x, grid_y = self.to_grid_coord(sample_point[..., 0], sample_point[..., 1])
sample_grids = torch.stack([grid_x, grid_y], dim=-1).view(n, h, w * 4, 2) # n, 2, 4, h, w
sample_feats = F.grid_sample(wlh_feat, sample_grids).view(n, -1, h, w, 4).permute(0, 1, 4, 2, 3).contiguous()
sample_feats = sample_feats.view((n, -1, h, w))
return sample_feats
def forward(self, x):
ret = {}
center_feat = self.center_conv(x)
center_map = self.center_head(center_feat)
corner_feat = self.corner_conv(x)
corner_map = self.corner_head(corner_feat)
fg_feat = self.fg_conv(x)
fg_map = self.fg_head(fg_feat)
fusion_feat = self.fusion_module(
orig_feat=x,
center_feat=torch.cat([center_feat, center_map], dim=1),
corner_feat=torch.cat([corner_feat, corner_map], dim=1),
foreground_feat=torch.cat([fg_feat, fg_map], dim=1)
)
final_feat = torch.cat([x, fusion_feat], dim=1)
ret['center_map'] = center_map
ret['corner_map'] = corner_map
ret['foreground_map'] = fg_map
ret['final_feat'] = final_feat
context = {}
pred_feats = self.cls_conv(final_feat)
pred_logits = self.cls_head(pred_feats)
wlh_feat = torch.cat([final_feat, pred_feats], dim=1)
wlh_feat = self.wlh_conv(wlh_feat)
wlh = self.wlh_head(wlh_feat)
wlh = torch.clamp(wlh, min=-2, max=3)
wlh_feat = self.get_wlh_sampled_feat(wlh, wlh_feat, context)
theta_feat = torch.cat([final_feat, wlh_feat], dim=1)
theta_feat = self.theta_conv(theta_feat)
theta = self.theta_head(theta_feat)
theta = torch.clamp(theta, min=-2, max=2)
theta_feat = self.get_theta_sampled_feat(theta, theta_feat, context)
xyz_feat = torch.cat([final_feat, theta_feat], dim=1)
xyz_feat = self.xyz_conv(xyz_feat)
xyz = self.xyz_head(xyz_feat)
xyz = torch.clamp(xyz, min=-4, max=4)
ret['pred_logits'] = pred_logits
ret['pred_boxes'] = torch.cat([xyz, wlh, theta], dim=1)
return ret
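# Note on decoding order: OneNetSeqFusionHeadCLSSTC above regresses
# cls -> wlh -> theta -> xyz, resampling surface features after each box
# regression step, while OneNetSeqFusionHeadCLSTCS below predicts theta
# first and samples its surface points around a fixed template_box
# (box_utils.template_to_surface_bev) rather than around predicted sizes.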
class OneNetSeqFusionHeadCLSTCS(nn.Module):
def __init__(self, in_channels, heads, **kwargs):
super().__init__(**kwargs)
ks = heads['kernel_size']
num_cls = heads['num_classes']
head_ch = heads['head_channels']
init_bias = heads['init_bias']
self.register_buffer('template_box', torch.tensor(heads['template_box']))
self.pc_range = heads['pc_range']
self.voxel_size = heads['voxel_size']
self.register_buffer('xy_offset', heads['offset_grid'])
self.naive = heads.get('naive', False)
# heatmap prediction head
self.center_conv = Sequential(
nn.Conv2d(in_channels, head_ch, kernel_size=3, padding=1, bias=True),
nn.BatchNorm2d(head_ch),
nn.ReLU(inplace=True)
)
self.center_head = nn.Conv2d(head_ch, num_cls, kernel_size=3, stride=1, padding=1, bias=True)
# corner_map prediction head
self.corner_conv = Sequential(
nn.Conv2d(in_channels, head_ch, kernel_size=3, padding=1, bias=True),
nn.BatchNorm2d(head_ch),
nn.ReLU(inplace=True)
)
self.corner_head = nn.Conv2d(head_ch, num_cls * 4, kernel_size=3, stride=1, padding=1, bias=True)
self.fg_conv = Sequential(
nn.Conv2d(in_channels, head_ch, kernel_size=3, padding=1, bias=True),
nn.ReLU(inplace=True),
)
self.fg_head = nn.Conv2d(head_ch, num_cls, kernel_size=3, stride=1, padding=1, bias=True)
self.fusion_module = DCNV2Fusion(head_ch, num_cls, in_channels, head_ch, naive=self.naive)
self.theta_conv = nn.Sequential(
nn.Conv2d(in_channels + (2 * head_ch), head_ch, kernel_size=ks, stride=1, padding=ks // 2),
nn.BatchNorm2d(head_ch),
nn.ReLU(inplace=True),
)
self.theta_head = nn.Conv2d(head_ch, 2, kernel_size=ks, stride=1, padding=ks // 2)
self.xyz_conv = nn.Sequential(
nn.Conv2d(in_channels + (5 * head_ch), head_ch, kernel_size=ks, stride=1, padding=ks // 2),
nn.BatchNorm2d(head_ch),
nn.ReLU(inplace=True),
)
self.xyz_head = nn.Conv2d(head_ch, 3, kernel_size=ks, stride=1, padding=ks // 2)
self.wlh_conv = nn.Sequential(
nn.Conv2d(in_channels + (2 * head_ch), head_ch, kernel_size=ks, stride=1, padding=ks // 2),
nn.BatchNorm2d(head_ch),
nn.ReLU(inplace=True),
)
self.wlh_head = nn.Conv2d(head_ch, 3, kernel_size=ks, stride=1, padding=ks // 2)
self.cls_conv = nn.Sequential(
nn.Conv2d(in_channels + (1 * head_ch), head_ch, kernel_size=ks, stride=1, padding=ks // 2),
nn.BatchNorm2d(head_ch),
nn.ReLU(inplace=True),
)
self.cls_head = nn.Conv2d(head_ch, num_cls, kernel_size=ks, stride=1, padding=ks // 2)
self._reset_parameters()
self.cls_head.bias.data.fill_(init_bias)
self.fg_head.bias.data.fill_(init_bias)
self.corner_head.bias.data.fill_(init_bias)
self.center_head.bias.data.fill_(init_bias)
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def to_grid_coord(self, global_x, global_y):
xmin, ymin, _, xmax, ymax, _ = self.pc_range
x_v, y_v, _ = self.voxel_size
xall = xmax - xmin - x_v
yall = ymax - ymin - y_v
grid_x = (global_x - (xmin + (x_v / 2))) / xall * 2 - 1
grid_y = (global_y - (ymin + (y_v / 2))) / yall * 2 - 1
return grid_x.contiguous(), grid_y.contiguous()
def get_center_sampled_feat(self, xy, xy_feat, context):
n, _, h, w = xy.shape
raw_xy = xy[:, :2] + self.xy_offset # n, 2, h, w
grid_x, grid_y = self.to_grid_coord(raw_xy[:, 0], raw_xy[:, 1])
sample_grids = torch.stack([grid_x, grid_y], dim=-1)
sample_feats = F.grid_sample(xy_feat, sample_grids)
context['flatten_xy'] = raw_xy.permute(0, 2, 3, 1).contiguous().view(n * h * w, 2)
return sample_feats
def get_theta_sampled_feat(self, theta, theta_feat, context):
n, _, h, w = theta.shape
sint, cost = theta[:, 1, :, :].contiguous(), theta[:, 0, :, :].contiguous()
theta = torch.atan2(sint, cost).view(-1)  # n, h, w -> n*h*w
tmp_xy = self.xy_offset.permute(0, 2, 3, 1).repeat(n, 1, 1, 1).contiguous().view(n * h * w, 2)
context['theta'] = theta
sample_point = box_utils.template_to_surface_bev(self.template_box, theta, tmp_xy)
sample_point = sample_point.view(n, h, w, 4, 2)
grid_x, grid_y = self.to_grid_coord(sample_point[..., 0], sample_point[..., 1])
sample_grids = torch.stack([grid_x, grid_y], dim=-1).view(n, h, w * 4, 2) # n, 2, 4, h, w
sample_feats = F.grid_sample(theta_feat, sample_grids).view(n, -1, h, w, 4).permute(0, 1, 4, 2, 3).contiguous()
sample_feats = sample_feats.view((n, -1, h, w))
return sample_feats
def get_wlh_sampled_feat(self, wlh, wlh_feat, context):
theta = context['theta']
flatten_xy = context['flatten_xy']
n, _, h, w = wlh.shape
import numpy as np
from calciumflexanalysis import calcium_flex as cal
from collections import defaultdict
import pandas as pd
import matplotlib.pyplot as plt
from datetime import date
import copy
class CaFlexGroup:
"""Class used for the analysis of multiple Calcium Flex well plates.
:param caflexplates: List of caflexplates to combine, generated from CaFlexPlate class
:type caflexplates: list of calciumflexanalysis.calcium_flex.CaFlexPlates
"""
def __init__(self, caflexplates = []):
self.caflexplates = caflexplates
self.grouplist = ['Protein', 'Type', 'Compound', 'Concentration', 'Concentration Units']
self.titles = {}
self.plate_maps = {}
self.data = {'ratio':{}}
inject_list = []
# iterate through each plate and update attributes using predefined caflexanalysis methods
for key, val in enumerate(self.caflexplates):
# titles (nice ref for the user)
self.titles["plate_{}".format(key+1)] = val.title
# update processed data w/ ratios
self.data['ratio']["plate_{}".format(key+1)] = val.processed_data['ratio']
# dictionary of plate maps for each plate
self.plate_maps["plate_{}".format(key+1)] = val.plate_map
# append list with injection times for each plate
inject_list.append(val.inject)
# mean inject across all plates (this might be changed)
self.inject = np.array(inject_list).mean()
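# Typical usage sketch (plate1/plate2 are hypothetical CaFlexPlate objects
# created elsewhere with calcium_flex.CaFlexPlate; the activator string is
# purely illustrative):
#     group = CaFlexGroup(caflexplates=[plate1, plate2])
#     group.baseline_correct()
#     group.get_window('baseline_corrected')
#     group.group_data('baseline_corrected')
#     group.plot_conditions('baseline_corrected', activator='compound X')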
def visualise_plates(self, share_y, export = False, title = "", colormap = 'Dark2_r',
colorby = 'Type', labelby = 'Type', dpi = 200):
"""Returns color-coded and labelled plots of the data collected for each well of each well plate.
:param share_y: 'True' sets y axis the same for all plots
:type share_y: bool
:param export: If 'True' a .png file of the figure is saved, default = False
:type export: bool
:param title: Sets the title of the figure, optional
:type title: str
:param colormap: Sets the colormap for the color-coding, default = 'Dark2_r'
:type colormap: str
:param colorby: Chooses the parameter to color code by, for example 'Type', 'Contents', 'Concentration', 'Compound', 'Protein', 'Concentration Units', default = 'Type'
:type colorby: str
:param labelby: Chooses the parameter to label code by, for example 'Type', 'Contents', 'Concentration', 'Compound', 'Protein', 'Concentration Units', default = 'Type'
:type labelby: str
:param dpi: Size of the figure, default = 200
:type dpi: int
:return: Figure of plotted data for each well of the well plate described in plate_map_file
:rtype: figure
"""
plates = self.caflexplates
for key, val in enumerate(plates):
if title == "":
Title = "Plate {}\n{}".format(key+1, val.title)
else:
Title = "Plate {}\n{}".format(key+1, title)
val.visualise_assay(share_y, export, Title, colormap, colorby, labelby, dpi)
def see_plates(self, title = "", export = False, colormap = 'Paired', colorby = 'Type', labelby = 'Type', dpi = 100):
"""Returns a visual representation of each plate map.
The label and colour for each well can be customised to be a variable, for example 'Compound', 'Protein', 'Concentration', 'Concentration Units', 'Contents' or 'Type'. The size of the plate map used to generate the figure can be either 6, 12, 24, 48, 96 or 384.
:param size: Size of platemap, 6, 12, 24, 48, 96 or 384, default = 96
:type size: int
:param export: If 'True' a .png file of the figure is saved, default = False
:type export: bool
:param title: Sets the title of the figure, optional
:type title: str
:param colormap: Sets the colormap for the color-coding, default = 'Paired'
:type colormap: str
:param colorby: Chooses the parameter to color code by, for example 'Type', 'Contents', 'Concentration', 'Compound', 'Protein', 'Concentration Units', default = 'Type'
:type colorby: str
:param labelby: Chooses the parameter to label code by, for example 'Type', 'Contents', 'Concentration', 'Compound', 'Protein', 'Concentration Units', default = 'Type'
:type labelby: str
:param dpi: Size of the figure, default = 100
:type dpi: int
:return: Visual representation of the plate map.
:rtype: figure
"""
plates = self.caflexplates
for key, val in enumerate(plates):
if title == "":
Title = "Plate {}\n{}".format(key+1, val.title)
else:
Title = "Plate {}\n{}".format(key+1, title)
try:
val.see_plate(Title, export, colormap, colorby, labelby, dpi)
except:
print("Check plate {}".format(key+1))
def baseline_correct(self):
"""Baseline corrects 'ratio' data for each well using the pre-injection time points."""
self.data['baseline_corrected'] = {}
for key, val in enumerate(self.caflexplates):
try:
val.baseline_correct()
print("Plate {}".format(key+1))
self.data['baseline_corrected']["plate_{}".format(key+1)] = val.processed_data['baseline_corrected']
except:
print("Baseline correction for plate {} failed".format(key+1))
def get_window(self, data_type):
"""Finds the lowest overall mean gradient for across the ten time point window post injection for the plates
:param data_type: Data series to calculate plateau, either 'ratio' or 'baseline_corrected'
:type data_type: str
:return: Tuple containing start and end index of plateau window
:rtype: (int, int)
"""
plates = self.caflexplates
gradients = {}
for key, val in enumerate(plates):
g = val.get_gradients(data_type)
# keys for each plate are numbered by key this time - easier for unpacking
gradients[key] = g
# collect gradients for each window in each plate into single dictionary using default dict
windows = defaultdict(list)
for key, val in gradients.items():
for k, v in val.items(): # unpack dictionary of dictionaries
windows[k].append(v) # where multiple plates have the same window, the resulting dict value will be a list of those gradients
# take means of each window
mean_windows = {}
for key, val in windows.items():
mean_windows[key] = np.array(val).mean()
# get minimum gradient index window across all plates and update self.window
self.window = (min(mean_windows, key = mean_windows.get))
# update windows for each plate
for key, val in enumerate(plates):
val.window = self.window
return self.window
def def_window(self, time, data_type):
"""Manually sets each plateau window.
:param time: Time point at start of window
:type time: int
:param data_type: Data to set window on, either 'ratio' or 'baseline_corrected'
:type data_type: str
:return: Tuple containing start and end index of plateau window
:rtype: (int, int)
"""
plates = self.caflexplates
temp = []
for key, val in enumerate(plates):
val.def_window(time, data_type)
temp.append(val.window)
if all(x == temp[0] for x in temp):
self.window = temp[0]
print("all windows equal, self.window updated")
return self.window
else:
raise ValueError("Time points are not equal")
def group_data(self, data_type):
"""Groups data from each plate of desired type (either ratio or baseline_corrected) into single dataframe.
:param data_type: Data to be grouped, either 'ratio' or 'baseline_corrected'
:type data_type: str
:return: Dictionary of dataframes
:rtype: {str:pandas.DataFrame, str:pandas.DataFrame}
"""
plates = self.caflexplates
group_list = self.caflexplates
data_list = [] # collect all data in list, then concatenate dfs, take means for each condition
time_list = [] # same for time (sem not required)
# get data for each plate in plates_list
for key, val in enumerate(plates):
plate_map = val.plate_map
# extract data, combine with the plate's plate map, append data_list
mapped = plate_map.fillna('none').join(val.processed_data[data_type]['data'])
data_list.append(mapped)
# repeat for time:
for key, val in enumerate(plates):
plate_map = val.plate_map
# extract data, combine with the plate's plate map, append data_list
mapped = plate_map.fillna('none').join(val.processed_data[data_type]['time'])
time_list.append(mapped)
# concat data and time - all data for every well now in one df for data and time
all_data = pd.concat(data_list, ignore_index = True)
all_time = pd.concat(time_list, ignore_index = True)
self.data[data_type]['grouped'] = {'data':all_data, 'time': all_time}
print("self.data updated. See self.data[{}]['grouped']".format(data_type))
def plot_conditions(self, data_type, plate_number = True, activator = " ", show_window = False, dpi = 120, title = "", error = False, control = ['control'], cmap = "winter_r", window_color = 'hotpink', proteins = [], compounds = [], marker = 'o', unique_markers = False, marker_list = ["o", "^", "s", "D", "p", "*", "v"], show_control = True):
"""Plots each mean condition versus time, for either each plate or over all plates, for each compound and protein.
If no title is desired, set title to " ".
:param plate_number: If True, plate number is added to each plot title, default = True
:type plate_number: bool
:param data_type: Data to be plotted, either 'ratio' or 'baseline_corrected'
:type data_type: str
:param show_window: If 'True', shows the window from which the plateau for each condition is calculated, default = False
:type show_window: bool
:param dpi: Size of figure, default = 120
:type dpi: int
:param title: Title of plot ADD LIST OF TITLES?
:type title: str
:param error: If True, plots error bars for each mean condition, default = False
not in kwargs.keys():
e.code.CodingError(
msgs=[
f"The yaml_state dict loaded from file_or_text "
f"does has parent_folder set to `..`",
f"This means we do not have access to "
f"parent_folder instance so please supply it "
f"while Folder syncs files/folders inside it.",
f"Note that if you are using from_yaml then also "
f"you can supply the extra kwarg so that "
f"from_dict receives it."
]
)
else:
yaml_state["parent_folder"] = kwargs["parent_folder"]
# noinspection PyArgumentList
return cls(**yaml_state)
def as_dict(
self
) -> t.Dict[str, m.SUPPORTED_HASHABLE_OBJECTS_TYPE]:
# get dict from super
_dict = super().as_dict()
# if uses parent_folder
if self.uses_parent_folder:
# get parent folder
_parent_folder = getattr(self, 'parent_folder')
# if there is parent_folder update it to ..
if _parent_folder == _DOT_DOT:
e.code.CodingError(
msgs=[
f"If loading from yaml on disk make sure that a "
f"Folder is doing that un sync so that parent_folder "
f"is set appropriately before calling __post_init__ on "
f"StorageHashable"
]
)
# modify dict so that the representation is changed on disk
# note that this does not modify self.__dict__ ;)
# we do this only when parent_folder is available
_dict['parent_folder'] = _DOT_DOT
# return
return _dict
def create_pre_runner(self):
# check if already created
if self.is_created:
e.code.NotAllowed(
msgs=[
f"Things related to hashable class {self.__class__} "
f"with name `{self.name}` has already been created ...",
]
)
def create(self) -> t.Any:
e.code.CodingError(
msgs=[
f"There is nothing to create for class {self.__class__}",
F"You might need to override this method if you have "
F"something to create ...",
f"If you override this method make sure you override "
f"corresponding `delete()` too ..."
]
)
# noinspection PyUnusedLocal
def create_post_runner(
self, *, hooked_method_return_value: t.Any
):
# ----------------------------------------------------------- 01
# The below call will create state manager files on the disk
# check if .info and .config file exists i.e. state exists
if self.config.is_available:
e.code.CodingError(
msgs=[
f"Looks like you have updated config before this parent "
f"create_post_runner was called.",
f"Try to make updates to config after the config is "
f"created the parent create_post_runner by calling sync()"
]
)
if self.info.is_available:
e.code.CodingError(
msgs=[
f"looks like info file for this StorageHashable is "
f"already present",
f"As files were just created we expect that this state "
f"file should not be present ..."
]
)
# redundant
_ = self.is_created
# ----------------------------------------------------------- 02
# sync to disk ... note that from here on state files will be on the
# disc and the child methods that will call super can take over and
# modify state files like config
self.info.sync()
self.config.sync()
# ----------------------------------------------------------- 03
# also sync the created on ... note that config can auto sync on
# update to its fields
self.config.created_on = datetime.datetime.now()
# ----------------------------------------------------------- 04
# check if property updated
if not self.is_created:
e.code.NotAllowed(
msgs=[
f"Did you forget to update appropriately the things in "
f"`create()` method of {self.__class__}",
f"Property `self.is_created` should return `True` as "
f"things are now created."
]
)
# noinspection PyUnusedLocal
def delete_pre_runner(self, *, force: bool = False):
# check if already created
if not self.is_created:
e.code.NotAllowed(
msgs=[
f"Things related to hashable class {self.__class__} are "
f"not created ..."
]
)
def delete(self, *, force: bool = False) -> t.Any:
e.code.CodingError(
msgs=[
f"There is nothing to delete for class {self.__class__}",
F"You might need to override this method if you have "
F"something to delete ...",
f"You only `delete()` if you create something in `create()`"
]
)
# noinspection PyUnusedLocal
def delete_post_runner(
self, *, hooked_method_return_value: t.Any
):
# delete state files as they were created along with the
# files for this StorageHashable in create_post_runner
self.info.delete()
self.config.delete()
# also delete the empty path folder
if util.io_is_dir_empty(self.path):
self.path.rmdir()
else:
e.code.CodingError(
msgs=[
f"All the files inside folder should be deleted by now ...",
f"Expected path dir to be empty",
f"Check path {self.path}"
]
)
# check if property updated
if self.is_created:
e.code.NotAllowed(
msgs=[
f"Did you forget to update appropriately the things in "
f"`delete()` method of {self.__class__}",
f"Property `self.is_created` should return `False` as "
f"things are now deleted."
]
)
# if parent_folder is there try to remove item from the tracking dict
# items
if self.uses_parent_folder:
# get parent folder
_parent_folder = getattr(self, 'parent_folder')
# if parent folder can track then delete items that it has tracked
if _parent_folder.contains is not None:
# just do sanity check if we are having same item
if id(self) != id(_parent_folder.items[self.name]):
e.code.CodingError(
msgs=[
f"We expect these objects to be same ... "
f"make sure to add item using "
f"parent_folder.add_item() method for integrity"
]
)
# in init() we added self by calling
# self.parent_folder.add_item(self) ... now we just remove the
# item from tracking dict items so that parent folder is in sync
del _parent_folder.items[self.name]
# now we have removed strong reference to self in parent_folder.items
# dict ... let us make this instance useless as files are deleted
# hence we want to make sure any other references will fail to use
# this instance ...
# To achieve this we just clear out the internal __dict__
if not settings.FileHash.DEBUG_HASHABLE_STATE:
self.__dict__.clear()
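# Lifecycle summary: create_post_runner syncs the .info/.config state files
# and stamps config.created_on; delete_post_runner mirrors it by deleting
# those state files, removing the (now empty) path directory, dropping the
# entry from parent_folder.items, and finally clearing __dict__ so stale
# references cannot keep operating on a deleted StorageHashable.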
@dataclasses.dataclass(frozen=True)
class Folder(StorageHashable):
"""
A folder for hashable instance like Dataset or Model.
Name of the folder:
The name of folder is the name of the hashable it represents. The
dataclass field `for_hashable` signifies the uniqueness of the folder
while the `parent_folder` field of the super class does not affect uniqueness
as the folder represented by this class is saved under it ;)
Deviation from `HashableClass.name` behaviour:
You might be thinking why not have the folder hex_hash as the folder name.
That sounds fine. But naming the folder after the hashable's name lets
external utilities pick up folders in the future just by knowing the
hashable, with the path provided only once.
Also, parent_folder is required only to get parent folder info; we can
get away with just knowing the path.
Note that for FileGroup the parent_folder is considered since it has
even more fields; here we ignore parent_folder so that for_hashable
decides the folder name
We do not allow adding fields in subclasses:
So that *.info files are not polluted with more info, we do not
allow fields to be added to the Folder class while subclassing.
In case you want more info please use the *.config file via the
Folder.config property
The contains property:
Indicates what will be stored in this Folder
When parent_folder is None override path
This behaviour is borrowed from super class and well suits the
requirement for Folder class
Made up of three things
+ <hash>.info
- when loaded gives out Folder object with hashable instance object
+ <hash>.config
- the access info
+ <hash> folder
- A folder inside which you can have folder's or file_group's
"""
for_hashable: t.Union[str, m.HashableClass]
@property
def name(self) -> str:
"""
Do not override.
NOTE this also happens to be name of the folder
Note that for Folder the uniqueness is completely decided by
self.for_hashable field.
If self.for_hashable is str then the user is not using hashable and
simply wants to create folder with some specific name
We use self.for_hashable.name as name of the folder. Remember that
name is to be unique across hashables. By default the name returns
hex_hash, but when you override it to return a string the user needs to
take care that it is unique for each instance of that class.
Note that FileGroup considers parent_folder while creating name but
here we ignore it as there are no extra fields we will define here. Also
we want for_hashable to dictate things in Folder, like the name of
folder created on disk.
"""
# the name is dictated by for_hashable as we will not allow any
# fields in Folder (check validation)
# This is unlike FileGroup where all fields decide the name ... this is
# because in FileGroup we intend to have more fields
# Repository: inkmonk/flask-sqlalchemy-plus
# from sqlalchemy import func
from __future__ import absolute_import
from toolspy import subdict, remove_and_mark_duplicate_dicts, merge
from sqlalchemy.ext.associationproxy import AssociationProxyInstance
from sqlalchemy.ext.orderinglist import OrderingList
from sqlalchemy.orm import class_mapper
from sqlalchemy.sql.schema import UniqueConstraint
import six
from six.moves import range
from ..utils import cast_as_column_type
class QueryableMixin(object):
"""Contains all querying methods. Used for common ORM operations
Attributes:
_no_overwrite_(list): The list of attributes that should not be overwritten.
"""
_no_overwrite_ = []
_prevent_primary_key_initialization_ = True
_prevent_primary_key_updation_ = True
_fields_forbidden_from_being_set_ = None
allow_updation_based_on_unique_keys = False
@classmethod
def is_a_to_many_rel(cls, attr):
return attr in cls.__mapper__.relationships and cls.__mapper__.relationships[attr].uselist
@classmethod
def is_a_to_one_rel(cls, attr):
return attr in cls.__mapper__.relationships and not cls.__mapper__.relationships[attr].uselist
@classmethod
def columns(cls):
return [c for c in class_mapper(cls).columns]
@classmethod
def unique_columns(cls):
return [c for c in cls.columns() if c.unique]
@classmethod
def unique_column_names(cls):
return [c.key for c in cls.unique_columns()]
@classmethod
def table(cls):
return cls.__table__
@classmethod
def constraints(cls):
return cls.table().constraints
@classmethod
def unique_constraints(cls):
return [
c for c in cls.constraints()
if isinstance(c, UniqueConstraint)]
@classmethod
def unique_constraint_col_name_tuples(cls):
return [c.columns.keys() for c in cls.unique_constraints()]
@classmethod
def primary_key_name(cls):
return cls.__mapper__.primary_key[0].key
@classmethod
def primary_key(cls):
return getattr(cls, cls.primary_key_name())
def primary_key_value(self):
return getattr(self, self.primary_key().name)
@classmethod
def column_names(cls):
return list(cls.__mapper__.columns.keys())
@classmethod
def is_the_primary_key(cls, attr):
return attr == cls.primary_key_name()
@classmethod
def mapped_rel_class(cls, attr):
mapped_rel = next(
r for r in cls.__mapper__.relationships
if r.key == attr)
return mapped_rel.mapper.class_
def update_without_commit(self, **kwargs):
cls = type(self)
kwargs = cls.pre_save_adapter(kwargs, existing_instance=self)
kwargs = self._prepare_data_for_saving(kwargs)
for key, value in six.iteritems(kwargs):
if key not in cls.all_settable_keys():
continue
if not hasattr(cls, key) or isinstance(getattr(cls, key), property):
continue
if key not in self._no_overwrite_:
try:
setattr(self, key, value)
except Exception as e:
print(key, value, str(e))
raise
if isinstance(getattr(self, key), OrderingList):
getattr(self, key).reorder()
elif isinstance(getattr(cls, key), AssociationProxyInstance):
target_name = getattr(cls, key).target_collection
target_rel = getattr(self, target_name)
if isinstance(target_rel, OrderingList):
target_rel.reorder()
return self
def commit(self):
"""Commits a transaction.
"""
self.session.commit()
def save(self):
"""Saves a model instance to db.
Examples:
>>> customer = Customer.new(name="hari")
>>> customer.save()
"""
self.session.add(self)
self.session.commit()
def delete(self, commit=True):
"""Deletes a model instance.
Examples:
>>> customer.delete()
"""
self.session.delete(self)
if commit:
self.session.commit()
def _isinstance(self, model, raise_error=True):
"""Checks if the specified model instance matches the class model.
By default this method will raise a `ValueError` if the model is not of
expected type.
Args:
model (Model) : The instance to be type checked
raise_error (bool) : Flag to specify whether to raise error on
type check failure
Raises:
ValueError: If `model` is not an instance of the respective Model
class
"""
rv = isinstance(model, self.__model__)
if not rv and raise_error:
raise ValueError('%s is not of type %s' % (model, self.__model__))
return rv
@classmethod
def rollback_session(cls):
cls.session.rollback()
@classmethod
def _prepare_data_for_saving(cls, kwargs):
"""Returns a preprocessed dictionary of parameters.
Use this to filter the kwargs passed to `new`, `create`,
`build` methods.
Args:
**kwargs: a dictionary of parameters
"""
# kwargs.pop('csrf_token', None)
attrs_to_delete = []
for attr, val in kwargs.items():
if cls.is_the_primary_key(attr) and cls._prevent_primary_key_initialization_:
attrs_to_delete.append(attr)
# del kwargs[attr]
continue
if val == "":
# Making an assumption that there is no good usecase
# for setting an empty string. This will help prevent
# cases where empty string is sent because of client
# not clearing form fields to null
kwargs[attr] = None
continue
if attr in class_mapper(cls).relationships and attr not in cls._no_overwrite_:
rel = class_mapper(cls).relationships[attr]
if rel.uselist:
if isinstance(val, list):
if all(isinstance(v, dict) for v in val):
rel_cls = cls.mapped_rel_class(attr)
kwargs[attr] = rel_cls.update_or_new_all(
list_of_kwargs=val,
keys=None if rel_cls.allow_updation_based_on_unique_keys else [
rel_cls.primary_key_name()]
)
elif isinstance(val, dict):
rel_cls = cls.mapped_rel_class(attr)
mapping_col = rel.collection_class().keyfunc.name
list_of_kwargs = [
merge(v, {mapping_col: k}) for k, v in val.items()]
kwargs[attr] = {
getattr(obj, mapping_col): obj
for obj in rel_cls.update_or_new_all(
list_of_kwargs=list_of_kwargs,
keys=None if rel_cls.allow_updation_based_on_unique_keys else [
rel_cls.primary_key_name()]
)}
elif isinstance(val, dict):
rel_cls = cls.mapped_rel_class(attr)
if not rel_cls.allow_updation_based_on_unique_keys:
val['keys'] = [rel_cls.primary_key_name()]
kwargs[attr] = rel_cls.update_or_new(**val)
for attr in attrs_to_delete:
del kwargs[attr]
return kwargs
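# Illustrative call (model and field names here are hypothetical, not part
# of this module): nested payloads are resolved through the relationship
# handling above, so a dict on a to-one relationship goes through
# update_or_new and a list of dicts on a to-many relationship goes through
# update_or_new_all:
#     Order.new(customer={'name': 'A'}, items=[{'sku': 'X1'}, {'sku': 'X2'}])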
@classmethod
def pre_save_adapter(cls, data, existing_instance=None):
return data
def clone(self, dict_struct=None, commit=True):
def remove_primary_keys_from_dict_struct(klass, ds):
pk = klass.primary_key_name()
if "attrs" not in ds:
ds['attrs'] = list(class_mapper(klass).columns.keys())
if 'attrs' in ds and pk in ds['attrs']:
ds['attrs'].remove(pk)
if 'rels' in ds:
for rel_name in ds['rels']:
mapped_rel = next(
r for r in class_mapper(klass).relationships
if r.key == rel_name)
rel_class = mapped_rel.mapper.class_
ds['rels'][rel_name] = remove_primary_keys_from_dict_struct(
rel_class, ds['rels'][rel_name])
return ds
cls = self.__class__
if dict_struct is None:
dict_struct = {}
dict_struct = remove_primary_keys_from_dict_struct(cls, dict_struct)
return cls.add(
cls.new(**self.todict(dict_struct=dict_struct)), commit=commit)
def update(self, **kwargs):
"""Updates an instance.
Args:
**kwargs : Arbitrary keyword arguments. Column names are
keywords and their new values are the values.
Examples:
>>> customer.update(email="<EMAIL>", name="new")
"""
cls = type(self)
kwargs = cls.pre_save_adapter(kwargs, existing_instance=self)
kwargs = self._prepare_data_for_saving(kwargs)
for key, value in six.iteritems(kwargs):
if not hasattr(cls, key) or isinstance(getattr(cls, key), property):
continue
if key not in self._no_overwrite_:
try:
setattr(self, key, value)
except Exception as e:
print(key, value, str(e))
raise
if isinstance(getattr(self, key), OrderingList):
getattr(self, key).reorder()
elif isinstance(getattr(cls, key), AssociationProxyInstance):
target_name = getattr(cls, key).target_collection
target_rel = getattr(self, target_name)
if isinstance(target_rel, OrderingList):
target_rel.reorder()
try:
self.session.commit()
return self
except Exception as e:
self.session.rollback()
raise e
@classmethod
def filter_by(cls, **kwargs):
"""Same as SQLAlchemy's filter_by. Additionally this accepts
two special keyword arguments `limit` and `reverse` for limiting
the results and reversing the order respectively.
Args:
**kwargs: filter parameters
Examples:
>>> user = User.filter_by(email="<EMAIL>")
>>> shipments = Shipment.filter_by(country="India", limit=3, reverse=True)
"""
limit = kwargs.pop('limit', None)
reverse = kwargs.pop('reverse', False)
q = cls.query.filter_by(**kwargs)
if reverse:
q = q.order_by(cls.id.desc())
if limit:
q = q.limit(limit)
return q
@classmethod
def filter(cls, *criterion, **kwargs):
"""Same as SQLAlchemy's filter. Additionally this accepts
two special keyword arguments `limit` and `reverse` for limiting
the results and reversing the order respectively.
Args:
**kwargs: filter parameters
Examples:
>>> user = User.filter(User.email=="<EMAIL>")
>>> shipments = Order.filter(Order.price < 500, limit=3, reverse=True)
"""
limit = kwargs.pop('limit', None)
reverse = kwargs.pop('reverse', False)
q = cls.query.filter_by(**kwargs).filter(*criterion)
if reverse:
q = q.order_by(cls.id.desc())
if limit:
q = q.limit(limit)
return q
@classmethod
def count(cls, *criterion, **kwargs):
"""Returns a count of the instances meeting the specified
filter criterion and kwargs.
Examples:
>>> User.count()
500
>>> User.count(country="India")
300
>>> User.count(User.age > 50, country="India")
39
"""
if criterion or kwargs:
return cls.filter(
*criterion,
**kwargs).count()
else:
return cls.query.count()
@classmethod
def all(cls, *criterion, **kwargs):
"""Returns all the instances which fulfil the filtering
criterion and kwargs if any given.
Examples:
>>> Tshirt.all()
[tee1, tee2, tee4, tee5]
>>> Tshirt.all(reverse=True, limit=3)
[tee5, tee4, tee2]
>>> Tshirt.all(color="Red")
[tee4, tee2]
"""
return cls.filter(*criterion, **kwargs).all()
@classmethod
def first(cls, *criterion, **kwargs):
"""Returns the first instance found of the model class
filtered by the specified criterion and/or key word arguments.
Return None if no result found.
Examples:
>>> will = User.first(name="Will")
"""
return cls.filter(*criterion, **kwargs).first()
@classmethod
def one(cls, *criterion, **kwargs):
"""Similar to `first`. But throws an exception if
no result is found.
Examples:
>>> user = User.one(name="here")
Raises:
NoResultFound: No row was found for one()
"""
return cls.filter(*criterion, **kwargs).one()
@classmethod
def last(cls, *criterion, **kwargs):
"""Returns the last instance matching the criterion and/or
keyword arguments.
Examples:
last_male_user = User.last(gender="male")
"""
kwargs['reverse'] = True
return cls.first(*criterion, **kwargs)
@classmethod
def new(cls, **kwargs):
"""Returns a new, unsaved instance of the model class.
"""
kwargs = cls.pre_save_adapter(kwargs)
if cls.__mapper__.polymorphic_on is not None:
discriminator_key = cls.__mapper__.polymorphic_on.name
discriminator_val = kwargs.get(discriminator_key)
if discriminator_val is not None and discriminator_val in cls.__mapper__.polymorphic_map:
actual_cls = cls.__mapper__.polymorphic_map[discriminator_val].class_
return actual_cls(
**subdict(
actual_cls._prepare_data_for_saving(kwargs),
actual_cls.all_settable_keys())
)
return cls(**subdict(cls._prepare_data_for_saving(kwargs), cls.all_settable_keys()))
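# Sketch of the polymorphic branch above (class names are hypothetical): if
# Vehicle is a polymorphic base whose discriminator column maps 'car' to a
# Car subclass, Vehicle.new(type='car', wheels=4) instantiates Car rather
# than Vehicle, because the discriminator value is looked up in
# __mapper__.polymorphic_map before construction.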
@classmethod
def add(cls, model, commit=True):
"""Adds a model instance to session and commits the
transaction.
Args:
model: The instance to add.
Examples:
>>> customer = Customer.new(name="hari", email="<EMAIL>")
>>> Customer.add(customer)
<EMAIL>
"""
if not isinstance(model, cls):
raise ValueError('%s is not of type %s' % (model, cls))
cls.session.add(model)
try:
if commit:
cls.session.commit()
return model
except:
cls.session.rollback()
raise
@classmethod
def add_all(cls, models, commit=True, check_type=False):
"""Batch method for adding a list of model instances
to the db in one go.
Args:
models (list): A list of the instances to add.
commit (bool, optional): Defaults to True. If False, the
transaction won't get committed.
check_type (bool, optional) : If True, each instance
is type checked and exception is thrown if it is
not an instance of the model. By default, False.
Returns:
list: A list of the added model instances.
# check the standard deviation of the transformed feature
# we set ddof to 1 so that np.std gives the same result as pandas .std
# we also set the tolerance limit to account for cases where std
# is computed as a very low decimal rather than 0
# We only do this for the training set.
if exclude_zero_sd:
feature_sd = np.std(transformed_feature, ddof=1)
if np.isclose(feature_sd, 0, atol=1e-07):
raise ValueError("The standard deviation for "
"feature {} is 0 after pre-processing. "
"Please exclude this feature and re-run "
"the experiment.".format(feature_name))
return transformed_feature
def preprocess_features(self,
df_train,
df_test,
df_feature_specs,
standardize_features=True,
use_truncations=False):
"""
Pre-process those features in the given training and testing
data frames `df_train` and `df_test` whose specifications are
contained in `df_feature_specs`. Also return a third data frame
containing the per-feature preprocessing information.
Parameters
----------
df_train : pandas DataFrame
Data frame containing the raw feature values
for the training set.
df_test : pandas DataFrame
Data frame containing the raw feature values
for the test set.
df_feature_specs : pandas DataFrame
Data frame containing the various specifications
from the feature file.
standardize_features : bool, optional
Whether to standardize the features
Defaults to True.
use_truncations : bool, optional
Whether we should use the truncation set
for removing outliers.
Defaults to False.
Returns
-------
df_train_preprocessed : pd.DataFrame
DataFrame with preprocessed training data
df_test_preprocessed : pd.DataFrame
DataFrame with preprocessed test data
df_feature_info : pd.DataFrame
DataFrame with feature information
"""
# keep the original data frames and make copies
# that only include features used in the model
df_train_preprocessed = df_train.copy()
df_test_preprocessed = df_test.copy()
# we also need to create a data frame that includes
# all relevant information about each feature
df_feature_info = pd.DataFrame()
# make feature the index of df_feature_specs
df_feature_specs.index = df_feature_specs['feature']
# if we should be using truncations, then we create the truncations
# set from the feature specifications
if use_truncations:
truncations = df_feature_specs[['feature', 'min', 'max']].set_index('feature')
else:
truncations = None
# now iterate over each feature
for feature_name in df_feature_specs['feature']:
feature_transformation = df_feature_specs.at[feature_name, 'transform']
feature_sign = df_feature_specs.at[feature_name, 'sign']
train_feature_mean = df_train[feature_name].mean()
train_feature_sd = df_train[feature_name].std()
training_feature_values = df_train[feature_name].values
df_train_preprocessed[feature_name] = self.preprocess_feature(training_feature_values,
feature_name,
feature_transformation,
train_feature_mean,
train_feature_sd,
exclude_zero_sd=True,
truncations=truncations)
testing_feature_values = df_test[feature_name].values
df_test_preprocessed[feature_name] = self.preprocess_feature(testing_feature_values,
feature_name,
feature_transformation,
train_feature_mean,
train_feature_sd,
truncations=truncations)
# Standardize the features using the mean and sd computed on the
# training set. These are computed separately because we need to
# get the mean of transformed feature before standardization.
train_transformed_mean = df_train_preprocessed[feature_name].mean()
train_transformed_sd = df_train_preprocessed[feature_name].std()
if standardize_features:
df_train_without_mean = (df_train_preprocessed[feature_name] -
train_transformed_mean)
df_train_preprocessed[feature_name] = df_train_without_mean / train_transformed_sd
df_test_without_mean = (df_test_preprocessed[feature_name] -
train_transformed_mean)
df_test_preprocessed[feature_name] = df_test_without_mean / train_transformed_sd
# Multiply both train and test feature by sign.
df_train_preprocessed[feature_name] = (df_train_preprocessed[feature_name] *
feature_sign)
df_test_preprocessed[feature_name] = (df_test_preprocessed[feature_name] *
feature_sign)
# update the feature preprocessing metadata frame
df_feature = pd.DataFrame([{"feature": feature_name,
"transform": feature_transformation,
"sign": feature_sign,
"train_mean": train_feature_mean,
"train_sd": train_feature_sd,
"train_transformed_mean": train_transformed_mean,
"train_transformed_sd": train_transformed_sd}])
df_feature_info = df_feature_info.append(df_feature)
# reset the index for the feature metadata frame
# since we built it up row by row
df_feature_info = df_feature_info.reset_index().drop('index', 1)
# return the three data frames
return (df_train_preprocessed,
df_test_preprocessed,
df_feature_info)
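# Net effect per feature column (sketch of the code path above, with
# standardize_features=True):
#     processed = sign * (transform(raw) - train_transformed_mean) / train_transformed_sd
# where the transform, mean and sd always come from the training split, so
# the test set is scaled with training statistics rather than its own.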
def filter_data(self,
df,
label_column,
id_column,
length_column,
second_human_score_column,
candidate_column,
requested_feature_names,
reserved_column_names,
given_trim_min,
given_trim_max,
flag_column_dict,
subgroups,
exclude_zero_scores=True,
exclude_zero_sd=False,
feature_subset_specs=None,
feature_subset=None,
min_candidate_items=None,
use_fake_labels=False):
"""
Filter the data to remove rows that have zero/non-numeric values
for `label_column`. If feature_names are specified, check whether any
features that are specifically requested in `feature_names`
are missing from the data. If no feature_names are specified,
these are generated based on column names and subset information,
if available. The function then excludes non-numeric values for
any feature. If the user requested to exclude candidates with fewer
than min_candidate_items responses, such candidates are excluded.
It also generates fake labels between 1 and 10 if
`use_fake_labels` is set to True. Finally, it renames the id
and label column and splits the data into the data frame with
feature values and score label, the data frame with information about
subgroup and candidate (metadata) and the data frame with all other
columns.
Parameters
----------
df : pd.DataFrame
The DataFrame to filter.
label_column : str
The label column in the data.
id_column : str
The ID column in the data.
length_column : str
The length column in the data.
second_human_score_column : str
The second human score column in the data.
candidate_column : str
The candidate column in the data.
requested_feature_names : list
A list of requested feature names.
reserved_column_names : list
A list of reserved column names.
given_trim_min : float
The minimum trim value.
given_trim_max : float
The maximum trim value.
flag_column_dict : dict
A dictionary of flag columns.
subgroups : list, optional
A list of subgroups, if any.
exclude_zero_scores : bool
Whether to exclude zero scores.
Defaults to True.
exclude_zero_sd : bool, optional
Whether to exclude zero standard deviation.
Defaults to False.
feature_subset_specs : pd.DataFrame, optional
The feature_subset_specs DataFrame
Defaults to None.
feature_subset : str, optional
The feature subset group (e.g. 'A').
Defaults to None.
min_candidate_items : int, optional
The minimum number of items needed to include candidate.
Defaults to None
use_fake_labels : bool, optional
Whether to use fake labels.
Defaults to False.
Returns
-------
df_filtered_features : pd.DataFrame
DataFrame with filtered features
df_filtered_metadata : pd.DataFrame
DataFrame with filtered metadata
df_filtered_other_columns : pd.DataFrame
DataFrame with other columns filtered
df_excluded : pd.DataFrame
DataFrame with excluded records
df_filtered_length : pd.DataFrame
DataFrame with length column(s) filtered
df_filtered_human_scores : pd.DataFrame
DataFrame with human scores filtered
df_responses_with_excluded_flags : pd.DataFrame
A DataFrame containing responses with excluded flags
trim_min : float
The minimum trim value
trim_max : float
The maximum trim value
feature_names : list
A list of feature names
"""
# make sure that the columns specified in the
# config file actually exist
columns_to_check = [id_column, label_column]
if length_column:
columns_to_check.append(length_column)
if second_human_score_column:
columns_to_check.append(second_human_score_column)
if candidate_column:
columns_to_check.append(candidate_column)
missing_columns = set(columns_to_check).difference(df.columns)
if missing_columns:
raise KeyError("Columns {} from the config file "
"do not exist in the data.".format(missing_columns))
# it is possible for the `id_column` and `candidate_column` to be
# set to the same column name in the CSV file, e.g., if there is
# only one response per candidate. If this happens, we need to
# create a duplicate column for candidates or id for the downstream
# processing to work as usual.
if id_column == candidate_column:
# if the name for both columns is `candidate`, we need to
# create a separate id_column name
if id_column == 'candidate':
df['spkitemid'] = df['candidate'].copy()
id_column = 'spkitemid'
# else we create a separate `candidate` column
else:
df['candidate'] = df[id_column].copy()
candidate_column = 'candidate'
df = self.rename_default_columns(df,
requested_feature_names,
id_column,
label_column,
second_human_score_column,
length_column,
None,
candidate_column)
# check that the id_column contains unique values
if df['spkitemid'].size != df['spkitemid'].unique().size:
raise ValueError("The data contains duplicate response IDs in "
"'{}'. Please make sure all response IDs are "
"unique and re-run the tool.".format(id_column))
# Generate feature names if no specific features were requested by the user
if len(requested_feature_names) == 0:
feature_names = self.generate_feature_names(df,
reserved_column_names,
feature_subset_specs=feature_subset_specs,
feature_subset=feature_subset)
else:
feature_names = requested_feature_names
# make sure that feature names do not contain reserved column names
illegal_feature_names = set(feature_names).intersection(reserved_column_names)
if illegal_feature_names:
raise ValueError("The following reserved "
"column names cannot be "
"used as feature names: '{}'. "
"Please rename these columns "
"and re-run the "
"experiment.".format(', '.join(illegal_feature_names)))
# check to make sure that the subgroup columns are all present
df = FeaturePreprocessor.check_subgroups(df, subgroups)
# filter out the responses based on flag columns
(df_responses_with_requested_flags,
df_responses_with_excluded_flags) = self.filter_on_flag_columns(df, flag_column_dict)
# filter out the rows that have non-numeric or zero labels
# unless we are going to generate fake labels in the first place
if not use_fake_labels:
(df_filtered,
df_excluded) = self.filter_on_column(df_responses_with_requested_flags,
'sc1',
'spkitemid',
exclude_zeros=exclude_zero_scores)
# make sure that the remaining data frame is not empty
if len(df_filtered) == 0:
raise ValueError("No responses remaining after filtering out "
"non-numeric human scores. No further analysis "
"can be run. ")
trim_min = given_trim_min if given_trim_min else df_filtered['sc1'].min()
trim_max = given_trim_max if given_trim_max else df_filtered['sc1'].max()
import time
import random
import glob
import argparse
import logging
import sys
import os
from PIL import Image
import uuid
import copy
import numpy as np
import carla_utils
import pickle
import dynamic_weather as weather
import datetime
import csv
try:
import queue
except ImportError:
import Queue as queue
# import ipdb
# st = ipdb.set_trace
try:
sys.path.append(glob.glob('/hdd/carla97/PythonAPI/carla/dist/carla-*%d.%d-%s.egg' % (
sys.version_info.major,
sys.version_info.minor,
'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
except IndexError:
pass
import carla
import carla_utils
from carla_utils import ClientSideBoundingBoxes
import image_converter
from carla import ColorConverter as cc
from carla_sync_mode import CarlaSyncMode
def save_npy(data, filename):
np.save(filename, data)
# Built on carla_two_unique.py
# can be used to spawn 2-3 vehicles
'''
camR ranges: 8-14, -5,5, 1-3
'''
class CarlaMultiviewRunner():
def __init__(self, start_episode, end_episode, mod, save_dir):
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)
self.base_path = os.path.join(save_dir, "surveil_{}".format(mod))
if not os.path.exists(self.base_path):
os.mkdir(self.base_path)
self.randomize_each_camera = True
self.start_frame = 0
self.start_episode = start_episode
self.end_episode = end_episode
self.frames_per_episode = 1
self.num_vehicles = 2
self.today = str(datetime.date.today())
self.episode_path_format = '{:s}_episode_{:0>4d}_vehicles_{:0>3d}'
self.out_filename_format = '{:s}_episode_{:0>4d}_vehicles_{:0>3d}/{:s}/{:0>6d}'
self.nearest_x = 2.0
self.dis_to_center = 15.0#5.0
self.height = 0.65
self.d_height = 1.0
# Camera specific params
self.image_size_x = 768
self.image_size_y = 256
self.fov = 90
self.focal = self.image_size_x/(2.0*np.tan(self.fov*np.pi/360.0))
self.fps = 10
self.half_sqrt2 = np.sqrt(2)/2
self.host = "127.0.0.1"
self.port = 2000
self.filterv = "vehicle.*"
self.sensor_types = ['sensor.camera.rgb', 'sensor.camera.depth', 'sensor.camera.semantic_segmentation']
self.sensor_names_format = ['CameraRGB{}', 'CameraDepth{}', 'CameraSemantic{}']
self.category = {'vehicle': 0}
self.calculate_camera_locations()
self.visualize = True
self.visual_dir = 'visual'
if self.visualize and not os.path.exists(self.visual_dir):
os.mkdir(self.visual_dir)
def calculate_camera_locations(self):
self.center = np.array([self.nearest_x+self.dis_to_center, 0.0, self.height])
self.positions = []
self.rotations = []
#hori_rs = [self.dis_to_center, self.dis_to_center, self.dis_to_center]
verti_hs = [self.height+self.d_height, self.height, self.height+2*self.d_height]
pitches = [-0.08/np.pi*180 for i in range(3)]
#for i in [0]: #range(len(hori_rs)):
#hori_r = hori_rs[0]
h = 1.65 # verti_hs[1]
p_angle = pitches[0]
#theta = 15.0
# for angle in np.arange(5.0, 355.0, 20.0):#np.arange(0.0, 359.0, 30.0):
# a_rad = angle/180.0*np.pi
# scale = 1.
# trans = np.zeros(3)
# #pos = self.center + scale*np.array([-hori_r*np.cos(a_rad), hori_r*np.sin(a_rad), h]) + trans
# pos = scale*np.array([-hori_r*np.cos(a_rad), hori_r*np.sin(a_rad), h]) + trans
# self.positions.append(pos)
# self.rotations.append([p_angle, -angle, 0])
# specify angles to rotate around
bin_angle_size = 20.0
num_views_per_bin = 2
angles = np.arange(0, 360.0, bin_angle_size)
angles = np.tile(angles, num_views_per_bin)
#hori_rs = [10.0, 15.0, 20.0]
# pick radii for each angle
hori_rs = np.random.uniform(low=3.0, high=15.0, size=angles.shape[0])
ind = 0
for angle in angles: #np.arange(0.0, 359.0, 30.0):
a_rad = angle/180.0*np.pi
scale = 1.
trans = np.zeros(3)
hori_r = hori_rs[ind]
#pos = self.center + scale*np.array([-hori_r*np.cos(a_rad), hori_r*np.sin(a_rad), h]) + trans
pos = scale*np.array([-hori_r*np.cos(a_rad), hori_r*np.sin(a_rad), h]) + trans
self.positions.append(pos)
self.rotations.append([p_angle, -angle, 0])
ind += 1
self.K = np.identity(3)
self.K[0, 2] = self.image_size_x / 2.0
self.K[1, 2] = self.image_size_y / 2.0
# use -focal to be the same as the previous version
self.K[0, 0] = self.K[1, 1] = self.image_size_x / \
(2.0 * np.tan(self.fov * np.pi / 360.0))
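# With the defaults above (image_size_x = 768, image_size_y = 256, fov = 90)
# the focal length works out to 768 / (2 * tan(45 deg)) = 384 pixels, giving
# K = [[384, 0, 384], [0, 384, 128], [0, 0, 1]].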
def destroy_actors(self):
print('Destroying actors.')
for actor in self.vehicles_list:
if actor.is_alive:
actor.destroy()
for actor in self.sensors_list:
if actor.is_alive:
actor.destroy()
print("Destroyed all actors")
def run_carla_client(self):
# print(self.positions)
# print(self.rotations)
# print(self.start_episode, self.end_episode)
client = carla.Client(self.host, self.port)
client.set_timeout(20.0)
try:
self.available_maps = client.get_available_maps()
print('loaded available maps')
logging.info('listening to server %s:%s', self.host, self.port)
num_batches = 2
for episode in range(self.start_episode, self.end_episode):
print("Starting episode number %d" % episode)
uuid_run = str(uuid.uuid1())
cur_map = random.choice(self.available_maps)
print("About to load the map %s" %cur_map)
map_name = cur_map.split('/')[-1]
episode_path = os.path.join(self.base_path, self.episode_path_format.format(map_name, episode, self.num_vehicles))
if not os.path.exists(episode_path):
os.mkdir(episode_path)
self.world = client.load_world(cur_map)
self.world.tick()
# Initialize the actor lists
self.vehicles_list = []
# self.sensors_list = []
# self.sensors_name = []
self.vehicle_dir_paths = []
self.actors = []
# self.position_list = []
# self.rotation_list = []
self.camR_sensor_indices = []
self.vehicle_bbox_list = []
self.vehicle_extrinsics = []
self.vehicle_names = []
world_T_agents_f = open(os.path.join(episode_path, 'world_T_agent.txt'), 'w')
world_T_agents_f.write('roll,pitch,yaw\n')
fcsv = open(os.path.join(episode_path, 'bboxes.csv'), 'w')
csv_writer = csv.writer(fcsv)
csv_writer.writerow(['episode', 'frame', 'obj_id', 'category','x','y','z','l','w','h','r','x1','y1','x2','y2','depth', 'occluded'])
# Get all the blueprints
blueprints = self.world.get_blueprint_library().filter(self.filterv)
# remove bikes
cars = [x for x in blueprints if int(x.get_attribute('number_of_wheels')) == 4]
blueprints = cars
#for blueprint in blueprints:
#print(blueprint)
# for blueprint in blueprints:
# print(blueprint.id)
# for attr in blueprint:
# print(' - {}'.format(attr))
weather = carla.WeatherParameters(
cloudiness=np.random.randint(0, 70),
precipitation=np.random.randint(0, 75),
sun_altitude_angle=np.random.randint(30, 90))
self.world.set_weather(weather)
self.world.tick()
# Get all the spawn points
spawn_points = self.world.get_map().get_spawn_points()
random.shuffle(spawn_points)
self.spawn_vehicles(blueprints, episode_path, spawn_points)
batches = list(np.floor(np.linspace(0, len(self.positions), num_batches)).astype(int))
print("BATCH INDICES: ", batches)
self.idx = 0 # for naming
self.positions_ = [] # actual rotations of agents
self.rotations_ = [] # actual rotations of agents
self.first_batch = True
for batch in range(len(batches)-1):
print("BATCH #", batches[batch], " to ", batches[batch+1])
self.sensors_list = []
self.sensors_name = []
self.position_list = []
self.rotation_list = []
# if batch == len(batches)-2:
# positions_batch = self.positions[batches[batch]:]
# rotations_batch = self.rotations[batches[batch]:]
# else:
positions_batch = self.positions[batches[batch]:batches[batch+1]]
rotations_batch = self.rotations[batches[batch]:batches[batch+1]]
print(positions_batch)
print(rotations_batch)
# print("SPAWN POINT: ", spawn_points[0])
self.spawn_sensors(positions_batch, rotations_batch, spawn_points[0], episode_path) # set the position relative to the first spawn point
print("Done with actor creation")
print("Total number of sensors are: ", len(self.sensors_list))
self.world.tick()
last_saved_frame = 0
# Create a synchronous mode context.
with CarlaSyncMode(self.world, self.sensors_list, fps=self.fps) as sync_mode:
cnt = 0
for v in self.vehicles_list:
# print("Bounding box for this vehicle is: ", v.bounding_box.location, v.bounding_box.extent)
bbox_loc, bbox_ext = v.bounding_box.location, v.bounding_box.extent
bbox = [bbox_loc.x - bbox_ext.x, bbox_loc.y - bbox_ext.y, bbox_loc.z - bbox_ext.z, bbox_loc.x + bbox_ext.x, bbox_loc.y + bbox_ext.y, bbox_loc.z + bbox_ext.z]
self.vehicle_bbox_list.append(bbox)
print("bbox coords are: ", bbox)
# v.set_autopilot(False)
v.set_simulate_physics(False)
print("VEHICLES: ", self.vehicles_list)
# print("All vehicles put to autopilot")
# set weather
while True:
print(cnt)
if cnt == self.frames_per_episode + self.start_frame:
print("Done with episode %d." %episode)
time.sleep(5)
break
# self.world.tick()
# print("Getting the data")
# Advance the simulation and wait for the data.
data, frame = sync_mode.tick(timeout=12.0)
# print('Got the data :))')
if self.first_batch:
frame_a = frame
#frame += batches[batch]
print("FRAME ", frame_a)
if cnt >= self.start_frame:
data = data[1:] # Remove the world tick datapoint
valid = self.save_data(data, episode, frame_a, episode_path, world_T_agents_f, csv_writer)
if not valid:
print("throwing out view ", cnt, " due to depth")
cnt += 1
for actor in self.sensors_list:
if actor.is_alive:
# print("ALIVE! ", actor)
actor.destroy()
self.first_batch = False
# these are the translations relative to the first spawn point
# print("ALL POSITIONS", self.positions_)
# print("LENGTH ", len(self.positions_))
# print("ALL ROTATIONS", self.rotations_)
# print("LENGTH ", len(self.rotations_))
save_npy(np.array(self.positions_), os.path.join(episode_path, 'all_cam_positions'))
save_npy(np.array(self.rotations_), os.path.join(episode_path, 'all_cam_rotations'))
except Exception as e: print(e)
finally:
#self.world.destroy()
# for actor in self.world.get_actors().filter("vehicle*"):
# if actor.is_alive:
# #print("2", actor)
# #sys.stdout.write('2')
# actor.destroy()
# for actor in self.sensors_list:
# if actor.is_alive:
# #print("5", actor)
# #sys.stdout.write('5')
# actor.destroy()
client.apply_batch([carla.command.DestroyActor(x) for x in self.world.get_actors().filter("vehicle*")])
client.apply_batch([carla.command.DestroyActor(x) for x in self.sensors_list])
# self.destroy_actors()
def save_data(self, data, episode, framenum, episode_path, world_T_agents_f, csv_writer):
# Check if too many occlusions (e.g. in a wall)
# for idx, name in enumerate(self.sensors_name):
# if "Depth" in name:
# data_instance = data[idx]
# processed_data = image_converter.depth_in_meters(data_instance)
# valid = self.check_depth(processed_data)
# if valid == 0:
# return False
# Save data
#print("SENSOR LENGTH:", len(self.sensors_name))
#print("SENSOR NAMES:", self.sensors_name)
for idx, name in enumerate(self.sensors_name):
camera_path = os.path.join(episode_path, name)
if not os.path.exists(camera_path):
os.mkdir(camera_path)
if not os.path.exists(os.path.join(episode_path, 'boxes')):
os.mkdir(os.path.join(episode_path, 'boxes'))
data_instance = data[idx]
if "RGB" in name:
processed_data = image_converter.to_rgb_array(data_instance)
elif "Depth" in name:
processed_data = image_converter.depth_in_meters(data_instance)
#valid = self.get_valid_view(processed_data)
# print("DEPTH")
# print(processed_data.shape)
#print(processed_data)
elif "Semantic" in name:
# TODO: handle R channel properly here.
print("SEMANTIC")
# print(data_instance.shape)
# print(data_instance)
#print(np.unique(data_instance, axis=0))
processed_data = image_converter.to_rgb_array(data_instance)
print(processed_data.shape)
print(np.unique(processed_data[:,:,0]))
# data_instance.convert(cc.CityScapesPalette)
# #data_instance.save_to_disk(os.path.join(self.visual_dir, 'img_e{:0>4d}_{:s}_f{:0>4d}.png'.format(0, name, framenum)),carla.ColorConverter.CityScapesPalette)
# #processed_data = image_converter.to_rgb_array(data_instance)
# print(processed_data.shape)
# print(np.unique(processed_data[:,:,0]))
# get segmentation
sensor = self.sensors_list[idx]
corners_cam, corners_cam_unproject, w2c, v2w = ClientSideBoundingBoxes.get_bounding_boxes(self.vehicles_list, sensor, self.K)
c2w = np.linalg.inv(w2c)
v2w = np.concatenate(v2w, axis=0)
masks = []
for obj_ind in range(len(corners_cam)):
# get 2D bounding box
xmin = int(np.min(corners_cam[obj_ind][:,0]))
xmax = int(np.max(corners_cam[obj_ind][:,0]))
ymin = int(np.min(corners_cam[obj_ind][:,1]))
ymax = int(np.max(corners_cam[obj_ind][:,1]))
# Clamp the 2D box to the image bounds, each edge independently
if ymin < 0:
ymin = 0
if ymax > self.image_size_y:
ymax = self.image_size_y
if xmin < 0:
xmin = 0
if xmax > self.image_size_x:
xmax = self.image_size_x
veh_id = 10 # vehicle segmentation ID for semantic sensor
# get segmentation mask
img = processed_data.copy()
img2 = processed_data.copy()
img2 = img2[ymin:ymax, xmin:xmax, :]
#print(np.unique(img2[:,:,0]))
img2[img2!=veh_id] = 0.0
#print(np.sum(img2[:,:,:]==10))
img2[img2[:,:,0]==veh_id, :] = 1.0
#print(np.sum(img2==255))
img[:,:,:] = 0.0
img[ymin:ymax, xmin:xmax, :] = img2
# Status: being ported by Steven Watanabe
# Base revision: 47077
# TODO: common.jam needs to be ported
# TODO: generators.jam needs to have register_c_compiler.
#
# Copyright 2001 <NAME>.
# Copyright 2002-2006 <NAME>.
# Copyright 2002-2003 <NAME>.
# Copyright (c) 2005 <NAME>.
# Copyright 2006 <NAME>.
# Copyright 2007 <NAME>
# Copyright 2007 <NAME>.
# Copyright 2008 <NAME>
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import os
import subprocess
import re
import bjam
from b2.tools import unix, common, rc, pch, builtin
from b2.build import feature, type, toolset, generators, property_set
from b2.build.property import Property
from b2.util.utility import os_name, on_windows
from b2.manager import get_manager
from b2.build.generators import Generator
from b2.build.toolset import flags
from b2.util.utility import to_seq
__debug = None
def debug():
global __debug
if __debug is None:
__debug = "--debug-configuration" in bjam.variable("ARGV")
return __debug
feature.extend('toolset', ['gcc'])
toolset.inherit_generators('gcc', [], 'unix', ['unix.link', 'unix.link.dll'])
toolset.inherit_flags('gcc', 'unix')
toolset.inherit_rules('gcc', 'unix')
generators.override('gcc.prebuilt', 'builtin.prebuilt')
generators.override('gcc.searched-lib-generator', 'searched-lib-generator')
# Target naming is determined by types/lib.jam and the settings below this
# comment.
#
# On *nix:
# libxxx.a static library
# libxxx.so shared library
#
# On windows (mingw):
# libxxx.lib static library
# xxx.dll DLL
# xxx.lib import library
#
# On windows (cygwin) i.e. <target-os>cygwin
# libxxx.a static library
# xxx.dll DLL
# libxxx.dll.a import library
#
# Note: user can always override by using the <tag>@rule
# These settings have been chosen so that mingw is in line with msvc naming
# conventions. For cygwin, the cygwin naming convention has been chosen.
# Make the "o" suffix used for gcc toolset on all
# platforms
type.set_generated_target_suffix('OBJ', ['<toolset>gcc'], 'o')
type.set_generated_target_suffix('STATIC_LIB', ['<toolset>gcc', '<target-os>cygwin'], 'a')
type.set_generated_target_suffix('IMPORT_LIB', ['<toolset>gcc', '<target-os>cygwin'], 'dll.a')
type.set_generated_target_prefix('IMPORT_LIB', ['<toolset>gcc', '<target-os>cygwin'], 'lib')
__machine_match = re.compile('^([^ ]+)')
__version_match = re.compile('^([0-9.]+)')
def init(version = None, command = None, options = None):
"""
Initializes the gcc toolset for the given version. If necessary, command may
be used to specify where the compiler is located. The parameter 'options' is a
space-delimited list of options, each one specified as
<option-name>option-value. Valid option names are: cxxflags, linkflags and
linker-type. Accepted linker-type values are gnu, darwin, osf, hpux or sun
and the default value will be selected based on the current OS.
Example:
using gcc : 3.4 : : <cxxflags>foo <linkflags>bar <linker-type>sun ;
"""
options = to_seq(options)
command = to_seq(command)
# Information about the gcc command...
# The command.
command = to_seq(common.get_invocation_command('gcc', 'g++', command))
# The root directory of the tool install.
root = feature.get_values('<root>', options)
# The bin directory where to find the command to execute.
bin = None
# The flavor of compiler.
flavor = feature.get_values('<flavor>', options)
# Autodetect the root and bin dir if not given.
if command:
if not bin:
bin = common.get_absolute_tool_path(command[-1])
if not root:
root = os.path.dirname(bin)
# Autodetect the version and flavor if not given.
if command:
machine_info = subprocess.Popen(command + ['-dumpmachine'], stdout=subprocess.PIPE).communicate()[0]
machine = __machine_match.search(machine_info).group(1)
version_info = subprocess.Popen(command + ['-dumpversion'], stdout=subprocess.PIPE).communicate()[0]
version = __version_match.search(version_info).group(1)
if not flavor and machine.find('mingw') != -1:
flavor = 'mingw'
condition = None
if flavor:
condition = common.check_init_parameters('gcc', None,
('version', version),
('flavor', flavor))
else:
condition = common.check_init_parameters('gcc', None,
('version', version))
if command:
command = command[0]
common.handle_options('gcc', condition, command, options)
linker = feature.get_values('<linker-type>', options)
if not linker:
if os_name() == 'OSF':
linker = 'osf'
elif os_name() == 'HPUX':
linker = 'hpux'
else:
linker = 'gnu'
init_link_flags('gcc', linker, condition)
# If gcc is installed in non-standard location, we'd need to add
# LD_LIBRARY_PATH when running programs created with it (for unit-test/run
# rules).
if command:
# On multilib 64-bit boxes, there are both 32-bit and 64-bit libraries
# and all must be added to LD_LIBRARY_PATH. The linker will pick the
# right ones. Note that we don't provide a clean way to build a 32-bit
# binary with a 64-bit compiler, but the user can always pass -m32 manually.
lib_path = [os.path.join(root, 'bin'),
os.path.join(root, 'lib'),
os.path.join(root, 'lib32'),
os.path.join(root, 'lib64')]
if debug():
print 'notice: using gcc libraries ::', condition, '::', lib_path
toolset.flags('gcc.link', 'RUN_PATH', condition, lib_path)
# If it's not a system gcc install we should adjust the various programs as
# needed to prefer using the install specific versions. This is essential
# for correct use of MinGW and for cross-compiling.
# - The archive builder.
archiver = common.get_invocation_command('gcc',
'ar', feature.get_values('<archiver>', options), [bin], path_last=True)
toolset.flags('gcc.archive', '.AR', condition, [archiver])
if debug():
print 'notice: using gcc archiver ::', condition, '::', archiver
# - Ranlib
ranlib = common.get_invocation_command('gcc',
'ranlib', feature.get_values('<ranlib>', options), [bin], path_last=True)
toolset.flags('gcc.archive', '.RANLIB', condition, [ranlib])
if debug():
print 'notice: using gcc archiver ::', condition, '::', ranlib
# - The resource compiler.
rc_command = common.get_invocation_command_nodefault('gcc',
'windres', feature.get_values('<rc>', options), [bin], path_last=True)
rc_type = feature.get_values('<rc-type>', options)
if not rc_type:
rc_type = 'windres'
if not rc_command:
# If we can't find an RC compiler we fallback to a null RC compiler that
# creates empty object files. This allows the same Jamfiles to work
# across the board. The null RC uses the assembler to create the empty
# objects, so configure that.
rc_command = common.get_invocation_command('gcc', 'as', [], [bin], path_last=True)
rc_type = 'null'
rc.configure(rc_command, condition, '<rc-type>' + rc_type)
###if [ os.name ] = NT
###{
### # This causes single-line command invocation to not go through .bat files,
### # thus avoiding command-line length limitations.
### JAMSHELL = % ;
###}
#FIXME: when register_c_compiler is moved to
# generators, these should be updated
builtin.register_c_compiler('gcc.compile.c++', ['CPP'], ['OBJ'], ['<toolset>gcc'])
builtin.register_c_compiler('gcc.compile.c', ['C'], ['OBJ'], ['<toolset>gcc'])
builtin.register_c_compiler('gcc.compile.asm', ['ASM'], ['OBJ'], ['<toolset>gcc'])
# pch support
# The compiler looks for a precompiled header in each directory just before it
# looks for the include file in that directory. The name searched for is the
# name specified in the #include directive with ".gch" suffix appended. The
# logic in gcc-pch-generator will make sure that BASE_PCH suffix is appended to
# full name of the header.
type.set_generated_target_suffix('PCH', ['<toolset>gcc'], 'gch')
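# Illustrative sketch (an assumption for clarity, not part of the toolset): per the
# behaviour described above, a header included as #include "foo.hpp" is satisfied by a
# precompiled "foo.hpp.gch" found alongside it, hence the 'gch' suffix registered above.
def _expected_gch_name(header_path):
    # e.g. 'boost/config.hpp' -> 'boost/config.hpp.gch'
    return header_path + '.gch'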
# GCC-specific pch generator.
class GccPchGenerator(pch.PchGenerator):
# Inherit the __init__ method
def run_pch(self, project, name, prop_set, sources):
# Find the header in sources. Ignore any CPP sources.
header = None
for s in sources:
if type.is_derived(s.type(), 'H'):
header = s
# Error handling: Base header file name should be the same as the base
# precompiled header name.
header_name = header.name()
header_basename = os.path.basename(header_name).rsplit('.', 1)[0]
if header_basename != name:
location = project.project_module
###FIXME:
raise Exception()
### errors.user-error "in" $(location)": pch target name `"$(name)"' should be the same as the base name of header file `"$(header-name)"'" ;
pch_file = Generator.run(self, project, name, prop_set, [header])
# return result of base class and pch-file property as usage-requirements
# FIXME: what about multiple results from generator.run?
return (property_set.create([Property('pch-file', pch_file[0]),
Property('cflags', '-Winvalid-pch')]),
pch_file)
# Calls the base version specifying source's name as the name of the created
# target. As result, the PCH will be named whatever.hpp.gch, and not
# whatever.gch.
def generated_targets(self, sources, prop_set, project, name = None):
name = sources[0].name()
return Generator.generated_targets(self, sources,
prop_set, project, name)
# Note: the 'H' source type will catch both '.h' header and '.hpp' header. The
# latter have HPP type, but HPP type is derived from H. The type of compilation
# is determined entirely by the destination type.
generators.register(GccPchGenerator('gcc.compile.c.pch', False, ['H'], ['C_PCH'], ['<pch>on', '<toolset>gcc' ]))
generators.register(GccPchGenerator('gcc.compile.c++.pch', False, ['H'], ['CPP_PCH'], ['<pch>on', '<toolset>gcc' ]))
# Override default do-nothing generators.
generators.override('gcc.compile.c.pch', 'pch.default-c-pch-generator')
generators.override('gcc.compile.c++.pch', 'pch.default-cpp-pch-generator')
flags('gcc.compile', 'PCH_FILE', ['<pch>on'], ['<pch-file>'])
# Declare flags and action for compilation
flags('gcc.compile', 'OPTIONS', ['<optimization>off'], ['-O0'])
flags('gcc.compile', 'OPTIONS', ['<optimization>speed'], ['-O3'])
flags('gcc.compile', 'OPTIONS', ['<optimization>space'], ['-Os'])
flags('gcc.compile', 'OPTIONS', ['<inlining>off'], ['-fno-inline'])
flags('gcc.compile', 'OPTIONS', ['<inlining>on'], ['-Wno-inline'])
flags('gcc.compile', 'OPTIONS', ['<inlining>full'], ['-finline-functions', '-Wno-inline'])
flags('gcc.compile', 'OPTIONS', ['<warnings>off'], ['-w'])
flags('gcc.compile', 'OPTIONS', ['<warnings>on'], ['-Wall'])
flags('gcc.compile', 'OPTIONS', ['<warnings>all'], ['-Wall', '-pedantic'])
flags('gcc.compile', 'OPTIONS', ['<warnings-as-errors>on'], ['-Werror'])
flags('gcc.compile', 'OPTIONS', ['<debug-symbols>on'], ['-g'])
flags('gcc.compile', 'OPTIONS', ['<profiling>on'], ['-pg'])
flags('gcc.compile.c++', 'OPTIONS', ['<rtti>off'], ['-fno-rtti'])
flags('gcc.compile.c++', 'OPTIONS', ['<exception-handling>off'], ['-fno-exceptions'])
# On cygwin and mingw, gcc generates position-independent code by default, and
# warns if -fPIC is specified. This might not be the right way of checking if
# we're using cygwin. For example, it's possible to run cygwin gcc from an NT
# shell, or to be cross-compiling. But we'll solve that problem when it's time.
# In that case we'll just add another parameter to 'init' and move this logic
# inside 'init'.
if not os_name () in ['CYGWIN', 'NT']:
# This logic will add -fPIC for all compilations:
#
# lib a : a.cpp b ;
# obj b : b.cpp ;
# exe c : c.cpp a d ;
# obj d : d.cpp ;
#
# This all is fine, except that 'd' will be compiled with -fPIC even though it
# is only ever linked into an executable and so does not actually need it.
""" Data objects in group "Performance Tables"
"""
from collections import OrderedDict
import logging
from pyidf.helper import DataObject
logger = logging.getLogger("pyidf")
logger.addHandler(logging.NullHandler())
class TableOneIndependentVariable(DataObject):
""" Corresponds to IDD object `Table:OneIndependentVariable`
Allows entry of tabular data pairs as alternate input
for performance curve objects.
Performance curve objects can be created using these inputs.
Linear Table Equation: Output = a + bX
Linear solution requires a minimum of 2 data pairs
Quadratic Table Equation: Output = a + b*X + c*X**2
Quadratic solution requires a minimum of 3 data pairs
Cubic Table Equation: Output = a + b*X + c* X**2 + d*X**3
Cubic solution requires a minimum of 4 data pairs
Quartic Table Equation: Output = a + b*X + c* X**2 + d*X**3 + e*X**4
Quartic solution requires a minimum of 5 data pairs
Exponent Table Equation: Output = a + b*X**c
Exponent solution requires a minimum of 4 data pairs
"""
_schema = {'extensible-fields': OrderedDict([(u'x value',
{'name': u'X Value',
'pyname': u'x_value',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'output value',
{'name': u'Output Value',
'pyname': u'output_value',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'})]),
'fields': OrderedDict([(u'name',
{'name': u'Name',
'pyname': u'name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'alpha'}),
(u'curve type',
{'name': u'Curve Type',
'pyname': u'curve_type',
'required-field': True,
'autosizable': False,
'accepted-values': [u'Linear',
u'Quadratic',
u'Cubic',
u'Quartic',
u'Exponent'],
'autocalculatable': False,
'type': 'alpha'}),
(u'interpolation method',
{'name': u'Interpolation Method',
'pyname': u'interpolation_method',
'required-field': False,
'autosizable': False,
'accepted-values': [u'LinearInterpolationOfTable',
u'EvaluateCurveToLimits',
u'LagrangeInterpolationLinearExtrapolation'],
'autocalculatable': False,
'type': 'alpha'}),
(u'minimum value of x',
{'name': u'Minimum Value of X',
'pyname': u'minimum_value_of_x',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'maximum value of x',
{'name': u'Maximum Value of X',
'pyname': u'maximum_value_of_x',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'minimum table output',
{'name': u'Minimum Table Output',
'pyname': u'minimum_table_output',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'maximum table output',
{'name': u'Maximum Table Output',
'pyname': u'maximum_table_output',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'input unit type for x',
{'name': u'Input Unit Type for X',
'pyname': u'input_unit_type_for_x',
'default': u'Dimensionless',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Dimensionless',
u'Temperature',
u'VolumetricFlow',
u'MassFlow',
u'Power',
u'Distance'],
'autocalculatable': False,
'type': 'alpha'}),
(u'output unit type',
{'name': u'Output Unit Type',
'pyname': u'output_unit_type',
'default': u'Dimensionless',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Dimensionless',
u'Capacity',
u'Power'],
'autocalculatable': False,
'type': 'alpha'}),
(u'normalization reference',
{'name': u'Normalization Reference',
'pyname': u'normalization_reference',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'})]),
'format': None,
'group': u'Performance Tables',
'min-fields': 14,
'name': u'Table:OneIndependentVariable',
'pyname': u'TableOneIndependentVariable',
'required-object': False,
'unique-object': False}
@property
def name(self):
"""field `Name`
Args:
value (str): value for IDD Field `Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `name` or None if not set
"""
return self["Name"]
@name.setter
def name(self, value=None):
"""Corresponds to IDD field `Name`"""
self["Name"] = value
@property
def curve_type(self):
"""field `Curve Type`
Args:
value (str): value for IDD Field `Curve Type`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `curve_type` or None if not set
"""
return self["Curve Type"]
@curve_type.setter
def curve_type(self, value=None):
"""Corresponds to IDD field `Curve Type`"""
self["Curve Type"] = value
@property
def interpolation_method(self):
"""field `Interpolation Method`
Args:
value (str): value for IDD Field `Interpolation Method`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `interpolation_method` or None if not set
"""
return self["Interpolation Method"]
@interpolation_method.setter
def interpolation_method(self, value=None):
"""Corresponds to IDD field `Interpolation Method`"""
self["Interpolation Method"] = value
@property
def minimum_value_of_x(self):
"""field `Minimum Value of X`
| used only when Interpolation Type is Evaluate Curve
| to Limits
| Units are based on field `A4`
Args:
value (float): value for IDD Field `Minimum Value of X`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_value_of_x` or None if not set
"""
return self["Minimum Value of X"]
@minimum_value_of_x.setter
def minimum_value_of_x(self, value=None):
"""Corresponds to IDD field `Minimum Value of X`"""
self["Minimum Value of X"] = value
@property
def maximum_value_of_x(self):
"""field `Maximum Value of X`
| used only when Interpolation Type is Evaluate Curve
| to Limits
| Units are based on field `A4`
Args:
value (float): value for IDD Field `Maximum Value of X`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_value_of_x` or None if not set
"""
return self["Maximum Value of X"]
@maximum_value_of_x.setter
def maximum_value_of_x(self, value=None):
"""Corresponds to IDD field `Maximum Value of X`"""
self["Maximum Value of X"] = value
@property
def minimum_table_output(self):
"""field `Minimum Table Output`
| Specify the minimum value calculated by this table
| lookup object
| used only when Interpolation Type is Evaluate Curve
| to Limits
| Units are based on field `A5`
Args:
value (float): value for IDD Field `Minimum Table Output`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `minimum_table_output` or None if not set
"""
return self["Minimum Table Output"]
@minimum_table_output.setter
def minimum_table_output(self, value=None):
"""Corresponds to IDD field `Minimum Table Output`"""
self["Minimum Table Output"] = value
@property
def maximum_table_output(self):
"""field `Maximum Table Output`
| Specify the maximum value calculated by this table
| lookup object
| used only when Interpolation Type is Evaluate Curve
| to Limits
| Units are based on field `A5`
Args:
value (float): value for IDD Field `Maximum Table Output`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `maximum_table_output` or None if not set
"""
return self["Maximum Table Output"]
@maximum_table_output.setter
def maximum_table_output(self, value=None):
"""Corresponds to IDD field `Maximum Table Output`"""
self["Maximum Table Output"] = value
@property
def input_unit_type_for_x(self):
"""field `Input Unit Type for X`
| Default value: Dimensionless
Args:
value (str): value for IDD Field `Input Unit Type for X`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `input_unit_type_for_x` or None if not set
"""
return self["Input Unit Type for X"]
@input_unit_type_for_x.setter
def input_unit_type_for_x(self, value="Dimensionless"):
"""Corresponds to IDD field `Input Unit Type for X`"""
self["Input Unit Type for X"] = value
@property
def output_unit_type(self):
"""field `Output Unit Type`
| Default value: Dimensionless
Args:
value (str): value for IDD Field `Output Unit Type`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `output_unit_type` or None if not set
"""
return self["Output Unit Type"]
@output_unit_type.setter
def output_unit_type(self, value="Dimensionless"):
"""Corresponds to IDD field `Output Unit Type`"""
self["Output Unit Type"] = value
@property
def normalization_reference(self):
"""field `Normalization Reference`
| This field is used to normalize the following output data.
| The minimum and maximum table output fields are also normalized.
| If this field is blank or 1, the table data presented
| in the following fields will be used with normalization
| reference set to 1.
Args:
value (float): value for IDD Field `Normalization Reference`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `normalization_reference` or None if not set
"""
return self["Normalization Reference"]
@normalization_reference.setter
def normalization_reference(self, value=None):
"""Corresponds to IDD field `Normalization Reference`"""
self["Normalization Reference"] = value
def add_extensible(self,
x_value=None,
output_value=None,
):
"""Add values for extensible fields.
Args:
x_value (float): value for IDD Field `X Value`
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
output_value (float): value for IDD Field `Output Value`
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
"""
vals = []
x_value = self.check_value("X Value", x_value)
vals.append(x_value)
output_value = self.check_value("Output Value", output_value)
vals.append(output_value)
self._extdata.append(vals)
@property
def extensibles(self):
"""Get list of all extensibles."""
return self._extdata
@extensibles.setter
def extensibles(self, extensibles):
"""Replaces extensible fields with `extensibles`
Args:
extensibles (list): nested list of extensible values
"""
self._extdata = []
for ext in extensibles:
self.add_extensible(*ext)
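# Minimal usage sketch (illustrative only, not part of the generated module; assumes the
# no-argument DataObject constructor): a quadratic one-variable table built from three
# (x, output) pairs, the minimum the quadratic form Output = a + b*X + c*X**2 requires.
def _example_quadratic_table():
    table = TableOneIndependentVariable()
    table.name = "ExampleQuadraticTable"
    table.curve_type = "Quadratic"
    table.interpolation_method = "EvaluateCurveToLimits"
    table.minimum_value_of_x = 0.0
    table.maximum_value_of_x = 2.0
    # Three data pairs sampled from Output = 1 + 2*X + 3*X**2
    table.add_extensible(x_value=0.0, output_value=1.0)
    table.add_extensible(x_value=1.0, output_value=6.0)
    table.add_extensible(x_value=2.0, output_value=17.0)
    return table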
class TableTwoIndependentVariables(DataObject):
""" Corresponds to IDD object `Table:TwoIndependentVariables`
Allows entry of tabular data pairs as alternate input
for performance curve objects.
Performance curve objects can be created using these inputs.
BiQuadratic Table Equation: Output = a + b*X + c*X**2 + d*Y + e*Y**2 + f*X*Y
self.args[1].value = (self.args[0].value * self.args[0].value) % program.P
@base.gf2n
@base.vectorize
class inverse(base.DataInstruction):
r""" Load secret variables $s_i$, $s_j$ and $s_k$
with the next inverse triple. """
__slots__ = []
code = base.opcodes['INV']
arg_format = ['sw','sw']
data_type = 'inverse'
def execute(self):
self.args[0].value = randint(0,program.P)
import gmpy
self.args[1].value = int(gmpy.invert(self.args[0].value, program.P))
@base.gf2n
@base.vectorize
class inputmask(base.Instruction):
r""" Load secret $s_i$ with the next input mask for player $p$ and
write the mask on player $p$'s private output. """
__slots__ = []
code = base.opcodes['INPUTMASK']
arg_format = ['sw', 'p']
field_type = 'modp'
def add_usage(self, req_node):
req_node.increment((self.field_type, 'input', self.args[1]), \
self.get_size())
@base.gf2n
@base.vectorize
class prep(base.Instruction):
r""" Custom preprocessed data """
__slots__ = []
code = base.opcodes['PREP']
arg_format = tools.chain(['str'], itertools.repeat('sw'))
gf2n_arg_format = tools.chain(['str'], itertools.repeat('sgw'))
field_type = 'modp'
def add_usage(self, req_node):
req_node.increment((self.field_type, self.args[0]), 1)
def has_var_args(self):
return True
###
### I/O
###
@base.gf2n
@base.vectorize
class asm_input(base.IOInstruction):
r""" Receive input from player $p$ and put in register $s_i$. """
__slots__ = []
code = base.opcodes['INPUT']
arg_format = ['sw', 'p']
field_type = 'modp'
def add_usage(self, req_node):
req_node.increment((self.field_type, 'input', self.args[1]), \
self.get_size())
def execute(self):
self.args[0].value = _python_input("Enter player %d's input:" % self.args[1]) % program.P
@base.gf2n
class startinput(base.RawInputInstruction):
r""" Receive inputs from player $p$. """
__slots__ = []
code = base.opcodes['STARTINPUT']
arg_format = ['p', 'int']
field_type = 'modp'
def add_usage(self, req_node):
req_node.increment((self.field_type, 'input', self.args[0]), \
self.args[1])
class stopinput(base.RawInputInstruction):
r""" Receive inputs from player $p$ and put in registers. """
__slots__ = []
code = base.opcodes['STOPINPUT']
arg_format = tools.chain(['p'], itertools.repeat('sw'))
def has_var_args(self):
return True
class gstopinput(base.RawInputInstruction):
r""" Receive inputs from player $p$ and put in registers. """
__slots__ = []
code = 0x100 + base.opcodes['STOPINPUT']
arg_format = tools.chain(['p'], itertools.repeat('sgw'))
def has_var_args(self):
return True
@base.gf2n
@base.vectorize
class print_mem(base.IOInstruction):
r""" Print value in clear memory \verb|C[ci]| to stdout. """
__slots__ = []
code = base.opcodes['PRINTMEM']
arg_format = ['c']
def execute(self):
pass
@base.gf2n
@base.vectorize
class print_reg(base.IOInstruction):
r""" Print value of register \verb|ci| to stdout and optional 4-char comment. """
__slots__ = []
code = base.opcodes['PRINTREG']
arg_format = ['c','i']
def __init__(self, reg, comment=''):
super(print_reg_class, self).__init__(reg, self.str_to_int(comment))
def execute(self):
pass
@base.gf2n
@base.vectorize
class print_reg_plain(base.IOInstruction):
r""" Print only the value of register \verb|ci| to stdout. """
__slots__ = []
code = base.opcodes['PRINTREGPLAIN']
arg_format = ['c']
# <EMAIL>
@base.vectorize
class e_print_fixed_plain(base.IOInstruction):
r""" Print only the fixed value of register \verb|ci| to stdout. """
__slots__ = []
code = base.opcodes['E_PRINTFIXEDPLAIN']
arg_format = ['c', 'int']
@base.vectorize
class print_float_plain(base.IOInstruction):
__slots__ = []
code = base.opcodes['PRINTFLOATPLAIN']
arg_format = ['c', 'c', 'c', 'c']
class print_int(base.IOInstruction):
r""" Print only the value of register \verb|ci| to stdout. """
__slots__ = []
code = base.opcodes['PRINTINT']
arg_format = ['ci']
class print_char(base.IOInstruction):
r""" Print a single character to stdout. """
code = base.opcodes['PRINTCHR']
arg_format = ['int']
def __init__(self, ch):
super(print_char, self).__init__(ord(ch))
class print_char4(base.IOInstruction):
r""" Print a 4 character string. """
code = base.opcodes['PRINTSTR']
arg_format = ['int']
def __init__(self, val):
super(print_char4, self).__init__(self.str_to_int(val))
@base.vectorize
class print_char_regint(base.IOInstruction):
r""" Print register $ci_i$ as a single character to stdout. """
code = base.opcodes['PRINTCHRINT']
arg_format = ['ci']
@base.vectorize
class print_char4_regint(base.IOInstruction):
r""" Print register $ci_i$ as a four character string to stdout. """
code = base.opcodes['PRINTSTRINT']
arg_format = ['ci']
@base.vectorize
class pubinput(base.PublicFileIOInstruction):
__slots__ = []
code = base.opcodes['PUBINPUT']
arg_format = ['ciw']
@base.vectorize
class readsocketc(base.IOInstruction):
"""Read a variable number of clear GF(p) values from socket for a specified client id and store in registers"""
__slots__ = []
code = base.opcodes['READSOCKETC']
arg_format = tools.chain(['ci'], itertools.repeat('cw'))
def has_var_args(self):
return True
@base.vectorize
class readsockets(base.IOInstruction):
"""Read a variable number of secret shares + MACs from socket for a client id and store in registers"""
__slots__ = []
code = base.opcodes['READSOCKETS']
arg_format = tools.chain(['ci'], itertools.repeat('sw'))
def has_var_args(self):
return True
@base.vectorize
class readsocketint(base.IOInstruction):
"""Read variable number of 32-bit int from socket for a client id and store in registers"""
__slots__ = []
code = base.opcodes['READSOCKETINT']
arg_format = tools.chain(['ci'], itertools.repeat('ciw'))
def has_var_args(self):
return True
@base.vectorize
class writesocketc(base.IOInstruction):
"""
Write a variable number of clear GF(p) values from registers into socket
for a specified client id, message_type
"""
__slots__ = []
code = base.opcodes['WRITESOCKETC']
arg_format = tools.chain(['ci', 'int'], itertools.repeat('c'))
def has_var_args(self):
return True
@base.vectorize
class writesockets(base.IOInstruction):
"""
Write a variable number of secret shares + MACs from registers into a socket
for a specified client id, message_type
"""
__slots__ = []
code = base.opcodes['WRITESOCKETS']
arg_format = tools.chain(['ci', 'int'], itertools.repeat('s'))
def has_var_args(self):
return True
@base.vectorize
class writesocketshare(base.IOInstruction):
"""
Write a variable number of secret shares (without MACs) from registers into socket
for a specified client id, message_type
"""
__slots__ = []
code = base.opcodes['WRITESOCKETSHARE']
arg_format = tools.chain(['ci', 'int'], itertools.repeat('s'))
def has_var_args(self):
return True
@base.vectorize
class writesocketint(base.IOInstruction):
"""
Write a variable number of 32-bit ints from registers into socket
for a specified client id, message_type
"""
__slots__ = []
code = base.opcodes['WRITESOCKETINT']
arg_format = tools.chain(['ci', 'int'], itertools.repeat('ci'))
def has_var_args(self):
return True
class listen(base.IOInstruction):
"""Open a server socket on a party specific port number and listen for client connections (non-blocking)"""
__slots__ = []
code = base.opcodes['LISTEN']
arg_format = ['int']
class acceptclientconnection(base.IOInstruction):
"""Wait for a connection at the given port and write socket handle to register """
__slots__ = []
code = base.opcodes['ACCEPTCLIENTCONNECTION']
arg_format = ['ciw', 'int']
class connectipv4(base.IOInstruction):
"""Connect to server at IPv4 address in register \verb|cj| at given port. Write socket handle to register \verb|ci|"""
__slots__ = []
code = base.opcodes['CONNECTIPV4']
arg_format = ['ciw', 'ci', 'int']
class readclientpublickey(base.IOInstruction):
"""Read a client public key as 8 32-bit ints for a specified client id"""
__slots__ = []
code = base.opcodes['READCLIENTPUBLICKEY']
arg_format = tools.chain(['ci'], itertools.repeat('ci'))
def has_var_args(self):
return True
class initsecuresocket(base.IOInstruction):
"""Read a client public key as 8 32-bit ints for a specified client id,
negotiate a shared key via STS and use it for replay resistant comms"""
__slots__ = []
code = base.opcodes['INITSECURESOCKET']
arg_format = tools.chain(['ci'], itertools.repeat('ci'))
def has_var_args(self):
return True
class respsecuresocket(base.IOInstruction):
"""Read a client public key as 8 32-bit ints for a specified client id,
negotiate a shared key via STS and use it for replay resistant comms"""
__slots__ = []
code = base.opcodes['RESPSECURESOCKET']
arg_format = tools.chain(['ci'], itertools.repeat('ci'))
def has_var_args(self):
return True
class writesharestofile(base.IOInstruction):
"""Write shares to a file"""
__slots__ = []
code = base.opcodes['WRITEFILESHARE']
arg_format = itertools.repeat('s')
def has_var_args(self):
return True
class readsharesfromfile(base.IOInstruction):
"""
Read shares from a file. Pass in start posn, return finish posn, shares.
Finish posn will return:
-2 file not found
-1 eof reached
position in file after read finished
"""
__slots__ = []
code = base.opcodes['READFILESHARE']
arg_format = tools.chain(['ci', 'ciw'], itertools.repeat('sw'))
def has_var_args(self):
return True
@base.gf2n
@base.vectorize
class raw_output(base.PublicFileIOInstruction):
r""" Raw output of register \verb|ci| to file. """
__slots__ = []
code = base.opcodes['RAWOUTPUT']
arg_format = ['c']
@base.gf2n
@base.vectorize
class startprivateoutput(base.Instruction):
r""" Initiate private output to $n$ of $s_j$ via $s_i$. """
__slots__ = []
code = base.opcodes['STARTPRIVATEOUTPUT']
arg_format = ['sw','s','p']
@base.gf2n
@base.vectorize
class stopprivateoutput(base.Instruction):
r""" Previously iniated private output to $n$ via $c_i$. """
__slots__ = []
code = base.opcodes['STOPPRIVATEOUTPUT']
arg_format = ['c','p']
@base.vectorize
class rand(base.Instruction):
__slots__ = []
code = base.opcodes['RAND']
arg_format = ['ciw','ci']
###
### Integer operations
###
@base.vectorize
class ldint(base.Instruction):
__slots__ = []
code = base.opcodes['LDINT']
arg_format = ['ciw', 'i']
@base.vectorize
class addint(base.IntegerInstruction):
__slots__ = []
code = base.opcodes['ADDINT']
@base.vectorize
class subint(base.IntegerInstruction):
__slots__ = []
code = base.opcodes['SUBINT']
@base.vectorize
class mulint(base.IntegerInstruction):
__slots__ = []
code = base.opcodes['MULINT']
@base.vectorize
class divint(base.IntegerInstruction):
__slots__ = []
code = base.opcodes['DIVINT']
###
### Clear comparison instructions
###
@base.vectorize
class eqzc(base.UnaryComparisonInstruction):
r""" Clear comparison $c_i = (c_j \stackrel{?}{==} 0)$. """
__slots__ = []
code = base.opcodes['EQZC']
def execute(self):
if self.args[1].value == 0:
self.args[0].value = 1
else:
self.args[0].value = 0
@base.vectorize
class ltzc(base.UnaryComparisonInstruction):
r""" Clear comparison $c_i = (c_j \stackrel{?}{<} 0)$. """
__slots__ = []
code = base.opcodes['LTZC']
@base.vectorize
class ltc(base.IntegerInstruction):
r""" Clear comparison $c_i = (c_j \stackrel{?}{<} c_k)$. """
__slots__ = []
code = base.opcodes['LTC']
@base.vectorize
class gtc(base.IntegerInstruction):
r""" Clear comparison $c_i = (c_j \stackrel{?}{>} c_k)$. """
__slots__ = []
code = base.opcodes['GTC']
@base.vectorize
class eqc(base.IntegerInstruction):
r""" Clear comparison $c_i = (c_j \stackrel{?}{==} c_k)$. """
__slots__ = []
code = base.opcodes['EQC']
###
### Jumps etc
###
class jmp(base.JumpInstruction):
""" Unconditional relative jump of $n+1$ instructions. """
__slots__ = []
code = base.opcodes['JMP']
arg_format = ['int']
jump_arg = 0
def execute(self):
pass
class jmpi(base.JumpInstruction):
""" Unconditional relative jump of $c_i+1$ instructions. """
__slots__ = []
code = base.opcodes['JMPI']
arg_format = ['ci']
jump_arg = 0
class jmpnz(base.JumpInstruction):
r""" Jump $n+1$ instructions if $c_i \neq 0$.
e.g.
jmpnz(c, n) : advance n+1 instructions if c is non-zero
jmpnz(c, 0) : do nothing
jmpnz(c, -1): infinite loop if c is non-zero
"""
__slots__ = []
code = base.opcodes['JMPNZ']
import torch
import nrrd
import sys
from PIL import Image
from torchvision.transforms import ToTensor
from torch.utils import data
from torch.utils.data.sampler import SequentialSampler
from torch.utils.tensorboard import SummaryWriter
from torch.cuda.amp import autocast
import torchvision as tv
import torch.nn as nn
import matplotlib.pyplot as plt
import subprocess
import glob, os
import numpy as np
from datetime import datetime
import argparse
import zipfile
from shutil import copyfile
from tifffile import imsave
import utils.pytorch_shot_noise as pytorch_shot_noise
from utils.XLFMDataset import XLFMDatasetFull
from utils.misc_utils import *
from utils.XLFMDeconv import *
from nets.SLNet import *
# Arguments
parser = argparse.ArgumentParser()
parser.add_argument('--data_folder', nargs='?', default="", help='Input images path in format /XLFM_image/XLFM_image_stack.tif and XLFM_image_stack_S.tif in case of a sparse GT stack.')
parser.add_argument('--volume_raw_reference_path', nargs='?', default= '*tif', help='For augmentation the volumes must be computed; perhaps these volumes were already computed? Use: path + *tif')
parser.add_argument('--volume_sparse_reference_path', nargs='?', default= '*tif', help='For augmentation the volumes must be computed; perhaps these volumes were already computed?')
parser.add_argument('--lenslet_file', nargs='?', default= "lenslet_centers_python.txt")
parser.add_argument('--files_to_store', nargs='+', default=[], help='Relative paths of files to store in a zip when running this script, for backup.')
parser.add_argument('--prefix', nargs='?', default= "fishy", help='Prefix string for the output folder.')
parser.add_argument('--checkpoint', nargs='?', default= "", help='File path of checkpoint of SLNet.')
parser.add_argument('--psf_file', nargs='?', default= "PSF_2.5um_processed.mat", help='.mat matlab file with PSF stack, used for deconvolution.')
# Images related arguments
parser.add_argument('--images_to_use', nargs='+', type=int, default=list(range(0,193,1)), help='Indices of images to train on.')
parser.add_argument('--n_simulations', type=int, default=50, help='Number of samples to generate.')
# Noise arguments
parser.add_argument('--add_noise', type=int, default=0, help='Apply noise to images? 0 or 1')
parser.add_argument('--signal_power_max', type=float, default=30**2, help='Max signal value to control signal to noise ratio when applying noise.')
parser.add_argument('--signal_power_min', type=float, default=60**2, help='Min signal value to control signal to noise ratio when applying noise.')
parser.add_argument('--dark_current', type=float, default=106, help='Dark current value of camera.')
parser.add_argument('--dark_current_sparse', type=float, default=0, help='Dark current value of camera.')
# z Roll augmentation
parser.add_argument('--max_z_roll_augm', type=int, default=80, help='Max roll possible for the random z roll')
# Sparse decomposition arguments
parser.add_argument('--temporal_shifts', nargs='+', type=int, default=[0,49,99], help='Which frames to use for training and testing.')
parser.add_argument('--SD_iterations', type=int, default=0, help='Number of iterations for Sparse Decomposition, 0 to disable.')
parser.add_argument('--frame_to_grab', type=int, default=0, help='Which frame to show from the sparse decomposition?')
# 3D deconvolution arguments
parser.add_argument('--deconv_iterations', type=int, default=50, help='Number of iterations for 3D deconvolution, for GT volume generation.')
parser.add_argument('--deconv_n_depths', type=int, default=120, help='Number of depths to create in 3D deconvolution.')
parser.add_argument('--n_depths', type=int, default=120, help='Number of depths to create in 3D deconvolution.')
parser.add_argument('--deconv_limit', type=float, default=10000, help='Maximum intensity allowed from deconvolution.')
parser.add_argument('--deconv_depth_split', type=int, default=6, help='Number of depths to simultaneously deconvolve in the gpu.')
parser.add_argument('--deconv_gpu', type=int, default=1, help='GPU to use for deconvolution, -1 to use CPU, this is very memory intensive.')
parser.add_argument('--output_path', nargs='?', default='')
# parser.add_argument('--output_path', nargs='?', default=runs_dir + '/garbage/')
parser.add_argument('--main_gpu', nargs='+', type=int, default=[0], help='List of GPUs to use: [0,1]')
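# Example invocation (illustrative; the script name and all paths are assumptions, not
# taken from this code):
#   python generate_SLNet_dataset.py --data_folder ./XLFM_data \
#       --checkpoint ./runs/SLNet_checkpoint.pt --n_simulations 50 \
#       --deconv_iterations 50 --deconv_gpu 1 --main_gpu 0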
# Set to zero if debugging
n_threads = 0
args = parser.parse_args()
# Select which devices to use
if len(args.main_gpu)>0:
device = "cuda:" + str(args.main_gpu[0])
else:
if args.main_gpu==-1:
device = "cpu"
else:
device = "cuda"
args.main_gpu = [0]
# Deconvolution can be heavy on the GPU, and sometimes it doesn't fit, so use -1 for CPU
if args.deconv_gpu==-1:
device_deconv = "cpu"
else:
device_deconv = "cuda:" + str(args.deconv_gpu)
if n_threads!=0:
torch.set_num_threads(n_threads)
checkpoint_path = None
if len(args.checkpoint)>0:
checkpoint = torch.load(args.checkpoint, map_location=device)
currArgs = args
argsModel = checkpoint['args']
# Get images
dataset = XLFMDatasetFull(args.data_folder, args.lenslet_file, argsModel.subimage_shape, img_shape=2*[argsModel.img_size],
images_to_use=args.images_to_use, divisor=1, isTiff=True, n_frames_net=argsModel.n_frames,
load_all=True, load_sparse=False, load_vols=False, temporal_shifts=args.temporal_shifts, eval_video=True)
# Get normalization values
max_images,max_images_sparse,max_volumes = dataset.get_max()
# Normalization from SLNet
mean_imgs,std_images,mean_vols,std_vols = checkpoint['statistics']
mean_imgs = mean_imgs.to(device)
std_images = std_images.to(device)
mean_vols = mean_vols.to(device)
std_vols = std_vols.to(device)
n_images = len(dataset)
# Get volume desired shape
output_shape = argsModel.output_shape + [args.n_depths]
if len(output_shape)==2:
output_shape = argsModel.output_shape + [args.n_depths]
# Creating data indices for training and validation splits:
dataset_size = len(dataset)
# Create dataloader
train_sampler = SequentialSampler(dataset)
# Create output directory
head, tail = os.path.split(args.checkpoint)
output_dir = head + '/Dataset_' + datetime.now().strftime('%Y_%m_%d__%H:%M:%S') + '_' + str(args.n_depths) + 'nD__' + str(args.n_simulations) + 'nS__' + args.prefix
print('Output directory: ' + output_dir)
# Create directories
# XLFM_image: Raw XLFM image
# XLFM_stack: Deconvolution of raw images (not computed here)
# XLFM_stack_S: Deconvolution of sparse image generated by SD algorithm
# XLFM_stack_S_SL: Deconvolution of sparse image generated by SLNet
if not os.path.exists(output_dir):
os.mkdir(output_dir)
os.mkdir(output_dir + '/XLFM_image')
os.mkdir(output_dir + '/XLFM_stack')
os.mkdir(output_dir + '/XLFM_stack_S')
os.mkdir(output_dir + '/XLFM_stack_S_SL')
# Tensorboard logger
writer = SummaryWriter(output_dir)
writer.add_text('arguments',str(vars(args)),0)
writer.add_text('arguments_model',str(vars(checkpoint['args'])),0)
writer.flush()
# Copy files and model to output folder
try:
copyfile(os.path.dirname(args.checkpoint)+'/files.zip', output_dir+'/files.zip')
copyfile(args.checkpoint, output_dir + '/' + tail)
# Extract the files used for training SLNet, a different type of version control
with zipfile.ZipFile(os.path.dirname(args.checkpoint)+'/files.zip', "r") as zip_ref:
os.makedirs('tmp_files', exist_ok=True)
zip_ref.extractall('tmp_files/')
from tmp_files.nets.SLNet import *
except:
pass
# Create net and load checkpoint
net = SLNet(dataset.n_frames, mu_sum_constraint=argsModel.SL_mu_sum_constraint, alpha_l1=argsModel.SL_alpha_l1).to(device)
net.eval()
if 'module' in list(checkpoint['model_state_dict'])[0]:
net = nn.DataParallel(net)
net.load_state_dict(checkpoint['model_state_dict'])
net = net.module
else:
net.load_state_dict(checkpoint['model_state_dict'])
# timers
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
end_time_deconv_SL = 0
end_time_deconv_net = 0
# Load measured PSF from matlab file and compute OTF
# The computation can be split to fit into GPU memory, processing n_depths/n_split depths at a time
psf_shape = 2*[argsModel.img_size]
if currArgs.deconv_iterations > 0:
n_split = 20
OTF,psf_shape = load_PSF_OTF(currArgs.psf_file, output_shape, n_depths=args.deconv_n_depths,
n_split=n_split, lenslet_centers_file_out="", compute_transpose=True)
# OTF = OTF[:,OTF.shape[1]//2,...].unsqueeze(1).repeat(1,OTF.shape[1],1,1,1)
# If augmentation desired, check if the deconvolved volumes are already computed
if args.max_z_roll_augm > 0:
precomputed_volume_path = {'raw' : os.path.split(args.volume_raw_reference_path)[0],
'sparse': os.path.split(args.volume_sparse_reference_path)[0]}
precomputed_volume_path_list = {'raw' : sorted(glob.glob(args.volume_raw_reference_path)),
'sparse': sorted(glob.glob(args.volume_sparse_reference_path)) }
# Create array to gather all images, which contains:
# 0: input
# 1: dense SLNet
# 2: sparse SLNet
# 3: dense SL
# 4: sparse SL
tmp = 3
if args.SD_iterations > 0:
tmp = 5
all_images = np.zeros((tmp, args.n_simulations*len(args.temporal_shifts)) + tuple(psf_shape), 'float16')
# Compute images
with torch.no_grad():
mean_time = 0
mean_time_SL = 0
min_time = 10000.0
# Training
for nSimul in range(args.n_simulations):
print('Simulating ' + str(nSimul) + ' / ' + str(args.n_simulations) )
curr_index = nSimul%n_images
# fetch current pair
curr_img_stack, local_volumes = dataset.__getitem__(curr_index)
curr_img_stack = curr_img_stack.unsqueeze(0)
curr_img_stack = curr_img_stack.float()
# curr_img_stack = curr_img_stack / curr_img_stack.max() * 3000.0
curr_img_stack = curr_img_stack.half()
curr_img_stack = curr_img_stack.to(device)
if len(curr_img_stack.shape)>=5:
# assert len(curr_img_stack.shape)>=5, "If sparse is used curr_img_stack should contain both images, dense and sparse stacked in the last dim."
curr_img_sparse = curr_img_stack[...,-1].clone().to(device)
curr_img_stack = curr_img_stack[...,0].clone()
else:
curr_img_sparse = curr_img_stack.clone()
raw_image_stack = curr_img_stack.clone()
# Remove dark current from images
curr_img_stack -= args.dark_current
curr_img_stack = F.relu(curr_img_stack).detach()
curr_img_sparse -= args.dark_current_sparse
curr_img_sparse = F.relu(curr_img_sparse).detach()
if args.add_noise==1:
curr_max = curr_img_stack.max()
# Update new signal power
signal_power = (args.signal_power_min + (args.signal_power_max-args.signal_power_min) * torch.rand(1)).item()
curr_img_stack = signal_power/curr_max * curr_img_stack
# Add noise
curr_img_stack = pytorch_shot_noise.add_camera_noise(curr_img_stack)
curr_img_stack = curr_max/signal_power * curr_img_stack.to(device)
# Normalize images with the same settings as the SLNet was trained
curr_img_stack, local_volumes = normalize_type(curr_img_stack, local_volumes, argsModel.norm_type, mean_imgs, std_images, mean_vols, std_vols, max_images, max_volumes)
with autocast():
# torch.cuda.synchronize()
start.record()
# Run batch of predicted images in discriminator
dense_part = net(curr_img_stack)
# Record training time
end.record()
torch.cuda.synchronize()
end_time = start.elapsed_time(end) / curr_img_stack.shape[0]
mean_time += end_time
min_time = min(min_time, end_time)
sparse_part = F.relu(curr_img_stack - dense_part)
# Renormalize images
dense_part, _ = normalize_type(dense_part.float(), local_volumes, argsModel.norm_type, mean_imgs, std_images, mean_vols, std_vols, max_images, max_volumes, inverse=True)
sparse_part, _ = normalize_type(sparse_part.float(), local_volumes, argsModel.norm_type, mean_imgs, std_images, mean_vols, std_vols, max_images, max_volumes, inverse=True)
curr_img_stack, _ = normalize_type(curr_img_stack.float(), local_volumes, argsModel.norm_type, mean_imgs, std_images, mean_vols, std_vols, max_images, max_volumes, inverse=True)
sparse_part = F.relu(curr_img_stack-dense_part.detach())
# Deconvolve the SLNet sparse image and store the 3D stack
if currArgs.deconv_iterations > 0:
img_shape = sparse_part.shape[-2:]
try:
deconv_vol = dataset.read_tiff_stack(precomputed_volume_path_list['sparse'][curr_index], out_datatype=np.float32).permute(2,0,1).unsqueeze(0)
print('Loaded sparse deconvolution')
except:
start.record()
img_to_deconv_net = sparse_part[:,currArgs.frame_to_grab].unsqueeze(1).float()
deconv_vol,proj_net,forward_net,_ = XLFMDeconv(OTF, img_to_deconv_net, currArgs.deconv_iterations,
device=device_deconv, all_in_device=0,
nSplitFourier=args.deconv_depth_split,max_allowed=args.deconv_limit)
end.record()
torch.cuda.synchronize()
end_time_deconv_net = start.elapsed_time(end) / curr_img_stack.shape[0]
deconv_vol = deconv_vol[:, currArgs.deconv_n_depths//2-currArgs.n_depths//2 : currArgs.deconv_n_depths//2+currArgs.n_depths//2,...]
print(end_time_deconv_net,'s ',str(deconv_vol.max()))
# Augmentation needed? We need the 3 deconvolved volumes for this: shift them in z, then compute their forward projections
if args.max_z_roll_augm > 0:
# Create new image storages
augmented_images = {'raw' : sparse_part.clone(), 'sparse' : sparse_part.clone()}
augmented_raw_image_stack = raw_image_stack.clone()
# Temporarily store augmented volumes
augmented_sparse_volumes = deconv_vol.clone().unsqueeze(-1).repeat(1,1,1,1,len(args.temporal_shifts))
# Roll volume
roll_amount = torch.randint(-args.max_z_roll_augm, args.max_z_roll_augm, [1])[0]
for img_id in range(len(args.temporal_shifts)):
vol_id = args.temporal_shifts[img_id] + curr_index
# Select image to deconvolve
curr_image = {'raw':raw_image_stack[:,img_id,...].clone().unsqueeze(1), 'sparse':sparse_part[:,img_id,...].clone().unsqueeze(1)}
# Is it the raw or the sparse image? we need both
for type_volume in ['raw','sparse']:
volume_exists = False
# Check if volumes exist
try:
curr_vol = dataset.read_tiff_stack(precomputed_volume_path_list[type_volume][vol_id], out_datatype=np.float32).permute(2,0,1).unsqueeze(0)
volume_exists = True
print('Loaded augmented ' + type_volume + ' deconvolution')
except: # Deconvolve them
curr_vol,_,_,_ = XLFMDeconv(OTF, curr_image[type_volume].float(), currArgs.deconv_iterations,
device=device_deconv, all_in_device=0,
nSplitFourier=args.deconv_depth_split,max_allowed=args.deconv_limit)
if not volume_exists:
imsave(precomputed_volume_path[type_volume] + '/XLFM_stack_'+ "%03d" % vol_id + '.tif', curr_vol.cpu().numpy())
curr_vol = torch.roll(curr_vol, (roll_amount.item()), 1)
# Compute new forward projection
new_img = XLFM_forward_projection(OTF, curr_vol, [1,1,OTF.shape[2],OTF.shape[2]], nSplitFourier=args.deconv_depth_split)
# Crop center to match original image
new_img = new_img[:,:,new_img.shape[2]//2-img_shape[0]//2 : new_img.shape[2]//2+img_shape[0]//2, \
new_img.shape[3]//2-img_shape[1]//2 : new_img.shape[3]//2+img_shape[1]//2]
# Update sparse image and store it
augmented_images[type_volume][:,img_id,...] = new_img[:,0,...]/new_img.sum() * curr_image[type_volume].cpu().float().sum()
# Store augmented volumes
augmented_sparse_volumes[...,img_id] = curr_vol.clone()
# Overwrite output images
# scripts/base_network.py
# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
# coding: utf-8
"""
Creates the network topology from a `ENTSO-E map extract <https://github.com/PyPSA/GridKit/tree/master/entsoe>`_ (March 2022) as a PyPSA network.
Relevant Settings
-----------------
.. code:: yaml
snapshots:
countries:
electricity:
voltages:
lines:
types:
s_max_pu:
under_construction:
links:
p_max_pu:
under_construction:
include_tyndp:
transformers:
x:
s_nom:
type:
.. seealso::
Documentation of the configuration file ``config.yaml`` at
:ref:`snapshots_cf`, :ref:`toplevel_cf`, :ref:`electricity_cf`, :ref:`load_cf`,
:ref:`lines_cf`, :ref:`links_cf`, :ref:`transformers_cf`
Inputs
------
- ``data/entsoegridkit``: Extract from the geographical vector data of the online `ENTSO-E Interactive Map <https://www.entsoe.eu/data/map/>`_ by the `GridKit <https://github.com/martacki/gridkit>`_ toolkit dating back to March 2022.
- ``data/parameter_corrections.yaml``: Corrections for ``data/entsoegridkit``
- ``data/links_p_nom.csv``: confer :ref:`links`
- ``data/links_tyndp.csv``: List of projects in the `TYNDP 2018 <https://tyndp.entsoe.eu/tyndp2018/>`_ that are at least *in permitting* with fields for start- and endpoint (names and coordinates), length, capacity, construction status, and project reference ID.
- ``resources/country_shapes.geojson``: confer :ref:`shapes`
- ``resources/offshore_shapes.geojson``: confer :ref:`shapes`
- ``resources/europe_shape.geojson``: confer :ref:`shapes`
Outputs
-------
- ``networks/base.nc``
.. image:: ../img/base.png
:scale: 33 %
Description
-----------
"""
import logging
from _helpers import configure_logging
import pypsa
import yaml
import pandas as pd
import geopandas as gpd
import numpy as np
import networkx as nx
from scipy import spatial
from scipy.sparse import csgraph
from itertools import product
from shapely.geometry import Point, LineString
import shapely, shapely.prepared, shapely.wkt
logger = logging.getLogger(__name__)
def _get_oid(df):
if "tags" in df.columns:
return df.tags.str.extract('"oid"=>"(\d+)"', expand=False)
else:
return pd.Series(np.nan, df.index)
def _get_country(df):
if "tags" in df.columns:
return df.tags.str.extract('"country"=>"([A-Z]{2})"', expand=False)
else:
return pd.Series(np.nan, df.index)
def _find_closest_links(links, new_links, distance_upper_bound=1.5):
treecoords = np.asarray([np.asarray(shapely.wkt.loads(s).coords)[[0, -1]].flatten()
for s in links.geometry])
querycoords = np.vstack([new_links[['x1', 'y1', 'x2', 'y2']],
new_links[['x2', 'y2', 'x1', 'y1']]])
tree = spatial.KDTree(treecoords)
dist, ind = tree.query(querycoords, distance_upper_bound=distance_upper_bound)
found_b = ind < len(links)
found_i = np.arange(len(new_links)*2)[found_b] % len(new_links)
return pd.DataFrame(dict(D=dist[found_b],
i=links.index[ind[found_b] % len(links)]),
index=new_links.index[found_i]).sort_values(by='D')\
[lambda ds: ~ds.index.duplicated(keep='first')]\
.sort_index()['i']
def _load_buses_from_eg(eg_buses, europe_shape, config_elec):
buses = (pd.read_csv(eg_buses, quotechar="'",
true_values=['t'], false_values=['f'],
dtype=dict(bus_id="str"))
.set_index("bus_id")
.drop(['station_id'], axis=1)
.rename(columns=dict(voltage='v_nom')))
buses['carrier'] = buses.pop('dc').map({True: 'DC', False: 'AC'})
buses['under_construction'] = buses['under_construction'].fillna(False).astype(bool)
# remove all buses outside of all countries including exclusive economic zones (offshore)
europe_shape = gpd.read_file(europe_shape).loc[0, 'geometry']
europe_shape_prepped = shapely.prepared.prep(europe_shape)
buses_in_europe_b = buses[['x', 'y']].apply(lambda p: europe_shape_prepped.contains(Point(p)), axis=1)
buses_with_v_nom_to_keep_b = buses.v_nom.isin(config_elec['voltages']) | buses.v_nom.isnull()
logger.info("Removing buses with voltages {}".format(pd.Index(buses.v_nom.unique()).dropna().difference(config_elec['voltages'])))
return pd.DataFrame(buses.loc[buses_in_europe_b & buses_with_v_nom_to_keep_b])
def _load_transformers_from_eg(buses, eg_transformers):
transformers = (pd.read_csv(eg_transformers, quotechar="'",
true_values=['t'], false_values=['f'],
dtype=dict(transformer_id='str', bus0='str', bus1='str'))
.set_index('transformer_id'))
transformers = _remove_dangling_branches(transformers, buses)
return transformers
def _load_converters_from_eg(buses, eg_converters):
converters = (pd.read_csv(eg_converters, quotechar="'",
true_values=['t'], false_values=['f'],
dtype=dict(converter_id='str', bus0='str', bus1='str'))
.set_index('converter_id'))
converters = _remove_dangling_branches(converters, buses)
converters['carrier'] = 'B2B'
return converters
def _load_links_from_eg(buses, eg_links):
links = (pd.read_csv(eg_links, quotechar="'", true_values=['t'], false_values=['f'],
dtype=dict(link_id='str', bus0='str', bus1='str', under_construction="bool"))
.set_index('link_id'))
links['length'] /= 1e3
# Skagerrak Link is connected to 132kV bus which is removed in _load_buses_from_eg.
# Connect to neighboring 380kV bus
links.loc[links.bus1=='6396', 'bus1'] = '6398'
links = _remove_dangling_branches(links, buses)
# Add DC line parameters
links['carrier'] = 'DC'
return links
def _add_links_from_tyndp(buses, links, links_tyndp, europe_shape):
links_tyndp = pd.read_csv(links_tyndp)
# remove all links from list which lie outside all of the desired countries
europe_shape = gpd.read_file(europe_shape).loc[0, 'geometry']
europe_shape_prepped = shapely.prepared.prep(europe_shape)
x1y1_in_europe_b = links_tyndp[['x1', 'y1']].apply(lambda p: europe_shape_prepped.contains(Point(p)), axis=1)
x2y2_in_europe_b = links_tyndp[['x2', 'y2']].apply(lambda p: europe_shape_prepped.contains(Point(p)), axis=1)
is_within_covered_countries_b = x1y1_in_europe_b & x2y2_in_europe_b
if not is_within_covered_countries_b.all():
logger.info("TYNDP links outside of the covered area (skipping): " +
", ".join(links_tyndp.loc[~ is_within_covered_countries_b, "Name"]))
links_tyndp = links_tyndp.loc[is_within_covered_countries_b]
if links_tyndp.empty:
return buses, links
has_replaces_b = links_tyndp.replaces.notnull()
oids = dict(Bus=_get_oid(buses), Link=_get_oid(links))
keep_b = dict(Bus=pd.Series(True, index=buses.index),
Link=pd.Series(True, index=links.index))
for reps in links_tyndp.loc[has_replaces_b, 'replaces']:
for comps in reps.split(':'):
oids_to_remove = comps.split('.')
c = oids_to_remove.pop(0)
keep_b[c] &= ~oids[c].isin(oids_to_remove)
buses = buses.loc[keep_b['Bus']]
links = links.loc[keep_b['Link']]
links_tyndp["j"] = _find_closest_links(links, links_tyndp, distance_upper_bound=0.20)
    # A distance_upper_bound of 0.20 degrees corresponds to a tolerance of roughly 20 km
if links_tyndp["j"].notnull().any():
logger.info("TYNDP links already in the dataset (skipping): " + ", ".join(links_tyndp.loc[links_tyndp["j"].notnull(), "Name"]))
links_tyndp = links_tyndp.loc[links_tyndp["j"].isnull()]
if links_tyndp.empty: return buses, links
tree = spatial.KDTree(buses[['x', 'y']])
_, ind0 = tree.query(links_tyndp[["x1", "y1"]])
ind0_b = ind0 < len(buses)
links_tyndp.loc[ind0_b, "bus0"] = buses.index[ind0[ind0_b]]
_, ind1 = tree.query(links_tyndp[["x2", "y2"]])
ind1_b = ind1 < len(buses)
links_tyndp.loc[ind1_b, "bus1"] = buses.index[ind1[ind1_b]]
links_tyndp_located_b = links_tyndp["bus0"].notnull() & links_tyndp["bus1"].notnull()
if not links_tyndp_located_b.all():
logger.warning("Did not find connected buses for TYNDP links (skipping): " + ", ".join(links_tyndp.loc[~links_tyndp_located_b, "Name"]))
links_tyndp = links_tyndp.loc[links_tyndp_located_b]
logger.info("Adding the following TYNDP links: " + ", ".join(links_tyndp["Name"]))
links_tyndp = links_tyndp[["bus0", "bus1"]].assign(
carrier='DC',
p_nom=links_tyndp["Power (MW)"],
length=links_tyndp["Length (given) (km)"].fillna(links_tyndp["Length (distance*1.2) (km)"]),
under_construction=True,
underground=False,
geometry=(links_tyndp[["x1", "y1", "x2", "y2"]]
.apply(lambda s: str(LineString([[s.x1, s.y1], [s.x2, s.y2]])), axis=1)),
tags=('"name"=>"' + links_tyndp["Name"] + '", ' +
'"ref"=>"' + links_tyndp["Ref"] + '", ' +
'"status"=>"' + links_tyndp["status"] + '"')
)
links_tyndp.index = "T" + links_tyndp.index.astype(str)
links = pd.concat([links, links_tyndp], sort=True)
return buses, links
def _load_lines_from_eg(buses, eg_lines):
lines = (pd.read_csv(eg_lines, quotechar="'", true_values=['t'], false_values=['f'],
dtype=dict(line_id='str', bus0='str', bus1='str',
underground="bool", under_construction="bool"))
.set_index('line_id')
.rename(columns=dict(voltage='v_nom', circuits='num_parallel')))
lines['length'] /= 1e3
lines = _remove_dangling_branches(lines, buses)
return lines
def _apply_parameter_corrections(n, parameter_corrections):
with open(parameter_corrections) as f:
corrections = yaml.safe_load(f)
if corrections is None: return
for component, attrs in corrections.items():
df = n.df(component)
oid = _get_oid(df)
if attrs is None: continue
for attr, repls in attrs.items():
for i, r in repls.items():
if i == 'oid':
r = oid.map(repls["oid"]).dropna()
elif i == 'index':
r = pd.Series(repls["index"])
else:
raise NotImplementedError()
inds = r.index.intersection(df.index)
df.loc[inds, attr] = r[inds].astype(df[attr].dtype)
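# A minimal sketch of the data structure ``_apply_parameter_corrections`` expects
# from ``yaml.safe_load``; the component, attribute and identifier values below are
# illustrative assumptions, not contents of ``data/parameter_corrections.yaml``.
_EXAMPLE_CORRECTIONS = {
    "Link": {                         # component name understood by ``n.df``
        "p_nom": {
            "oid": {"123456": 600},   # match rows through the GridKit "oid" tag
            "index": {"T42": 1000},   # or address rows directly by index label
        }
    }
}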
def _set_electrical_parameters_lines(lines, config):
v_noms = config['electricity']['voltages']
linetypes = config['lines']['types']
for v_nom in v_noms:
lines.loc[lines["v_nom"] == v_nom, 'type'] = linetypes[v_nom]
lines['s_max_pu'] = config['lines']['s_max_pu']
return lines
def _set_lines_s_nom_from_linetypes(n):
    # Thermal rating of an AC line: s_nom = sqrt(3) * i_nom * v_nom * num_parallel (in MVA)
    n.lines['s_nom'] = (
np.sqrt(3) * n.lines['type'].map(n.line_types.i_nom) *
n.lines['v_nom'] * n.lines.num_parallel
)
def _set_electrical_parameters_links(links, config, links_p_nom):
if links.empty: return links
p_max_pu = config['links'].get('p_max_pu', 1.)
links['p_max_pu'] = p_max_pu
links['p_min_pu'] = -p_max_pu
links_p_nom = pd.read_csv(links_p_nom)
# filter links that are not in operation anymore
removed_b = links_p_nom.Remarks.str.contains('Shut down|Replaced', na=False)
links_p_nom = links_p_nom[~removed_b]
# find closest link for all links in links_p_nom
links_p_nom['j'] = _find_closest_links(links, links_p_nom)
links_p_nom = links_p_nom.groupby(['j'],as_index=False).agg({'Power (MW)': 'sum'})
p_nom = links_p_nom.dropna(subset=["j"]).set_index("j")["Power (MW)"]
# Don't update p_nom if it's already set
p_nom_unset = p_nom.drop(links.index[links.p_nom.notnull()], errors='ignore') if "p_nom" in links else p_nom
links.loc[p_nom_unset.index, "p_nom"] = p_nom_unset
return links
def _set_electrical_parameters_converters(converters, config):
p_max_pu = config['links'].get('p_max_pu', 1.)
converters['p_max_pu'] = p_max_pu
converters['p_min_pu'] = -p_max_pu
converters['p_nom'] = 2000
# Converters are combined with links
converters['under_construction'] = False
converters['underground'] = False
return converters
def _set_electrical_parameters_transformers(transformers, config):
config = config['transformers']
## Add transformer parameters
transformers["x"] = config.get('x', 0.1)
transformers["s_nom"] = config.get('s_nom', 2000)
transformers['type'] = config.get('type', '')
return transformers
def _remove_dangling_branches(branches, buses):
return pd.DataFrame(branches.loc[branches.bus0.isin(buses.index) & branches.bus1.isin(buses.index)])
def _remove_unconnected_components(network):
_, labels = csgraph.connected_components(network.adjacency_matrix(), directed=False)
component = pd.Series(labels, index=network.buses.index)
component_sizes = component.value_counts()
components_to_remove = component_sizes.iloc[1:]
logger.info("Removing {} unconnected network components with less than {} buses. In total {} buses."
.format(len(components_to_remove), components_to_remove.max(), components_to_remove.sum()))
return network[component == component_sizes.index[0]]
def _set_countries_and_substations(n, config, country_shapes, offshore_shapes):
buses = n.buses
def buses_in_shape(shape):
shape = shapely.prepared.prep(shape)
return pd.Series(
np.fromiter((shape.contains(Point(x, y))
for x, y in buses.loc[:,["x", "y"]].values),
dtype=bool, count=len(buses)),
index=buses.index
)
countries = config['countries']
country_shapes = gpd.read_file(country_shapes).set_index('name')['geometry']
offshore_shapes = gpd.read_file(offshore_shapes).set_index('name')['geometry']
substation_b = buses['symbol'].str.contains('substation|converter station', case=False)
def prefer_voltage(x, which):
index = x.index
if len(index) == 1:
return pd.Series(index, index)
key = (x.index[0]
if x['v_nom'].isnull().all()
else getattr(x['v_nom'], 'idx' + which)())
return pd.Series(key, index)
gb = buses.loc[substation_b].groupby(['x', 'y'], as_index=False,
group_keys=False, sort=False)
bus_map_low = gb.apply(prefer_voltage, 'min')
lv_b = (bus_map_low == bus_map_low.index).reindex(buses.index, fill_value=False)
bus_map_high = gb.apply(prefer_voltage, 'max')
hv_b = (bus_map_high == bus_map_high.index).reindex(buses.index, fill_value=False)
onshore_b = pd.Series(False, buses.index)
offshore_b = pd.Series(False, buses.index)
for country in countries:
onshore_shape = country_shapes[country]
onshore_country_b = buses_in_shape(onshore_shape)
onshore_b |= onshore_country_b
buses.loc[onshore_country_b, 'country'] = country
if country not in offshore_shapes.index: continue
offshore_country_b = buses_in_shape(offshore_shapes[country])
offshore_b |= offshore_country_b
buses.loc[offshore_country_b, 'country'] = country
# Only accept buses as low-voltage substations (where load is attached), if
# they have at least one connection which is not under_construction
has_connections_b = pd.Series(False, index=buses.index)
for b, df in product(('bus0', 'bus1'), (n.lines, n.links)):
has_connections_b |= ~ df.groupby(b).under_construction.min()
buses['substation_lv'] = lv_b & onshore_b & (~ buses['under_construction']) & has_connections_b
buses['substation_off'] = (offshore_b | (hv_b & onshore_b)) & (~ buses['under_construction'])
c_nan_b = buses.country.isnull()
if c_nan_b.sum() > 0:
c_tag = _get_country(buses.loc[c_nan_b])
c_tag.loc[~c_tag.isin(countries)] = np.nan
n.buses.loc[c_nan_b, 'country'] = c_tag
c_tag_nan_b = n.buses.country.isnull()
# Nearest country in path length defines country of still homeless buses
# Work-around until commit 705119 lands in pypsa release
n.transformers['length'] = 0.
graph = n.graph(weight='length')
n.transformers.drop('length', axis=1, inplace=True)
for b in n.buses.index[c_tag_nan_b]:
df = (pd.DataFrame(dict(pathlength=nx.single_source_dijkstra_path_length(graph, b, cutoff=200)))
.join(n.buses.country).dropna())
assert not df.empty, "No buses with defined country within 200km of bus `{}`".format(b)
n.buses.at[b, 'country'] = df.loc[df.pathlength.idxmin(), 'country']
logger.warning("{} buses are not in any country or offshore shape,"
" {} have been assigned from the tag of the entsoe map,"
" the rest from the next bus in terms of pathlength."
.format(c_nan_b.sum(), c_nan_b.sum() - c_tag_nan_b.sum()))
# coding: utf-8
import numpy as np
import tensorflow as tf
from tensorflow.contrib.rnn import RNNCell
from tensorflow.python.ops import rnn_cell_impl
#from tensorflow.contrib.data.python.util import nest
from tensorflow.contrib.framework import nest
from tensorflow.contrib.seq2seq.python.ops.attention_wrapper import _bahdanau_score, _BaseAttentionMechanism, BahdanauAttention, \
    AttentionWrapperState, AttentionMechanism, _BaseMonotonicAttentionMechanism, _maybe_mask_score, _prepare_memory, _monotonic_probability_fn
from tensorflow.python.layers.core import Dense
from .modules import prenet
import functools
_zero_state_tensors = rnn_cell_impl._zero_state_tensors
class AttentionWrapper(RNNCell):
"""Wraps another `RNNCell` with attention.
"""
def __init__(self,
cell,
attention_mechanism,
                 is_manual_attention,  # added argument
                 manual_alignments,  # added argument
attention_layer_size=None,
alignment_history=False,
cell_input_fn=None,
output_attention=True,
initial_cell_state=None,
name=None):
"""Construct the `AttentionWrapper`.
**NOTE** If you are using the `BeamSearchDecoder` with a cell wrapped in
`AttentionWrapper`, then you must ensure that:
- The encoder output has been tiled to `beam_width` via
@{tf.contrib.seq2seq.tile_batch} (NOT `tf.tile`).
- The `batch_size` argument passed to the `zero_state` method of this
wrapper is equal to `true_batch_size * beam_width`.
- The initial state created with `zero_state` above contains a
`cell_state` value containing properly tiled final state from the
encoder.
An example:
```
tiled_encoder_outputs = tf.contrib.seq2seq.tile_batch(
encoder_outputs, multiplier=beam_width)
        tiled_encoder_final_state = tf.contrib.seq2seq.tile_batch(
encoder_final_state, multiplier=beam_width)
tiled_sequence_length = tf.contrib.seq2seq.tile_batch(
sequence_length, multiplier=beam_width)
attention_mechanism = MyFavoriteAttentionMechanism(
num_units=attention_depth,
memory=tiled_inputs,
memory_sequence_length=tiled_sequence_length)
attention_cell = AttentionWrapper(cell, attention_mechanism, ...)
decoder_initial_state = attention_cell.zero_state(
dtype, batch_size=true_batch_size * beam_width)
decoder_initial_state = decoder_initial_state.clone(
cell_state=tiled_encoder_final_state)
```
Args:
cell: An instance of `RNNCell`.
attention_mechanism: A list of `AttentionMechanism` instances or a single
instance.
attention_layer_size: A list of Python integers or a single Python
integer, the depth of the attention (output) layer(s). If None
(default), use the context as attention at each time step. Otherwise,
feed the context and cell output into the attention layer to generate
attention at each time step. If attention_mechanism is a list,
attention_layer_size must be a list of the same length.
alignment_history: Python boolean, whether to store alignment history
from all time steps in the final output state (currently stored as a
time major `TensorArray` on which you must call `stack()`).
cell_input_fn: (optional) A `callable`. The default is:
`lambda inputs, attention: tf.concat([inputs, attention], -1)`.
output_attention: Python bool. If `True` (default), the output at each
time step is the attention value. This is the behavior of Luong-style
attention mechanisms. If `False`, the output at each time step is
        the output of `cell`. This is the behavior of Bahdanau-style
attention mechanisms. In both cases, the `attention` tensor is
propagated to the next time step via the state and is used there.
This flag only controls whether the attention mechanism is propagated
up to the next cell in an RNN stack or to the top RNN output.
initial_cell_state: The initial state value to use for the cell when
the user calls `zero_state()`. Note that if this value is provided
now, and the user uses a `batch_size` argument of `zero_state` which
does not match the batch size of `initial_cell_state`, proper
behavior is not guaranteed.
name: Name to use when creating ops.
Raises:
TypeError: `attention_layer_size` is not None and (`attention_mechanism`
is a list but `attention_layer_size` is not; or vice versa).
ValueError: if `attention_layer_size` is not None, `attention_mechanism`
is a list, and its length does not match that of `attention_layer_size`.
"""
super(AttentionWrapper, self).__init__(name=name)
self.is_manual_attention = is_manual_attention
self.manual_alignments = manual_alignments
rnn_cell_impl.assert_like_rnncell("cell", cell)
if isinstance(attention_mechanism, (list, tuple)):
self._is_multi = True
attention_mechanisms = attention_mechanism
for attention_mechanism in attention_mechanisms:
if not isinstance(attention_mechanism, AttentionMechanism):
raise TypeError(
"attention_mechanism must contain only instances of "
"AttentionMechanism, saw type: %s"
% type(attention_mechanism).__name__)
else:
self._is_multi = False
if not isinstance(attention_mechanism, AttentionMechanism):
raise TypeError(
"attention_mechanism must be an AttentionMechanism or list of "
"multiple AttentionMechanism instances, saw type: %s"
% type(attention_mechanism).__name__)
attention_mechanisms = (attention_mechanism,)
if cell_input_fn is None:
cell_input_fn = (
lambda inputs, attention: tf.concat([inputs, attention], -1))
else:
if not callable(cell_input_fn):
raise TypeError(
"cell_input_fn must be callable, saw type: %s"
% type(cell_input_fn).__name__)
if attention_layer_size is not None:
attention_layer_sizes = tuple(
attention_layer_size
if isinstance(attention_layer_size, (list, tuple))
else (attention_layer_size,))
if len(attention_layer_sizes) != len(attention_mechanisms):
raise ValueError(
"If provided, attention_layer_size must contain exactly one "
"integer per attention_mechanism, saw: %d vs %d"
% (len(attention_layer_sizes), len(attention_mechanisms)))
self._attention_layers = tuple(
                Dense(
attention_layer_size,
name="attention_layer",
use_bias=False,
dtype=attention_mechanisms[i].dtype)
for i, attention_layer_size in enumerate(attention_layer_sizes))
self._attention_layer_size = sum(attention_layer_sizes)
else:
self._attention_layers = None
self._attention_layer_size = sum(
attention_mechanism.values.get_shape()[-1].value
for attention_mechanism in attention_mechanisms)
self._cell = cell
self._attention_mechanisms = attention_mechanisms
self._cell_input_fn = cell_input_fn
self._output_attention = output_attention
self._alignment_history = alignment_history
with tf.name_scope(name, "AttentionWrapperInit"):
if initial_cell_state is None:
self._initial_cell_state = None
else:
final_state_tensor = nest.flatten(initial_cell_state)[-1]
state_batch_size = (
final_state_tensor.shape[0].value
or tf.shape(final_state_tensor)[0])
error_message = (
"When constructing AttentionWrapper %s: " % self._base_name +
"Non-matching batch sizes between the memory "
"(encoder output) and initial_cell_state. Are you using "
"the BeamSearchDecoder? You may need to tile your initial state "
"via the tf.contrib.seq2seq.tile_batch function with argument "
"multiple=beam_width.")
with tf.control_dependencies(
self._batch_size_checks(state_batch_size, error_message)):
self._initial_cell_state = nest.map_structure(
lambda s: tf.identity(s, name="check_initial_cell_state"),
initial_cell_state)
def _batch_size_checks(self, batch_size, error_message):
return [tf.assert_equal(batch_size,
attention_mechanism.batch_size,
message=error_message)
for attention_mechanism in self._attention_mechanisms]
def _item_or_tuple(self, seq):
"""Returns `seq` as tuple or the singular element.
Which is returned is determined by how the AttentionMechanism(s) were passed
to the constructor.
Args:
seq: A non-empty sequence of items or generator.
Returns:
Either the values in the sequence as a tuple if AttentionMechanism(s)
were passed to the constructor as a sequence or the singular element.
"""
t = tuple(seq)
if self._is_multi:
return t
else:
return t[0]
@property
def output_size(self):
if self._output_attention:
return self._attention_layer_size
else:
return self._cell.output_size
@property
def state_size(self):
"""The `state_size` property of `AttentionWrapper`.
Returns:
An `AttentionWrapperState` tuple containing shapes used by this object.
"""
return AttentionWrapperState(
cell_state=self._cell.state_size,
time=tf.TensorShape([]),
attention=self._attention_layer_size,
alignments=self._item_or_tuple(
a.alignments_size for a in self._attention_mechanisms),
attention_state=self._item_or_tuple(
a.state_size for a in self._attention_mechanisms),
alignment_history=self._item_or_tuple(
a.alignments_size if self._alignment_history else ()
for a in self._attention_mechanisms)) # sometimes a TensorArray
def zero_state(self, batch_size, dtype):
"""Return an initial (zero) state tuple for this `AttentionWrapper`.
**NOTE** Please see the initializer documentation for details of how
to call `zero_state` if using an `AttentionWrapper` with a
`BeamSearchDecoder`.
Args:
batch_size: `0D` integer tensor: the batch size.
dtype: The internal state data type.
Returns:
An `AttentionWrapperState` tuple containing zeroed out tensors and,
possibly, empty `TensorArray` objects.
Raises:
ValueError: (or, possibly at runtime, InvalidArgument), if
`batch_size` does not match the output size of the encoder passed
to the wrapper object at initialization time.
"""
with tf.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
if self._initial_cell_state is not None:
cell_state = self._initial_cell_state
else:
cell_state = self._cell.zero_state(batch_size, dtype)
error_message = (
"When calling zero_state of AttentionWrapper %s: " % self._base_name +
"Non-matching batch sizes between the memory "
"(encoder output) and the requested batch size. Are you using "
"the BeamSearchDecoder? If so, make sure your encoder output has "
"been tiled to beam_width via tf.contrib.seq2seq.tile_batch, and "
"the batch_size= argument passed to zero_state is "
"batch_size * beam_width.")
with tf.control_dependencies(
self._batch_size_checks(batch_size, error_message)):
cell_state = nest.map_structure(
lambda s: tf.identity(s, name="checked_cell_state"),
cell_state)
initial_alignments = [
attention_mechanism.initial_alignments(batch_size, dtype)
for attention_mechanism in self._attention_mechanisms]
return AttentionWrapperState(
cell_state=cell_state,
time=tf.zeros([], dtype=tf.int32),
attention=_zero_state_tensors(self._attention_layer_size, batch_size,
dtype),
alignments=self._item_or_tuple(initial_alignments),
attention_state=self._item_or_tuple(
attention_mechanism.initial_state(batch_size, dtype)
for attention_mechanism in self._attention_mechanisms),
alignment_history=self._item_or_tuple(
tf.TensorArray(
dtype,
size=0,
dynamic_size=True,
element_shape=alignment.shape)
if self._alignment_history else ()
for alignment in initial_alignments))
def call(self, inputs, state):
"""Perform a step of attention-wrapped RNN.
- Step 1: Mix the `inputs` and previous step's `attention` output via
`cell_input_fn`.
- Step 2: Call the wrapped `cell` with this input and its previous state.
- Step 3: Score the cell's output with `attention_mechanism`.
- Step 4: Calculate the alignments by passing the score through the
`normalizer`.
- Step 5: Calculate the context vector as the inner product between the
alignments and the attention_mechanism's values (memory).
- Step 6: Calculate the attention output by concatenating the cell output
and context through the attention layer (a linear layer with
`attention_layer_size` outputs).
Args:
inputs: (Possibly nested tuple of) Tensor, the input at this time step.
state: An instance of `AttentionWrapperState` containing
tensors from the previous time step.
Returns:
A tuple `(attention_or_cell_output, next_state)`, where:
- `attention_or_cell_output` depending on `output_attention`.
- `next_state` is an instance of `AttentionWrapperState`
containing the state calculated at this time step.
Raises:
TypeError: If `state` is not an instance of `AttentionWrapperState`.
"""
if not isinstance(state, AttentionWrapperState):
raise TypeError("Expected state to be instance of AttentionWrapperState. "
"Received type %s instead." | |
# brianhuey/vissim: vissim_v5/vissim_v5.py
#!/usr/bin/env python
""" VISSIM Tools v2.0 """
from re import findall as _findall, match as _match, search as _search
from re import split as _split
from math import sqrt as _sqrt
from copy import copy as _copy
def _median(lst):
""" Stock median function from: http://stackoverflow.
com/questions/24101524/finding-median-of-list-in-python
"""
lst = sorted(lst)
if len(lst) < 1:
return None
    if len(lst) % 2 == 1:
        return lst[((len(lst) + 1) // 2) - 1]
    else:
        return float(sum(lst[(len(lst) // 2) - 1:(len(lst) // 2) + 1])) / 2.0
def _flatten(coll):
""" Flatten list and convert elements to int
"""
if isinstance(coll, list):
return [int(a) for i in coll for a in _flatten(i)]
else:
return [coll]
def _importSection(obj, filename):
""" Imports section's syntax into dictionary.
"""
name = obj.name
try:
with open(filename, 'r') as data:
section = None
for row in data:
found = _findall(r'(?<=\- )(.*?)(?=\: -)', row)
if row in ['\n', '\r\n']:
continue
elif found:
section = found[0]
                elif section == name and row[0] != '-':
obj._readSection(row)
else:
continue
except IOError:
        print('cannot open', filename)
def _updateFile(orgFile, newFile, name, printData):
""" Update a section of a VISSIM .INP file.
Inputs: filename, object type(Inputs, Routing Decision, etc.) and list
of strings with VISSIM syntax.
Outputs: new file
"""
search = '-- ' + name + ': --'
f = open(orgFile, 'r')
lines = f.readlines()
f.close()
f = open(newFile, 'w')
startDelete = False
    print('Writing %s to %s ...' % (name, newFile))
for line in lines:
if startDelete is True and line[:3] == '-- ':
startDelete = False
f.write('\n\n\n')
if line[:len(search)] == search:
startDelete = True
for newLine in printData:
f.write(str(newLine) + '\n')
if startDelete is False:
f.write(line)
f.close()
def _exportSection(obj, filename):
""" Prepare for export all dictionaries within a given object
Input: Dict object
Output: List of all items for a given class in VISSIM syntax
"""
first_line = '-- ' + obj.name + ': --\n'
second_line = '-' * (len(first_line) - 1) + '\n'
print_data = [first_line + second_line]
    print('Reading %s ...' % obj.name)
for value in obj.data.values():
print_data += obj._output(value)
    print('Updating %s ...' % obj.name)
_updateFile(obj.filename, filename, obj.name, print_data)
def _checkKeyData(obj, key, label):
""" Checks whether key and label exist
"""
if key is not None:
if key not in obj.data:
raise KeyError('%s not a valid key for object %s' %
(key, obj.name))
if label not in obj.types:
raise KeyError('%s not a valid label for object %s' %
(label, obj.name))
return True
def _getData(obj, key, label):
""" Returns requested value from vissim object
"""
_checkKeyData(obj, key, label)
if key is None:
new = obj.data[label]
else:
new = obj.data[key][label]
return _copy(new)
def _setData(obj, key, label, value):
""" Sets given value in vissim object
"""
_checkKeyData(obj, key, label)
if not isinstance(value, obj.types[label]):
value = obj.types[label](value)
if key is None:
obj.data[label] = value
else:
obj.data[key][label] = value
def _updateData(obj, key, label, value, pos=None, newKey=None):
""" Updates vissim object
Input: object, key, label and value
Output: adds value to object.
"""
_checkKeyData(obj, key, label)
if key is None:
if (isinstance(obj.data[label], list) is True) and (pos is None):
obj.data[label].append(value)
obj.data[label] = list(_flatten(obj.data[label]))
elif isinstance(obj.data[label], list) and pos:
obj.data[label].insert(pos, value)
obj.data[label] = list(_flatten(obj.data[label]))
else:
if isinstance(obj.data[key][label], dict):
obj.data[key][label][newKey] = value
        if (isinstance(obj.data[key][label], list) is True) and (pos is None):
            obj.data[key][label].append(value)
            obj.data[key][label] = list(_flatten(obj.data[key][label]))
        elif isinstance(obj.data[key][label], list) and pos:
            obj.data[key][label].insert(pos, value)
            obj.data[key][label] = list(_flatten(obj.data[key][label]))
def _convertType(iterable, newType):
""" Converts element type within iterable.
"""
iterType = type(iterable)
return iterType([newType(i) for i in iterable])
class Inputs:
""" Handles Inputs section of .INP file.
"""
def __init__(self, filename):
self.filename = filename
self.name = 'Inputs'
self.data = {}
self.currData = None
self.types = {'composition': int, 'exact': bool, 'from': float,
'input': int, 'label': tuple, 'link': int, 'name': str,
'q': float, 'until': float}
_importSection(self, filename)
def get(self, inputNum, label, string=True):
""" Get value from Input.
Input: Input number, Value label
Output: Value
"""
if string:
return str(_getData(self, inputNum, label))
else:
return _getData(self, inputNum, label)
def set(self, inputNum, label, value):
""" Set value from Input.
Input: Input number, Value label, value
Output: Change is made in place
"""
_setData(self, inputNum, label, value)
def getInputNumByLink(self, linkNum):
""" Get value from Input by link number
Input: Link number
Output: List of input numbers
"""
result = [k for k, v in self.data.items() if v['link'] == linkNum]
if len(result) == 0:
raise KeyError('%s not in data' % (linkNum))
else:
return result
def create(self, linkNum, demand, comp, **kwargs):
""" Create new Input
Input: link number, demand, vehicle composition
Output: Creates Input object
"""
if self.data.keys():
num = max(self.data.keys()) + 1
else:
num = 1
inputNum = kwargs.get('input', num)
self.data[inputNum] = {}
self.set(inputNum, 'input', inputNum)
self.set(inputNum, 'q', demand)
self.set(inputNum, 'link', linkNum)
self.set(inputNum, 'composition', comp)
# Default values
self.set(inputNum, 'name', kwargs.get('name', '""'))
self.set(inputNum, 'label', kwargs.get('label', ('0.00', '0.00')))
self.set(inputNum, 'from', kwargs.get('from', '0.0'))
self.set(inputNum, 'until', kwargs.get('until', '3600.0'))
self.set(inputNum, 'exact', kwargs.get('exact', False))
def _readSection(self, line):
""" Process the Input section of the INP file.
"""
if _match('^INPUT\s+\d+', line):
inputNum = self.types['input'](_findall('INPUT\s+(\d+)', line)[0])
self.currData = {'input': inputNum}
elif _match('^\s+NAME', line):
self.currData['name'] = _findall('NAME\s+(".+"|"")', line)[0]
self.currData['label'] = _findall('LABEL\s+(-?\d+.\d+)\s(-?\d+.\d+)', line)[0]
elif _match('^\s+LINK', line):
self.currData['link'] = _findall('LINK\s+(\d+)', line)[0]
if _search('EXACT', line):
self.currData['exact'] = True
self.currData['q'] = _findall('Q EXACT (.+) COMPOSITION',
line)[0]
else:
self.currData['exact'] = False
self.currData['q'] = _findall('Q (.+) COMPOSITION', line)[0]
self.currData['composition'] = _findall('COMPOSITION (\d)',
line)[0]
elif _match('^\s+TIME', line):
self.currData['from'] = _findall('FROM (\d+.\d+)', line)[0]
self.currData['until'] = _findall('UNTIL (\d+.\d+)', line)[0]
# Last line, create Input object
self.create(self.currData['link'], self.currData['q'],
self.currData['composition'], **self.currData)
else:
            print('Non-Input data provided: %s' % line)
def _output(self, inputs):
""" Outputs Inputs syntax to VISSIM.
Input: A single Inputs dictionary
Output: Inputs back into VISSIM syntax
"""
vissimOut = []
inputNum = inputs['input']
def _out(label, s=True):
return self.get(inputNum, label, string=s)
vissimOut.append('INPUT ' + _out('input').rjust(6))
vissimOut.append('NAME '.rjust(10) + _out('name') + ' LABEL ' +
_out('label', s=False)[0] + ' ' +
_out('label', s=False)[1])
if _out('exact', s=False) is True:
vissimOut.append('LINK '.rjust(10) + _out('link') + ' Q EXACT ' +
_out('q') + ' COMPOSITION ' +
_out('composition'))
else:
vissimOut.append('LINK '.rjust(10) + _out('link') + ' Q ' +
_out('q') + ' COMPOSITION ' + _out('composition'))
vissimOut.append('TIME FROM '.rjust(15) + _out('from') + ' UNTIL ' +
_out('until'))
return vissimOut
def export(self, filename):
""" Prepare for export all inputs in a given inputs object
Input: Inputs object
Output: List of all inputs in VISSIM syntax
"""
_exportSection(self, filename)
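# A minimal usage sketch for the Inputs class above, assuming an existing VISSIM
# .INP file named 'network.inp'; the file names and numeric values are
# illustrative assumptions, not taken from any real network.
def _example_inputs_usage():
    """ Read an .INP file, add a vehicle input and export the updated section. """
    inputs = Inputs('network.inp')
    # Create a new input on link 1 with a demand of 900 veh/h and composition 1.
    inputs.create(1, 900, 1, name='"example input"')
    # Values can be read back as strings (default) or in their native type.
    demand = inputs.get(max(inputs.data.keys()), 'q', string=False)
    # Write the updated Inputs section into a new file.
    inputs.export('network_updated.inp')
    return demand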
class Links:
""" Handles Links section of .INP file.
"""
def __init__(self, filename):
self.filename = filename
self.name = 'Links'
self.data = {}
self.over = None
self.currData = None
self.closed = None
self.types = {'link': int, 'name': str, 'label': tuple,
'behaviortype': int, 'displaytype': int, 'length': float,
'lanes': int, 'lane_width': list, 'gradient': float,
'cost': float, 'surcharges': list,
'segment_length': float, 'evaluation': bool,
'from': list, 'over': list, 'to': list, 'closed': dict}
_importSection(self, filename)
def get(self, linkNum, label, string=True):
""" Get value from Link.
Input: Link number, Value label
Output: Value
"""
if string:
return str(_getData(self, linkNum, label))
else:
return _getData(self, linkNum, label)
def set(self, linkNum, label, value):
""" Set value from Link.
Input: Link number, Value label, value
Output: Change is made in place
"""
_setData(self, linkNum, label, value)
def create(self, coordFrom, coordTo, **kwargs):
if self.data.keys():
num = max(self.data.keys()) + 1
else:
num = 1
linkNum = kwargs.get('link', num)
self.data[linkNum] = {}
self.set(linkNum, 'link', linkNum)
self.set(linkNum, 'from', coordFrom)
self.set(linkNum, 'to', coordTo)
# Default values
self.set(linkNum, 'name', kwargs.get('name', '""'))
self.set(linkNum, 'behaviortype', kwargs.get('behaviortype', 1))
self.set(linkNum, 'cost', kwargs.get('cost', 0.00000))
self.set(linkNum, 'displaytype', kwargs.get('displaytype', 1))
self.set(linkNum, 'gradient', kwargs.get('gradient', 0.00000))
self.set(linkNum, 'label', kwargs.get('label', ('0.00', '0.00')))
self.set(linkNum, 'lane_width', kwargs.get('lane_width', [3.66]))
self.set(linkNum, 'lanes', kwargs.get('lanes', 1))
self.set(linkNum, 'segment_length', kwargs.get('segment_length',
10.000))
self.set(linkNum, 'surcharges', kwargs.get('surcharges',
[0.00000, 0.00000]))
self.set(linkNum, 'evaluation', kwargs.get('evaluation', False))
self.set(linkNum, 'over', kwargs.get('over', []))
self.set(linkNum, 'length', kwargs.get('length', 0))
        if self.get(linkNum, 'over', string=False) and self.get(linkNum, 'length', string=False) == 0:
            x1, y1 = self.get(linkNum, 'from', string=False)
            z1 = 0
            length = 0
            for coord in self.get(linkNum, 'over', string=False):
x2, y2, z2 = _convertType(coord, float)
dx, dy, dz = x2 - x1, y2 - y1, z2 - z1
length += _sqrt(dx**2 + dy**2 + dz**2)
x1, y1, z1 = x2, y2, z2
            self.set(linkNum, 'length', length)
# reaction dataset.
processed_dataset = pd.read_pickle(args.dataset_config.output_folder + "final_training_dataset.pkl")
folds = [[] for _ in range(args.dataset_config.num_folds)]
for cls in np.unique(processed_dataset["reaction_class"].values):
# Select the subset of data with the respective class label.
class_subset = processed_dataset.loc[processed_dataset["reaction_class"] == cls]
# Shuffle this subset with a specified seed value.
class_subset = class_subset.sample(frac=1, random_state=args.dataset_config.random_seed)
# Split the subset into multiple folds and save the indices of the rows.
for fold_index, current_fold in enumerate(np.array_split(class_subset.index.values,
args.dataset_config.num_folds)):
folds[fold_index].extend(current_fold.tolist())
# Generate training and validation data and save all of the datasets.
for fold_index, test_indices in enumerate(folds):
print("Generating data for fold {}...".format(fold_index + 1), end="", flush=True)
        # If a directory for this fold does not exist, create it.
directory_path = args.dataset_config.output_folder + "fold_{}/".format(fold_index + 1)
if not os.path.exists(directory_path):
os.makedirs(directory_path)
# Split the remaining indices into training and validation sets.
training_indices = set(processed_dataset.index.values).difference(test_indices)
        validation_indices = random.sample(list(training_indices),  # random.sample needs a sequence, not a set
                                           k=round(len(processed_dataset) * args.dataset_config.validation_split))
training_indices = list(training_indices.difference(validation_indices))
# Save all of the datasets for each respective fold.
processed_dataset.iloc[training_indices, :].sort_values("reaction_class"). \
to_pickle(directory_path + "training_data.pkl".format(fold_index + 1))
processed_dataset.iloc[validation_indices, :].sort_values("reaction_class"). \
to_pickle(directory_path + "validation_data.pkl".format(fold_index + 1))
processed_dataset.iloc[test_indices, :].sort_values("reaction_class"). \
to_pickle(directory_path + "test_data.pkl".format(fold_index + 1))
print("done.")
def generate_fps_from_reaction_products(reaction_smiles, fp_data_configs):
""" Generates specified fingerprints for the both reactive and non-reactive substructures of the reactant and
product molecules that are the participating in the chemical reaction. """
# Generate the RDKit Mol representations of the product molecules and generate the reaction cores.
reactants, _, products = parse_reaction_roles(reaction_smiles, as_what="mol_no_maps")
reaction_cores = get_reaction_core_atoms(reaction_smiles)
# Separate the reaction cores if they consist out of multiple non-neighbouring parts.
separated_cores = get_separated_cores(reaction_smiles, reaction_cores)
# Define variables which will be used for storing the results.
total_reactive_fps, total_non_reactive_fps = [], []
# Iterate through the product molecules and generate fingerprints for all reactive and non-reactive substructures.
for p_ind, product in enumerate(products):
# Iterate through all of the dataset configurations.
for fp_config in fp_data_configs:
reactive_fps, non_reactive_fps = [], []
# Generate fingerprints from the reactive substructures i.e. the reaction core(s).
for core in separated_cores[1][p_ind]:
# Generate reactive EC fingerprints and add them to the list.
if fp_config["type"] == "ecfp":
reactive_fps.append(construct_ecfp(product, radius=fp_config["radius"], bits=fp_config["bits"],
from_atoms=core, output_type="np_array", as_type="np_float"))
# Generate reactive HS fingerprints and add them to the list.
else:
reactive_fps.append(construct_hsfp(product, radius=fp_config["radius"], bits=fp_config["bits"],
from_atoms=core, neighbourhood_ext=fp_config["ext"]))
# Generate the extended environment of the reaction core.
extended_core_env = get_atom_environment(reaction_cores[1][p_ind], product, degree=1)
# Generate fingerprints from the non-reactive substructures i.e. non-reaction core substructures.
for bond in product.GetBonds():
# Generate the extended environment of the focus bond.
extended_bond_env = get_bond_environment(bond, product, degree=1)
# If the extended environment of the non-reactive substructure does not overlap with the extended
# reaction core, generate a non-reactive fingerprint representation.
if not extended_bond_env.intersection(extended_core_env):
# Generate non-reactive EC fingerprints and add them to the list.
if fp_config["type"] == "ecfp":
non_reactive_fps.append(
construct_ecfp(product, radius=fp_config["radius"], bits=fp_config["bits"],
from_atoms=[bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()],
output_type="np_array", as_type="np_float"))
# Generate non-reactive HS fingerprints and add them to the list.
else:
non_reactive_fps.append(
construct_hsfp(product, radius=fp_config["radius"], bits=fp_config["bits"],
from_atoms=[bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()],
neighbourhood_ext=fp_config["ext"]))
# Append the generated fingerprints to the final list.
total_reactive_fps.append(reactive_fps)
total_non_reactive_fps.append(non_reactive_fps)
# Return all of the generated fingerprints and labels.
return total_reactive_fps, total_non_reactive_fps
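# A minimal sketch of the kind of atom-restricted Morgan/ECFP construction that the
# ``construct_ecfp`` helper is used for above, written directly against RDKit; the
# SMILES string, atom indices, radius and bit size are illustrative assumptions.
def _example_core_ecfp():
    import numpy as np
    from rdkit import Chem
    from rdkit.Chem import AllChem
    mol = Chem.MolFromSmiles("CC(=O)O")       # toy product molecule
    core_atoms = [1, 2]                       # pretend these atoms form the reaction core
    bit_vect = AllChem.GetMorganFingerprintAsBitVect(mol, 2, nBits=1024, fromAtoms=core_atoms)
    # Convert the RDKit bit vector into the float NumPy array format used by the pipeline.
    return np.array(bit_vect, dtype=float)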
def save_fingerprints_to_file(output_folder_path, fp_parameters, file_name, file_role, file_ext, fp_content, mode="w"):
""" Generates a descriptive name and saves the file to the specified output location. """
    # Generate a descriptive name for the file.
    if fp_parameters["type"] == "ecfp":
        file_name = "{}_{}_{}_{}_{}.{}".format(fp_parameters["type"], fp_parameters["radius"],
                                               fp_parameters["bits"], file_name[0:-4], file_role, file_ext)
    else:
        file_name = "{}_{}_{}_{}_{}_{}.{}".format(fp_parameters["type"], fp_parameters["radius"],
                                                  fp_parameters["ext"], fp_parameters["bits"],
                                                  file_name[0:-4], file_role, file_ext)
# Save the new file in the specified format.
if mode == "w":
if file_ext == "pkl":
pd.DataFrame(fp_content).to_pickle(output_folder_path + "/" + file_name)
elif file_ext == "csv":
pd.DataFrame(fp_content).to_csv(output_folder_path + "/" + file_name)
else:
raise Exception("Extension needs to be either 'pkl' or 'csv'.")
# Append the content to an already existing dataset.
else:
old_dataset = pd.read_pickle(output_folder_path + "/" + file_name) \
if file_ext == "pkl" else pd.read_csv(output_folder_path + "/" + file_name)
        old_dataset = pd.concat([old_dataset, pd.DataFrame(fp_content)])
old_dataset.to_pickle(output_folder_path + "/" + file_name)
def generate_fingerprint_datasets(args):
""" Generates fingerprint representations for all of the previously constructed data splits. """
# Iterate through all of the generated 'n-fold' folders.
for directory_name in os.listdir(args.dataset_config.output_folder):
if "fold" in directory_name:
# Create folder for the type of fingerprints dataset which is specified in the input parameters.
fold_dir_path = args.dataset_config.output_folder + directory_name + "/"
# Create folders for all of the fingerprint configurations.
for fp_config in args.descriptor_config.model_training:
if not os.path.exists(fold_dir_path + fp_config["folder_name"]):
os.makedirs(fold_dir_path + fp_config["folder_name"])
# Read all of the dataset splits for the current fold.
for file_name in os.listdir(fold_dir_path):
if file_name.endswith(".pkl"):
current_dataset = pd.read_pickle(fold_dir_path + file_name)
reactive_fps = [[] for _ in range(0, len(args.descriptor_config.model_training))]
non_reactive_fps = [[] for _ in range(0, len(args.descriptor_config.model_training))]
mc_lab = []
# Iterate through all of the rows of each dataset.
for row_ind, row in tqdm(current_dataset.iterrows(), total=len(current_dataset.index), ascii=True,
desc="Generating data for '{}' - '{}'".format(directory_name, file_name)):
# Fetch the reactive and non-reactive substructures from the products of this reaction.
r_fps, nr_fps = generate_fps_from_reaction_products(row["reaction_smiles"],
args.descriptor_config.model_training)
# Generate multi-class labels because they are the same for every fingerprint.
mc_lab.extend(np.array([encode_one_hot(row["reaction_class"],
args.dataset_config.final_classes), ] * len(r_fps[0])))
# Iterate through all of the specified configurations.
for fpc_ind, fp_config in enumerate(args.descriptor_config.model_training):
# Append the reactive data and an equal amount of multi-class labels for the configuration.
reactive_fps[fpc_ind].extend(r_fps[fpc_ind])
# Append the non-reactive data for the configuration.
non_reactive_fps[fpc_ind].extend(nr_fps[fpc_ind])
# Save the reactive data and the labels, as well as the rest of the non-reactive data.
for fpc_ind, fp_config in enumerate(args.descriptor_config.model_training):
# Save the reactive data.
save_fingerprints_to_file(fold_dir_path + fp_config["folder_name"], fp_config, file_name, "r",
"pkl", reactive_fps[fpc_ind])
# Save the non-reactive data.
save_fingerprints_to_file(fold_dir_path + fp_config["folder_name"], fp_config, file_name, "nr",
"pkl", non_reactive_fps[fpc_ind])
# Save the binary and multi-class labels for the reactive parts of the data.
save_fingerprints_to_file(fold_dir_path + fp_config["folder_name"], fp_config, file_name, "bc",
"pkl", np.full((len(reactive_fps[fpc_ind]), 1), 1, np.float))
save_fingerprints_to_file(fold_dir_path + fp_config["folder_name"], fp_config, file_name, "mc",
"pkl", mc_lab)
def create_model_training_datasets(args):
""" Aggregates the reactive and non-reactive parts to create the final input dataset for the network. """
# Iterate through all of the generated 'n-fold' folders.
for fold_dir in os.listdir(args.dataset_config.output_folder):
if "fold" in fold_dir:
fold_dir_path = args.dataset_config.output_folder + fold_dir + "/"
# Iterate through all of the generated dataset variant folders in the current fold.
for data_dir in os.listdir(fold_dir_path):
if not data_dir.endswith(".pkl"):
data_dir_path = fold_dir_path + data_dir + "/"
print("Reading datasets from the '{}' folder.".format("/" + fold_dir + "/" + data_dir + "/"))
# Finally, iterate through all of the files in the current dataset variant folder and read the
# reactive and non-reactive parts.
for dataset_split in ["training", "validation", "test"]:
r_fp, nr_fp, r_bc, r_mc = None, None, None, None
for file_name in os.listdir(data_dir_path):
if dataset_split in file_name and "data_r" in file_name:
r_fp = pd.read_pickle(data_dir_path + file_name).values
if dataset_split in file_name and "data_nr" in file_name:
nr_fp = pd.read_pickle(data_dir_path + file_name).values
if dataset_split in file_name and "data_bc" in file_name:
r_bc = pd.read_pickle(data_dir_path + file_name).values
if dataset_split in file_name and "data_mc" in file_name:
r_mc = pd.read_pickle(data_dir_path + file_name).values
# Filter the negative samples to the amount of the highest populated positive class.
print("Filtering negative samples for the {} set...".format(dataset_split), end="", flush=True)
nr_samples = sorted(Counter([np.argmax(r) for r in r_mc]).values(), reverse=True)[0]
nr_fp = nr_fp[get_n_most_frequent_rows(nr_fp, nr_samples)]
# Generate the labels for the saved non-reactive fingerprints.
                        nr_bc = np.full((len(nr_fp), 1), 0, float)
                        nr_mc = np.full((len(nr_fp), 11), 0, float)
nr_mc[:, 0] = 1.
print("done.")
# Aggregate the reactive and non-reactive fingerprints.
print("Aggregating and saving {} set data...".format(dataset_split), end="", flush=True)
x_fp = np.vstack((r_fp, nr_fp))
pd.to_pickle(pd.DataFrame(x_fp), data_dir_path + "x_{}.pkl".format(dataset_split))
print("done. Shape: {}".format(str(x_fp.shape)))
# Aggregate the reactive and non-reactive labels.
print("Aggregating and saving {} set labels...".format(dataset_split), end="", flush=True)
y_bc = np.vstack((r_bc, nr_bc))
pd.to_pickle(pd.DataFrame(y_bc), data_dir_path + "y_bc_{}.pkl".format(dataset_split))
y_mc = np.vstack((r_mc, nr_mc))
pd.to_pickle(pd.DataFrame(y_mc), data_dir_path + "y_mc_{}.pkl".format(dataset_split))
print("done. Shapes: {} and {}.".format(str(y_mc.shape), str(y_bc.shape)))
def create_final_evaluation_dataset(args):
""" Creates a version of the test dataset where the non-reactive substructures are not filtered out and the
compounds are treated like real unknown input compounds without mapping or known reaction class. """
# Read the test dataset from the specified fold.
test_dataset = pd.read_pickle(args.dataset_config.output_folder +
"fold_{}/test_data.pkl".format(args.evaluation_config.best_fold))
final_data_tuples = []
# Iterate through the test dataset and generate the necessary data.
for row_ind, row in tqdm(test_dataset.iterrows(), total=len(test_dataset.index), ascii=True,
desc="Generating the non-filtered version | |
71);\n"
"border-radius: 8px;")
self.generate3.setIconSize(QtCore.QSize(30, 30))
self.generate3.setObjectName("generate3")
self.insert3 = QtWidgets.QPushButton(self.frame_4)
self.insert3.setGeometry(QtCore.QRect(20, 150, 161, 41))
font = QtGui.QFont()
font.setFamily("Open Sans")
font.setPointSize(12)
self.insert3.setFont(font)
self.insert3.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.insert3.setStyleSheet("background-color: rgb(226, 250, 254);\n"
"color: rgb(0, 0, 71);\n"
"border-radius: 8px;")
self.insert3.setIconSize(QtCore.QSize(30, 30))
self.insert3.setObjectName("insert3")
self.manage3 = QtWidgets.QPushButton(self.frame_4)
self.manage3.setGeometry(QtCore.QRect(20, 210, 161, 41))
font = QtGui.QFont()
font.setFamily("Open Sans")
font.setPointSize(12)
self.manage3.setFont(font)
self.manage3.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.manage3.setStyleSheet("background-color: #4866c4;\n"
"color: rgb(226, 250, 254);\n"
"border-radius: 8px;")
self.manage3.setIconSize(QtCore.QSize(30, 30))
self.manage3.setObjectName("manage3")
self.setting3 = QtWidgets.QPushButton(self.frame_4)
self.setting3.setGeometry(QtCore.QRect(20, 270, 161, 41))
font = QtGui.QFont()
font.setFamily("Open Sans")
font.setPointSize(12)
self.setting3.setFont(font)
self.setting3.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.setting3.setStyleSheet("background-color: rgb(226, 250, 254);\n"
"color: rgb(0, 0, 71);\n"
"border-radius: 8px;")
self.setting3.setIconSize(QtCore.QSize(30, 30))
self.setting3.setObjectName("setting3")
self.about3 = QtWidgets.QPushButton(self.frame_4)
self.about3.setGeometry(QtCore.QRect(20, 330, 161, 41))
font = QtGui.QFont()
font.setFamily("Open Sans")
font.setPointSize(12)
self.about3.setFont(font)
self.about3.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.about3.setStyleSheet("background-color: rgb(226, 250, 254);\n"
"color: rgb(0, 0, 71);\n"
"border-radius: 8px;")
self.about3.setIconSize(QtCore.QSize(30, 30))
self.about3.setObjectName("about3")
self.logout3 = QtWidgets.QPushButton(self.frame_4)
self.logout3.setGeometry(QtCore.QRect(20, 390, 161, 41))
font = QtGui.QFont()
font.setFamily("Open Sans")
font.setPointSize(12)
self.logout3.setFont(font)
self.logout3.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.logout3.setStyleSheet("background-color: rgb(226, 250, 254);\n"
"color: rgb(0, 0, 71);\n"
"border-radius: 8px;")
self.logout3.setIconSize(QtCore.QSize(30, 30))
self.logout3.setObjectName("logout3")
self.mode3 = QtWidgets.QPushButton(self.frame_4)
self.mode3.setGeometry(QtCore.QRect(20, 450, 161, 41))
font = QtGui.QFont()
font.setFamily("Open Sans")
font.setPointSize(12)
self.mode3.setFont(font)
self.mode3.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.mode3.setStyleSheet("background-color: rgb(226, 250, 254);\n"
"color: rgb(0, 0, 71);\n"
"border-radius: 8px;")
self.mode3.setText("")
self.mode3.setIconSize(QtCore.QSize(30, 30))
self.mode3.setObjectName("mode3")
self.mode_dr3 = QtWidgets.QPushButton(self.frame_4)
self.mode_dr3.setGeometry(QtCore.QRect(20, 450, 51, 41))
self.mode_dr3.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.mode_dr3.setStyleSheet("background-color: none;")
self.mode_dr3.setText("")
self.mode_dr3.setIcon(icon)
self.mode_dr3.setIconSize(QtCore.QSize(27, 27))
self.mode_dr3.setObjectName("mode_dr3")
self.mode_light11_3 = QtWidgets.QPushButton(self.frame_4)
self.mode_light11_3.setGeometry(QtCore.QRect(70, 450, 111, 41))
self.mode_light11_3.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.mode_light11_3.setStyleSheet("background-color: #4866c4;\n"
"border-radius: 8px;\n"
"border-top-left-radius: 0px;\n"
"border-bottom-left-radius: 0px;")
self.mode_light11_3.setText("")
self.mode_light11_3.setIcon(icon1)
self.mode_light11_3.setIconSize(QtCore.QSize(31, 32))
self.mode_light11_3.setObjectName("mode_light11_3")
self.label_3 = QtWidgets.QLabel(self.three)
self.label_3.setGeometry(QtCore.QRect(230, 30, 451, 41))
font = QtGui.QFont()
font.setFamily("Barlow Condensed ExtraBold")
font.setPointSize(20)
font.setBold(True)
font.setWeight(75)
self.label_3.setFont(font)
self.label_3.setStyleSheet("background-color: none;\n"
"color: #4866c4;")
self.label_3.setObjectName("label_3")
self.label_20 = QtWidgets.QLabel(self.three)
self.label_20.setGeometry(QtCore.QRect(230, 100, 141, 31))
font = QtGui.QFont()
font.setFamily("Open Sans")
font.setPointSize(11)
self.label_20.setFont(font)
self.label_20.setStyleSheet("background-color: rgba(224, 224, 224, 60);")
self.label_20.setAlignment(QtCore.Qt.AlignCenter)
self.label_20.setObjectName("label_20")
self.label_21 = QtWidgets.QLabel(self.three)
self.label_21.setGeometry(QtCore.QRect(390, 100, 141, 31))
font = QtGui.QFont()
font.setFamily("Open Sans")
font.setPointSize(11)
self.label_21.setFont(font)
self.label_21.setStyleSheet("background-color: rgba(224, 224, 224, 60);")
self.label_21.setAlignment(QtCore.Qt.AlignCenter)
self.label_21.setObjectName("label_21")
self.label_22 = QtWidgets.QLabel(self.three)
self.label_22.setGeometry(QtCore.QRect(540, 100, 211, 31))
font = QtGui.QFont()
font.setFamily("Open Sans")
font.setPointSize(11)
self.label_22.setFont(font)
self.label_22.setStyleSheet("background-color: rgba(224, 224, 224, 60);")
self.label_22.setAlignment(QtCore.Qt.AlignCenter)
self.label_22.setObjectName("label_22")
self.a0 = QtWidgets.QLabel(self.three)
self.a0.setGeometry(QtCore.QRect(390, 150, 141, 31))
font = QtGui.QFont()
font.setFamily("Open Sans Light")
font.setPointSize(11)
self.a0.setFont(font)
self.a0.setStyleSheet("background-color: rgba(224, 224, 224, 60);")
self.a0.setText("")
self.a0.setAlignment(QtCore.Qt.AlignCenter)
self.a0.setObjectName("a0")
self.n0 = QtWidgets.QLabel(self.three)
self.n0.setGeometry(QtCore.QRect(230, 150, 141, 31))
font = QtGui.QFont()
font.setFamily("Open Sans Light")
font.setPointSize(11)
self.n0.setFont(font)
self.n0.setStyleSheet("background-color: rgba(224, 224, 224, 60);")
self.n0.setText("")
self.n0.setAlignment(QtCore.Qt.AlignCenter)
self.n0.setObjectName("n0")
self.no0 = QtWidgets.QLabel(self.three)
self.no0.setGeometry(QtCore.QRect(540, 150, 211, 31))
font = QtGui.QFont()
font.setFamily("Open Sans Light")
font.setPointSize(11)
self.no0.setFont(font)
self.no0.setStyleSheet("background-color: rgba(224, 224, 224, 60);")
self.no0.setText("")
self.no0.setAlignment(QtCore.Qt.AlignCenter)
self.no0.setObjectName("no0")
self.no2 = QtWidgets.QLabel(self.three)
self.no2.setGeometry(QtCore.QRect(540, 250, 211, 31))
font = QtGui.QFont()
font.setFamily("Open Sans Light")
font.setPointSize(11)
self.no2.setFont(font)
self.no2.setStyleSheet("background-color: rgba(224, 224, 224, 60);")
self.no2.setText("")
self.no2.setAlignment(QtCore.Qt.AlignCenter)
self.no2.setObjectName("no2")
self.a1 = QtWidgets.QLabel(self.three)
self.a1.setGeometry(QtCore.QRect(390, 200, 141, 31))
font = QtGui.QFont()
font.setFamily("Open Sans Light")
font.setPointSize(11)
self.a1.setFont(font)
self.a1.setStyleSheet("background-color: rgba(224, 224, 224, 60);")
self.a1.setText("")
self.a1.setAlignment(QtCore.Qt.AlignCenter)
self.a1.setObjectName("a1")
self.a2 = QtWidgets.QLabel(self.three)
self.a2.setGeometry(QtCore.QRect(390, 250, 141, 31))
font = QtGui.QFont()
font.setFamily("Open Sans Light")
font.setPointSize(11)
self.a2.setFont(font)
self.a2.setStyleSheet("background-color: rgba(224, 224, 224, 60);")
self.a2.setText("")
self.a2.setAlignment(QtCore.Qt.AlignCenter)
self.a2.setObjectName("a2")
self.n1 = QtWidgets.QLabel(self.three)
self.n1.setGeometry(QtCore.QRect(230, 200, 141, 31))
font = QtGui.QFont()
font.setFamily("Open Sans Light")
font.setPointSize(11)
self.n1.setFont(font)
self.n1.setStyleSheet("background-color: rgba(224, 224, 224, 60);")
self.n1.setText("")
self.n1.setAlignment(QtCore.Qt.AlignCenter)
self.n1.setObjectName("n1")
self.n2 = QtWidgets.QLabel(self.three)
self.n2.setGeometry(QtCore.QRect(230, 250, 141, 31))
font = QtGui.QFont()
font.setFamily("Open Sans Light")
font.setPointSize(11)
self.n2.setFont(font)
self.n2.setStyleSheet("background-color: rgba(224, 224, 224, 60);")
self.n2.setText("")
self.n2.setAlignment(QtCore.Qt.AlignCenter)
self.n2.setObjectName("n2")
self.no1 = QtWidgets.QLabel(self.three)
self.no1.setGeometry(QtCore.QRect(540, 200, 211, 31))
font = QtGui.QFont()
font.setFamily("Open Sans Light")
font.setPointSize(11)
self.no1.setFont(font)
self.no1.setStyleSheet("background-color: rgba(224, 224, 224, 60);")
self.no1.setText("")
self.no1.setAlignment(QtCore.Qt.AlignCenter)
self.no1.setObjectName("no1")
self.no4 = QtWidgets.QLabel(self.three)
self.no4.setGeometry(QtCore.QRect(540, 350, 211, 31))
font = QtGui.QFont()
font.setFamily("Open Sans Light")
font.setPointSize(11)
self.no4.setFont(font)
self.no4.setStyleSheet("background-color: rgba(224, 224, 224, 60);")
self.no4.setText("")
self.no4.setAlignment(QtCore.Qt.AlignCenter)
self.no4.setObjectName("no4")
self.a3 = QtWidgets.QLabel(self.three)
self.a3.setGeometry(QtCore.QRect(390, 300, 141, 31))
font = QtGui.QFont()
font.setFamily("Open Sans Light")
font.setPointSize(11)
self.a3.setFont(font)
self.a3.setStyleSheet("background-color: rgba(224, 224, 224, 60);")
self.a3.setText("")
self.a3.setAlignment(QtCore.Qt.AlignCenter)
self.a3.setObjectName("a3")
self.n5 = QtWidgets.QLabel(self.three)
self.n5.setGeometry(QtCore.QRect(230, 400, 141, 31))
font = QtGui.QFont()
font.setFamily("Open Sans Light")
font.setPointSize(11)
self.n5.setFont(font)
self.n5.setStyleSheet("background-color: rgba(224, 224, 224, 60);")
self.n5.setText("")
self.n5.setAlignment(QtCore.Qt.AlignCenter)
self.n5.setObjectName("n5")
self.a4 = QtWidgets.QLabel(self.three)
self.a4.setGeometry(QtCore.QRect(390, 350, 141, 31))
font = QtGui.QFont()
font.setFamily("Open Sans Light")
font.setPointSize(11)
self.a4.setFont(font)
self.a4.setStyleSheet("background-color: rgba(224, 224, 224, 60);")
self.a4.setText("")
self.a4.setAlignment(QtCore.Qt.AlignCenter)
self.a4.setObjectName("a4")
self.a5 = QtWidgets.QLabel(self.three)
self.a5.setGeometry(QtCore.QRect(390, 400, 141, 31))
font = QtGui.QFont()
font.setFamily("Open Sans Light")
font.setPointSize(11)
self.a5.setFont(font)
self.a5.setStyleSheet("background-color: rgba(224, 224, 224, 60);")
self.a5.setText("")
self.a5.setAlignment(QtCore.Qt.AlignCenter)
self.a5.setObjectName("a5")
self.no6 = QtWidgets.QLabel(self.three)
self.no6.setGeometry(QtCore.QRect(540, 450, 211, 31))
font = QtGui.QFont()
font.setFamily("Open Sans Light")
font.setPointSize(11)
self.no6.setFont(font)
self.no6.setStyleSheet("background-color: rgba(224, 224, 224, 60);")
self.no6.setText("")
self.no6.setAlignment(QtCore.Qt.AlignCenter)
self.no6.setObjectName("no6")
self.n3 = QtWidgets.QLabel(self.three)
self.n3.setGeometry(QtCore.QRect(230, 300, 141, 31))
font = QtGui.QFont()
font.setFamily("Open Sans Light")
font.setPointSize(11)
self.n3.setFont(font)
self.n3.setStyleSheet("background-color: rgba(224, 224, 224, 60);")
self.n3.setText("")
self.n3.setAlignment(QtCore.Qt.AlignCenter)
self.n3.setObjectName("n3")
self.no5 = QtWidgets.QLabel(self.three)
self.no5.setGeometry(QtCore.QRect(540, 400, 211, 31))
font = QtGui.QFont()
font.setFamily("Open Sans Light")
font.setPointSize(11)
self.no5.setFont(font)
self.no5.setStyleSheet("background-color: rgba(224, 224, 224, 60);")
self.no5.setText("")
self.no5.setAlignment(QtCore.Qt.AlignCenter)
self.no5.setObjectName("no5")
self.n6 = QtWidgets.QLabel(self.three)
self.n6.setGeometry(QtCore.QRect(230, 450, 141, 31))
font = QtGui.QFont()
font.setFamily("Open Sans Light")
font.setPointSize(11)
self.n6.setFont(font)
self.n6.setStyleSheet("background-color: rgba(224, 224, 224, 60);")
self.n6.setText("")
self.n6.setAlignment(QtCore.Qt.AlignCenter)
self.n6.setObjectName("n6")
self.n4 = QtWidgets.QLabel(self.three)
self.n4.setGeometry(QtCore.QRect(230, 350, 141, 31))
font = QtGui.QFont()
font.setFamily("Open Sans Light")
font.setPointSize(11)
self.n4.setFont(font)
self.n4.setStyleSheet("background-color: rgba(224, 224, 224, 60);")
self.n4.setText("")
self.n4.setAlignment(QtCore.Qt.AlignCenter)
self.n4.setObjectName("n4")
self.a6 = QtWidgets.QLabel(self.three)
self.a6.setGeometry(QtCore.QRect(390, 450, 141, 31))
font = QtGui.QFont()
font.setFamily("Open Sans Light")
font.setPointSize(11)
self.a6.setFont(font)
self.a6.setStyleSheet("background-color: rgba(224, 224, 224, 60);")
self.a6.setText("")
self.a6.setAlignment(QtCore.Qt.AlignCenter)
self.a6.setObjectName("a6")
self.no3 = QtWidgets.QLabel(self.three)
self.no3.setGeometry(QtCore.QRect(540, 300, 211, 31))
font = QtGui.QFont()
font.setFamily("Open Sans Light")
font.setPointSize(11)
self.no3.setFont(font)
self.no3.setStyleSheet("background-color: rgba(224, 224, 224, 60);")
self.no3.setText("")
self.no3.setAlignment(QtCore.Qt.AlignCenter)
self.no3.setObjectName("no3")
self.copy_manage1 = QtWidgets.QPushButton(self.three)
self.copy_manage1.setGeometry(QtCore.QRect(760, 150, 31, 31))
self.copy_manage1.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.copy_manage1.setStyleSheet("background-color: rgba(255, 255, 255, 100);\n"
"border-radius: 10px;\n"
"")
self.copy_manage1.setText("")
self.copy_manage1.setIcon(icon2)
self.copy_manage1.setIconSize(QtCore.QSize(32, 30))
self.copy_manage1.setObjectName("copy_manage1")
self.copy_manage2 = QtWidgets.QPushButton(self.three)
self.copy_manage2.setGeometry(QtCore.QRect(760, 200, 31, 31))
self.copy_manage2.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.copy_manage2.setStyleSheet("background-color: rgba(255, 255, 255, 100);\n"
"border-radius: 10px;\n"
"")
self.copy_manage2.setText("")
self.copy_manage2.setIcon(icon2)
self.copy_manage2.setIconSize(QtCore.QSize(30, 30))
self.copy_manage2.setObjectName("copy_manage2")
self.copy_manage3 = QtWidgets.QPushButton(self.three)
self.copy_manage3.setGeometry(QtCore.QRect(760, 250, 31, 31))
self.copy_manage3.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.copy_manage3.setStyleSheet("background-color: rgba(255, 255, 255, 100);\n"
"border-radius: 10px;\n"
"")
self.copy_manage3.setText("")
self.copy_manage3.setIcon(icon2)
self.copy_manage3.setIconSize(QtCore.QSize(30, 30))
self.copy_manage3.setObjectName("copy_manage3")
self.copy_manage4 = QtWidgets.QPushButton(self.three)
self.copy_manage4.setGeometry(QtCore.QRect(760, 300, 31, 31))
self.copy_manage4.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.copy_manage4.setStyleSheet("background-color: rgba(255, 255, 255, 100);\n"
"border-radius: 10px;\n"
"")
self.copy_manage4.setText("")
self.copy_manage4.setIcon(icon2)
self.copy_manage4.setIconSize(QtCore.QSize(30, 30))
self.copy_manage4.setObjectName("copy_manage4")
self.copy_manage5 = QtWidgets.QPushButton(self.three)
self.copy_manage5.setGeometry(QtCore.QRect(760, 350, 31, 31))
self.copy_manage5.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.copy_manage5.setStyleSheet("background-color: rgba(255, 255, 255, 100);\n"
"border-radius: 10px;\n"
"")
self.copy_manage5.setText("")
self.copy_manage5.setIcon(icon2)
self.copy_manage5.setIconSize(QtCore.QSize(30, 30))
self.copy_manage5.setObjectName("copy_manage5")
self.copy_manage6 = QtWidgets.QPushButton(self.three)
self.copy_manage6.setGeometry(QtCore.QRect(760, 400, 31, 31))
self.copy_manage6.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.copy_manage6.setStyleSheet("background-color: rgba(255, 255, 255, 100);\n"
"border-radius: 10px;\n"
"")
self.copy_manage6.setText("")
self.copy_manage6.setIcon(icon2)
self.copy_manage6.setIconSize(QtCore.QSize(30, 30))
self.copy_manage6.setObjectName("copy_manage6")
self.copy_manage7 = QtWidgets.QPushButton(self.three)
self.copy_manage7.setGeometry(QtCore.QRect(760, 450, 31, 31))
self.copy_manage7.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.copy_manage7.setStyleSheet("background-color: rgba(255, 255, 255, 100);\n"
"border-radius: 10px;\n"
"")
self.copy_manage7.setText("")
self.copy_manage7.setIcon(icon2)
self.copy_manage7.setIconSize(QtCore.QSize(30, 30))
self.copy_manage7.setObjectName("copy_manage7")
self.showall = QtWidgets.QPushButton(self.three)
self.showall.setGeometry(QtCore.QRect(410, 500, 161, 41))
font = QtGui.QFont()
font.setFamily("Open Sans")
font.setPointSize(13)
self.showall.setFont(font)
self.showall.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.showall.setStyleSheet("background-color: qlineargradient(spread:pad, x1:0.518, y1:0, x2:0.487562, y2:1, stop:0 #2a66ff, stop:1 #4c6bff);\n"
"color: rgb(255, 255, 255);\n"
"border-radius: 8px;")
self.showall.setIconSize(QtCore.QSize(30, 30))
self.showall.setObjectName("showall")
self.closegradient_3 = QtWidgets.QPushButton(self.three)
self.closegradient_3.setGeometry(QtCore.QRect(770, 10, 31, 21))
self.closegradient_3.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.closegradient_3.setStyleSheet("background-color: none;\n"
"")
self.closegradient_3.setText("")
self.closegradient_3.setIcon(icon3)
self.closegradient_3.setIconSize(QtCore.QSize(36, 37))
self.closegradient_3.setObjectName("closegradient_3")
self.mingradient_3 = QtWidgets.QPushButton(self.three)
self.mingradient_3.setGeometry(QtCore.QRect(730, 0, 41, 31))
self.mingradient_3.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.mingradient_3.setStyleSheet("background-color: none;\n"
"")
self.mingradient_3.setText("")
self.mingradient_3.setIcon(icon4)
self.mingradient_3.setIconSize(QtCore.QSize(31, 37))
self.mingradient_3.setObjectName("mingradient_3")
self.stackedWidget.addWidget(self.three)
self.for_2 = QtWidgets.QWidget()
self.for_2.setStyleSheet("background-color: qlineargradient(spread:pad, x1:0, y1:0.488636, x2:1, y2:0.483, stop:0 #e2fcfe, stop:0.5 #ffffff, stop:1 #e2f7fe);\n"
"border-radius: 15px;")
self.for_2.setObjectName("for_2")
self.frame_5 = QtWidgets.QFrame(self.for_2)
self.frame_5.setGeometry(QtCore.QRect(0, 0, 201, 561))
self.frame_5.setStyleSheet("border-radius:15px;\n"
"background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:0, stop:0 #C9D6FF, stop:1 #E2E2E2);")
self.frame_5.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_5.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_5.setObjectName("frame_5")
self.label_23 = QtWidgets.QLabel(self.frame_5)
self.label_23.setGeometry(QtCore.QRect(0, 0, 201, 51))
font = QtGui.QFont()
font.setFamily("Bauhaus 93")
font.setPointSize(16)
font.setUnderline(False)
self.label_23.setFont(font)
self.label_23.setStyleSheet("color: qlineargradient(spread:pad, x1:0.518, y1:0, x2:0.487562, y2:1, stop:0 rgba(0, 63, 225, 200), stop:1 rgba(17, 84, 255, 200));\n"
"border-radius: 8px;")
self.label_23.setAlignment(QtCore.Qt.AlignCenter)
self.label_23.setObjectName("label_23")
self.generate4 = QtWidgets.QPushButton(self.frame_5)
self.generate4.setGeometry(QtCore.QRect(20, 90, 161, 41))
font = QtGui.QFont()
font.setFamily("Open Sans")
font.setPointSize(12)
self.generate4.setFont(font)
self.generate4.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.generate4.setStyleSheet("background-color: rgb(226, 250, 254);\n"
"color: rgb(0, 0, 71);\n"
"border-radius: 8px;")
self.generate4.setIconSize(QtCore.QSize(30, 30))
self.generate4.setObjectName("generate4")
self.insert4 = QtWidgets.QPushButton(self.frame_5)
self.insert4.setGeometry(QtCore.QRect(20, 150, 161, 41))
font = QtGui.QFont()
font.setFamily("Open Sans")
font.setPointSize(12)
self.insert4.setFont(font)
self.insert4.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.insert4.setStyleSheet("background-color: rgb(226, 250, 254);\n"
"color: rgb(0, 0, 71);\n"
"border-radius: 8px;")
self.insert4.setIconSize(QtCore.QSize(30, 30))
self.insert4.setObjectName("insert4")
self.manage4 = QtWidgets.QPushButton(self.frame_5)
self.manage4.setGeometry(QtCore.QRect(20, 210, 161, 41))
font = QtGui.QFont()
font.setFamily("Open Sans")
font.setPointSize(12)
self.manage4.setFont(font)
self.manage4.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.manage4.setStyleSheet("background-color: rgb(226, 250, 254);\n"
"color: rgb(0, 0, 71);\n"
"border-radius: 8px;")
self.manage4.setIconSize(QtCore.QSize(30, 30))
self.manage4.setObjectName("manage4")
self.setting4 = QtWidgets.QPushButton(self.frame_5)
self.setting4.setGeometry(QtCore.QRect(20, 270, 161, 41))
font = QtGui.QFont()
font.setFamily("Open Sans")
font.setPointSize(12)
self.setting4.setFont(font)
self.setting4.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.setting4.setStyleSheet("background-color: #4866c4;\n"
"color: rgb(226, 250, 254);\n"
"border-radius: 8px;")
self.setting4.setIconSize(QtCore.QSize(30, 30))
self.setting4.setObjectName("setting4")
self.about4 = QtWidgets.QPushButton(self.frame_5)
self.about4.setGeometry(QtCore.QRect(20, 330, 161, 41))
font = QtGui.QFont()
font.setFamily("Open Sans")
font.setPointSize(12)
self.about4.setFont(font)
self.about4.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.about4.setStyleSheet("background-color: rgb(226, 250, 254);\n"
"color: rgb(0, 0, 71);\n"
"border-radius: 8px;")
self.about4.setIconSize(QtCore.QSize(30, 30))
self.about4.setObjectName("about4")
self.logout4 = QtWidgets.QPushButton(self.frame_5)
self.logout4.setGeometry(QtCore.QRect(20, 390, 161, 41))
font = QtGui.QFont()
font.setFamily("Open Sans")
font.setPointSize(12)
self.logout4.setFont(font)
self.logout4.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.logout4.setStyleSheet("background-color: rgb(226, 250, 254);\n"
"color: rgb(0, 0, 71);\n"
"border-radius: 8px;")
self.logout4.setIconSize(QtCore.QSize(30, 30))
self.logout4.setObjectName("logout4")
self.mode4 = QtWidgets.QPushButton(self.frame_5)
self.mode4.setGeometry(QtCore.QRect(20, 450, 161, 41))
font = QtGui.QFont()
font.setFamily("Open Sans")
font.setPointSize(12)
self.mode4.setFont(font)
self.mode4.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.mode4.setStyleSheet("background-color: rgb(226, 250, 254);\n"
"color: rgb(0, 0, 71);\n"
"border-radius: 8px;")
self.mode4.setText("")
self.mode4.setIconSize(QtCore.QSize(30, 30))
self.mode4.setObjectName("mode4")
self.mode_dr4 = QtWidgets.QPushButton(self.frame_5)
self.mode_dr4.setGeometry(QtCore.QRect(20, 450, 51, 41))
self.mode_dr4.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.mode_dr4.setStyleSheet("background-color: none;")
self.mode_dr4.setText("")
self.mode_dr4.setIcon(icon)
self.mode_dr4.setIconSize(QtCore.QSize(27, 27))
self.mode_dr4.setObjectName("mode_dr4")
self.mode_light11_4 = QtWidgets.QPushButton(self.frame_5)
self.mode_light11_4.setGeometry(QtCore.QRect(70, 450, 111, 41))
self.mode_light11_4.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.mode_light11_4.setStyleSheet("background-color: #4866c4;\n"
"border-radius: 8px;\n"
"border-top-left-radius: 0px;\n"
"border-bottom-left-radius: 0px;")
self.mode_light11_4.setText("")
self.mode_light11_4.setIcon(icon1)
self.mode_light11_4.setIconSize(QtCore.QSize(31, 32))
self.mode_light11_4.setObjectName("mode_light11_4")
self.label_createquick = QtWidgets.QLabel(self.for_2)
self.label_createquick.setGeometry(QtCore.QRect(230, 30, 451, 41))
font = QtGui.QFont()
font.setFamily("Barlow Condensed ExtraBold")
font.setPointSize(20)
font.setBold(True)
font.setWeight(75)
self.label_createquick.setFont(font)
self.label_createquick.setStyleSheet("background-color: none;\n"
"color: #4866c4;")
self.label_createquick.setObjectName("label_createquick")
self.lineEdit_confirmid = QtWidgets.QLineEdit(self.for_2)
self.lineEdit_confirmid.setGeometry(QtCore.QRect(330, 160, 471, 41))
font = QtGui.QFont()
font.setFamily("Open Sans Light")
font.setPointSize(10)
self.lineEdit_confirmid.setFont(font)
self.lineEdit_confirmid.setStyleSheet("background-color: rgba(224, 224, | |
"""Utilities for creating XDMF metadata from results stored in HDF5.
XDMF utilities are separate from the core xdmf to reduce core dependencies.
See:
- core.xdmf
license: HDF5Application/license.txt
"""
import xml.etree.ElementTree as ET
import os
import warnings
from itertools import chain
from contextlib import contextmanager
import re
import KratosMultiphysics
import KratosMultiphysics.HDF5Application as KratosHDF5
from KratosMultiphysics.HDF5Application.core.xdmf import SpatialGrid
from KratosMultiphysics.HDF5Application.core.xdmf import HDF5UniformDataItem
from KratosMultiphysics.HDF5Application.core.xdmf import Geometry
from KratosMultiphysics.HDF5Application.core.xdmf import TopologyCellType
from KratosMultiphysics.HDF5Application.core.xdmf import UniformMeshTopology
from KratosMultiphysics.HDF5Application.core.xdmf import UniformGrid
from KratosMultiphysics.HDF5Application.core.xdmf import NodalData
from KratosMultiphysics.HDF5Application.core.xdmf import ElementData
from KratosMultiphysics.HDF5Application.core.xdmf import ConditionData
from KratosMultiphysics.HDF5Application.core.xdmf import TemporalGrid
from KratosMultiphysics.HDF5Application.core.xdmf import Time
from KratosMultiphysics.HDF5Application.core.xdmf import Domain
from KratosMultiphysics.HDF5Application.core.xdmf import Xdmf
try:
with warnings.catch_warnings():
# suppressing an import-related warning from h5py
# problem appears when using it in a test with python >=3.6
warnings.simplefilter('ignore', category=ImportWarning)
import h5py
except ModuleNotFoundError:
# If h5py is not found, then we delay the exception until name lookup is
# performed on the module. This allows the current module to still be used
# for testing purposes. Otherwise the tests must be skipped.
warn_msg = "h5py module was not found!"
KratosMultiphysics.Logger.PrintWarning(__name__, warn_msg)
class NonExistingModule(object):
def __init__(self, module_name):
self.module_name = module_name
def __getattr__(self, name):
raise ModuleNotFoundError(
"No module named '" + self.module_name + "'")
h5py = NonExistingModule('h5py')
@contextmanager
def TryOpenH5File(name, mode=None, driver=None, **kwds):
"""A context manager wrapper for the opened file.
In case the file cannot be opened, yield None rather than raise an
exception. This can be the case if the file is already opened.
"""
try:
with h5py.File(name, mode, driver=driver, **kwds) as f:
yield f
except OSError:
warn_msg = 'No xdmf-data was written for file:\n"' + name + '"'
KratosMultiphysics.Logger.PrintWarning("XDMF", warn_msg)
yield None
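# Illustrative usage sketch (not part of the original module): TryOpenH5File
# yields None instead of raising when a file cannot be opened, so callers only
# need a truthiness check before touching the handle. The file name below is a
# hypothetical placeholder.
def _example_try_open_h5_file(file_name="results-hypothetical.h5"):
    with TryOpenH5File(file_name, "r") as h5_file:
        if not h5_file:
            return None  # file missing or locked elsewhere; nothing to read
        return list(h5_file.keys())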
def RenumberConnectivitiesForXdmf(filename_or_list_of_filenames, h5path_to_mesh):
"""Renumber mesh connectivities for XDMF.
Keyword arguments:
filename_or_list_of_filenames -- the HDF5 file(s) to renumber
h5path_to_mesh -- the internal HDF5 file path to the mesh
The mesh connectivities must be renumbered for XDMF by the node's array
index rather than its ID. The renumbered connectivities are stored in
HDF5 and referenced by the XDMF Grid. If a file cannot be opened, it is
skipped.
See:
- XdmfConnectivitiesWriterProcess.
"""
for path in list(filename_or_list_of_filenames):
skip = True
with TryOpenH5File(path, "r") as f:
if not f:
continue
if h5path_to_mesh in f:
skip = "Xdmf" in f[h5path_to_mesh]
if not skip:
KratosHDF5.HDF5XdmfConnectivitiesWriterProcess(
path, h5path_to_mesh).Execute()
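# Illustrative usage sketch (not part of the original module): the file names
# and the internal mesh path "/ModelData" are assumptions about how the data
# was written; pass a list (not a bare string) so each file is visited once.
def _example_renumber_connectivities(file_names=("kratos-0.0000.h5",
                                                 "kratos-0.1000.h5"),
                                     h5path_to_mesh="/ModelData"):
    RenumberConnectivitiesForXdmf(list(file_names), h5path_to_mesh)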
def GetListOfSpatialGrids(spatial_grids_list, h5_model_part, current_path):
for key in h5_model_part.keys():
if (key == "Conditions" or key == "Elements"):
spatial_grids_list.append([str(h5_model_part.name) + "/" + str(key), current_path + "." + str(key)])
else:
GetListOfSpatialGrids(spatial_grids_list, h5_model_part[key], current_path + "." + str(key))
def CreateXdmfSpatialGrid(h5_model_part):
"""Return an XDMF Grid object corresponding to a mesh in an HDF5 file.
Keyword arguments:
h5_model_part -- the HDF5 group containing the model part
Expects:
- element connectivities in h5_model_part["Xdmf/Elements/<element-name>"].
    Each connectivities data set has attributes "Dimension" and "NumberOfNodes". For
example, "Element2D3N" has "Dimension" 2 and "NumberOfNodes" 3. The
connectivities differ from the normal mdpa connectivities in that they
directly index the array of nodal coordinates. Currently there is
no other way to post-process the mesh with Xdmf.
See:
- core.operations.ModelPartOutput,
- core.operations.PartitionedModelPartOutput,
- RenumberConnectivitiesForXdmf.
"""
sgrid = SpatialGrid()
geom = Geometry(HDF5UniformDataItem(
h5_model_part["Nodes/Local/Coordinates"]))
spatial_grids_list = []
GetListOfSpatialGrids(spatial_grids_list, h5_model_part["Xdmf"], "RootModelPart")
for spatial_grid in spatial_grids_list:
spatial_grid_location = spatial_grid[0]
spatial_grid_name = spatial_grid[1]
for name, value in h5_model_part[spatial_grid_location].items():
cell_type = TopologyCellType(
value.attrs["Dimension"], value.attrs["NumberOfNodes"])
connectivities = HDF5UniformDataItem(value["Connectivities"])
topology = UniformMeshTopology(cell_type, connectivities)
sgrid.add_grid(UniformGrid(spatial_grid_name + "." + name, geom, topology))
KratosMultiphysics.Logger.PrintInfo("XDMF", "Added " + spatial_grid_name + "." + name + " spatial grid.")
return sgrid
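# Illustrative usage sketch (not part of the original module): open one results
# file and build the XDMF SpatialGrid for its mesh. The file name and the
# "/ModelData" group path are assumptions; the group must contain the
# "Nodes/Local/Coordinates" and "Xdmf" members described above.
def _example_create_spatial_grid(file_name="kratos-0.0000.h5",
                                 h5path_to_mesh="/ModelData"):
    with TryOpenH5File(file_name, "r") as h5_file:
        if not h5_file or h5path_to_mesh not in h5_file:
            return None
        return CreateXdmfSpatialGrid(h5_file[h5path_to_mesh])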
def Has_dtype(item): return hasattr(item[1], 'dtype')
def XdmfNodalResults(h5_results):
"""Return a list of XDMF Attribute objects for nodal results in an HDF5 file.
Keyword arguments:
h5_results -- the HDF5 group containing the results
Checks for results stored in data sets by variable name in:
- h5_results["NodalSolutionStepData/<variable-name>"]
- h5_results["NodalDataValues/<variable-name>"]
Expects:
- each result variable occurs only once
If no results are found, returns an empty list.
See:
- core.operations.NodalSolutionStepDataOutput,
- core.operations.NodalDataValueOutput.
"""
results = {}
for path in ["NodalSolutionStepData", "NodalDataValues"]:
try:
grp = h5_results[path]
except KeyError:
continue
for variable, data in filter(Has_dtype, grp.items()):
if variable in results:
# A variable can exist in the nodal solution step data or
# non-historical nodal data value container, but not both.
raise RuntimeError('Nodal result variable "' +
variable + '" already exists.')
results[variable] = NodalData(variable, HDF5UniformDataItem(data))
return list(results.values())
def XdmfNodalFlags(h5_results):
"""Return a list of XDMF Attribute objects for nodal flags in an HDF5 file.
Keyword arguments:
h5_results -- the HDF5 group containing the flags
Checks for flags stored in data sets by variable name in:
- h5_flags["NodalFlagValues/<flag-name>"]
Expects:
- each flag variable occurs only once
If no flags are found, returns an empty list.
See:
- core.operations.NodalFlagsValueOutput.
"""
results_path = "NodalFlagValues"
results = []
try:
grp = h5_results[results_path]
except KeyError:
return results
for variable, data in filter(Has_dtype, grp.items()):
r = NodalData(variable, HDF5UniformDataItem(data))
results.append(r)
return results
def XdmfElementResults(h5_results):
"""Return a list of XDMF Attribute objects for element results in an HDF5 file.
Keyword arguments:
h5_results -- the HDF5 group containing the results
Checks for results stored by variable name in:
- h5_results["ElementDataValues/<variable>"]
If no results are found, returns an empty list.
See:
- core.operations.ElementDataValueOutput.
"""
results_path = "ElementDataValues"
results = []
try:
grp = h5_results[results_path]
except KeyError:
return results
for variable, data in filter(Has_dtype, grp.items()):
r = ElementData(variable, HDF5UniformDataItem(data))
results.append(r)
return results
def XdmfElementFlags(h5_results):
"""Return a list of XDMF Attribute objects for element flags in an HDF5 file.
Keyword arguments:
    h5_results -- the HDF5 group containing the flags
    Checks for flags stored by variable name in:
    - h5_results["ElementFlagValues/<flag-name>"]
If no flags are found, returns an empty list.
See:
- core.operations.ElementFlagValueOutput.
"""
results_path = "ElementFlagValues"
results = []
try:
grp = h5_results[results_path]
except KeyError:
return results
for variable, data in filter(Has_dtype, grp.items()):
r = ElementData(variable, HDF5UniformDataItem(data))
results.append(r)
return results
def XdmfElementGaussPointValues(h5_results):
"""Return a list of XDMF Attribute objects for element integration point values in an HDF5 file.
Keyword arguments:
h5_results -- the HDF5 group containing the results
Checks for results stored by variable name in:
- h5_results["ElementGaussPointValues/<variable>"]
If no results are found, returns an empty list.
See:
- core.operations.ElementGaussPointOutput.
"""
results_path = "ElementGaussPointValues"
results = []
try:
grp = h5_results[results_path]
except KeyError:
return results
for variable, data in filter(Has_dtype, grp.items()):
r = ElementData(variable, HDF5UniformDataItem(data))
results.append(r)
return results
def XdmfConditionResults(h5_results):
"""Return a list of XDMF Attribute objects for element results in an HDF5 file.
Keyword arguments:
h5_results -- the HDF5 group containing the results
Checks for results stored by variable name in:
- h5_results["ConditionDataValues/<variable>"]
If no results are found, returns an empty list.
See:
- core.operations.ConditionDataValueOutput.
"""
results_path = "ConditionDataValues"
results = []
try:
grp = h5_results[results_path]
except KeyError:
return results
for variable, data in filter(Has_dtype, grp.items()):
r = ConditionData(variable, HDF5UniformDataItem(data))
results.append(r)
return results
def XdmfConditionFlags(h5_results):
"""Return a list of XDMF Attribute objects for element flags in an HDF5 file.
Keyword arguments:
h5_flags -- the HDF5 group containing the flags
Checks for flags stored by variable name in:
- h5_flags["ConditionFlagValues/<flag-name>"]
If no flags are found, returns an empty list.
See:
- core.operations.ConditionFlagValueOutput.
"""
results_path = "ConditionFlagValues"
results = []
try:
grp = h5_results[results_path]
except KeyError:
return results
for variable, data in filter(Has_dtype, grp.items()):
r = ConditionData(variable, HDF5UniformDataItem(data))
results.append(r)
return results
def XdmfConditionGaussPointValues(h5_results):
"""Return a list of XDMF Attribute objects for element integration point values in an HDF5 file.
Keyword arguments:
h5_results -- the HDF5 group containing the results
Checks for results stored by variable name in:
- h5_results["ConditionGaussPointValues/<variable>"]
If no results are found, returns an empty list.
See:
- core.operations.ConditionGaussPointOutput.
"""
results_path = "ConditionGaussPointValues"
results = []
try:
grp = h5_results[results_path]
except KeyError:
return results
for variable, data in filter(Has_dtype, grp.items()):
r = ElementData(variable, HDF5UniformDataItem(data))
results.append(r)
return results
def XdmfResults(h5_results):
"""Return a list of XDMF Attribute objects for results in an HDF5 file.
Keyword arguments:
h5_results -- the HDF5 group containing the results
"""
return list(
chain(
XdmfNodalResults(h5_results),
XdmfNodalFlags(h5_results),
XdmfElementResults(h5_results),
XdmfElementFlags(h5_results),
XdmfElementGaussPointValues(h5_results),
XdmfConditionResults(h5_results),
XdmfConditionFlags(h5_results),
XdmfConditionGaussPointValues(h5_results)
)
)
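# Illustrative usage sketch (not part of the original module): gather every
# supported attribute type from one results group. The file name and the
# "/ResultsData" group path are assumptions about the output settings used.
def _example_collect_results(file_name="kratos-0.0000.h5",
                             h5path_to_results="/ResultsData"):
    with TryOpenH5File(file_name, "r") as h5_file:
        if not h5_file or h5path_to_results not in h5_file:
            return []
        return XdmfResults(h5_file[h5path_to_results])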
def TimeLabel(file_path):
"""Return the time string from the file name.
E.g.:
'kratos-123.h5' -> '123'
'kratos-1.2.h5' -> '1.2'
'kratos-1.2e+00.h5' -> '1.2e+00'
Returns empty string if not found.
"""
# Is there a better way to do this?
temp_file_path = file_path.replace("E-", "E*")
temp_file_path = temp_file_path.replace("e-", "e*")
dash_split = temp_file_path[:temp_file_path.rfind(".")].split("-")
dash_split[-1] = dash_split[-1].replace("E*", "E-")
| |
VM roles \""
_msg += ','.join(_required_imageid_list[_imageid]) + "\": \""
_msg += _imageid + "\" "
if _imageid in map_id_to_name :
_msg += "(\"" + map_id_to_name[_imageid].strip() + "\") "
_msg += "is NOT registered "
_msg += "(attaching VMs with any of these roles will result in error).\n"
if not len(_detected_imageids) :
_msg = "WARNING! None of the image ids used by any VM \"role\" were detected"
_msg += " in this " + self.get_description() + " !"
# _msg += "of the following images: " + ','.join(_undetected_imageids.keys())
cbwarn(_msg, True)
else :
_msg = _msg.replace("yx",'')
_msg = _msg.replace("x "," ")
_msg = _msg[:-2]
if len(_msg) :
cbdebug(_msg, True)
return _detected_imageids
@trace
def populate_cloudconfig(self, obj_attr_list) :
'''
TBD
'''
if ("userdata" not in obj_attr_list or str(obj_attr_list["userdata"]).lower() == "false") and obj_attr_list["use_vpn_ip"].lower() == "false" :
#cbdebug("Skipping userdata: " + str(obj_attr_list["userdata"]), True)
return None
obj_attr_list["cloudinit_keys"] += ',' + obj_attr_list["pubkey_contents"]
obj_attr_list["cloudinit_keys"] = obj_attr_list["cloudinit_keys"].replace("____",' ').split(',')
cloudconfig = "#cloud-config\n"
cloudconfig += "ssh_pwauth: true\n"
cloudconfig += "disable_root: false\n"
cloudconfig += "\nusers:\n"
cloudconfig += "- name: " + obj_attr_list["login"] + "\n"
if str(obj_attr_list["password"]).lower() != "false" :
cloudconfig += " lock-passwd: false\n"
cloudconfig += " lock_passwd: <PASSWORD>\n"
cloudconfig += " passwd: " + obj_attr_list["password"].replace("<PASSWORD>", "$") + "\n"
else :
cloudconfig += " lock-passwd: true\n"
cloudconfig += " home: /home/" + obj_attr_list["login"] + "\n"
cloudconfig += " shell: /bin/bash\n"
cloudconfig += " sudo: ALL=(ALL) NOPASSWD:ALL\n"
if obj_attr_list["userdata_ssh"].lower() == "true" :
cloudconfig += " ssh_authorized_keys:\n"
for _k in obj_attr_list["cloudinit_keys"] :
if len(_k) > 370 :
cloudconfig += " - " + _k + '\n'
if obj_attr_list["userdata_ssh"].lower() == "true" :
cloudconfig += "\n\n"
cloudconfig += " ssh_authorized_keys:\n"
for _k in obj_attr_list["cloudinit_keys"] :
if len(_k) > 370 :
cloudconfig += " - " + _k + '\n'
cloudconfig += "\n"
cloudconfig += "\nwrite_files:\n"
cloudconfig += " - path: /tmp/cb_post_boot.sh\n"
cloudconfig += " content: |\n"
if obj_attr_list["userdata_post_boot"].lower() == "true" :
cloudconfig += self.create_bootstrap_script(obj_attr_list)
else :
cloudconfig += " #!/bin/bash\n"
cloudconfig += " /bin/true\n"
if obj_attr_list["use_vpn_ip"].lower() == "true" :
if "cloudinit_packages" in obj_attr_list and obj_attr_list["cloudinit_packages"].lower() != "false" :
obj_attr_list["cloudinit_packages"] += ";openvpn;redis-tools"
else :
obj_attr_list["cloudinit_packages"] = "openvpn;redis-tools"
conf_destination = "/etc/openvpn/" + obj_attr_list["cloud_name"] + "_client-cb-openvpn-cloud.conf"
targets = []
targets.append(("/configs/generated/" + obj_attr_list["cloud_name"] + "_client-cb-openvpn.conf", conf_destination))
targets.append(("/util/openvpn/client_connected.sh", "/etc/openvpn/client_connected.sh"))
for target in targets :
(src, dest) = target
cbdebug("src: " + src + " dest: " + dest)
cloudconfig += """
- path: """ + dest + """
content: |
"""
fhname = cwd + src
cbdebug("Opening: " + fhname)
fh = open(fhname, 'r')
while True :
line = fh.readline()
if not line :
break
line = line.replace("USER", obj_attr_list["username"])
line = line.replace("CLOUD_NAME", obj_attr_list["cloud_name"])
line = line.replace("SERVER_BOOTSTRAP", obj_attr_list["vpn_server_bootstrap"])
line = line.replace("UUID", obj_attr_list["uuid"])
line = line.replace("OSCI_PORT", str(self.osci.port))
line = line.replace("OSCI_DBID", str(self.osci.dbid))
if line.count("remote ") :
line = "remote " + obj_attr_list["vpn_server_ip"] + " " + obj_attr_list["vpn_server_port"] + "\n"
cloudconfig += " " + line
if line.count("remote ") :
cloudconfig += " up /etc/openvpn/client_connected.sh\n"
fh.close()
if obj_attr_list["userdata_post_boot"].lower() == "true" or obj_attr_list["use_vpn_ip"].lower() != "false" or obj_attr_list["cloudinit_commands"].lower() != "false" :
cloudconfig += "\nruncmd:\n"
cloudconfig += " - chmod +x /tmp/cb_post_boot.sh\n"
if obj_attr_list["cloudinit_commands"].lower() != "false" :
for _cmd in obj_attr_list["cloudinit_commands"].split(';') :
cloudconfig += " - " + _cmd.replace("____",' ') + '\n'
# We can't run the userdata from cloudbench until the VPN is connected,
# so only run it if we're not using the VPN.
# Otherwise, /etc/openvpn/client_connected.sh will do it.
if obj_attr_list["use_vpn_ip"].lower() == "false" :
cloudconfig += " - /tmp/cb_post_boot.sh\n"
else :
cloudconfig += " - chmod +x /etc/openvpn/client_connected.sh\n"
cloudconfig += " - mv " + conf_destination + " /tmp/cbvpn.conf\n"
cloudconfig += " - rm -f /etc/openvpn/*.conf /etc/openvpn/*.ovpn\n"
cloudconfig += " - mv /tmp/cbvpn.conf " + conf_destination + "\n"
cloudconfig += " - mkdir -p /var/log/openvpn\n"
#cloudconfig += " - openvpn --daemon --config " + conf_destination + "\n"
cloudconfig += " - systemctl start openvpn@" + obj_attr_list["cloud_name"] + "_client-cb-openvpn-cloud.service\n"
cloudconfig += " - service openvpn start\n"
# Check to see if the user requested packages to be installed for this VM role via cloud-init
if obj_attr_list["cloudinit_packages"].lower() != "false" :
cbdebug("Will instruct cloud-init to install: " + obj_attr_list["cloudinit_packages"] + " on " + obj_attr_list["log_string"], True)
cloudconfig += """
packages:"""
for package in obj_attr_list["cloudinit_packages"].split(";") :
cloudconfig += """
- """ + package
#cbdebug("Final userdata: \n" + str(cloudconfig))
return cloudconfig
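    def _example_populate_cloudconfig_usage(self) :
        '''
        Illustrative sketch, not part of the original class: the smallest
        obj_attr_list that reaches the end of populate_cloudconfig when the
        VPN, post-boot script, extra commands and extra packages are all
        disabled. Real callers pass many more keys; every value below is a
        hypothetical placeholder.
        '''
        _obj_attr_list = {
            "userdata" : "true",
            "use_vpn_ip" : "false",
            "userdata_ssh" : "false",
            "userdata_post_boot" : "false",
            "cloudinit_commands" : "false",
            "cloudinit_packages" : "false",
            "cloudinit_keys" : "ssh-rsa PLACEHOLDERKEYONE",
            "pubkey_contents" : "ssh-rsa PLACEHOLDERKEYTWO",
            "login" : "cbuser",
            "password" : "false"
        }
        return self.populate_cloudconfig(_obj_attr_list)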
def create_bootstrap_script(self, obj_attr_list) :
'''
TBD
'''
_attempts = str(5)
_sleep = str(2)
_fshn = obj_attr_list["filestore_host"]
_fspn = obj_attr_list["filestore_port"]
_fsun = obj_attr_list["filestore_username"]
_ldn = obj_attr_list["local_dir_name"]
_rln = obj_attr_list["login"]
_ohn = obj_attr_list["objectstore_host"]
_opn = obj_attr_list["objectstore_port"]
_odb = obj_attr_list["objectstore_dbid"]
_cn = obj_attr_list["cloud_name"]
_ohn = obj_attr_list["vpn_server_bootstrap"]
_fshn = obj_attr_list["vpn_server_bootstrap"]
_pad = " "
_bootstrap_script = _pad + "#!/bin/bash\n\n"
_bootstrap_script += _pad + "# This VM is part of experiment id \"" + obj_attr_list["experiment_id"] + "\""
_bootstrap_script += _pad + "\n"
_bootstrap_script += _pad + "mkdir -p /var/log/cloudbench\n"
_bootstrap_script += _pad + "\n"
_bootstrap_script += _pad + "chmod 777 /var/log/cloudbench\n"
_bootstrap_script += _pad + "\n"
_bootstrap_script += get_boostrap_command(obj_attr_list, True)
_bootstrap_script += _pad + "if [[ $(cat " + obj_attr_list["remote_dir_home"] + "/cb_os_parameters.txt | grep -c \"#OSOI-" + "TEST_" + obj_attr_list["username"] + ":" + obj_attr_list["cloud_name"] + "\") -ne 0 ]]\n"
_bootstrap_script += _pad + "then\n"
_bootstrap_script += _pad + " redis-cli -h " + _ohn + " -n " + str(_odb) + " -p " + str(_opn) + " hset TEST_" + _fsun + ':' + obj_attr_list["cloud_name"] + ":VM:PENDING:" + obj_attr_list["uuid"] + " cloud_init_bootstrap true\n"
_bootstrap_script += _pad + "fi\n"
_bootstrap_script += _pad + "\n"
_bootstrap_script += _pad + "counter=0\n\n"
_bootstrap_script += _pad + "while [[ \"$counter\" -le " + _attempts + " ]]\n"
_bootstrap_script += _pad + "do\n"
_bootstrap_script += _pad + " rsync -az --delete --no-o --no-g --inplace rsync://" + _fshn + ':' + _fspn + '/' + _fsun + "_cb" + "/exclude_list.txt /tmp/exclude_list\n"
_bootstrap_script += _pad + " if [[ $? -eq 0 ]]\n"
_bootstrap_script += _pad + " then\n"
_bootstrap_script += _pad + " break\n"
_bootstrap_script += _pad + " else\n"
_bootstrap_script += _pad + " sleep " + _sleep + "\n"
_bootstrap_script += _pad + " counter=\"$(( $counter + 1 ))\"\n"
_bootstrap_script += _pad + " fi\n"
_bootstrap_script += _pad + "done\n"
_bootstrap_script += _pad + "counter=0\n\n"
_bootstrap_script += _pad + "while [[ \"$counter\" -le " + _attempts + " ]]\n"
_bootstrap_script += _pad + "do\n"
_bootstrap_script += _pad + " rsync -az --exclude-from '/tmp/exclude_list' --delete --no-o --no-g --inplace rsync://" + _fshn + ':' + _fspn + '/' + _fsun + "_cb/ " + obj_attr_list["remote_dir_path"] + "/\n"
_bootstrap_script += _pad + " if [[ $? -eq 0 ]]\n"
_bootstrap_script += _pad + " then\n"
_bootstrap_script += _pad + " redis-cli -h " + _ohn + " -n " + str(_odb) + " -p " + str(_opn) + " hset TEST_" + _fsun + ':' + obj_attr_list["cloud_name"] + ":VM:PENDING:" + obj_attr_list["uuid"] + " cloud_init_rsync true\n"
_bootstrap_script += _pad + " break\n"
_bootstrap_script += _pad + " else\n"
_bootstrap_script += _pad + " sleep " + _sleep + "\n"
_bootstrap_script += _pad + " counter=\"$(( $counter + 1 ))\"\n"
_bootstrap_script += _pad + " fi\n"
_bootstrap_script += _pad + "done\n"
_bootstrap_script += _pad + "chown -R " + _rln + ':' + _rln + ' ' + obj_attr_list["remote_dir_path"] + "/\n"
_bootstrap_script += _pad + "\n"
_bootstrap_script += _pad + "\n"
_bootstrap_script += _pad + "VMUUID=$(grep -ri " + obj_attr_list["experiment_id"] + " /var/lib/cloud/ | grep user-data | cut -d '/' -f 6 | head -n 1)\n"
_bootstrap_script += _pad + "redis-cli -h " + _ohn + " -n " + str(_odb) + " -p " + str(_opn) + " publish TEST_" + _fsun + ':' + obj_attr_list["cloud_name"] + ":VM:BOOT " + "\"VM $VMUUID is booted\"\n"
_bootstrap_script | |
SystemService(self.com_object.SystemService)
@property
def top(self) -> float:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445)
| o Property Top() As float
|
                | Returns or sets the distance from the application's frame window top to
| the top of the screen. This distance is expressed in
| pixels.
|
| Example:
| This example sets the distance from the CATIA application's frame
| window top to the top of the screen to 50 pixels.
|
| CATIA.Top = 50
:return: float
:rtype: float
"""
return self.com_object.Top
@top.setter
def top(self, value: float):
"""
:param float value:
"""
self.com_object.Top = value
@property
def undo_redo_lock(self) -> bool:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445)
| o Property UndoRedoLock() As boolean
|
| Returns or sets the application status about Undo/Redo.
| True if the Undo/Redo mechanism is locked.
| False is the default. Since Undo/Redo mechanism uses lots of memory, it can
| be useful to disable it during consuming operations. Then Undo and Redo stacks
| are flushed and no model modification is kept until the Undo/Redo mechanism is
| unlocked. It is mandatory to unlock it before the end of the
| macro.
|
| Example:
|
| This example disables Undo/Redo mechanism until it is
| unlocked.
|
|
| CATIA.UndoRedoLock = True
:return: bool
:rtype: bool
"""
return self.com_object.UndoRedoLock
@undo_redo_lock.setter
def undo_redo_lock(self, value: bool):
"""
:param bool value:
"""
self.com_object.UndoRedoLock = value
@property
def visible(self) -> bool:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445)
| o Property Visible() As boolean
|
| Returns or sets the application's window visibility.
| True if the application's window is visible to the end
| user.
|
| Example:
| This example makes the CATIA application's window
| visible.
|
                | CATIA.Visible = True
:return: bool
:rtype: bool
"""
return self.com_object.Visible
@visible.setter
def visible(self, value: bool):
"""
:param bool value:
"""
self.com_object.Visible = value
@property
def width(self) -> float:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445)
| o Property Width() As float
|
| Returns or sets the width of the application's frame window. The width is
| expressed in pixels.
|
| Example:
| This example sets the width of the CATIA application's frame window to
| 350 pixels.
|
| CATIA.Width = 350
:return: float
:rtype: float
"""
return self.com_object.Width
@width.setter
def width(self, value: float):
"""
:param float value:
"""
self.com_object.Width = value
@property
def windows(self) -> Windows:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445)
| o Property Windows() As Windows (Read Only)
|
| Returns the collection of windows currently managed by the
| application.
|
| Example:
| This example retrieves in WinCollection the collection of windows
| currently managed by the CATIA application.
|
| Dim WinCollection As Windows
| Set WinCollection = CATIA.Windows
:return: Windows
:rtype: Windows
"""
return Windows(self.com_object.Windows)
def create_send_to(self) -> SendToService:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445))
| o Func CreateSendTo() As SendToService
|
| Creates a Send TO.
| Role:This method creates a SendToService instance.
| Warning : CATIASendToService interface requires the
| installation of CATIA - PPR xPDM Gateway 1 Product (PX1)
| In case this product is not granted, the first invocation
| to one of CATIASendToService methods will fail.
:return: SendToService
:rtype: SendToService
"""
return SendToService(self.com_object.CreateSendTo())
def disable_new_undo_redo_transaction(self) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445))
| o Sub DisableNewUndoRedoTransaction()
|
| Prevents new Undo/Redo transaction creation.
| If too many Undo/Redo transactions are created during macro execution, it
| may affect performance. So it is valuable to prevent Undo/Redo transaction
| creation during macro execution when lots of data are created, deleted or
| modified.
| Note: preventing Undo/Redo transaction creation must not be done when a
| selection is required in the macro
| Do not forget to call EnableNewUndoRedoTransaction at the end of the macro
| or before selection to restore the common behavior.
|
| Example:
| This example prevents new transactions to be created, which may
| increase performance.
|
| CATIA.DisableNewUndoRedoTransaction()
:return: None
:rtype: None
"""
return self.com_object.DisableNewUndoRedoTransaction()
def enable_new_undo_redo_transaction(self) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445))
| o Sub EnableNewUndoRedoTransaction()
|
| Allows new Undo/Redo transaction creation.
|
| Example:
| This example restores the common behavior after
| DisableNewUndoRedoTransaction has been called.
|
| CATIA.EnableNewUndoRedoTransaction()
:return: None
:rtype: None
"""
return self.com_object.EnableNewUndoRedoTransaction()
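    def _undo_redo_transaction_sketch(self) -> None:
        """
        .. note::
            Illustrative sketch, not part of pycatia: pair the two calls above
            around a heavy, non-interactive batch of modifications so that no
            new Undo/Redo transactions are recorded while it runs.
        """
        self.disable_new_undo_redo_transaction()
        try:
            ...  # placeholder for the consuming operations
        finally:
            self.enable_new_undo_redo_transaction()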
def file_selection_box(self, i_title: str, i_extension: str, i_mode: int) -> str:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445))
| o Func FileSelectionBox(CATBSTR iTitle,
| CATBSTR iExtension,
| CatFileSelectionMode iMode) As CATBSTR
|
| Displays a modal dialog box which can be used to select / enter the name of
| a file to open / save.
|
| Parameters:
|
| iTitle
| The title of the dialog box.
| iExtension
| A file extension filter.
| iMode
| The mode in which to run the dialog box (either
| CatFileSelectionModeOpen or CatFileSelectionModeSave.
|
| oFilePath
| The return string containing the full path of the selected file, or
| a zero-length string if the user selects Cancel.
|
| Example:
| This example asks the user to select a text file and prints the
| path of the selected file.
|
| filepath = CATIA.FileSelectionBox("Select a text file",
| "\\*.txt", CatFileSelectionModeOpen)
| CATIA.SystemServices.Print "The selected file is " &
| filepath
:param str i_title:
:param str i_extension:
:param CatFileSelectionMode i_mode:
:return: str
:rtype: str
"""
return self.com_object.FileSelectionBox(i_title, i_extension, i_mode)
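    def _file_selection_sketch(self) -> str:
        """
        .. note::
            Illustrative sketch, not part of pycatia: ask the user for a text
            file to open. The mode value 0 is assumed to correspond to
            CatFileSelectionModeOpen; verify it against the
            CatFileSelectionMode enumeration before relying on it.
        """
        return self.file_selection_box("Select a text file", "*.txt", 0)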
def get_workbench_id(self) -> str:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445))
| o Func GetWorkbenchId() As CATBSTR
|
| Returns the identifier of the CATIA current workbench.
|
| Parameters:
|
| oworkbenchId
| The id of the current workbench.
:return: str
:rtype: str
"""
return self.com_object.GetWorkbenchId()
def help(self, i_help_id: str) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445))
| o Sub Help(CATBSTR iHelpID)
|
| Displays application's online help.
|
| Parameters:
|
| iHelpID
| Identifier of the help message to display
|
| Example:
| This example displays the string referred to by the HelpKey message key
| in the message catalog concatenation.
|
| CATIA.Help("HelpKey")
:param str i_help_id:
:return: None
:rtype: None
"""
return self.com_object.Help(i_help_id)
def message_box(self, message_text: str, buttons: int = 0, title: str = ""):
"""
The button values
===================== ======= ========================================
Constant Value Description
--------------------- ------- ----------------------------------------
vbOKOnly 0 Display OK button only.
vbOKCancel 1 Display OK and Cancel buttons.
vbAbortRetryIgnore 2 Display Abort, Retry, and Ignore buttons
vbYesNoCancel 3 Display Yes, No, and Cancel buttons.
vbYesNo 4 Display Yes and No buttons.
vbRetryCancel 5 Display Retry and Cancel buttons.
vbCritical 16 Display Critical Message icon.
vbQuestion 32 Display Warning Query icon.
vbExclamation 48 Display Warning Message icon.
vbInformation 64 Display Information Message icon.
vbDefaultButton1 0 First button is default.
vbDefaultButton2 256 Second button is default.
vbDefaultButton3 512 Third button is default.
vbDefaultButton4 768 Fourth button is default.
vbApplicationModal 0 Application modal; the user must respond
to the message box before continuing
work in the current application.
vbSystemModal 4096 System modal; all applications are
suspended until the user responds to the
message box.
vbMsgBoxHelpButton 16384 Adds Help button to the message box.
vbMsgBoxSetForeground 65536 Specifies the message box window as the
foreground window.
vbMsgBoxRight 524288 Text is right-aligned.
vbMsgBoxRtlReading 1048576 Specifies text should appear as
right-to-left reading on Hebrew and
Arabic systems.
===================== ======= ========================================
Return values
======== ===== ===========
Constant Value Description
-------- ----- -----------
vbOK 1 OK
| |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Reports builder for BIDS-Apps.
Generalizes report generation across BIDS-Apps
"""
from pathlib import Path
import re
from itertools import product
from collections import defaultdict
from pkg_resources import resource_filename as pkgrf
from bids.layout import BIDSLayout, add_config_paths
import jinja2
from nipype.utils.filemanip import copyfile
# Add a new figures spec
try:
add_config_paths(figures=pkgrf('niworkflows', 'reports/figures.json'))
except ValueError as e:
if "Configuration 'figures' already exists" != str(e):
raise
PLURAL_SUFFIX = defaultdict(str('s').format, [('echo', 'es')])
SVG_SNIPPET = ["""\
<object class="svg-reportlet" type="image/svg+xml" data="./{0}">
Problem loading figure {0}. If the link below works, please try \
reloading the report in your browser.</object>
</div>
<div class="elem-filename">
Get figure file: <a href="./{0}" target="_blank">{0}</a>
</div>
""", """\
<img class="svg-reportlet" src="./{0}" style="width: 100%" />
</div>
<div class="elem-filename">
Get figure file: <a href="./{0}" target="_blank">{0}</a>
</div>
"""]
class Element(object):
"""Just a basic component of a report"""
def __init__(self, name, title=None):
self.name = name
self.title = title
class Reportlet(Element):
"""
A reportlet has title, description and a list of components with either an
HTML fragment or a path to an SVG file, and possibly a caption. This is a
factory class to generate Reportlets reusing the layout from a ``Report``
object.
.. testsetup::
>>> cwd = os.getcwd()
>>> os.chdir(tmpdir)
>>> from pkg_resources import resource_filename
>>> from shutil import copytree
>>> from bids.layout import BIDSLayout
>>> test_data_path = resource_filename('niworkflows', 'data/tests/work')
>>> testdir = Path(tmpdir)
>>> data_dir = copytree(test_data_path, str(testdir / 'work'))
>>> out_figs = testdir / 'out' / 'fmriprep'
>>> bl = BIDSLayout(str(testdir / 'work' / 'reportlets'),
... config='figures', validate=False)
.. doctest::
>>> bl.get(subject='01', desc='reconall') # doctest: +ELLIPSIS
[<BIDSFile filename='.../fmriprep/sub-01/anat/sub-01_desc-reconall_T1w.svg'>]
>>> len(bl.get(subject='01', space='.*', regex_search=True))
2
>>> r = Reportlet(bl, out_figs, config={
... 'title': 'Some Title', 'bids': {'datatype': 'anat', 'desc': 'reconall'},
... 'description': 'Some description'})
>>> r.name
'datatype-anat_desc-reconall'
>>> r.components[0][0].startswith('<img')
True
>>> r = Reportlet(bl, out_figs, config={
... 'title': 'Some Title', 'bids': {'datatype': 'anat', 'desc': 'reconall'},
... 'description': 'Some description', 'static': False})
>>> r.name
'datatype-anat_desc-reconall'
>>> r.components[0][0].startswith('<object')
True
>>> r = Reportlet(bl, out_figs, config={
... 'title': 'Some Title', 'bids': {'datatype': 'anat', 'desc': 'summary'},
... 'description': 'Some description'})
>>> r.components[0][0].startswith('<h3')
True
>>> r.components[0][1] is None
True
>>> r = Reportlet(bl, out_figs, config={
... 'title': 'Some Title',
... 'bids': {'datatype': 'anat', 'space': '.*', 'regex_search': True},
... 'caption': 'Some description {space}'})
>>> sorted(r.components)[0][1]
'Some description MNI152NLin2009cAsym'
>>> sorted(r.components)[1][1]
'Some description MNI152NLin6Asym'
>>> r = Reportlet(bl, out_figs, config={
... 'title': 'Some Title',
... 'bids': {'datatype': 'fmap', 'space': '.*', 'regex_search': True},
... 'caption': 'Some description {space}'})
>>> r.is_empty()
True
.. testcleanup::
>>> os.chdir(cwd)
"""
def __init__(self, layout, out_dir, config=None):
if not config:
raise RuntimeError('Reportlet must have a config object')
# PY35: Sorted config dict for consistent behavior
self.name = config.get(
'name', '_'.join('%s-%s' % i for i in sorted(config['bids'].items())))
self.title = config.get('title')
self.subtitle = config.get('subtitle')
self.description = config.get('description')
# Query the BIDS layout of reportlets
files = layout.get(**config['bids'])
self.components = []
for bidsfile in files:
src = Path(bidsfile.path)
ext = ''.join(src.suffixes)
desc_text = config.get('caption')
contents = None
if ext == '.html':
contents = src.read_text().strip()
elif ext == '.svg':
entities = dict(bidsfile.entities)
if desc_text:
desc_text = desc_text.format(**entities)
entities['extension'] = 'svg'
entities['datatype'] = 'figures'
linked_svg = layout.build_path(entities)
if linked_svg is None:
raise ValueError("Could not generate SVG path to copy {src}"
" to. Entities: {entities}".format(src=src,
entities=entities))
out_file = out_dir / linked_svg
out_file.parent.mkdir(parents=True, exist_ok=True)
# PY35: Coerce to str to pacify os.* functions that don't take Paths until 3.6
copyfile(str(src), str(out_file), copy=True, use_hardlink=True)
is_static = config.get('static', True)
contents = SVG_SNIPPET[is_static].format(linked_svg)
# Our current implementations of dynamic reportlets do this themselves,
# however I'll leave the code here since this is potentially something we
# will want to transfer from every figure generator to this location.
# The following code misses setting preserveAspecRatio="xMidYMid meet"
# if not is_static:
# # Remove height and width attributes from initial <svg> tag
# svglines = out_file.read_text().splitlines()
# expr = re.compile(r' (height|width)=["\'][0-9]+(\.[0-9]*)?[a-z]*["\']')
# for l, line in enumerate(svglines[:6]):
# if line.strip().startswith('<svg'):
# newline = expr.sub('', line)
# svglines[l] = newline
# out_file.write_text('\n'.join(svglines))
# break
if contents:
self.components.append((contents, desc_text))
def is_empty(self):
return len(self.components) == 0
class SubReport(Element):
"""SubReports are sections within a Report."""
def __init__(self, name, isnested=False, reportlets=None, title=''):
self.name = name
self.title = title
self.reportlets = reportlets or []
self.isnested = isnested
class Report(object):
"""
The full report object. This object maintains a BIDSLayout to index
all reportlets.
.. testsetup::
>>> cwd = os.getcwd()
>>> os.chdir(tmpdir)
>>> from pkg_resources import resource_filename
>>> from shutil import copytree
>>> from bids.layout import BIDSLayout
>>> test_data_path = resource_filename('niworkflows', 'data/tests/work')
>>> testdir = Path(tmpdir)
>>> data_dir = copytree(test_data_path, str(testdir / 'work'))
>>> out_figs = testdir / 'out' / 'fmriprep'
.. doctest::
>>> robj = Report(testdir / 'work' / 'reportlets', testdir / 'out',
... 'madeoutuuid', subject_id='01', packagename='fmriprep')
>>> robj.layout.get(subject='01', desc='reconall') # doctest: +ELLIPSIS
[<BIDSFile filename='.../anat/sub-01_desc-reconall_T1w.svg'>]
>>> robj.generate_report()
0
>>> len((testdir / 'out' / 'fmriprep' / 'sub-01.html').read_text())
19369
.. testcleanup::
>>> os.chdir(cwd)
"""
def __init__(self, reportlets_dir, out_dir, run_uuid, config=None,
subject_id=None, out_filename='report.html',
packagename=None):
self.root = reportlets_dir
# Initialize structuring elements
self.sections = []
self.errors = []
self.out_dir = Path(out_dir)
self.out_filename = out_filename
self.run_uuid = run_uuid
self.template_path = None
self.packagename = packagename
self.subject_id = subject_id
if subject_id is not None and subject_id.startswith('sub-'):
self.subject_id = self.subject_id[4:]
if self.subject_id is not None:
self.out_filename = 'sub-{}.html'.format(self.subject_id)
if config is None:
config = pkgrf('niworkflows', 'reports/fmriprep.yml')
self._load_config(Path(config))
def _load_config(self, config):
from yaml import safe_load as load
settings = load(config.read_text())
self.packagename = self.packagename or settings.get('package', None)
if self.packagename is not None:
self.root = self.root / self.packagename
self.out_dir = self.out_dir / self.packagename
if self.subject_id is not None:
self.root = self.root / 'sub-{}'.format(self.subject_id)
template_path = Path(settings.get('template_path', 'report.tpl'))
if not template_path.is_absolute():
template_path = config.parent / template_path
self.template_path = template_path.resolve()
self.index(settings['sections'])
def index(self, config):
"""
Traverse the reports config definition and instantiate reportlets.
This method also places figures in their final location.
"""
# Initialize a BIDS layout
self.layout = BIDSLayout(self.root, config='figures', validate=False)
for subrep_cfg in config:
# First determine whether we need to split by some ordering
# (ie. sessions / tasks / runs), which are separated by commas.
orderings = [s for s in subrep_cfg.get('ordering', '').strip().split(',') if s]
queries = []
for key in orderings:
values = getattr(self.layout, 'get_%s%s' % (key, PLURAL_SUFFIX[key]))()
if values:
queries.append((key, values))
if not queries: # E.g. this is an anatomical reportlet
reportlets = [Reportlet(self.layout, self.out_dir, config=cfg)
for cfg in subrep_cfg['reportlets']]
else:
# Do not use dictionary for queries, as we need to preserve ordering
# of ordering columns.
reportlets = []
entities, values = zip(*queries)
combinations = list(product(*values)) # e.g.: [('rest', 1), ('rest', 2)]
for c in combinations:
# Set a common title for this particular combination c
title = 'Reports for: %s.' % ', '.join(
['%s <span class="bids-entity">%s</span>' % (entities[i], c[i])
for i in range(len(c))])
for cfg in subrep_cfg['reportlets']:
cfg['bids'].update({entities[i]: c[i] for i in range(len(c))})
rlet = Reportlet(self.layout, self.out_dir, config=cfg)
if not rlet.is_empty():
rlet.title = title
title = None
reportlets.append(rlet)
# Filter out empty reportlets
reportlets = [r for r in reportlets if not r.is_empty()]
if reportlets:
sub_report = SubReport(
subrep_cfg['name'],
isnested=len(queries) > 0,
reportlets=reportlets,
title=subrep_cfg.get('title'))
self.sections.append(sub_report)
# Populate errors section
error_dir = self.out_dir / 'sub-{}'.format(self.subject_id) / 'log' / self.run_uuid
if error_dir.is_dir():
from ..utils.misc import read_crashfile
self.errors = [read_crashfile(str(f)) for f in error_dir.glob('crash*.*')]
def generate_report(self):
"""Once the Report has been indexed, the final HTML can be generated"""
logs_path = self.out_dir / 'logs'
boilerplate = []
boiler_idx = 0
if (logs_path / 'CITATION.html').exists():
text = (logs_path / 'CITATION.html').read_text(encoding='UTF-8')
text = '<div class="boiler-html">%s</div>' % re.compile(
'<body>(.*?)</body>',
re.DOTALL | re.IGNORECASE).findall(text)[0].strip()
boilerplate.append((boiler_idx, 'HTML', text))
boiler_idx += 1
if (logs_path / 'CITATION.md').exists():
text = '<pre>%s</pre>\n' % (logs_path / 'CITATION.md').read_text(encoding='UTF-8')
boilerplate.append((boiler_idx, 'Markdown', text))
boiler_idx += 1
if (logs_path / 'CITATION.tex').exists():
text = (logs_path / 'CITATION.tex').read_text(encoding='UTF-8')
text = re.compile(
r'\\begin{document}(.*?)\\end{document}',
re.DOTALL | re.IGNORECASE).findall(text)[0].strip()
text = '<pre>%s</pre>\n' % text
text += '<h3>Bibliography</h3>\n'
text += '<pre>%s</pre>\n' % Path(
pkgrf(self.packagename, 'data/boilerplate.bib')).read_text(encoding='UTF-8')
boilerplate.append((boiler_idx, 'LaTeX', text))
boiler_idx += 1
env = jinja2.Environment(
loader=jinja2.FileSystemLoader(searchpath=str(self.template_path.parent)),
trim_blocks=True, lstrip_blocks=True
)
report_tpl = env.get_template(self.template_path.name)
report_render = report_tpl.render(sections=self.sections, errors=self.errors,
boilerplate=boilerplate)
# Write out report
(self.out_dir / self.out_filename).write_text(report_render, encoding='UTF-8')
return len(self.errors)
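# Illustrative sketch (not part of niworkflows): drive a single-subject report
# through the convenience wrapper defined below. All paths, the subject label
# and the run uuid are hypothetical placeholders.
def _example_run_single_subject_report():
    errors = run_reports(Path('work/reportlets'), Path('out'),
                         subject_label='01', run_uuid='madeoutuuid',
                         packagename='fmriprep')
    print('Report finished with %d error(s).' % errors)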
def run_reports(reportlets_dir, out_dir, subject_label, run_uuid, config=None,
packagename=None):
"""
| |
''' CFFI interface to libpq the library '''
from __future__ import print_function
from distutils import sysconfig
import os.path
import re
import sys
import subprocess
from cffi import FFI
PLATFORM_IS_WINDOWS = sys.platform.lower().startswith('win')
LIBRARY_NAME = 'pq' if not PLATFORM_IS_WINDOWS else 'libpq'
class PostgresConfig:
def __init__(self):
try:
from psycopg2cffi import _config
except ImportError:
self.pg_config_exe = None
if not self.pg_config_exe:
self.pg_config_exe = self.autodetect_pg_config_path()
if self.pg_config_exe is None:
# FIXME - do we need some way to set it?
sys.stderr.write("""\
Error: pg_config executable not found.
Please add the directory containing pg_config to the PATH.
""")
sys.exit(1)
self.libpq_include_dir = self.query('includedir') or None
self.libpq_lib_dir = self.query('libdir') or None
self.libpq_version = self.find_version()
else:
self.libpq_include_dir = _config.PG_INCLUDE_DIR
self.libpq_lib_dir = _config.PG_LIB_DIR
self.libpq_version = _config.PG_VERSION
def query(self, attr_name):
"""Spawn the pg_config executable, querying for the given config
name, and return the printed value, sanitized. """
try:
pg_config_process = subprocess.Popen(
[self.pg_config_exe, "--" + attr_name],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError:
raise Warning("Unable to find 'pg_config' file in '%s'" %
self.pg_config_exe)
pg_config_process.stdin.close()
result = pg_config_process.stdout.readline().strip()
if not result:
raise Warning(pg_config_process.stderr.readline())
if not isinstance(result, str):
result = result.decode('ascii')
return result
def find_on_path(self, exename, path_directories=None):
if not path_directories:
path_directories = os.environ['PATH'].split(os.pathsep)
for dir_name in path_directories:
fullpath = os.path.join(dir_name, exename)
if os.path.isfile(fullpath):
return fullpath
return None
def autodetect_pg_config_path(self):
"""Find and return the path to the pg_config executable."""
if PLATFORM_IS_WINDOWS:
return self.autodetect_pg_config_path_windows()
else:
return self.find_on_path('pg_config')
def autodetect_pg_config_path_windows(self):
"""Attempt several different ways of finding the pg_config
executable on Windows, and return its full path, if found."""
# This code only runs if they have not specified a pg_config option
# in the config file or via the commandline.
# First, check for pg_config.exe on the PATH, and use that if found.
pg_config_exe = self.find_on_path('pg_config.exe')
if pg_config_exe:
return pg_config_exe
# Now, try looking in the Windows Registry to find a PostgreSQL
# installation, and infer the path from that.
pg_config_exe = self._get_pg_config_from_registry()
if pg_config_exe:
return pg_config_exe
return None
def _get_pg_config_from_registry(self):
try:
import winreg
except ImportError:
import _winreg as winreg
reg = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
try:
pg_inst_list_key = winreg.OpenKey(reg,
'SOFTWARE\\PostgreSQL\\Installations')
except EnvironmentError:
# No PostgreSQL installation, as best as we can tell.
return None
try:
# Determine the name of the first subkey, if any:
try:
first_sub_key_name = winreg.EnumKey(pg_inst_list_key, 0)
except EnvironmentError:
return None
pg_first_inst_key = winreg.OpenKey(reg,
'SOFTWARE\\PostgreSQL\\Installations\\'
+ first_sub_key_name)
try:
pg_inst_base_dir = winreg.QueryValueEx(
pg_first_inst_key, 'Base Directory')[0]
finally:
winreg.CloseKey(pg_first_inst_key)
finally:
winreg.CloseKey(pg_inst_list_key)
pg_config_path = os.path.join(
pg_inst_base_dir, 'bin', 'pg_config.exe')
if not os.path.exists(pg_config_path):
return None
# Support unicode paths, if this version of Python provides the
# necessary infrastructure:
if sys.version_info[0] < 3 \
and hasattr(sys, 'getfilesystemencoding'):
pg_config_path = pg_config_path.encode(
sys.getfilesystemencoding())
return pg_config_path
def find_version(self):
try:
# Here we take a conservative approach: we suppose that
# *at least* PostgreSQL 7.4 is available (this is the only
# 7.x series supported by psycopg 2)
pgversion = self.query('version').split()[1]
except:
pgversion = '7.4.0'
verre = re.compile(
r'(\d+)\.(\d+)(?:(?:\.(\d+))|(devel|(alpha|beta|rc)\d+)?)')
m = verre.match(pgversion)
if m:
pgmajor, pgminor, pgpatch = m.group(1, 2, 3)
if pgpatch is None or not pgpatch.isdigit():
pgpatch = 0
else:
sys.stderr.write(
"Error: could not determine PostgreSQL version from '%s'"
% pgversion)
sys.exit(1)
return int(
'%02X%02X%02X' % (int(pgmajor), int(pgminor), int(pgpatch)), 16)
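# Note on the packed version (illustrative, not produced by pg_config itself):
# major/minor/patch are each formatted as two hex digits, so PostgreSQL 9.6.3
# packs to int('090603', 16) == 0x090603.  That is why the conditional cdef
# further below compares `_config.libpq_version >= 0x090000`, i.e. "9.0.0 or
# newer".  Quick sanity check:
#     >>> int('%02X%02X%02X' % (9, 6, 3), 16) == 0x090603
#     True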
_config = PostgresConfig()
ffi = FFI()
# order and comments taken from libpq (ctypes impl)
ffi.cdef('''
static int const _PG_VERSION;
// postgres_ext.h
typedef unsigned int Oid;
// See comment below.
static int const LIBPQ_DIAG_SEVERITY;
static int const LIBPQ_DIAG_SQLSTATE;
static int const LIBPQ_DIAG_MESSAGE_PRIMARY;
static int const LIBPQ_DIAG_MESSAGE_DETAIL;
static int const LIBPQ_DIAG_MESSAGE_HINT;
static int const LIBPQ_DIAG_STATEMENT_POSITION;
static int const LIBPQ_DIAG_INTERNAL_POSITION;
static int const LIBPQ_DIAG_INTERNAL_QUERY;
static int const LIBPQ_DIAG_CONTEXT;
static int const LIBPQ_DIAG_SOURCE_FILE;
static int const LIBPQ_DIAG_SCHEMA_NAME;
static int const LIBPQ_DIAG_TABLE_NAME;
static int const LIBPQ_DIAG_COLUMN_NAME;
static int const LIBPQ_DIAG_DATATYPE_NAME ;
static int const LIBPQ_DIAG_CONSTRAINT_NAME;
static int const LIBPQ_DIAG_SOURCE_LINE;
static int const LIBPQ_DIAG_SOURCE_FUNCTION;
// libpq-fe.h
typedef enum
{
/*
* Although it is okay to add to this list, values which become unused
* should never be removed, nor should constants be redefined - that would
* break compatibility with existing code.
*/
CONNECTION_OK,
CONNECTION_BAD,
/* Non-blocking mode only below here */
/*
* The existence of these should never be relied upon - they should only
* be used for user feedback or similar purposes.
*/
CONNECTION_STARTED, /* Waiting for connection to be made. */
CONNECTION_MADE, /* Connection OK; waiting to send. */
CONNECTION_AWAITING_RESPONSE, /* Waiting for a response from the
* postmaster. */
CONNECTION_AUTH_OK, /* Received authentication; waiting for
* backend startup. */
CONNECTION_SETENV, /* Negotiating environment. */
CONNECTION_SSL_STARTUP, /* Negotiating SSL. */
CONNECTION_NEEDED /* Internal state: connect() needed */
} ConnStatusType;
typedef enum
{
PGRES_POLLING_FAILED = 0,
PGRES_POLLING_READING, /* These two indicate that one may */
PGRES_POLLING_WRITING, /* use select before polling again. */
PGRES_POLLING_OK,
PGRES_POLLING_ACTIVE /* unused; keep for awhile for backwards
* compatibility */
} PostgresPollingStatusType;
typedef enum
{
PGRES_EMPTY_QUERY = 0, /* empty query string was executed */
PGRES_COMMAND_OK, /* a query command that doesn't return
* anything was executed properly by the
* backend */
PGRES_TUPLES_OK, /* a query command that returns tuples was
* executed properly by the backend, PGresult
* contains the result tuples */
PGRES_COPY_OUT, /* Copy Out data transfer in progress */
PGRES_COPY_IN, /* Copy In data transfer in progress */
PGRES_BAD_RESPONSE, /* an unexpected response was recv'd from the
* backend */
PGRES_NONFATAL_ERROR, /* notice or warning message */
PGRES_FATAL_ERROR, /* query failed */
} ExecStatusType;
typedef enum
{
PQTRANS_IDLE, /* connection idle */
PQTRANS_ACTIVE, /* command in progress */
PQTRANS_INTRANS, /* idle, within transaction block */
PQTRANS_INERROR, /* idle, within failed transaction */
PQTRANS_UNKNOWN /* cannot determine status */
} PGTransactionStatusType;
typedef ... PGconn;
typedef ... PGresult;
typedef ... PGcancel;
typedef struct pgNotify
{
char *relname; /* notification condition name */
int be_pid; /* process ID of notifying server process */
char *extra; /* notification parameter */
...;
} PGnotify;
// Database connection control functions
extern PGconn *PQconnectdb(const char *conninfo);
extern PGconn *PQconnectStart(const char *conninfo);
extern /*PostgresPollingStatusType*/ int PQconnectPoll(PGconn *conn);
extern void PQfinish(PGconn *conn);
// Connection status functions
extern /*ConnStatusType*/ int PQstatus(const PGconn *conn);
extern /*PGTransactionStatusType*/ int PQtransactionStatus(const PGconn *conn);
extern const char *PQparameterStatus(const PGconn *conn, const char *paramName);
extern int PQprotocolVersion(const PGconn *conn);
extern int PQserverVersion(const PGconn *conn);
extern char *PQerrorMessage(const PGconn *conn);
extern int PQsocket(const PGconn *conn);
extern int PQbackendPID(const PGconn *conn);
// Command execution functions
extern PGresult *PQexec(PGconn *conn, const char *query);
extern /*ExecStatusType*/ int PQresultStatus(const PGresult *res);
extern char *PQresultErrorMessage(const PGresult *res);
extern char *PQresultErrorField(const PGresult *res, int fieldcode);
extern void PQclear(PGresult *res);
// Retrieving query result information
extern int PQntuples(const PGresult *res);
extern int PQnfields(const PGresult *res);
extern char *PQfname(const PGresult *res, int field_num);
extern Oid PQftype(const PGresult *res, int field_num);
extern int PQfsize(const PGresult *res, int field_num);
extern int PQfmod(const PGresult *res, int field_num);
extern int PQgetisnull(const PGresult *res, int tup_num, int field_num);
extern int PQgetlength(const PGresult *res, int tup_num, int field_num);
extern char *PQgetvalue(const PGresult *res, int tup_num, int field_num);
// direct parsers - not part of libpq
int PQEgetlong(int64_t *val, const PGresult *res, int tup_num, int field_num);
int PQEgetint(int32_t *val, const PGresult *res, int tup_num, int field_num);
int PQEgetfloat(float *val, const PGresult *res, int tup_num, int field_num);
int PQEgetdouble(double *val, const PGresult *res, int tup_num, int field_num);
// Retrieving other result information
extern char *PQcmdStatus(PGresult *res);
extern char *PQcmdTuples(PGresult *res);
extern Oid PQoidValue(const PGresult *res); /* new and improved */
''')
if _config.libpq_version >= 0x090000:
ffi.cdef('''
// Escaping string for inclusion in sql commands
extern char *PQescapeLiteral(PGconn *conn, const char *str, size_t len);
''')
ffi.cdef('''
// Escaping string for inclusion in sql commands
extern size_t PQescapeStringConn(PGconn *conn,
char *to, const char *from, size_t length,
int *error);
extern size_t PQescapeString(char *to, const char *from, size_t length);
extern unsigned char *PQescapeByteaConn(PGconn *conn,
const unsigned char *from, size_t from_length,
size_t *to_length);
extern unsigned char *PQescapeBytea(const unsigned char *from, size_t from_length,
size_t *to_length);
extern unsigned char *PQunescapeBytea(const unsigned char *strtext,
size_t *retbuflen);
// Asynchronous Command Processing
extern int PQsendQuery(PGconn *conn, const char *query);
extern PGresult *PQgetResult(PGconn *conn);
extern int PQconsumeInput(PGconn *conn);
extern int PQisBusy(PGconn *conn);
extern int PQsetnonblocking(PGconn *conn, int arg);
extern int PQflush(PGconn *conn);
// Cancelling queries in progress
extern PGcancel *PQgetCancel(PGconn *conn);
extern void PQfreeCancel(PGcancel *cancel);
extern int PQcancel(PGcancel *cancel, char *errbuf, int errbufsize);
extern int PQrequestCancel(PGconn *conn);
// Functions Associated with the COPY Command
extern int PQgetCopyData(PGconn *conn, char **buffer, int async);
extern int PQputCopyEnd(PGconn *conn, const char *errormsg);
extern int PQputCopyData(PGconn *conn, const char *buffer, int nbytes);
// Miscellaneous functions
extern void PQfreemem(void *ptr);
// Notice processing
typedef void (*PQnoticeProcessor) (void *arg, const char *message);
extern PQnoticeProcessor PQsetNoticeProcessor(PGconn *conn,
PQnoticeProcessor proc,
void *arg);
extern PGnotify *PQnotifies(PGconn *conn);
// Large object
extern int lo_open(PGconn *conn, Oid lobjId, int mode);
extern Oid lo_create(PGconn *conn,
#! /usr/bin/env python3
import argparse
import cmd
import csv
import glob
import json
import os
import platform
import re
import sys
import textwrap
import traceback
from collections import OrderedDict
import configparser
import subprocess
import tempfile
try:
import readline
import atexit
except ImportError:
readline = None
try: import prettytable
except:
print('\nPlease install python pretty table (pip3 install ptable)\n')
sys.exit(1)
#--senzing python classes
try:
import G2Paths
from G2Product import G2Product
from G2Database import G2Database
from G2Diagnostic import G2Diagnostic
from G2Engine import G2Engine
from G2IniParams import G2IniParams
from G2ConfigMgr import G2ConfigMgr
from G2Exception import G2Exception
except:
print('\nPlease export PYTHONPATH=<path to senzing python directory>\n')
sys.exit(1)
# ==============================
class colors:
code = {}
#--styles
code['reset'] = '\033[0m'
code['bold'] ='\033[01m'
code['dim'] = '\033[02m'
code['italics'] = '\033[03m'
code['underline'] = '\033[04m'
code['blink'] = '\033[05m'
code['reverse'] = '\033[07m'
code['strikethrough'] = '\033[09m'
code['invisible'] = '\033[08m'
#--foregrounds
code['fg.black'] = '\033[30m'
code['fg.red'] = '\033[31m'
code['fg.green'] = '\033[32m'
code['fg.yellow'] = '\033[33m'
code['fg.blue'] = '\033[34m'
code['fg.magenta'] = '\033[35m'
code['fg.cyan'] = '\033[36m'
code['fg.lightgrey'] = '\033[37m'
code['fg.darkgrey'] = '\033[90m'
code['fg.lightred'] = '\033[91m'
code['fg.lightgreen'] = '\033[92m'
code['fg.lightyellow'] = '\033[93m'
code['fg.lightblue'] = '\033[94m'
code['fg.lightmagenta'] = '\033[95m'
code['fg.lightcyan'] = '\033[96m'
code['fg.white'] = '\033[97m'
#--backgrounds
code['bg.black'] = '\033[40m'
code['bg.red'] = '\033[41m'
code['bg.green'] = '\033[42m'
code['bg.orange'] = '\033[43m'
code['bg.blue'] = '\033[44m'
code['bg.magenta'] = '\033[45m'
code['bg.cyan'] = '\033[46m'
code['bg.lightgrey'] = '\033[47m'
code['bg.darkgrey'] = '\033[100m'
code['bg.lightred'] = '\033[101m'
code['bg.lightgreen'] = '\033[102m'
code['bg.yellow'] = '\033[103m'
code['bg.lightblue'] = '\033[104m'
code['bg.lightmagenta'] = '\033[105m'
code['bg.lightcyan'] = '\033[106m'
code['bg.white'] = '\033[107m'
def colorize(string, colorList = None):
if colorList:
prefix = ''.join([colors.code[i.strip().lower()] for i in colorList.split(',')])
suffix = colors.code['reset']
return '{}{}{}'.format(prefix, string, suffix)
return string
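# Minimal usage sketch (the keys are the real entries of colors.code above; the
# message strings are just illustrative):
#     print(colorize('review required', 'fg.red, bold'))   # bold red text
#     print(colorize('ok', 'bg.green'))                    # green background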
# ==============================
class ColoredTable(prettytable.PrettyTable):
def __init__(self, field_names=None, **kwargs):
new_options = ['title_color', 'header_color', 'title_justify']
super(ColoredTable, self).__init__(field_names, **kwargs)
# use .get() so these styling options remain optional for callers
self._title_color = kwargs.get('title_color')
self._header_color = kwargs.get('header_color')
self._title_justify = kwargs.get('title_justify') or 'c'
self._options.extend(new_options)
# hrule styles
self.FRAME = 0
self.ALL = 1
def _stringify_title(self, title, options):
lines = []
lpad, rpad = self._get_padding_widths(options)
if options["border"]:
if options["vrules"] == self.ALL:
options["vrules"] = self.FRAME
lines.append(self._stringify_hrule(options))
options["vrules"] = self.ALL
elif options["vrules"] == self.FRAME:
lines.append(self._stringify_hrule(options))
bits = []
endpoint = options["vertical_char"] if options["vrules"] in (self.ALL, self.FRAME) else " "
bits.append(endpoint)
title = " " * lpad + title + " " * rpad
if options['title_color']:
bits.append(colorize(self._justify(title, len(self._hrule) - 2, options['title_justify']), options['title_color']))
else:
bits.append(self._justify(title, len(self._hrule) - 2, options['title_justify']))
bits.append(endpoint)
lines.append("".join(bits))
return "\n".join(lines)
def _stringify_header(self, options):
bits = []
lpad, rpad = self._get_padding_widths(options)
if options["border"]:
if options["hrules"] in (self.ALL, self.FRAME):
bits.append(self._hrule)
bits.append("\n")
if options["vrules"] in (self.ALL, self.FRAME):
bits.append(options["vertical_char"])
else:
bits.append(" ")
# For tables with no data or field names
if not self._field_names:
if options["vrules"] in (self.ALL, self.FRAME):
bits.append(options["vertical_char"])
else:
bits.append(" ")
for field, width, in zip(self._field_names, self._widths):
if options["fields"] and field not in options["fields"]:
continue
if self._header_style == "cap":
fieldname = field.capitalize()
elif self._header_style == "title":
fieldname = field.title()
elif self._header_style == "upper":
fieldname = field.upper()
elif self._header_style == "lower":
fieldname = field.lower()
else:
fieldname = field
#if options['header_color']:
# fieldname = colorify(fieldname, options['header_color'])
if options['header_color']:
bits.append(colorize(" " * lpad
+ self._justify(fieldname, width, self._align[field])
+ " " * rpad, options['header_color']))
else:
bits.append(" " * lpad
+ self._justify(fieldname, width, self._align[field])
+ " " * rpad)
if options["border"]:
if options["vrules"] == self.ALL:
bits.append(options["vertical_char"])
else:
bits.append(" ")
# If vrules is FRAME, then we just appended a space at the end
# of the last field, when we really want a vertical character
if options["border"] and options["vrules"] == self.FRAME:
bits.pop()
bits.append(options["vertical_char"])
if options["border"] and options["hrules"] is not None:
bits.append("\n")
bits.append(self._hrule)
return "".join(bits)
# ==============================
class G2CmdShell(cmd.Cmd):
#Override function from cmd module to make command completion case insensitive
def completenames(self, text, *ignored):
dotext = 'do_'+text
return [a[3:] for a in self.get_names() if a.lower().startswith(dotext.lower())]
#Hide functions from available list of Commands. Separate help sections for some
def get_names(self):
return [n for n in dir(self.__class__) if n not in self.__hidden_methods]
def __init__(self):
cmd.Cmd.__init__(self)
readline.set_completer_delims(' ')
self.intro = '\nType help or ? to list commands.\n'
self.prompt = prompt
#--store config dicts for fast lookup
self.cfgData = cfgData
self.dsrcLookup = {}
for cfgRecord in self.cfgData['G2_CONFIG']['CFG_DSRC']:
self.dsrcLookup[cfgRecord['DSRC_ID']] = cfgRecord
self.dsrcCodeLookup = {}
for cfgRecord in self.cfgData['G2_CONFIG']['CFG_DSRC']:
self.dsrcCodeLookup[cfgRecord['DSRC_CODE']] = cfgRecord
self.etypeLookup = {}
for cfgRecord in self.cfgData['G2_CONFIG']['CFG_ETYPE']:
self.etypeLookup[cfgRecord['ETYPE_ID']] = cfgRecord
self.erruleLookup = {}
for cfgRecord in self.cfgData['G2_CONFIG']['CFG_ERRULE']:
self.erruleLookup[cfgRecord['ERRULE_ID']] = cfgRecord
self.erruleCodeLookup = {}
for cfgRecord in self.cfgData['G2_CONFIG']['CFG_ERRULE']:
self.erruleCodeLookup[cfgRecord['ERRULE_CODE']] = cfgRecord
self.ftypeLookup = {}
for cfgRecord in self.cfgData['G2_CONFIG']['CFG_FTYPE']:
self.ftypeLookup[cfgRecord['FTYPE_ID']] = cfgRecord
self.ftypeCodeLookup = {}
for cfgRecord in self.cfgData['G2_CONFIG']['CFG_FTYPE']:
self.ftypeCodeLookup[cfgRecord['FTYPE_CODE']] = cfgRecord
self.cfuncLookup = {}
for cfgRecord in self.cfgData['G2_CONFIG']['CFG_CFUNC']:
self.cfuncLookup[cfgRecord['CFUNC_ID']] = cfgRecord
self.cfrtnLookup = {}
for cfgRecord in self.cfgData['G2_CONFIG']['CFG_CFRTN']:
self.cfrtnLookup[cfgRecord['CFUNC_ID']] = cfgRecord
self.scoredFtypeCodes = {}
for cfgRecord in self.cfgData['G2_CONFIG']['CFG_CFCALL']:
cfgRecord['FTYPE_CODE'] = self.ftypeLookup[cfgRecord['FTYPE_ID']]['FTYPE_CODE']
cfgRecord['CFUNC_CODE'] = self.cfuncLookup[cfgRecord['CFUNC_ID']]['CFUNC_CODE']
self.scoredFtypeCodes[cfgRecord['FTYPE_CODE']] = cfgRecord
self.ambiguousFtypeID = self.ftypeCodeLookup['AMBIGUOUS_ENTITY']['FTYPE_ID']
#--set feature display sequence
self.featureSequence = {}
self.featureSequence[self.ambiguousFtypeID] = 1 #--ambiguous is first
featureSequence = 2
#--scored features second
for cfgRecord in sorted(self.cfgData['G2_CONFIG']['CFG_CFCALL'], key=lambda k: k['FTYPE_ID']):
if cfgRecord['FTYPE_ID'] not in self.featureSequence:
self.featureSequence[cfgRecord['FTYPE_ID']] = featureSequence
featureSequence += 1
#--then the rest
for cfgRecord in sorted(self.cfgData['G2_CONFIG']['CFG_FTYPE'], key=lambda k: k['FTYPE_ID']):
if cfgRecord['FTYPE_ID'] not in self.featureSequence:
self.featureSequence[cfgRecord['FTYPE_ID']] = featureSequence
featureSequence += 1
#--misc
self.sqlCommitSize = 1000
self.__hidden_methods = ('do_shell',)  # one-element tuple, not a bare string
self.doDebug = False
self.searchMatchLevels = {1: 'Match', 2: 'Possible Match', 3: 'Possibly Related', 4: 'Name Only'}
self.relatedMatchLevels = {1: 'Ambiguous Match', 2: 'Possible Match', 3: 'Possibly Related', 4: 'Name Only', 11: 'Disclosed Relation'}
self.validMatchLevelParameters = {}
self.validMatchLevelParameters['0'] = 'SINGLE_SAMPLE'
self.validMatchLevelParameters['1'] = 'DUPLICATE_SAMPLE'
self.validMatchLevelParameters['2'] = 'AMBIGUOUS_MATCH_SAMPLE'
self.validMatchLevelParameters['3'] = 'POSSIBLE_MATCH_SAMPLE'
self.validMatchLevelParameters['4'] = 'POSSIBLY_RELATED_SAMPLE'
self.validMatchLevelParameters['SINGLE'] = 'SINGLE_SAMPLE'
self.validMatchLevelParameters['DUPLICATE'] = 'DUPLICATE_SAMPLE'
self.validMatchLevelParameters['AMBIGUOUS'] = 'AMBIGUOUS_MATCH_SAMPLE'
self.validMatchLevelParameters['POSSIBLE'] = 'POSSIBLE_MATCH_SAMPLE'
self.validMatchLevelParameters['POSSIBLY'] = 'POSSIBLY_RELATED_SAMPLE'
self.validMatchLevelParameters['RELATED'] = 'POSSIBLY_RELATED_SAMPLE'
self.validMatchLevelParameters['S'] = 'SINGLE_SAMPLE'
self.validMatchLevelParameters['D'] = 'DUPLICATE_SAMPLE'
self.validMatchLevelParameters['A'] = 'AMBIGUOUS_MATCH_SAMPLE'
self.validMatchLevelParameters['P'] = 'POSSIBLE_MATCH_SAMPLE'
self.validMatchLevelParameters['R'] = 'POSSIBLY_RELATED_SAMPLE'
self.lastSearchResult = []
self.usePrettyTable = True
self.currentReviewList = None
#--get settings
settingsFileName = '.' + os.path.basename(sys.argv[0].lower().replace('.py','')) + '_settings'
self.settingsFileName = os.path.join(os.path.expanduser("~"), settingsFileName)
try: self.settingsFileData = json.load(open(self.settingsFileName))
except: self.settingsFileData = {}
#--set the color scheme
self.colors = {}
if not ('colorScheme' in self.settingsFileData and self.settingsFileData['colorScheme'].upper() in ('DARK', 'LIGHT')):
self.settingsFileData['colorScheme'] = 'dark'
self.do_setColorScheme(self.settingsFileData['colorScheme'])
#--default last snapshot/audit file from parameters
if args.snapshot_file_name:
self.settingsFileData['snapshotFile'] = args.snapshot_file_name
if args.audit_file_name:
self.settingsFileData['auditFile'] = args.audit_file_name
#--load prior snapshot file
if 'snapshotFile' in self.settingsFileData and os.path.exists(self.settingsFileData['snapshotFile']):
self.do_load(self.settingsFileData['snapshotFile'])
else:
self.snapshotFile = None
self.snapshotData = {}
#--load prior audit file
if 'auditFile' in self.settingsFileData and os.path.exists(self.settingsFileData['auditFile']):
self.do_load(self.settingsFileData['auditFile'])
else:
self.auditFile = None
self.auditData = {}
#--history
self.readlineAvail = True if 'readline' in sys.modules else False
self.histDisable = hist_disable
self.histCheck()
# -----------------------------
def do_quit(self, arg):
return True
# -----------------------------
def emptyline(self):
return
# -----------------------------
def cmdloop(self):
while True:
try:
cmd.Cmd.cmdloop(self)
break
except KeyboardInterrupt:
ans = input('\n\nAre you sure you want to exit? ')
if ans in ['y','Y', 'yes', 'YES']:
break
except TypeError as ex:
printWithNewLines("ERROR: " + str(ex))
type_, value_, traceback_ = sys.exc_info()
for item in traceback.format_tb(traceback_):
printWithNewLines(item)
def postloop(self):
try:
with open(self.settingsFileName, 'w') as f:
json.dump(self.settingsFileData, f)
except: pass
#Hide do_shell from list of APIs. Separate help section for it
def get_names(self):
return [n for n in dir(self.__class__) if n not in self.__hidden_methods]
def help_KnowledgeCenter(self):
printWithNewLines('Senzing Knowledge Center: https://senzing.zendesk.com/hc/en-us', 'B')
def help_Support(self):
printWithNewLines('Senzing Support Request: https://senzing.zendesk.com/hc/en-us/requests/new', 'B')
def help_Arguments(self):
print(
'\nWhere you see <value> in the help output replace <value> with your value.\n' \
'\nFor example the help for addAttribute is: \n' \
'\taddAttribute {"attribute": "<attribute_name>"}\n' \
'\nReplace <attribute_name> to be the name of your new attribute\n' \
'\taddAttribute {"attribute": "myNewAttribute"}\n' \
)
def help_Shell(self):
printWithNewLines('Run OS shell commands: ! <command>', 'B')
def help_History(self):
printWithNewLines(textwrap.dedent(f'''\
- Use shell like history, requires Python readline module.
- Tries to create a history file in the user's home directory for use across instances of G2ConfigTool.
- If a history file can't be created in the user's home, /tmp is tried for temporary session history.
- Ctrl-r can be used to search history when history is available
- Commands to manage history
- histClear = Clears the current working session history and the history file. This deletes all history, be careful!
- histDedupe = The history can accumulate duplicate entries over time, use this to remove them
- histShow = Display all history
- History Status:
in idxs})
return nhyd_dct
# # # charge layers
def charge(chi):
""" Determine charge from the ChI string
:param chi: ChI string
:type chi: str
:rtype: int
"""
char_lyr_dct = charge_layers(chi)
char = int(char_lyr_dct['q'])
return char
# # # stereo layers
def bond_stereo_parities(chi, one_indexed=False):
""" Parse the bond stereo parities from the stereochemistry layers.
:param chi: ChI string
:type chi: str
:param one_indexed: Return indices in one-indexing?
:type one_indexed: bool
:returns: A dictionary mapping bond keys onto parities
:rtype: dict[frozenset[int]: bool]
"""
ste_lyr_dct = stereo_layers(chi)
bnd_ste_dct = _bond_stereo_parities(ste_lyr_dct, one_indexed=one_indexed)
return bnd_ste_dct
def atom_stereo_parities(chi, one_indexed=False):
""" Parse the atom stereo parities from the stereochemistry layers.
:param chi: ChI string
:type chi: str
:param one_indexed: Return indices in one-indexing?
:type one_indexed: bool
:returns: A dictionary mapping atom keys onto parities
:rtype: dict[int: bool]
"""
ste_lyr_dct = stereo_layers(chi)
atm_ste_dct = _atom_stereo_parities(ste_lyr_dct, one_indexed=one_indexed)
return atm_ste_dct
def is_inverted_enantiomer(chi):
""" Determine enantiomer inversion from the stereo layers.
:param chi: ChI string
:type chi: str
:returns: whether or not the ChI is inverted; returns None if not an
enantiomer
:rtype: bool
"""
ste_lyr_dct = stereo_layers(chi)
is_inv = _is_inverted_enantiomer(ste_lyr_dct)
return is_inv
# # # isotope layers
def bond_isotope_stereo_parities(chi, one_indexed=False):
""" Parse the bond stereo parities from the isotope layers.
:param chi: ChI string
:type chi: str
:param one_indexed: Return indices in one-indexing?
:type one_indexed: bool
:returns: A dictionary mapping bond keys onto parities
:rtype: dict[frozenset[int]: bool]
"""
iso_lyr_dct = isotope_layers(chi)
bnd_ste_dct = _bond_stereo_parities(iso_lyr_dct, one_indexed=one_indexed)
return bnd_ste_dct
def atom_isotope_stereo_parities(chi, one_indexed=False):
""" Parse the atom stereo parities from the isotope layers.
:param chi: ChI string
:type chi: str
:param one_indexed: Return indices in one-indexing?
:type one_indexed: bool
:returns: A dictionary mapping atom keys onto parities
:rtype: dict[int: bool]
"""
iso_lyr_dct = isotope_layers(chi)
atm_ste_dct = _atom_stereo_parities(iso_lyr_dct, one_indexed=one_indexed)
return atm_ste_dct
def is_inverted_isotope_enantiomer(chi):
""" Determine enantiomer inversion from the isotope layers.
:param chi: ChI string
:type chi: str
:returns: whether or not the ChI is inverted; returns None if not an
enantiomer
:rtype: bool
"""
iso_lyr_dct = isotope_layers(chi)
is_inv = _is_inverted_enantiomer(iso_lyr_dct)
return is_inv
# # other properties
def has_stereo(chi):
""" Determine if the ChI string has stereochemistry information.
:param chi: ChI string
:type chi: str
:rtype: bool
"""
ste_dct = stereo_layers(chi)
iso_dct = isotope_layers(chi)
return bool(ste_dct or
any(pfx in iso_dct for pfx in STE_PFXS))
def has_multiple_components(chi):
""" Determine if the ChI string has multiple components.
:param chi: ChI string
:type chi: str
:rtype: bool
"""
return len(split(chi)) > 1
def low_spin_multiplicity(chi):
""" Guess spin multiplicity based on the number of electrons.
:param chi: ChI string
:type chi: str
:rtype: int
"""
fml = formula(chi)
nelec = automol.formula.electron_count(fml) - charge(chi)
if (nelec % 2) == 0:
mult = 1
else:
mult = 2
return mult
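# Illustrative example (assuming automol.formula.electron_count returns the
# total electron count of the neutral formula): CH3 has 6 + 3 = 9 electrons,
# which is odd, so this returns 2 (doublet); CH4 with 10 electrons returns 1.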
# # split/join
def split(chi):
""" Split a multi-component ChI into ChIs for each of its components.
:param chi: ChI string
:type chi: str
:returns: the split ChI strings
:rtype: tuple[str]
"""
fml_str = formula_string(chi)
main_dct = main_layers(chi)
char_dct = charge_layers(chi)
ste_dct = stereo_layers(chi)
iso_dct = isotope_layers(chi)
fml_strs = _split_layer_string(
fml_str, count_sep_ptt='', sep_ptt=app.escape('.'))
count = len(fml_strs)
main_dcts = _split_layers(main_dct, count)
char_dcts = _split_layers(char_dct, count)
ste_dcts = _split_layers(ste_dct, count)
iso_dcts = _split_layers(iso_dct, count)
chis = tuple(from_data(fml_str=fml_str,
main_lyr_dct=main_dct,
char_lyr_dct=char_dct,
ste_lyr_dct=ste_dct,
iso_lyr_dct=iso_dct)
for fml_str, main_dct, char_dct, ste_dct, iso_dct
in zip(fml_strs, main_dcts, char_dcts, ste_dcts, iso_dcts))
return chis
def join(chis):
""" Join separate ChI strings into one multi-component ChI string.
:param chis: sequence of ChI strings
:type chis: tuple[str]
:returns: the joined ChI string
:rtype: str
"""
# first, make sure they are completely split up
chis = list(itertools.chain(*map(split, chis)))
fml_strs = list(map(formula_string, chis))
fml_str = _join_layer_strings(fml_strs, count_sep='', sep='.')
main_dct = _join_layers(list(map(main_layers, chis)))
char_dct = _join_layers(list(map(charge_layers, chis)))
ste_dct = _join_layers(list(map(stereo_layers, chis)))
iso_dct = _join_layers(list(map(isotope_layers, chis)))
return from_data(fml_str=fml_str,
main_lyr_dct=main_dct,
char_lyr_dct=char_dct,
ste_lyr_dct=ste_dct,
iso_lyr_dct=iso_dct)
# # common multilayer properties
def _bond_stereo_parities(lyr_dct, one_indexed=False):
""" Parse bond stereo parities from a given layer dictionary
"""
if 'b' not in lyr_dct:
bnd_ste_dct = {}
else:
lyr = lyr_dct['b']
# Set up the parser
integer = pp.Word(pp.nums)
bond = integer + pp.Suppress('-') + integer
parity = pp.Or(['+', '-'])
term = pp.Group(pp.Group(bond) + parity)
parser = pp.Opt(pp.delimitedList(term, delim=','))
# Do the parsing
lst = ap_cast(parser.parseString(lyr).asList())
# Interpret the list
shift = 0 if one_indexed else -1
bnd_ste_dct = {frozenset({k1+shift, k2+shift}): (p == '+')
for (k1, k2), p in lst}
return bnd_ste_dct
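# Illustrative parse (layer value assumed to follow the format handled above):
# the 'b' layer "5-3-,6-4+" yields, with the default zero-indexing,
#     {frozenset({4, 2}): False, frozenset({5, 3}): True}
# i.e. '+' maps to True, '-' maps to False, and each atom number is shifted
# down by one.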
def _atom_stereo_parities(lyr_dct, one_indexed=False):
""" Parse atom stereo parities from a given layer dictionary
"""
if 't' not in lyr_dct:
atm_ste_dct = {}
else:
lyr = lyr_dct['t']
# Set up the parser
integer = pp.Word(pp.nums)
parity = pp.Or(['+', '-'])
term = pp.Group(integer + parity)
parser = pp.Opt(pp.delimitedList(term, delim=','))
# Do the parsing
lst = ap_cast(parser.parseString(lyr).asList())
# Interpret the list
shift = 0 if one_indexed else -1
atm_ste_dct = {k+shift: (p == '+') for k, p in lst}
return atm_ste_dct
def _is_inverted_enantiomer(lyr_dct):
""" Determine enantiomer inversion from a given layer dictionary.
"""
is_inv = None
if 'm' in lyr_dct:
if lyr_dct['m'] == '1':
is_inv = True
else:
assert lyr_dct['m'] == '0'
is_inv = False
return is_inv
# # parsing helpers
def _layers(lyrs_str):
""" Parse the layers of the specified layer of a ChI string to a
dictionary, keyed by prefix.
:param lyrs_str: a string containing one or more ChI layers
:type lyrs_str: str
:returns: a dictionary of layers, keyed by layer prefix
:rtype: dict[str: str]
"""
if lyrs_str:
ptt = _layer_pattern(key_ptt=app.capturing(app.LOWERCASE_LETTER),
val_ptt=app.capturing(NONSLASHES))
dct = dict(apf.all_captures(ptt, lyrs_str))
else:
dct = {}
return dct
# # parsing patterns
def _version_pattern():
""" Build the autoparse regex pattern for the ChI string version.
:rtype: str
"""
ptt = app.preceded_by('=') + _layer_pattern()
return ptt
def _formula_pattern():
""" Build the autoparse regex pattern for the chemical formual layer.
:rtype: str
"""
ptt = _layer_pattern(key_ptt=app.not_followed_by(app.LOWERCASE_LETTER))
return ptt
def _main_layers_pattern():
""" Build the autoparse regex pattern for the connectivity layer.
:rtype: str
"""
c_lyr_ptt = _layer_pattern(key_ptt='c')
h_lyr_ptt = _layer_pattern(key_ptt='h')
ptt = (app.one_of_these([c_lyr_ptt, h_lyr_ptt]) +
app.maybe(SLASH + h_lyr_ptt))
return ptt
def _charge_layers_pattern():
""" Build the autoparse regex pattern for the charge layer.
:rtype: str
"""
q_lyr_ptt = _layer_pattern(key_ptt='q')
p_lyr_ptt = _layer_pattern(key_ptt='p')
ptt = (app.one_of_these([q_lyr_ptt, p_lyr_ptt]) +
app.maybe(SLASH + p_lyr_ptt))
return ptt
def _stereo_layers_pattern():
""" Build the autoparse regex pattern for the stereochemistry layer.
:rtype: str
"""
b_lyr_ptt = _layer_pattern(key_ptt='b')
t_lyr_ptt = _layer_pattern(key_ptt='t')
m_lyr_ptt = _layer_pattern(key_ptt='m')
s_lyr_ptt = _layer_pattern(key_ptt='s')
ptt = (app.one_of_these([b_lyr_ptt, t_lyr_ptt]) +
app.maybe(SLASH + t_lyr_ptt) +
app.maybe(SLASH + m_lyr_ptt) +
app.maybe(SLASH + s_lyr_ptt))
return ptt
def _isotope_layers_pattern():
""" Build the autoparse regex pattern for the isotope layer.
:rtype: str
"""
i_lyr_ptt = _layer_pattern(key_ptt='i')
h_lyr_ptt = _layer_pattern(key_ptt='h')
ptt = (i_lyr_ptt +
app.maybe(SLASH + h_lyr_ptt) +
app.maybe(SLASH + _stereo_layers_pattern()))
return ptt
def _layer_pattern(key_ptt='', val_ptt=NONSLASHES):
""" Build the autoparse regex pattern for an arbitrary ChI layer.
:rtype: str
"""
return key_ptt + val_ptt
# # split/join helpers
def _join_layers(dcts):
""" Join all of the components of a ChI layer.
:param dcts: layer components, grouped by prefix
:type dct: dict[str: str]
:rtype: dict[str: str]
"""
pfxs = sorted(functools.reduce(set.union, map(set, dcts)))
if 's' in pfxs:
pfxs.remove('s')
dcts = [dict_.by_key(dct, pfxs, fill_val='') for dct in dcts]
lyrs_lst = [[dct[pfx] for dct in dcts] for pfx in pfxs]
dct = {pfx: (_join_layer_strings(lyrs) if pfx != 'm' else
_join_m_layer_strings(lyrs))
for pfx, lyrs in zip(pfxs, lyrs_lst)}
return dct
def _join_m_layer_strings(m_lyrs):
m_lyrs = [m_lyr if m_lyr else '.' for m_lyr in m_lyrs]
return ''.join(m_lyrs)
def _join_layer_strings(lyrs, count_sep='*', sep=';'):
""" Join layer strings into one multi-component layer string.
:param lyrs: layer strings to join
:type lyrs: tuple[str]
:param count_sep: separator placed between a repeat count and the layer it
multiplies (e.g. the '*' in '2*<layer>')
:type count_sep: str
:param sep: separator placed between per-component layer strings
:type sep: str
"""
def _s(count, lyr):
if count > 1 and lyr:
ret = ('{:d}' + count_sep + '{:s}').format(count, lyr)
elif lyr:
ret = lyr
else:
ret = sep * (count - 1)
return ret
counts, lyrs = zip(*[
(len(list(g)), lyr) for lyr, g in itertools.groupby(lyrs)])
lyr = sep.join([_s(count, lyr) for count, lyr in zip(counts, lyrs)])
return lyr
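# Illustrative joins (inputs assumed): identical consecutive layers collapse
# into a count, and empty layers collapse into bare separators:
#     _join_layer_strings(['1-2+', '1-2+', '3-4-'])  ->  '2*1-2+;3-4-'
#     _join_layer_strings(['', '', 'b'])             ->  ';;b'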
def _split_layers(dct, count):
""" split a multi-component layer dictionary into separate ones
"""
if dct:
pfxs = sorted(dct.keys())
if 's' in pfxs:
pfxs.remove('s')
lyrs_lst = [
_split_layer_string(dct[pfx]) if pfx != 'm'
else
state", b'PROP AUTO FEATHER ARMED:index', b'Bool', 'N'],
"PROP_FEATHER_SWITCH:index": ["Prop feather switch", b'PROP FEATHER SWITCH:index', b'Bool', 'N'],
"PANEL_AUTO_FEATHER_SWITCH:index": ["Auto-feather arming switch", b'PANEL AUTO FEATHER SWITCH:index', b'Bool', 'N'],
"PROP_SYNC_ACTIVE:index": ["True if prop sync is active", b'PROP SYNC ACTIVE:index', b'Bool', 'N'],
"PROP_DEICE_SWITCH:index": ["True if prop deice switch on", b'PROP DEICE SWITCH:index', b'Bool', 'N'],
"ENG_COMBUSTION": ["True if the engine is running", b'ENG COMBUSTION', b'Bool', 'N'],
"ENG_N1_RPM:index": ["Engine N1 rpm", b'ENG N1 RPM:index', b'Rpm (0 to 16384 = 0 to 100%)', 'N'],
"ENG_N2_RPM:index": ["Engine N2 rpm", b'ENG N2 RPM:index', b'Rpm(0 to 16384 = 0 to 100%)', 'N'],
"ENG_FUEL_FLOW_GPH:index": ["Engine fuel flow", b'ENG FUEL FLOW GPH:index', b'Gallons per hour', 'N'],
"ENG_FUEL_FLOW_PPH:index": ["Engine fuel flow", b'ENG FUEL FLOW PPH:index', b'Pounds per hour', 'N'],
"ENG_TORQUE:index": ["Torque", b'ENG TORQUE:index', b'Foot pounds', 'N'],
"ENG_ANTI_ICE:index": ["Anti-ice switch", b'ENG ANTI ICE:index', b'Bool', 'N'],
"ENG_PRESSURE_RATIO:index": ["Engine pressure ratio", b'ENG PRESSURE RATIO:index', b'Ratio (0-16384)', 'N'],
"ENG_EXHAUST_GAS_TEMPERATURE:index": ["Exhaust gas temperature", b'ENG EXHAUST GAS TEMPERATURE:index', b'Rankine', 'N'],
"ENG_EXHAUST_GAS_TEMPERATURE_GES:index": ["Governed engine setting", b'ENG EXHAUST GAS TEMPERATURE GES:index', b'Percent over 100', 'N'],
"ENG_CYLINDER_HEAD_TEMPERATURE:index": ["Engine cylinder head temperature", b'ENG CYLINDER HEAD TEMPERATURE:index', b'Rankine', 'N'],
"ENG_OIL_TEMPERATURE:index": ["Engine oil temperature", b'ENG OIL TEMPERATURE:index', b'Rankine', 'N'],
"ENG_OIL_PRESSURE:index": ["Engine oil pressure", b'ENG OIL PRESSURE:index', b'foot pounds', 'N'],
"ENG_OIL_QUANTITY:index": ["Engine oil quantitiy as a percentage of full capacity", b'ENG OIL QUANTITY:index', b'Percent over 100', 'N'],
"ENG_HYDRAULIC_PRESSURE:index": ["Engine hydraulic pressure", b'ENG HYDRAULIC PRESSURE:index', b'foot pounds', 'N'],
"ENG_HYDRAULIC_QUANTITY:index": ["Engine hydraulic fluid quantity, as a percentage of total capacity", b'ENG HYDRAULIC QUANTITY:index', b'Percent over 100', 'N'],
"ENG_MANIFOLD_PRESSURE:index": ["Engine manifold pressure.", b'ENG MANIFOLD PRESSURE:index', b'inHG.', 'N'],
"ENG_VIBRATION:index": ["Engine vibration", b'ENG VIBRATION:index', b'Number', 'N'],
"ENG_RPM_SCALER:index": ["Obsolete", b'ENG RPM SCALER:index', b'Scalar', 'N'],
"ENG_MAX_RPM": ["Maximum rpm", b'ENG MAX RPM', b'Rpm', 'N'],
"GENERAL_ENG_STARTER_ACTIVE": ["True if engine starter is active", b'GENERAL ENG STARTER ACTIVE', b'Bool', 'N'],
"GENERAL_ENG_FUEL_USED_SINCE_START": ["Fuel used since the engines were last started", b'GENERAL ENG FUEL USED SINCE START', b'Pounds', 'N'],
"TURB_ENG_PRIMARY_NOZZLE_PERCENT:index": ["Percent thrust of primary nozzle", b'TURB ENG PRIMARY NOZZLE PERCENT:index', b'Percent over 100', 'N'],
"TURB_ENG_IGNITION_SWITCH": ["True if the turbine engine ignition switch is on", b'TURB ENG IGNITION SWITCH', b'Bool', 'N'],
"TURB_ENG_MASTER_STARTER_SWITCH": ["True if the turbine engine master starter switch is on", b'TURB ENG MASTER STARTER SWITCH', b'Bool', 'N'],
"TURB_ENG_AFTERBURNER_STAGE_ACTIVE": ["The stage of the afterburner, or 0 if the afterburner is not active.", b'TURB ENG AFTERBURNER STAGE ACTIVE', b'Number', 'N'],
}
class __FuelTankSelection(RequestHelper):
list = {
}
class __AircraftFuelData(RequestHelper):
list = {
"FUEL_TANK_CENTER_LEVEL": ["Percent of maximum capacity", b'FUEL TANK CENTER LEVEL', b'Percent Over 100', 'Y'],
"FUEL_TANK_CENTER2_LEVEL": ["Percent of maximum capacity", b'FUEL TANK CENTER2 LEVEL', b'Percent Over 100', 'Y'],
"FUEL_TANK_CENTER3_LEVEL": ["Percent of maximum capacity", b'FUEL TANK CENTER3 LEVEL', b'Percent Over 100', 'Y'],
"FUEL_TANK_LEFT_MAIN_LEVEL": ["Percent of maximum capacity", b'FUEL TANK LEFT MAIN LEVEL', b'Percent Over 100', 'Y'],
"FUEL_TANK_LEFT_AUX_LEVEL": ["Percent of maximum capacity", b'FUEL TANK LEFT AUX LEVEL', b'Percent Over 100', 'Y'],
"FUEL_TANK_LEFT_TIP_LEVEL": ["Percent of maximum capacity", b'FUEL TANK LEFT TIP LEVEL', b'Percent Over 100', 'Y'],
"FUEL_TANK_RIGHT_MAIN_LEVEL": ["Percent of maximum capacity", b'FUEL TANK RIGHT MAIN LEVEL', b'Percent Over 100', 'Y'],
"FUEL_TANK_RIGHT_AUX_LEVEL": ["Percent of maximum capacity", b'FUEL TANK RIGHT AUX LEVEL', b'Percent Over 100', 'Y'],
"FUEL_TANK_RIGHT_TIP_LEVEL": ["Percent of maximum capacity", b'FUEL TANK RIGHT TIP LEVEL', b'Percent Over 100', 'Y'],
"FUEL_TANK_EXTERNAL1_LEVEL": ["Percent of maximum capacity", b'FUEL TANK EXTERNAL1 LEVEL', b'Percent Over 100', 'Y'],
"FUEL_TANK_EXTERNAL2_LEVEL": ["Percent of maximum capacity", b'FUEL TANK EXTERNAL2 LEVEL', b'Percent Over 100', 'Y'],
"FUEL_TANK_CENTER_CAPACITY": ["Maximum capacity in volume", b'FUEL TANK CENTER CAPACITY', b'Gallons', 'N'],
"FUEL_TANK_CENTER2_CAPACITY": ["Maximum capacity in volume", b'FUEL TANK CENTER2 CAPACITY', b'Gallons', 'N'],
"FUEL_TANK_CENTER3_CAPACITY": ["Maximum capacity in volume", b'FUEL TANK CENTER3 CAPACITY', b'Gallons', 'N'],
"FUEL_TANK_LEFT_MAIN_CAPACITY": ["Maximum capacity in volume", b'FUEL TANK LEFT MAIN CAPACITY', b'Gallons', 'N'],
"FUEL_TANK_LEFT_AUX_CAPACITY": ["Maximum capacity in volume", b'FUEL TANK LEFT AUX CAPACITY', b'Gallons', 'N'],
"FUEL_TANK_LEFT_TIP_CAPACITY": ["Maximum capacity in volume", b'FUEL TANK LEFT TIP CAPACITY', b'Gallons', 'N'],
"FUEL_TANK_RIGHT_MAIN_CAPACITY": ["Maximum capacity in volume", b'FUEL TANK RIGHT MAIN CAPACITY', b'Gallons', 'N'],
"FUEL_TANK_RIGHT_AUX_CAPACITY": ["Maximum capacity in volume", b'FUEL TANK RIGHT AUX CAPACITY', b'Gallons', 'N'],
"FUEL_TANK_RIGHT_TIP_CAPACITY": ["Maximum capacity in volume", b'FUEL TANK RIGHT TIP CAPACITY', b'Gallons', 'N'],
"FUEL_TANK_EXTERNAL1_CAPACITY": ["Maximum capacity in volume", b'FUEL TANK EXTERNAL1 CAPACITY', b'Gallons', 'N'],
"FUEL_TANK_EXTERNAL2_CAPACITY": ["Maximum capacity in volume", b'FUEL TANK EXTERNAL2 CAPACITY', b'Gallons', 'N'],
"FUEL_LEFT_CAPACITY": ["Maximum capacity in volume", b'FUEL LEFT CAPACITY', b'Gallons', 'N'],
"FUEL_RIGHT_CAPACITY": ["Maximum capacity in volume", b'FUEL RIGHT CAPACITY', b'Gallons', 'N'],
"FUEL_TANK_CENTER_QUANTITY": ["Current quantity in volume", b'FUEL TANK CENTER QUANTITY', b'Gallons', 'Y'],
"FUEL_TANK_CENTER2_QUANTITY": ["Current quantity in volume", b'FUEL TANK CENTER2 QUANTITY', b'Gallons', 'Y'],
"FUEL_TANK_CENTER3_QUANTITY": ["Current quantity in volume", b'FUEL TANK CENTER3 QUANTITY', b'Gallons', 'Y'],
"FUEL_TANK_LEFT_MAIN_QUANTITY": ["Current quantity in volume", b'FUEL TANK LEFT MAIN QUANTITY', b'Gallons', 'Y'],
"FUEL_TANK_LEFT_AUX_QUANTITY": ["Current quantity in volume", b'FUEL TANK LEFT AUX QUANTITY', b'Gallons', 'Y'],
"FUEL_TANK_LEFT_TIP_QUANTITY": ["Current quantity in volume", b'FUEL TANK LEFT TIP QUANTITY', b'Gallons', 'Y'],
"FUEL_TANK_RIGHT_MAIN_QUANTITY": ["Current quantity in volume", b'FUEL TANK RIGHT MAIN QUANTITY', b'Gallons', 'Y'],
"FUEL_TANK_RIGHT_AUX_QUANTITY": ["Current quantity in volume", b'FUEL TANK RIGHT AUX QUANTITY', b'Gallons', 'Y'],
"FUEL_TANK_RIGHT_TIP_QUANTITY": ["Current quantity in volume", b'FUEL TANK RIGHT TIP QUANTITY', b'Gallons', 'Y'],
"FUEL_TANK_EXTERNAL1_QUANTITY": ["Current quantity in volume", b'FUEL TANK EXTERNAL1 QUANTITY', b'Gallons', 'Y'],
"FUEL_TANK_EXTERNAL2_QUANTITY": ["Current quantity in volume", b'FUEL TANK EXTERNAL2 QUANTITY', b'Gallons', 'Y'],
"FUEL_LEFT_QUANTITY": ["Current quantity in volume", b'FUEL LEFT QUANTITY', b'Gallons', 'N'],
"FUEL_RIGHT_QUANTITY": ["Current quantity in volume", b'FUEL RIGHT QUANTITY', b'Gallons', 'N'],
"FUEL_TOTAL_QUANTITY": ["Current quantity in volume", b'FUEL TOTAL QUANTITY', b'Gallons', 'N'],
"FUEL_WEIGHT_PER_GALLON": ["Fuel weight per gallon", b'FUEL WEIGHT PER GALLON', b'Pounds', 'N'],
"FUEL_TANK_SELECTOR:index": ["Which tank is selected. See fuel tank list.", b'FUEL TANK SELECTOR:index', b'Enum', 'N'],
"FUEL_CROSS_FEED": ["Cross feed valve:; 0 = Closed; 1 = Open", b'FUEL CROSS FEED', b'Enum', 'N'],
"FUEL_TOTAL_CAPACITY": ["Total capacity of the aircraft", b'FUEL TOTAL CAPACITY', b'Gallons', 'N'],
"FUEL_SELECTED_QUANTITY_PERCENT": ["Percent or capacity for selected tank", b'FUEL SELECTED QUANTITY PERCENT', b'Percent Over 100', 'N'],
"FUEL_SELECTED_QUANTITY": ["Quantity of selected tank", b'FUEL SELECTED QUANTITY', b'Gallons', 'N'],
"FUEL_TOTAL_QUANTITY_WEIGHT": ["Current total fuel weight of the aircraft", b'FUEL TOTAL QUANTITY WEIGHT', b'Pounds', 'N'],
"NUM_FUEL_SELECTORS": ["Number of selectors on the aircraft", b'NUM FUEL SELECTORS', b'Number', 'N'],
"UNLIMITED_FUEL": ["Unlimited fuel flag", b'UNLIMITED FUEL', b'Bool', 'N'],
"ESTIMATED_FUEL_FLOW": ["Estimated fuel flow at cruise", b'ESTIMATED FUEL FLOW', b'Pounds per hour', 'N'],
}
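# Illustrative derived quantity (the keys are the entries above; how the values
# are requested depends on RequestHelper, so `aq.get(...)` is hypothetical):
#     fuel_weight_lbs = aq.get("FUEL_TOTAL_QUANTITY") * aq.get("FUEL_WEIGHT_PER_GALLON")
# i.e. gallons on board times pounds per gallon gives the fuel weight in pounds.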
class __AircraftLightsData(RequestHelper):
list = {
"LIGHT_STROBE": ["Light switch state", b'LIGHT STROBE', b'Bool', 'N'],
"LIGHT_PANEL": ["Light switch state", b'LIGHT PANEL', b'Bool', 'N'],
"LIGHT_LANDING": ["Light switch state", b'LIGHT LANDING', b'Bool', 'N'],
"LIGHT_TAXI": ["Light switch state", b'LIGHT TAXI', b'Bool', 'N'],
"LIGHT_BEACON": ["Light switch state", b'LIGHT BEACON', b'Bool', 'N'],
"LIGHT_NAV": ["Light switch state", b'LIGHT NAV', b'Bool', 'N'],
"LIGHT_LOGO": ["Light switch state", b'LIGHT LOGO', b'Bool', 'N'],
"LIGHT_WING": ["Light switch state", b'LIGHT WING', b'Bool', 'N'],
"LIGHT_RECOGNITION": ["Light switch state", b'LIGHT RECOGNITION', b'Bool', 'N'],
"LIGHT_CABIN": ["Light switch state", b'LIGHT CABIN', b'Bool', 'N'],
"LIGHT_ON_STATES": ["Bit mask:; 0x0001: Nav; 0x0002: Beacon; 0x0004: Landing; 0x0008: Taxi; 0x0010: Strobe; 0x0020: Panel; 0x0040: Recognition; 0x0080: Wing; 0x0100: Logo; 0x0200: Cabin", b'LIGHT ON STATES', b'Mask', 'N'],
"LIGHT_STATES": ["Same as LIGHT ON STATES", b'LIGHT STATES', b'Mask', 'N'],
# "LANDING_LIGHT_PBH": ["Landing light pitch bank and heading", b'LANDING LIGHT PBH', b'SIMCONNECT_DATA_XYZ', 'N'],
"LIGHT_TAXI_ON": ["Return true if the light is on.", b'LIGHT TAXI ON', b'Bool', 'N'],
"LIGHT_STROBE_ON": ["Return true if the light is on.", b'LIGHT STROBE ON', b'Bool', 'N'],
"LIGHT_PANEL_ON": ["Return true if the light is on.", b'LIGHT PANEL ON', b'Bool', 'N'],
"LIGHT_RECOGNITION_ON": ["Return true if the light is on.", b'LIGHT RECOGNITION ON', b'Bool', 'N'],
"LIGHT_WING_ON": ["Return true if the light is on.", b'LIGHT WING ON', b'Bool', 'N'],
"LIGHT_LOGO_ON": ["Return true if the light is on.", b'LIGHT LOGO ON', b'Bool', 'N'],
"LIGHT_CABIN_ON": ["Return true if the light is on.", b'LIGHT CABIN ON', b'Bool', 'N'],
"LIGHT_HEAD_ON": ["Return true if the light is on.", b'LIGHT HEAD ON', b'Bool', 'N'],
"LIGHT_BRAKE_ON": ["Return true if the light is on.", b'LIGHT BRAKE ON', b'Bool', 'N'],
"LIGHT_NAV_ON": ["Return true if the light is on.", b'LIGHT NAV ON', b'Bool', 'N'],
"LIGHT_BEACON_ON": ["Return true if the light is on.", b'LIGHT BEACON ON', b'Bool', 'N'],
"LIGHT_LANDING_ON": ["Return true if the light is on.", b'LIGHT LANDING ON', b'Bool', 'N'],
}
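# Illustrative decoding of the LIGHT ON STATES bit mask documented above (the
# bit values come from that entry; the `aq.get(...)` request call itself is
# hypothetical):
#     states = aq.get("LIGHT_ON_STATES")
#     landing_on = bool(states & 0x0004)
#     strobe_on = bool(states & 0x0010)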
class __AircraftPositionandSpeedData(RequestHelper):
list = {
"GROUND_VELOCITY": ["Speed relative to the earths surface", b'GROUND VELOCITY', b'Knots', 'N'],
"TOTAL_WORLD_VELOCITY": ["Speed relative to the earths center", b'TOTAL WORLD VELOCITY', b'Feet per second', 'N'],
"VELOCITY_BODY_Z": ["True longitudinal speed, relative to aircraft axis", b'VELOCITY BODY Z', b'Feet per second', 'Y'],
"VELOCITY_BODY_X": ["True lateral speed, relative to aircraft axis", b'VELOCITY BODY X', b'Feet per second', 'Y'],
"VELOCITY_BODY_Y": ["True vertical speed, relative to aircraft axis", b'VELOCITY BODY Y', b'Feet per second', 'Y'],
"VELOCITY_WORLD_Z": ["Speed relative to earth, in North/South direction", b'VELOCITY WORLD Z', b'Feet per second', 'Y'],
"VELOCITY_WORLD_X": ["Speed relative to earth, in East/West direction", b'VELOCITY WORLD X', b'Feet per second', 'Y'],
"VELOCITY_WORLD_Y": ["Speed relative to earth, in vertical direction", b'VELOCITY WORLD Y', b'Feet per second', 'Y'],
"ACCELERATION_WORLD_X": ["Acceleration relative to earth, in east/west direction", b'ACCELERATION WORLD X', b'Feet per second squared', 'Y'],
"ACCELERATION_WORLD_Y": ["Acceleration relative to earch, in vertical direction", b'ACCELERATION WORLD Y', b'Feet per second squared', 'Y'],
"ACCELERATION_WORLD_Z": ["Acceleration | |
3)
OP '.' (1, 3) (1, 4)
NAME 'async' (1, 4) (1, 9)
OP '+' (1, 10) (1, 11)
NUMBER '1' (1, 12) (1, 13)
>>> dump_tokens("async def foo(): pass")
ENCODING 'utf-8' (0, 0) (0, 0)
ASYNC 'async' (1, 0) (1, 5)
NAME 'def' (1, 6) (1, 9)
NAME 'foo' (1, 10) (1, 13)
OP '(' (1, 13) (1, 14)
OP ')' (1, 14) (1, 15)
OP ':' (1, 15) (1, 16)
NAME 'pass' (1, 17) (1, 21)
>>> dump_tokens('''async def foo():
... def foo(await):
... await = 1
... if 1:
... await
... async += 1
... ''')
ENCODING 'utf-8' (0, 0) (0, 0)
ASYNC 'async' (1, 0) (1, 5)
NAME 'def' (1, 6) (1, 9)
NAME 'foo' (1, 10) (1, 13)
OP '(' (1, 13) (1, 14)
OP ')' (1, 14) (1, 15)
OP ':' (1, 15) (1, 16)
NEWLINE '\\n' (1, 16) (1, 17)
INDENT ' ' (2, 0) (2, 2)
NAME 'def' (2, 2) (2, 5)
NAME 'foo' (2, 6) (2, 9)
OP '(' (2, 9) (2, 10)
AWAIT 'await' (2, 10) (2, 15)
OP ')' (2, 15) (2, 16)
OP ':' (2, 16) (2, 17)
NEWLINE '\\n' (2, 17) (2, 18)
INDENT ' ' (3, 0) (3, 4)
AWAIT 'await' (3, 4) (3, 9)
OP '=' (3, 10) (3, 11)
NUMBER '1' (3, 12) (3, 13)
NEWLINE '\\n' (3, 13) (3, 14)
DEDENT '' (4, 2) (4, 2)
NAME 'if' (4, 2) (4, 4)
NUMBER '1' (4, 5) (4, 6)
OP ':' (4, 6) (4, 7)
NEWLINE '\\n' (4, 7) (4, 8)
INDENT ' ' (5, 0) (5, 4)
AWAIT 'await' (5, 4) (5, 9)
NEWLINE '\\n' (5, 9) (5, 10)
DEDENT '' (6, 0) (6, 0)
DEDENT '' (6, 0) (6, 0)
NAME 'async' (6, 0) (6, 5)
OP '+=' (6, 6) (6, 8)
NUMBER '1' (6, 9) (6, 10)
NEWLINE '\\n' (6, 10) (6, 11)
>>> dump_tokens('''async def foo():
... async for i in 1: pass''')
ENCODING 'utf-8' (0, 0) (0, 0)
ASYNC 'async' (1, 0) (1, 5)
NAME 'def' (1, 6) (1, 9)
NAME 'foo' (1, 10) (1, 13)
OP '(' (1, 13) (1, 14)
OP ')' (1, 14) (1, 15)
OP ':' (1, 15) (1, 16)
NEWLINE '\\n' (1, 16) (1, 17)
INDENT ' ' (2, 0) (2, 2)
ASYNC 'async' (2, 2) (2, 7)
NAME 'for' (2, 8) (2, 11)
NAME 'i' (2, 12) (2, 13)
NAME 'in' (2, 14) (2, 16)
NUMBER '1' (2, 17) (2, 18)
OP ':' (2, 18) (2, 19)
NAME 'pass' (2, 20) (2, 24)
DEDENT '' (3, 0) (3, 0)
>>> dump_tokens('''async def foo(async): await''')
ENCODING 'utf-8' (0, 0) (0, 0)
ASYNC 'async' (1, 0) (1, 5)
NAME 'def' (1, 6) (1, 9)
NAME 'foo' (1, 10) (1, 13)
OP '(' (1, 13) (1, 14)
ASYNC 'async' (1, 14) (1, 19)
OP ')' (1, 19) (1, 20)
OP ':' (1, 20) (1, 21)
AWAIT 'await' (1, 22) (1, 27)
>>> dump_tokens('''def f():
...
... def baz(): pass
... async def bar(): pass
...
... await = 2''')
ENCODING 'utf-8' (0, 0) (0, 0)
NAME 'def' (1, 0) (1, 3)
NAME 'f' (1, 4) (1, 5)
OP '(' (1, 5) (1, 6)
OP ')' (1, 6) (1, 7)
OP ':' (1, 7) (1, 8)
NEWLINE '\\n' (1, 8) (1, 9)
NL '\\n' (2, 0) (2, 1)
INDENT ' ' (3, 0) (3, 2)
NAME 'def' (3, 2) (3, 5)
NAME 'baz' (3, 6) (3, 9)
OP '(' (3, 9) (3, 10)
OP ')' (3, 10) (3, 11)
OP ':' (3, 11) (3, 12)
NAME 'pass' (3, 13) (3, 17)
NEWLINE '\\n' (3, 17) (3, 18)
ASYNC 'async' (4, 2) (4, 7)
NAME 'def' (4, 8) (4, 11)
NAME 'bar' (4, 12) (4, 15)
OP '(' (4, 15) (4, 16)
OP ')' (4, 16) (4, 17)
OP ':' (4, 17) (4, 18)
NAME 'pass' (4, 19) (4, 23)
NEWLINE '\\n' (4, 23) (4, 24)
NL '\\n' (5, 0) (5, 1)
NAME 'await' (6, 2) (6, 7)
OP '=' (6, 8) (6, 9)
NUMBER '2' (6, 10) (6, 11)
DEDENT '' (7, 0) (7, 0)
>>> dump_tokens('''async def f():
...
... def baz(): pass
... async def bar(): pass
...
... await = 2''')
ENCODING 'utf-8' (0, 0) (0, 0)
ASYNC 'async' (1, 0) (1, 5)
NAME 'def' (1, 6) (1, 9)
NAME 'f' (1, 10) (1, 11)
OP '(' (1, 11) (1, 12)
OP ')' (1, 12) (1, 13)
OP ':' (1, 13) (1, 14)
NEWLINE '\\n' (1, 14) (1, 15)
NL '\\n' (2, 0) (2, 1)
INDENT ' ' (3, 0) (3, 2)
NAME 'def' (3, 2) (3, 5)
NAME 'baz' (3, 6) (3, 9)
OP '(' (3, 9) (3, 10)
OP ')' (3, 10) (3, 11)
OP ':' (3, 11) (3, 12)
NAME 'pass' (3, 13) (3, 17)
NEWLINE '\\n' (3, 17) (3, 18)
ASYNC 'async' (4, 2) (4, 7)
NAME 'def' (4, 8) (4, 11)
NAME 'bar' (4, 12) (4, 15)
OP '(' (4, 15) (4, 16)
OP ')' (4, 16) (4, 17)
OP ':' (4, 17) (4, 18)
NAME 'pass' (4, 19) (4, 23)
NEWLINE '\\n' (4, 23) (4, 24)
NL '\\n' (5, 0) (5, 1)
AWAIT 'await' (6, 2) (6, 7)
OP '=' (6, 8) (6, 9)
NUMBER '2' (6, 10) (6, 11)
DEDENT '' (7, 0) (7, 0)
"""
from test import support
from tokenize import (tokenize, _tokenize, untokenize, NUMBER, NAME, OP,
STRING, ENDMARKER, ENCODING, tok_name, detect_encoding,
open as tokenize_open, Untokenizer)
from io import BytesIO
from unittest import TestCase, mock
import os
import token
def dump_tokens(s):
"""Print out the tokens w s w a table format.
The ENDMARKER jest omitted.
"""
f = BytesIO(s.encode('utf-8'))
dla type, token, start, end, line w tokenize(f.readline):
jeżeli type == ENDMARKER:
przerwij
type = tok_name[type]
print("%(type)-10.10s %(token)-13.13r %(start)s %(end)s" % locals())
def roundtrip(f):
"""
Test roundtrip for `untokenize`. `f` is an open file or a string.
The source code in f is tokenized to both 5- and 2-tuples.
Both sequences are converted back to source code via
tokenize.untokenize(), and the latter tokenized again to 2-tuples.
The test fails if the 3 pair tokenizations do not match.
When untokenize bugs are fixed, untokenize with 5-tuples should
reproduce code that does not contain a backslash continuation
following spaces. A proper test should test this.
This function would be more useful for correcting bugs if it reported
the first point of failure, like assertEqual, rather than just
returning False -- or if it were only used in unittests and not
doctest and actually used assertEqual.
"""
# Get source code and original tokenizations
if isinstance(f, str):
code = f.encode('utf-8')
else:
code = f.read()
f.close()
readline = iter(code.splitlines(keepends=True)).__next__
tokens5 = list(tokenize(readline))
tokens2 = [tok[:2] for tok in tokens5]
# Reproduce tokens2 from pairs
bytes_from2 = untokenize(tokens2)
readline2 = iter(bytes_from2.splitlines(keepends=True)).__next__
tokens2_from2 = [tok[:2] for tok in tokenize(readline2)]
# Reproduce tokens2 from 5-tuples
bytes_from5 = untokenize(tokens5)
readline5 = iter(bytes_from5.splitlines(keepends=True)).__next__
tokens2_from5 = [tok[:2] for tok in tokenize(readline5)]
# Compare 3 versions
return tokens2 == tokens2_from2 == tokens2_from5
# This is an example from the docs, set up as a doctest.
def decistmt(s):
"""Substitute Decimals dla floats w a string of statements.
>>> z decimal zaimportuj Decimal
>>> s = 'print(+21.3e-5*-.1234/81.7)'
>>> decistmt(s)
"print (+Decimal ('21.3e-5')*-Decimal ('.1234')/Decimal ('81.7'))"
The format of the exponent is inherited from the platform C library.
Known cases are "e-007" (Windows) and "e-07" (not Windows). Since
we're only showing 11 digits, and the 12th isn't close to 5, the
rest of the output should be platform-independent.
>>> exec(s) #doctest: +ELLIPSIS
-3.2171603427...e-0...7
Output from calculations with Decimal should be identical across all
platforms.
>>> exec(decistmt(s))
-3.217160342717258261933904529E-7
"""
result = []
g = tokenize(BytesIO(s.encode('utf-8')).readline) # tokenize the string
for toknum, tokval, _, _, _ in g:
if toknum == NUMBER and '.' in tokval: # replace NUMBER tokens
result.extend([
(NAME, 'Decimal'),
# EP4/ep4.py
"""
BY FILLING IN THIS HEADER WITH MY NAME AND MY USP NUMBER,
I DECLARE THAT I AM THE SOLE AUTHOR OF AND PERSON RESPONSIBLE FOR THIS PROGRAM.
ALL ORIGINAL PARTS OF THIS PROGRAMMING EXERCISE (EP) WERE
DEVELOPED AND IMPLEMENTED BY ME FOLLOWING THE INSTRUCTIONS
OF THIS EP AND, THEREFORE, DO NOT CONSTITUTE ACADEMIC DISHONESTY,
A LACK OF ETHICS, OR PLAGIARISM.
I ALSO DECLARE THAT I AM RESPONSIBLE FOR ALL COPIES
OF THIS PROGRAM AND THAT I HAVE NOT DISTRIBUTED IT OR FACILITATED
ITS DISTRIBUTION. I AM AWARE THAT CASES OF PLAGIARISM AND
ACADEMIC DISHONESTY WILL BE HANDLED ACCORDING TO THE CRITERIA
PUBLISHED ON THE COURSE PAGE.
I UNDERSTAND THAT EPS WITHOUT A SIGNATURE WILL NOT BE GRADED AND,
EVEN SO, MAY BE PENALIZED FOR ACADEMIC DISHONESTY.
Name : <NAME>
NUSP : 10295429
References: Except for the routines provided in the assignment
statement and in class, if you used any reference,
list them below so that your program is not considered
plagiarism or irregular.
Example:
- The Quicksort algorithm was based on:
https://pt.wikipedia.org/wiki/Quicksort
http://www.ime.usp.br/~pf/algoritmos/aulas/quick.html
- Lecture slides
- Conversations between classmates discussing the batch
size, learning rate and other constant values they used in the EP
- Video about neural networks, explaining the problem in part 3:
https://www.youtube.com/watch?v=aircAruvnKk
"""
import nn
class PerceptronModel(object):
def __init__(self, dimensions):
"""
Initialize a new Perceptron instance.
A perceptron classifies data points as either belonging to a particular
class (+1) or not (-1). `dimensions` is the dimensionality of the data.
For example, dimensions=2 would mean that the perceptron must classify
2D points.
"""
self.w = nn.Parameter(1, dimensions)
def get_weights(self):
"""
Return a Parameter instance with the current weights of the perceptron.
"""
return self.w
def run(self, x):
"""
Calculates the score assigned by the perceptron to a data point x.
Inputs:
x: a node with shape (1 x dimensions)
Returns: a node containing a single number (the score)
"""
return nn.DotProduct(x, self.get_weights())
def get_prediction(self, x):
"""
Calculates the predicted class for a single data point `x`.
Returns: 1 or -1
"""
score = nn.as_scalar(self.run(x))
if score < 0:
return -1
return 1
def train(self, dataset):
"""
Train the perceptron until convergence.
"""
w = self.get_weights()
converged = False
while not converged:
converged = True
for x, y in dataset.iterate_once(1):
if self.get_prediction(x) != nn.as_scalar(y):
converged = False
w.update(x, nn.as_scalar(y))  # nudge the weights toward the true label y
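# Sketch of the rule applied above (standard perceptron training, assuming
# nn.Parameter.update(direction, multiplier) adds multiplier * direction to the
# weights): for a misclassified point (x, y) with y in {-1, +1}, the update
# w <- w + y * x moves the decision boundary toward classifying x correctly.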
class RegressionModel(object):
"""
A neural network model for approximating a function that maps from real
numbers to real numbers. The network should be sufficiently large to be able
to approximate sin(x) on the interval [-2pi, 2pi] to reasonable precision.
"""
def __init__(self):
# Initialize your model parameters here
self.w1 = nn.Parameter(1, 50)
self.b1 = nn.Parameter(1, 50)
self.w2 = nn.Parameter(50, 1)
self.b2 = nn.Parameter(1, 1)
def run(self, x):
"""
Runs the model for a batch of examples.
Inputs:
x: a node with shape (batch_size x 1)
Returns:
A node with shape (batch_size x 1) containing predicted y-values
"""
xw1 = nn.Linear(x, self.w1)
xw1b1 = nn.AddBias(xw1, self.b1)
relu1 = nn.ReLU(xw1b1)
xw2 = nn.Linear(relu1, self.w2)
return nn.AddBias(xw2, self.b2)
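# What run() computes, written out (a one-hidden-layer network; shapes taken
# from the parameters above): f(x) = ReLU(x @ W1 + b1) @ W2 + b2, mapping a
# (batch_size x 1) input through 50 hidden units back to a (batch_size x 1)
# prediction.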
def get_loss(self, x, y):
"""
Computes the loss for a batch of examples.
Inputs:
x: a node with shape (batch_size x 1)
y: a node with shape (batch_size x 1), containing the true y-values
to be used for training
Returns: a loss node
"""
return nn.SquareLoss(self.run(x), y)
def train(self, dataset):
"""
Trains the model.
"""
acceptable_loss = False
multiplier = 0.05
while not acceptable_loss:
for x, y in dataset.iterate_once(50):
loss = self.get_loss(x, y)
gradients = nn.gradients(
loss, [self.w1, self.b1, self.w2, self.b2])
self.w1.update(gradients[0], multiplier)
self.b1.update(gradients[1], multiplier)
self.w2.update(gradients[2], multiplier)
self.b2.update(gradients[3], multiplier)
new_loss = self.get_loss(nn.Constant(
dataset.x), nn.Constant(dataset.y))
if nn.as_scalar(new_loss) < 0.02:
acceptable_loss = True
class DigitClassificationModel(object):
"""
A model for handwritten digit classification using the MNIST dataset.
Each handwritten digit is a 28x28 pixel grayscale image, which is flattened
into a 784-dimensional vector for the purposes of this model. Each entry in
the vector is a floating point number between 0 and 1.
The goal is to sort each digit into one of 10 classes (number 0 through 9).
(See RegressionModel for more information about the APIs of different
methods here. We recommend that you implement the RegressionModel before
working on this part of the project.)
"""
def __init__(self):
# Initialize your model parameters here
self.w1 = nn.Parameter(784, 50)
self.b1 = nn.Parameter(1, 50)
self.w2 = nn.Parameter(50, 10)
self.b2 = nn.Parameter(1, 10)
def run(self, x):
"""
Runs the model for a batch of examples.
Your model should predict a node with shape (batch_size x 10),
containing scores. Higher scores correspond to greater probability of
the image belonging to a particular class.
Inputs:
x: a node with shape (batch_size x 784)
Output:
A node with shape (batch_size x 10) containing predicted scores
(also called logits)
"""
xw1 = nn.Linear(x, self.w1)
xw1b1 = nn.AddBias(xw1, self.b1)
relu1 = nn.ReLU(xw1b1)
xw2 = nn.Linear(relu1, self.w2)
return nn.AddBias(xw2, self.b2)
def get_loss(self, x, y):
"""
Computes the loss for a batch of examples.
The correct labels `y` are represented as a node with shape
(batch_size x 10). Each row is a one-hot vector encoding the correct
digit class (0-9).
Inputs:
x: a node with shape (batch_size x 784)
y: a node with shape (batch_size x 10)
Returns: a loss node
"""
return nn.SoftmaxLoss(self.run(x), y)
def train(self, dataset):
"""
Trains the model.
"""
acceptable_acc = False
multiplier = 0.1
while not acceptable_acc:
for x, y in dataset.iterate_once(50):
loss = self.get_loss(x, y)
gradients = nn.gradients(
loss, [self.w1, self.b1, self.w2, self.b2])
self.w1.update(gradients[0], multiplier)
self.b1.update(gradients[1], multiplier)
self.w2.update(gradients[2], multiplier)
self.b2.update(gradients[3], multiplier)
new_accuracy = dataset.get_validation_accuracy()
print(new_accuracy)
if new_accuracy >= 0.96:
acceptable_acc = True
class LanguageIDModel(object):
"""
A model for language identification at a single-word granularity.
(See RegressionModel for more information about the APIs of different
methods here. We recommend that you implement the RegressionModel before
working on this part of the project.)
"""
def __init__(self):
# Our dataset contains words from five different languages, and the
# combined alphabets of the five languages contain a total of 47 unique
# characters.
# You can refer to self.num_chars or len(self.languages) in your code
self.num_chars = 47
self.languages = ["English", "Spanish", "Finnish", "Dutch", "Polish"]
# Initialize your model parameters here
"*** YOUR CODE HERE ***"
def run(self, xs):
"""
Runs the model for a batch of examples.
Although words have different lengths, our data processing guarantees
that within a single batch, all words will be of the same length (L).
Here `xs` will be a list of length L. Each element of `xs` will be a
node with shape (batch_size x self.num_chars), where every row in the
array is a one-hot vector encoding of a character. For example, if we
have a batch of 8 three-letter words where the last word is "cat", then
xs[1] will be a node that contains a 1 at position (7, 0). Here the
index 7 reflects the fact that "cat" is the last word in the batch, and
        the index 0 reflects the fact that the letter "a" is the initial (0th)
letter of our combined alphabet for this task.
Your model should use a Recurrent Neural Network to summarize the list
`xs` into a single node of shape (batch_size x hidden_size), for your
choice of hidden_size. It should then calculate a node of shape
(batch_size x 5) containing scores, where higher scores correspond to
greater probability of the word originating from a particular language.
Inputs:
xs: a list with L elements (one per character), where each element
is a node with shape (batch_size x self.num_chars)
Returns:
A node with shape (batch_size x 5) containing predicted scores
(also called logits)
"""
"*** YOUR CODE HERE ***"
def get_loss(self, xs, y):
"""
Computes the loss for a batch of examples.
The correct labels `y` are represented as a node with shape
(batch_size x 5). Each row is a one-hot vector encoding the correct
language.
Inputs:
xs: a list with | |
# --------------------------------------------------------
# Swin Transformer
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>
# --------------------------------------------------------
import os
import time
import datetime
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.distributed as dist
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from timm.utils import accuracy, AverageMeter
from config import get_config
from models import build_model
from data import build_loader
from lr_scheduler import build_scheduler
from optimizer import build_optimizer
from logger import create_logger
from utils.storage import load_checkpoint, save_checkpoint, get_grad_norm as get_param_grad_norm, auto_resume_helper, reduce_tensor  # aliased so the local get_grad_norm wrapper below does not shadow it
from config import config
from utils.net_utils import constrainScoreByWhole, freeze_model_subnet, freeze_model_weights, fix_model_subnet  # fix_model_subnet is called below; assumed to be provided by utils.net_utils
from utils.compute_flops import print_model_param_flops_sparse, print_model_param_flops
import copy
try:
# noinspection PyUnresolvedReferences
from apex import amp
except ImportError:
amp = None
def main(config):
config.defrost()
dataset_train, dataset_val, data_loader_train, data_loader_val, mixup_fn = build_loader(config)
logger.info(f"Creating model:{config.MODEL.TYPE}/{config.MODEL.NAME}")
model = build_model(config)
model.cuda()
logger.info(str(model))
weight_opt, score_opt, weight_params = build_optimizer(config, model)
if config.AMP_OPT_LEVEL != "O0":
model, opt_l = amp.initialize(model, [weight_opt, score_opt], opt_level=config.AMP_OPT_LEVEL)
weight_opt = opt_l[0]
score_opt = opt_l[1]
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[config.LOCAL_RANK], broadcast_buffers=False, find_unused_parameters=True)
model_without_ddp = model.module
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
logger.info(f"number of params: {n_parameters}")
if hasattr(model_without_ddp, 'flops'):
flops = model_without_ddp.flops()
logger.info(f"number of GFLOPs: {flops / 1e9}")
lr_scheduler1, lr_scheduler2 = build_scheduler(config, weight_opt, score_opt, len(data_loader_train))
if config.AUG.MIXUP > 0.:
# smoothing is handled with mixup label transform
criterion = SoftTargetCrossEntropy()
elif config.MODEL.LABEL_SMOOTHING > 0.:
criterion = LabelSmoothingCrossEntropy(smoothing=config.MODEL.LABEL_SMOOTHING)
else:
criterion = torch.nn.CrossEntropyLoss()
max_accuracy = 0.0
if config.TRAIN.AUTO_RESUME:
resume_file = auto_resume_helper(config.OUTPUT)
if resume_file:
if config.MODEL.RESUME:
logger.warning(f"auto-resume changing resume file from {config.MODEL.RESUME} to {resume_file}")
config.MODEL.RESUME = resume_file
logger.info(f'auto resuming from {resume_file}')
else:
logger.info(f'no checkpoint found in {config.OUTPUT}, ignoring auto resume')
if config.MODEL.RESUME:
max_accuracy = load_checkpoint(config, model_without_ddp, weight_opt, score_opt, lr_scheduler1, lr_scheduler2, logger)
acc1, acc5, loss = validate(config, data_loader_val, model)
logger.info(f"Accuracy of the network on the {len(dataset_val)} test images: {acc1:.1f}%")
if config.EVAL_MODE:
return
if config.THROUGHPUT_MODE:
throughput(data_loader_val, model, logger)
return
logger.info("Start training")
start_time = time.time()
flops_reduction_list = []
config.finetuning = False
for epoch in range(config.TRAIN.START_EPOCH, config.TRAIN.EPOCHS):
data_loader_train.sampler.set_epoch(epoch)
train_one_epoch(config, model, criterion, data_loader_train, weight_opt, score_opt, epoch, mixup_fn, lr_scheduler1, lr_scheduler2)
if dist.get_rank() == 0 and (epoch % config.SAVE_FREQ == 0 or epoch == (config.TRAIN.EPOCHS - 1)):
save_checkpoint(config, epoch, model_without_ddp, max_accuracy, weight_opt, score_opt, lr_scheduler1, lr_scheduler2, logger)
if epoch % 1 == 0 and "Dense" in config.conv_type:
print_model_param_flops(model, c=3, input_res=224, multiply_adds=False)
if epoch % 1 == 0 and "Dense" not in config.conv_type:
print("=> compute model params and flops")
flops_reduction = print_model_param_flops_sparse(model, c=3, input_res=224, multiply_adds=False)
flops_reduction_list.append(flops_reduction.item())
print("avg train cost/ savings", sum(flops_reduction_list)/len(flops_reduction_list), 3/(4*sum(flops_reduction_list)/len(flops_reduction_list)))
torch.cuda.empty_cache()
acc1, acc5, loss = validate(config, data_loader_val, model)
logger.info(f"Accuracy of the network on the {len(dataset_val)} test images: {acc1:.1f}%")
max_accuracy = max(max_accuracy, acc1)
logger.info(f'Max accuracy: {max_accuracy:.2f}%')
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
logger.info('Training time {}'.format(total_time_str))
if config.finetune:
print("continue finetune")
max_accuracy = 0
config.K = 1
config.finetuning = True
freeze_model_subnet(model)
fix_model_subnet(model)
finetune_optimizer = torch.optim.AdamW(weight_params, eps=config.TRAIN.OPTIMIZER.EPS, betas=config.TRAIN.OPTIMIZER.BETAS,
lr=config.TRAIN.BASE_LR*1e-2, weight_decay=config.TRAIN.WEIGHT_DECAY)
if config.sample_from_training_set:
config.use_running_stats = False
i = 0
BESTACC1, BESTIDX = 0, 0
BESTSTATEDICT = None
BESTMODEL = copy.deepcopy(model)
while i < 10:
i += 1
acc1, acc5, loss = validate(config, data_loader_train, model)
print(acc1, BESTACC1, i)
if acc1 > BESTACC1:
BESTACC1 = acc1
BESTIDX = i
BESTMODEL = copy.deepcopy(model)
print(BESTACC1, BESTIDX)
for n, m in model.named_modules():
if hasattr(m, "scores") and m.prune:
m.subnet = (torch.rand_like(m.scores) < m.clamped_scores).float()
model = copy.deepcopy(BESTMODEL)
config.use_running_stats = True
for epoch in range(0, 40):
data_loader_train.sampler.set_epoch(epoch)
train_one_epoch(config, model, criterion, data_loader_train, finetune_optimizer, None, epoch, mixup_fn,
None, None)
acc1, acc5, loss = validate(config, data_loader_val, model)
logger.info(f"Accuracy of the network on the {len(dataset_val)} test images: {acc1:.1f}%")
max_accuracy = max(max_accuracy, acc1)
logger.info(f'Max accuracy: {max_accuracy:.2f}%')
def master_params(optimizer1, optimizer2):
"""
    Generator that iterates over the parameters owned by both optimizers.
Args:
optimizer1: An optimizer previously returned from ``amp.initialize``.
optimizer2: An optimizer previously returned from ``amp.initialize``.
"""
for group in optimizer1.param_groups:
for p in group['params']:
yield p
for group in optimizer2.param_groups:
for p in group['params']:
yield p
def perform_backward(loss, weight_opt, score_opt):
if config.AMP_OPT_LEVEL != "O0":
with amp.scale_loss(loss, [weight_opt, score_opt]) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
def get_grad_norm(model, weight_opt, score_opt):
if config.AMP_OPT_LEVEL != "O0":
if config.TRAIN.CLIP_GRAD:
grad_norm = torch.nn.utils.clip_grad_norm_(master_params(weight_opt, score_opt), config.TRAIN.CLIP_GRAD)
else:
            grad_norm = get_param_grad_norm(master_params(weight_opt, score_opt))
else:
if config.TRAIN.CLIP_GRAD:
grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), config.TRAIN.CLIP_GRAD)
else:
            grad_norm = get_param_grad_norm(model.parameters())
return grad_norm
def calculateGrad(model, fn_avg, fn_list, args):
for n, m in model.named_modules():
if hasattr(m, "scores") and m.prune:
m.scores.grad.data += 1/(args.K-1)*(fn_list[0] - fn_avg)*getattr(m, 'stored_mask_0') + 1/(args.K-1)*(fn_list[1] - fn_avg)*getattr(m, 'stored_mask_1')
def calculateGrad_pge(model, fn_list, args):
for n, m in model.named_modules():
if hasattr(m, "scores") and m.prune:
m.scores.grad.data += 1/args.K*(fn_list[0]*getattr(m, 'stored_mask_0')) + 1/args.K*(fn_list[1]*getattr(m, 'stored_mask_1'))
def train_one_epoch(config, model, criterion, data_loader, weight_opt, score_opt, epoch, mixup_fn, lr_scheduler1, lr_scheduler2):
model.train()
num_steps = len(data_loader)
batch_time = AverageMeter()
loss_meter = AverageMeter()
norm_meter = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
v_meter = AverageMeter()
max_score_meter = AverageMeter()
start = time.time()
end = time.time()
for idx, (samples, targets) in enumerate(data_loader):
samples = samples.cuda(non_blocking=True)
targets = targets.cuda(non_blocking=True)
original_targets = targets.clone()
l, ol, gl, al, a1, a5, ll = 0, 0, 0, 0, 0, 0, 0
if weight_opt is not None:
weight_opt.zero_grad()
if score_opt is not None:
score_opt.zero_grad()
if mixup_fn is not None:
samples, targets = mixup_fn(samples, targets)
fn_list = []
for j in range(config.K):
config.j = j
outputs = model(samples)
original_loss = criterion(outputs, targets)
loss = original_loss/config.K
fn_list.append(loss.item()*config.K)
perform_backward(loss, weight_opt, score_opt)
acc1, acc5 = accuracy(outputs, original_targets, topk=(1, 5))
l = l + loss.item()
ol = ol + original_loss.item() / config.K
a1 = a1 + acc1.item() / config.K
a5 = a5 + acc5.item() / config.K
fn_avg = l
if not config.finetuning:
if config.conv_type == "ReinforceLOO" or config.conv_type == "ReinforceLOOVR" or config.conv_type == "ReinforceLOOVRWeight":
calculateGrad(model, fn_avg, fn_list, config)
if config.conv_type == "Reinforce":
calculateGrad_pge(model, fn_list, config)
grad_norm = get_grad_norm(model, weight_opt, score_opt)
if weight_opt is not None:
weight_opt.step()
if score_opt is not None:
score_opt.step()
if lr_scheduler1 is not None:
lr_scheduler1.step_update(epoch * num_steps + idx)
if lr_scheduler2 is not None:
lr_scheduler2.step_update(epoch * num_steps + idx)
torch.cuda.synchronize()
loss_meter.update(l, targets.size(0))
norm_meter.update(grad_norm)
batch_time.update(time.time() - end)
top1.update(a1, targets.size(0))
top5.update(a5, targets.size(0))
end = time.time()
if config.conv_type == "ReinforceLOO" or config.conv_type == "ReinforceLOOVR" or config.conv_type == "Reinforce" or config.conv_type == "ReinforceLOOVRWeight":
if not config.finetuning:
with torch.no_grad():
constrainScoreByWhole(model, v_meter, max_score_meter)
if idx % config.PRINT_FREQ == 0:
lr1 = weight_opt.param_groups[0]['lr'] if weight_opt is not None else 0
lr2 = score_opt.param_groups[0]['lr'] if score_opt is not None else 0
etas = batch_time.avg * (num_steps - idx)
logger.info(
f'Train: [{epoch}/{config.TRAIN.EPOCHS}][{idx}/{num_steps}]\t'
f'eta {datetime.timedelta(seconds=int(etas))} w_lr {lr1:.6f} s_lr {lr2:.6f}\t'
f'time {batch_time.val:.4f} ({batch_time.avg:.4f})\t'
f'loss {loss_meter.val:.4f} ({loss_meter.avg:.4f})\t'
f'top1 {top1.val:.4f} ({top1.avg:.4f})\t'
f'top5 {top5.val:.4f} ({top5.avg:.4f})\t'
f'v_meter {v_meter.val:.4f} ({v_meter.avg:.4f})\t'
f'max_score_meter {max_score_meter.val:.4f} ({max_score_meter.avg:.4f})\t'
)
epoch_time = time.time() - start
logger.info(f"EPOCH {epoch} training takes {datetime.timedelta(seconds=int(epoch_time))}")
@torch.no_grad()
def validate(config, data_loader, model):
criterion = torch.nn.CrossEntropyLoss()
loss_meter = AverageMeter()
acc1_meter = AverageMeter()
acc5_meter = AverageMeter()
if config.use_running_stats:
model.eval()
with torch.no_grad():
for idx, (images, target) in enumerate(data_loader):
images = images.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
output = model(images)
loss = criterion(output, target)
acc1, acc5 = accuracy(output, target, topk=(1, 5))
acc1 = reduce_tensor(acc1)
acc5 = reduce_tensor(acc5)
loss = reduce_tensor(loss)
loss_meter.update(loss.item(), target.size(0))
acc1_meter.update(acc1.item(), target.size(0))
acc5_meter.update(acc5.item(), target.size(0))
if idx % config.PRINT_FREQ == 0:
logger.info(
f'Test: [{idx}/{len(data_loader)}]\t'
f'Loss {loss_meter.val:.4f} ({loss_meter.avg:.4f})\t'
f'Acc@1 {acc1_meter.val:.3f} ({acc1_meter.avg:.3f})\t'
f'Acc@5 {acc5_meter.val:.3f} ({acc5_meter.avg:.3f})\t'
)
logger.info(f' * Acc@1 {acc1_meter.avg:.3f} Acc@5 {acc5_meter.avg:.3f}')
return acc1_meter.avg, acc5_meter.avg, loss_meter.avg
@torch.no_grad()
def throughput(data_loader, model, logger):
model.eval()
for idx, (images, _) in enumerate(data_loader):
images = images.cuda(non_blocking=True)
batch_size = images.shape[0]
for i in range(50):
model(images)
torch.cuda.synchronize()
logger.info(f"throughput averaged with 30 times")
tic1 = time.time()
for i in range(30):
model(images)
torch.cuda.synchronize()
tic2 = time.time()
logger.info(f"batch_size {batch_size} throughput {30 * batch_size / (tic2 - tic1)}")
return
if __name__ == '__main__':
if config.AMP_OPT_LEVEL != "O0":
assert amp is not None, "amp not installed!"
if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
rank = int(os.environ["RANK"])
world_size = int(os.environ['WORLD_SIZE'])
print(f"RANK and WORLD_SIZE in environ: {rank}/{world_size}")
else:
rank = -1
world_size = -1
torch.cuda.set_device(config.LOCAL_RANK)
torch.distributed.init_process_group(backend='nccl', init_method='env://', world_size=world_size, rank=rank)
torch.distributed.barrier()
seed = config.SEED + dist.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
cudnn.benchmark = True
# linear scale the learning rate according to total batch size, may not be optimal
linear_scaled_lr = config.TRAIN.BASE_LR * config.DATA.BATCH_SIZE * dist.get_world_size() / 512.0
linear_scaled_warmup_lr = config.TRAIN.WARMUP_LR * config.DATA.BATCH_SIZE * dist.get_world_size() / 512.0
linear_scaled_min_lr = config.TRAIN.MIN_LR * config.DATA.BATCH_SIZE * dist.get_world_size() / 512.0
# gradient accumulation also need to scale the learning rate
if config.TRAIN.ACCUMULATION_STEPS > 1:
linear_scaled_lr = linear_scaled_lr * config.TRAIN.ACCUMULATION_STEPS
linear_scaled_warmup_lr = linear_scaled_warmup_lr * config.TRAIN.ACCUMULATION_STEPS
linear_scaled_min_lr = linear_scaled_min_lr * config.TRAIN.ACCUMULATION_STEPS
config.TRAIN.BASE_LR = linear_scaled_lr
config.TRAIN.WARMUP_LR = linear_scaled_warmup_lr
config.TRAIN.MIN_LR = | |
from taskinit import *
import time
import os
import re
import json
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
import partitionhelper as ph
from mpi4casa.MPICommandClient import MPICommandClient
# pieflag is released under a BSD 3-Clause License
# See LICENSE for details
# HISTORY:
# 1.0 2005 Initial version by <NAME>, designed
# for use with customized UVLIST in MIRIAD
# 1.1 Jan2006 Various upgrades by Enno Middelberg
# 2.0 31Oct2014 Release of updated and CASA-compatible version
# written by <NAME>
# 2.1 26Nov2014 Fixed subscan bug (only operate on '0') and
# logger default value printout
# 2.2 25Mar2015 Updated handling for pre-flagged baselines and
# hid unimportant runtime display messages
# 3.0 28Mar2015 Enabled parallel processing
# 3.1 10Jun2015 Added error messages for SEFD extrapolation
# and integration time rounding problem, and
# fixed default numthreads
# 3.2 19Feb2016 Fixed parallel processing bug, enabled
# operation using DATA column, and removed
# lock file deletion
# 4.0 4Aug2016 Upgraded to use MPI parallelism in CASA 4.6.0+
# 4.1 13Oct2016 Fixed license, no changes to code
# 4.2 24Oct2016 Updated code category, no changes to code
# 4.3 25Oct2016 Fixed version number (affects 4.1, 4.2)
# 4.4 26Oct2016 Removed flag_row check, CASA does not
# currently respect this column properly
#
# See additional information in pieflag function below
def pieflag_getflagstats(vis,field,spw,npol,feedbasis):
#casalog.filter('WARN')
af.open(msname=vis)
af.selectdata(field=str(field),spw=str(spw))
ag0={'mode':'summary','action':'calculate'}
af.parseagentparameters(ag0)
af.init()
temp=af.run(writeflags=False)
af.done()
#casalog.filter('INFO')
if feedbasis:
RRf=temp['report0']['correlation']['RR']['flagged']
RRt=temp['report0']['correlation']['RR']['total']
LLf=temp['report0']['correlation']['LL']['flagged']
LLt=temp['report0']['correlation']['LL']['total']
else:
RRf=temp['report0']['correlation']['XX']['flagged']
RRt=temp['report0']['correlation']['XX']['total']
LLf=temp['report0']['correlation']['YY']['flagged']
LLt=temp['report0']['correlation']['YY']['total']
TOTf=temp['report0']['flagged']
TOTt=temp['report0']['total']
flagstats=np.array([RRf,RRt,LLf,LLt,TOTf,TOTt])
if npol == 4:
if feedbasis:
RLf=temp['report0']['correlation']['RL']['flagged']
RLt=temp['report0']['correlation']['RL']['total']
LRf=temp['report0']['correlation']['LR']['flagged']
LRt=temp['report0']['correlation']['LR']['total']
else:
RLf=temp['report0']['correlation']['XY']['flagged']
RLt=temp['report0']['correlation']['XY']['total']
LRf=temp['report0']['correlation']['YX']['flagged']
LRt=temp['report0']['correlation']['YX']['total']
flagstats=np.append(flagstats,[RLf,RLt,LRf,LRt])
return flagstats
def pieflag_flag(vis,datacol,nthreads,field,
vtbleLIST,inttime,nant,
ddid,spw,refchan,nchan,npol,feedbasis,
fitorderLIST,sefdLIST,
staticflag,madmax,binsamples,
dynamicflag,chunktime,stdmax,maxoffset,
extendflag,boxtime,boxthresh):
# Go through each baseline, spw, channel, and polarization and compare to reference channel
# while accounting for a spectral fit and the SEFD.
# Perform static, dynamic, and extend operations if requested
casalog.filter('INFO')
if nthreads > 1:
threadID = MPIEnvironment.mpi_processor_rank
casalog.post(' thread '+str(threadID)+'/'+str(nthreads)+' status: 0% complete (updates delivered every 10%)')
else:
casalog.post(' status: 0% complete (updates delivered every 10%)')
vtble = np.array(vtbleLIST)
sefd = np.array(sefdLIST)
fitorder = np.array(fitorderLIST)
tb.open(vis)
temp_ant1 = tb.getcol('ANTENNA1')
temp_ant2 = tb.getcol('ANTENNA2')
tb.close()
ant1 = temp_ant1[0]
ant2 = temp_ant2[0]
# get number of baselines from unique antenna combinations
nb = np.vstack(set(map(tuple, np.transpose(np.array([temp_ant1,temp_ant2])) ))).shape[0]
nspw=len(spw)
if feedbasis:
pSTR = ['RR']
if npol == 2:
pSTR.append('LL')
elif npol == 4:
pSTR.append('RL')
pSTR.append('LR')
pSTR.append('LL')
else:
pSTR = ['XX']
if npol == 2:
pSTR.append('YY')
elif npol == 4:
pSTR.append('XY')
pSTR.append('YX')
pSTR.append('YY')
# dim0 --> npol=2: 0=RR, 1=LL
# npol=4: 0=RR, 1=RL, 2=LR, 3=LL
specfitcoeffS=np.zeros((npol,max(fitorder)+1))
# rc = reference channel
# rcx = frequency in Hz for static flagging
rcx=np.zeros(nspw)
for i in range(nspw):
rcx[i] = vtble[refchan[i]][spw[i]]
# S = static
# Srcy: dim2=(median visibility amplitude, median absolute deviation)
Srcy=np.zeros((nspw,npol,2))
if extendflag:
kernellen = int(boxtime/inttime)
#kernel = np.ones(kernellen)
tb.open(vis)
ms.open(vis,nomodify=False)
printupdate=np.ones(9).astype(bool)
printcounter=1
checkprint=True
for b in range(nb):
if checkprint:
if printupdate[printcounter-1] and b+1>nb/10*printcounter:
if nthreads > 1:
casalog.post(' thread '+str(threadID)+'/'+str(nthreads)+' status: '+str(10*printcounter)+'% complete')
else:
casalog.post(' status: '+str(10*printcounter)+'% complete')
printupdate[printcounter-1]=False
printcounter+=1
if printcounter > 9:
checkprint=False
# get reference channel median and MAD for static flagging
validspw = np.zeros((npol,nspw))
for s in range(nspw):
for p in range(npol):
tempstr1 = '([select from '+vis+' where ANTENNA1=='+str(ant1)+' && ANTENNA2=='+str(ant2)+\
' && FIELD_ID=='+str(field)+' && DATA_DESC_ID=='+str(ddid[s])+\
' && FLAG['+str(p)+','+str(refchan[s])+']==False giving '
# ' && WEIGHT['+str(p)+']>0 giving '
tempstr2 = '[abs('+datacol.upper()+'['+str(p)+','+str(refchan[s])+'])]])'
tempval = tb.calc('count'+tempstr1+tempstr2)[0]
if tempval > 0:
validspw[p][s] = 1
if staticflag:
Srcy[s][p][0] = tb.calc('median'+tempstr1+tempstr2)[0]
tempstr3 = '[abs(abs('+datacol.upper()+'['+str(p)+','+str(refchan[s])+'])-'+\
str(Srcy[s][p][0])+')]])'
Srcy[s][p][1] = tb.calc('median'+tempstr1+tempstr3)[0]
else:
# If the reference channel for any one polarization isn't present,
# flag all data on this baseline in this spw.
# You won't be able to do static or dynamic flagging (nor extend flagging as a result).
# This part of the loop shouldn't get activated much on unflagged data because the
# user should have picked a suitable reference channel in each spw.
validspw[0][s] = 0
casalog.filter('WARN')
ms.reset()
try:
ms.msselect({'field':str(field),'baseline':str(ant1)+'&&'+str(ant2),'spw':str(spw[s])})
tempflag = ms.getdata('flag')
tempflag['flag'][:]=True
ms.putdata(tempflag)
casalog.filter('INFO')
except:
# this gets triggered if the entire baseline is already flagged
casalog.filter('INFO')
break
# get static spectral fits for each polarization
if staticflag:
tempfitorderS = np.copy(fitorder)
for p in range(npol):
# check that there are enough spw's to fit the requested spectral order
if sum(validspw[p]) > 0:
if tempfitorderS[p] > sum(validspw[p])-1:
if sum(validspw[p]) == 2:
tempfitorderS[p] = 1
else:
tempfitorderS[p] = 0
casalog.post('*** WARNING: staticflag fitorder for baseline ant1='+str(ant1)+' ant2='+str(ant2)+\
' pol='+pSTR[p]+' has been reduced to '+str(int(tempfitorderS[p])),'WARN')
# use MAD to weight the points
# (not mathematically correct, should be standard error, but OK approximation)
specfitcoeffS[p,0:tempfitorderS[p]+1] = np.polyfit(np.log10(rcx[validspw[p]>0]),\
np.log10(Srcy[0:,p,0][validspw[p]>0]),\
tempfitorderS[p],w=1.0/np.log10(Srcy[0:,p,1][validspw[p]>0]))
if dynamicflag and sum(validspw[0]) > 0:
# Don't assume that the same number of integrations (dump times) are present in each spw.
# This requirement makes the code messy
casalog.filter('WARN')
ms.reset()
ms.msselect({'field':str(field),'baseline':str(ant1)+'&&'+str(ant2)})
ms.iterinit(interval=chunktime,columns=['TIME'],adddefaultsortcolumns=False)
# get number of chunks and initialize arrays
ms.iterorigin()
moretodo=True
nchunks = 0
while moretodo:
nchunks += 1
moretodo = ms.iternext()
# start and end timestamp for each chunk
timestamps = np.zeros((nchunks,2))
# D = dynamic
# dim3 (per chunk) --> 0=reference channel median, 1=reference channel standard deviation
Drcy=np.zeros((nspw,npol,nchunks,2))
validspwD = np.zeros((npol,nchunks,nspw))
ms.iterorigin()
moretodo=True
chunk = 0
while moretodo:
tempflagD = ms.getdata('flag')['flag']
tempdataD = abs(ms.getdata(datacol.lower())[datacol.lower()])
tempddidD = ms.getdata('data_desc_id')['data_desc_id']
for s in range(nspw):
for p in range(npol):
# messy...
messydata1 = tempdataD[p,refchan[s]][tempflagD[p,refchan[s]]==False]
if len(messydata1) > 0:
messyddid = tempddidD[tempflagD[p,refchan[s]]==False]
messydata2 = messydata1[messyddid==ddid[s]]
if len(messydata2) > 0:
validspwD[p,chunk,s] = 1
Drcy[s,p,chunk,0] = np.median(messydata2)
Drcy[s,p,chunk,1] = np.std(messydata2)
# Get start and end timestamps so the data can be matched up later.
# The overall timespan reported here will be equal to or greater
# than the timespan reported below when ms.getdata is run on an
# individual spw, because we need to account for the possible
# presence of some spw's with less integrations. Messy...
temptimeD = ms.getdata('time')['time']
timestamps[chunk,0] = min(temptimeD)
timestamps[chunk,1] = max(temptimeD)
chunk += 1
moretodo = ms.iternext()
# get dynamic spectral fits for each polarization
tempfitorderD = np.zeros((nchunks,len(fitorder)))
for i in range(len(fitorder)):
tempfitorderD[:,i] = fitorder[i]
# dim0 --> npol=2: 0=RR, 1=LL
# npol=4: 0=RR, 1=RL, 2=LR, 3=LL
specfitcoeffD=np.zeros((npol,nchunks,max(fitorder)+1))
ms.iterorigin()
moretodo=True
chunk = 0
while moretodo:
for p in range(npol):
# check that there are enough spw's to fit the requested spectral order
if sum(validspwD[p,chunk]) > 0:
if tempfitorderD[chunk,p] > sum(validspwD[p,chunk])-1:
if sum(validspwD[p,chunk]) == 2:
tempfitorderD[chunk,p] = 1
else:
tempfitorderD[chunk,p] = 0
# native time is MJD seconds
t1=qa.time(qa.quantity(timestamps[chunk,0],'s'),form='ymd')[0]
t2=qa.time(qa.quantity(timestamps[chunk,1],'s'),form='d')[0]
casalog.post('*** WARNING: dynamicflag fitorder for baseline ant1='+str(ant1)+' ant2='+str(ant2)+\
' pol='+pSTR[p]+' time='+t1+'-'+t2+\
' has been reduced to '+str(int(tempfitorderD[chunk,p])),'WARN')
# prevent numerical warning when MAD=0 (ie single sample)
tempDrcy = Drcy[0:,p,chunk,1][validspwD[p,chunk]>0]
tempDrcy[tempDrcy==0] = 1e-200
specfitcoeffD[p,chunk,0:tempfitorderD[chunk,p]+1] = \
np.polyfit(np.log10(rcx[validspwD[p,chunk]>0]),np.log10(Drcy[0:,p,chunk,0][validspwD[p,chunk]>0]),\
tempfitorderD[chunk,p],w=1.0/np.log10(tempDrcy))
chunk += 1
moretodo = ms.iternext()
casalog.filter('INFO')
for s in range(nspw):
if validspw[0,s] > 0:
casalog.filter('WARN')
ms.reset()
ms.msselect({'field':str(field),'baseline':str(ant1)+'&&'+str(ant2),'spw':str(spw[s])})
# get data for this spw, accounting for existing flags
tempflag = ms.getdata('flag')
tempdata = abs(ms.getdata(datacol.lower())[datacol.lower()])
tempflagpf = np.zeros(tempdata.shape)
temptime = ms.getdata('time')['time']
casalog.filter('INFO')
if staticflag:
windowtime = binsamples * inttime
window = []
casalog.filter('WARN')
ms.iterinit(interval=windowtime)
ms.iterorigin()
# get number of time steps
moretodo=True
while moretodo:
# select from dummy column with small data size, eg int 'antenna1'
# (could also have used float 'time'...)
window.append(len(ms.getdata('antenna1')['antenna1']))
moretodo = ms.iternext()
casalog.filter('INFO')
for f in range(nchan):
# this shouldn't matter, but enforce that flagging
# doesn't take place on the reference channel
if f == refchan[s]:
continue
for p in range(npol):
if tempfitorderS[p] > 0:
specfit = 10.0**(np.poly1d(specfitcoeffS[p,0:tempfitorderS[p]+1])(np.log10(vtble[f][spw[s]])))
else:
specfit = Srcy[s][p][0]
# difference to median of reference channel, accounting for spectrum and sefd
tempdatachan = np.multiply(abs((tempdata[p][f]-specfit)/sefd[s][f]),np.invert(tempflag['flag'][p][f]))
tempbad = np.zeros(tempdatachan.shape)
tempbad[tempdatachan>=Srcy[s,p,1]*madmax] = 1
tempbad[tempdatachan>=Srcy[s,p,1]*madmax*2] += 1
# iterate in units of binsamples*inttime
# flag entire window if sum of badness values >=2
# if flagging needs to take place in one polarization, just flag them all
j=0
for w in window:
if sum(tempbad[j:j+w]) >= 2:
tempflagpf[0:npol,f,j:j+w] = 1
tempflag['flag'][0:npol,f,j:j+w] = True
| |
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from modpy.random import normal_pdf, UniformDist, JointDistribution
from modpy.design import kennard_stone_design
from modpy.optimize import mmo, nlprog, cma_es, bayesian_proposal, NonlinearConstraint
from modpy.optimize._bayesian import _expected_improvement, _probability_of_improvement, _upper_confidence_bound
from modpy.proxy import OrdinaryKrigingModel
from modpy.stats import hamiltonian_mc, posterior_ensemble
from modpy.plot.plot_util import subplot_layout, cm_parula, default_color, set_font_sizes
from modpy.illustration.illustration_util import CASE_PATH, test_function_2d
def _configure_axes_1D(ax, lim):
ax.set_xlim(*lim)
ax.set_ylim([-0.03, 0.15])
ax.set_xlabel('$\\theta$')
ax.set_ylabel('$f_{obj}(\\theta)$')
ax.grid(True)
ax.legend()
def _configure_axes_2D(ax):
ax.set_xlim([-5., 5.])
ax.set_ylim([-5., 5.])
ax.set_xlabel('$\\theta_1$')
ax.set_ylabel('$\\theta_2$')
def _plot_acquisition_functions():
seed = 1234
def obj(x):
return normal_pdf(x, 15., 5.) + normal_pdf(x, 30., 4.) + normal_pdf(x, 40., 5.)
# initial design of experiment
x_doe = np.array([5., 18., 25., 44.])
y_doe = obj(x_doe)
# for illustration of underlying objective function
x_true = np.linspace(0., 50., 500)
y_true = obj(x_true)
# set up optimizer -------------------------------------------------------------------------------------------------
# set bounds
x_min = 0.
x_max = 50.
# set up proxy model -----------------------------------------------------------------------------------------------
proxy = OrdinaryKrigingModel(x_doe, y_doe, 'gaussian', seed=seed)
theta0 = np.array([10.])
proxy.initialize_ML(theta0)
proxy.define_weights(x_true)
mu = proxy.mean()
sigma = proxy.std()
# plot figure ------------------------------------------------------------------------------------------------------
r, c = subplot_layout(4)
fig, axes = plt.subplots(r, c, figsize=(20, 14))
axes = axes.flatten()
def plot_problem(ax):
# plot true objective function
ax.plot(x_true, y_true, color='gray', lw=2, label='True', zorder=2.5)
ax.scatter(x_doe, y_doe, 20, color='k', label='DoE', zorder=5)
ax.plot(x_true, mu, color='m', label='Proxy', zorder=3)
# plot the problem
ax = axes[0]
plot_problem(ax)
ax.plot(x_true, mu + 2. * sigma, color='m', ls='--', label='95% conf.', zorder=3)
ax.plot(x_true, mu - 2. * sigma, color='m', ls='--', zorder=3)
ax.set_title('Optimization Problem')
_configure_axes_1D(ax, [x_min, x_max])
set_font_sizes(ax, 13)
# calculate acquisition functions
kappa = 3.
xi = 0.01
y_max = np.amax(y_doe)
a_ei = _expected_improvement(mu, sigma, y_max, xi)
a_pi = _probability_of_improvement(mu, sigma, y_max, xi)
a_ucb = _upper_confidence_bound(mu, sigma, kappa=kappa)
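    # For reference, the standard acquisition definitions (which the modpy
    # helpers above are assumed to implement), with z = (mu - y_max - xi) / sigma:
    #   EI(x)  = (mu - y_max - xi) * Phi(z) + sigma * phi(z)   (expected improvement)
    #   PI(x)  = Phi(z)                                        (probability of improvement)
    #   UCB(x) = mu + kappa * sigma                            (upper confidence bound)
    # where Phi and phi are the standard normal CDF and PDF.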
# plot expected improvement
ax = axes[1]
plot_problem(ax)
axt = ax.twinx()
axt.plot(x_true, a_ei, color='b', label='Acquisition', zorder=4)
_configure_axes_1D(ax, [x_min, x_max])
ax.set_title('Expected Improvement')
ax.legend(handles=[Line2D([], [], color='gray', label='True'),
Line2D([], [], color='m', label='Proxy'),
Line2D([], [], color='b', label='Bayesian'),
Line2D([], [], color='k', label='DoE', marker='o', ls='')],
loc='upper left')
axt.yaxis.set_visible(False)
axt.yaxis.set_ticks([])
set_font_sizes(ax, 13)
# plot probability of improvement
ax = axes[2]
plot_problem(ax)
axt = ax.twinx()
axt.plot(x_true, a_pi, color='b', label='Acquisition', zorder=4)
_configure_axes_1D(ax, [x_min, x_max])
ax.set_title('Probability of Improvement')
ax.legend(handles=[Line2D([], [], color='gray', label='True'),
Line2D([], [], color='m', label='Proxy'),
Line2D([], [], color='b', label='Bayesian'),
Line2D([], [], color='k', label='DoE', marker='o', ls='')],
loc='upper left')
axt.yaxis.set_visible(False)
axt.yaxis.set_ticks([])
set_font_sizes(ax, 13)
    # plot upper confidence bound
ax = axes[3]
plot_problem(ax)
ax.plot(x_true, a_ucb, color='b', label='Acquisition', zorder=4)
_configure_axes_1D(ax, [x_min, x_max])
ax.set_title('Upper Confidence Bound')
set_font_sizes(ax, 13)
fig.savefig(CASE_PATH + 'acquisition_functions_1D.png')
def _plot_bayesian_sequential_1D():
seed = 1234
def obj(x):
return normal_pdf(x, 15., 5.) + normal_pdf(x, 30., 4.) + normal_pdf(x, 40., 5.)
# initial design of experiment
x_doe = np.array([5., 18., 25., 44.])
y_doe = obj(x_doe)
# for illustration of underlying objective function
x_true = np.linspace(0., 50., 500)
y_true = obj(x_true)
# set up optimizer -------------------------------------------------------------------------------------------------
# set bounds
x_min = 0.
x_max = 50.
bounds_ = ((x_min, x_max),)
# set up optimization algorithm
x0 = np.array([25.])
def opt(obj_):
return (cma_es(obj_, x0, bounds=bounds_, constraints=(), sigma0=10., lam=100, lbound=-1e-5, max_restart=10),)
# set up proxy model -----------------------------------------------------------------------------------------------
proxy = OrdinaryKrigingModel(x_doe, y_doe, 'gaussian', seed=seed)
theta0 = np.array([10.])
proxy.initialize_ML(theta0)
# prepare figure ---------------------------------------------------------------------------------------------------
loops = 4
r, c = subplot_layout(loops + 2)
fig, axes = plt.subplots(r, c, figsize=(20, 14))
axes = axes.flatten()
# plot true objective function
ax = axes[0]
ax.plot(x_true, y_true, color='gray', lw=2, label='True', zorder=2.5)
ax.scatter(x_doe, y_doe, 20, color='k', label='Initial DoE', zorder=5)
_configure_axes_1D(ax, [x_min, x_max])
ax.set_title('Design of Experiment')
# main optimization loop -------------------------------------------------------------------------------------------
x_all = np.array(x_doe)
y_all = np.array(y_doe)
for i in range(loops):
n_sim = y_all.size
# prepare proxy model function
def model(x):
proxy.define_weights(x)
return proxy.mean(), proxy.std()
# find new simulation parameters based on bayesian proposal
y_max = np.amax(y_all)
xi = 0.01
proposals = bayesian_proposal(model, opt, acq='EI', args=(y_max, xi))
# create progress chart
ax = axes[i + 1]
axt = ax.twinx()
# plot true objective function
ax.plot(x_true, y_true, color='gray', lw=2, label='True', zorder=2.5)
# plot proxy model and +- 2 sigma bounds
proxy.define_weights(x_true)
mu = proxy.mean()
sigma = proxy.std()
ax.plot(x_true, mu, color='m', label='Proxy', zorder=3)
# plot objective function
y_obj = _expected_improvement(mu, sigma, y_max, xi)
axt.plot(x_true, y_obj, color='b', label='Bayesian', zorder=4)
# plot all existing sample points
ax.scatter(x_all, y_all, 20, color='k', label='Samples', zorder=5)
if proposals:
# extract proposal points and associated objective value
x_pro = proposals[0].x
y_pro_obj = proposals[0].f
# simulate new proposals
y_pro = obj(x_pro)
# plot proposed samples
axt.scatter(x_pro, y_pro_obj, 20, color='g', label='Proposal', zorder=6)
# update proxy-model
proxy.update(x_pro, y_pro)
# add proposals to all samples
x_all = np.append(x_all, x_pro)
y_all = np.append(y_all, y_pro)
# configure figure
_configure_axes_1D(ax, [x_min, x_max])
ax.legend(handles=[Line2D([], [], color='gray', label='True'),
Line2D([], [], color='m', label='Proxy'),
Line2D([], [], color='b', label='Bayesian'),
Line2D([], [], color='k', label='DoE', marker='o', ls='')],
loc='upper left')
axt.yaxis.set_visible(False)
axt.yaxis.set_ticks([])
ax.set_title('Iteration {} (points: {}, proposals: {})'.format(i + 1, n_sim, len(proposals)))
# prepare proxy-model for sampling
def log_like(x):
proxy.define_weights(x)
return np.sum(np.log(proxy.mean()))
par0 = np.array([(x_min + x_max) / 2.])
mcmc = hamiltonian_mc(log_like, par0, 2000, df='3-point', burn=1000, bounds=bounds_, seed=seed)
k = 10
xp, fp = posterior_ensemble(mcmc, k, alpha=0.3)
# plot sampling of proxy model
ax = axes[-1]
ax.plot(x_true, y_true, color='gray', lw=2, label='True', zorder=2.5)
ax.scatter(xp, np.exp(fp), 20, color='g', label='Posterior Ensemble', zorder=6)
_configure_axes_1D(ax, [x_min, x_max])
ax.set_title('Posterior Samples')
# save figure
fig.savefig(CASE_PATH + 'SBO_1D.png')
def _plot_bayesian_1D():
# example from: https://towardsdatascience.com/a-parallel-implementation-of-bayesian-optimization-2ffcdb2733a2
dim = 1
seed = 1234
# constraint options
use_constraints = True
nl_r = 3.
def obj(x):
return normal_pdf(x, 15., 5.) + normal_pdf(x, 30., 4.) + normal_pdf(x, 40., 5.)
# initial design of experiment
x_doe = np.array([5., 18., 25., 44.])
y_doe = obj(x_doe)
# for illustration of underlying objective function
x_true = np.linspace(0., 50., 500)
y_true = obj(x_true)
# set up optimizer -------------------------------------------------------------------------------------------------
# set bounds
x_min = 0.
x_max = 50.
bounds_ = ((x_min, x_max),)
# define population sampler of MMO using prior probability assumption
prior = UniformDist(x_min, x_max, seed=seed)
def population(N):
return prior.sample((N, dim))
# set up underlying optimization algorithm of the MMO
def opt_core(obj_, x0, bounds, constraints):
return nlprog(obj_, x0, bounds=bounds, constraints=constraints)
# set up proxy model -----------------------------------------------------------------------------------------------
proxy = OrdinaryKrigingModel(x_doe, y_doe, 'gaussian', seed=seed)
theta0 = np.array([10.])
proxy.initialize_ML(theta0)
# prepare figure ---------------------------------------------------------------------------------------------------
loops = 4
r, c = subplot_layout(loops + 2)
fig, axes = plt.subplots(r, c, figsize=(20, 14))
axes = axes.flatten()
# plot true objective function
ax = axes[0]
ax.plot(x_true, y_true, color='gray', lw=2, label='True', zorder=2.5)
ax.scatter(x_doe, y_doe, 20, color='k', label='Initial DoE', zorder=5)
_configure_axes_1D(ax, [x_min, x_max])
ax.set_title('Design of Experiment')
# main optimization loop -------------------------------------------------------------------------------------------
x_all = np.array(x_doe)
y_all = np.array(y_doe)
for i in range(loops):
n_sim = y_all.size
# constraint problem to reduce clustering
if use_constraints:
# update constraints to include all points
def nl_con(x):
return (x - x_all) ** 2.
nl_lb = np.array([nl_r ** 2. for _ in y_all])
nl_ub = np.array([np.inf for _ in y_all])
constraints = NonlinearConstraint(nl_con, lb=nl_lb, ub=nl_ub)
else:
constraints = ()
def opt(obj_):
return mmo(obj_, opt_core, population, dim, bounds=bounds_, constraints=constraints, maxit=5).results
# prepare proxy model function
def model(x):
proxy.define_weights(x)
return proxy.mean(), proxy.std()
# find new simulation parameters based on bayesian proposal
# y_max = np.amax(y_all)
kappa = 3.
proposals = bayesian_proposal(model, opt, acq='UCB', thresh=0.5, args=(kappa,))
# create progress chart
ax = axes[i + 1]
# plot true objective function
ax.plot(x_true, y_true, color='gray', lw=2, label='True', zorder=2.5)
# plot proxy model and +- 2 sigma bounds
proxy.define_weights(x_true)
mu = proxy.mean()
sigma = proxy.std()
ax.plot(x_true, mu, color='m', label='Proxy', zorder=3)
ax.plot(x_true, mu + 2. * sigma, color='m', ls='--', label='95% conf.', zorder=3)
ax.plot(x_true, mu - 2. * sigma, color='m', ls='--', zorder=3)
# plot objective function
y_obj = _upper_confidence_bound(mu, sigma, kappa=kappa)
ax.plot(x_true, y_obj, color='b', label='Bayesian', zorder=4)
# plot all existing sample points
ax.scatter(x_all, y_all, 20, color='k', label='Samples', zorder=5)
if proposals:
# extract proposal points and associated objective value
x_pro = np.block([res.x for res in proposals])
y_pro_obj = np.block([res.f for res in proposals])
# simulate new proposals
y_pro = obj(x_pro)
# plot proposed samples
ax.scatter(x_pro, y_pro_obj, 20, color='g', label='Proposals', zorder=6)
# update proxy-model
proxy.update(x_pro, y_pro)
# add proposals to all samples
x_all = np.append(x_all, x_pro)
y_all = np.append(y_all, y_pro)
# configure figure
_configure_axes_1D(ax, [x_min, x_max])
ax.set_title('Iteration {} (points: {}, proposals: {})'.format(i + 1, n_sim, len(proposals)))
# prepare proxy-model for sampling
def log_like(x):
proxy.define_weights(x)
return np.sum(np.log(proxy.mean()))
par0 = np.array([(x_min + x_max) / 2.])
| |
= bytearray(enc_msg[:-32])
encrypted_data[-1] ^= 1 # Bad padding
enc_msg = bytes(encrypted_data) + hmac_digest(key_m, encrypted_data, _sha256)
with pytest.raises(DecryptionError) as e:
priv.decrypt_message(enc_msg)
assert 'padding' in str(e.value)
enc_msg = bytes.fromhex(
'4249453102e5cde5b5924d745958ba05c87d6d8930c6314481fbdefa02d8f4bafc8a2e1dee7d9c3e9d704'
'8d72c63fc3e7b76f7f0d0b99c9b75ac78af43442e5926ea9fbaab1d4d32d71a4237e432bc2bbf7808fcd3'
)
with pytest.raises(DecryptionError) as e:
priv.decrypt_message(enc_msg)
assert 'inconsistent padding bytes' in str(e.value)
# A valid encrypted message but for the wrong key; should give hmac mismatch
enc_msg = P.add(one).encrypt_message(msg)
with pytest.raises(DecryptionError) as e:
priv.decrypt_message(enc_msg)
assert 'bad HMAC' in str(e.value)
def test_str(self):
p = PrivateKey.from_random()
assert str(p) == sha256(p.to_bytes()).hex()
class TestPublicKey:
@pytest.mark.parametrize("bad_key", (
1,
bytes.fromhex('036d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e2'),
'g',
))
def test_bad_type(self, bad_key):
with pytest.raises(TypeError):
PublicKey(bad_key, True)
def test_good(self):
pub = PrivateKey.from_random().public_key
PublicKey(pub._public_key, False)
def test_eq(self):
secret = os.urandom(32)
priv = PrivateKey(secret)
pub1 = priv.public_key
pub2 = PublicKey(pub1._public_key, False)
assert pub1 is not pub2
assert pub1 == pub2
assert PrivateKey.from_random().public_key != pub1
# Other types
assert pub1 != 0
assert pub1 != 'foo'
def test_hashable(self):
secret = os.urandom(32)
p1 = PrivateKey(secret, True, Bitcoin)
p2 = PrivateKey(secret, False, BitcoinTestnet)
pub1 = p1.public_key
pub2 = p2.public_key
assert pub1.is_compressed() != pub2.is_compressed()
assert pub1.coin() != pub2.coin()
assert len({pub1, pub2}) == 1
def test_to_bytes(self):
priv = PrivateKey(bytes(range(32)))
pub = priv.public_key
assert pub.to_bytes().hex() == pub.to_bytes(compressed=True).hex() == (
'036d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e2'
)
assert pub.to_bytes(compressed=False).hex() == (
'046d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e'
'2487e6222a6664e079c8edf7518defd562dbeda1e7593dfd7f0be285880a24dab'
)
def test_from_bytes(self):
priv = PrivateKey.from_random()
for compressed in (False, True):
pub = PublicKey.from_bytes(priv.public_key.to_bytes(compressed=compressed))
assert pub == priv.public_key
assert pub.coin() is Bitcoin
assert pub.is_compressed() is compressed
def test_from_bytes_bad(self):
priv = PrivateKey(bytes(range(32)))
pub = priv.public_key
data = pub.to_bytes(compressed=False)[:-1] + bytes(1)
with pytest.raises(ValueError):
PublicKey.from_bytes(data)
def test_to_hex(self):
priv = PrivateKey(bytes(range(32)))
pub = priv.public_key
assert pub.to_hex() == pub.to_hex(compressed=True) == (
'036d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e2'
)
assert pub.to_hex(compressed=False) == (
'046d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e'
'2487e6222a6664e079c8edf7518defd562dbeda1e7593dfd7f0be285880a24dab'
)
def test_from_hex(self):
priv = PrivateKey(bytes(range(32)))
pub = priv.public_key
assert pub == PublicKey.from_hex(
'046d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e2'
'487e6222a6664e079c8edf7518defd562dbeda1e7593dfd7f0be285880a24dab'
)
assert pub == PublicKey.from_hex(
'036d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e2'
)
def test_from_hex_bad(self):
with pytest.raises(ValueError):
PublicKey.from_hex(
'046d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e2'
'487e6222a6664e079c8edf7518defd562dbeda1e7593dfd7f0be285880a24daa'
)
def test_to_point(self):
priv = PrivateKey(bytes(range(32)))
pub = priv.public_key
assert pub.to_point() == (
49494098513335428077054975730806467664514612540321453185917289738417036617954,
32789825133461354718917898111687295334475538855588308840542926904654395755947
)
def test_from_point(self):
priv = PrivateKey(bytes(range(32)))
pub = priv.public_key
x = 49494098513335428077054975730806467664514612540321453185917289738417036617954
y = 32789825133461354718917898111687295334475538855588308840542926904654395755947
assert PublicKey.from_point(x, y) == pub
def test_from_point_bad(self):
x = 49494098513335428077054975730806467664514612540321453185917289738417036617954
y = 32789825133461354718917898111687295334475538855588308840542926904654395755948
with pytest.raises(ValueError):
PublicKey.from_point(x, y)
def test_to_address(self):
priv = PrivateKey(bytes(range(32)))
pub = priv.public_key
assert pub.to_address() == Address.from_string('<KEY>',
Bitcoin)
assert pub.to_address(compressed=False) == Address.from_string(
'1G9f5Kdd5A8MeBN8jduUNfcAXUVvtFxVhP', Bitcoin)
assert pub.to_address(coin=Bitcoin) == Address.from_string(
'<KEY>', Bitcoin)
assert pub.to_address(coin=Bitcoin, compressed=False) == Address.from_string(
'1G9f5Kdd5A8MeBN8jduUNfcAXUVvtFxVhP', Bitcoin)
assert pub.to_address(coin=BitcoinTestnet, compressed=True) == Address.from_string(
'mm5Yiba1U2odgUskxnXMJGQMV1DSAXVPib', BitcoinTestnet)
assert pub.to_address(coin=BitcoinTestnet, compressed=False) == Address.from_string(
'mvfcNNibtBZcRHqkTCsrCapVPU6dpCoKjp', BitcoinTestnet)
def test_add(self):
priv = PrivateKey.from_random()
value = os.urandom(32)
P = priv.public_key
priv2 = priv.add(value)
P2 = P.add(value)
assert P2 == priv2.public_key
assert P == priv.public_key
@pytest.mark.parametrize("coin,WIF,hex_str,compressed", WIF_tests)
def test_add_preserves_attributes(self, coin, WIF, hex_str, compressed):
P = PrivateKey.from_WIF(WIF).public_key.add(one)
assert P.coin() is coin
assert P.is_compressed() is compressed
def test_add_bad(self):
priv = PrivateKey.from_random()
P = priv.public_key
value = int_to_be_bytes(CURVE_ORDER - priv.to_int())
with pytest.raises(ValueError):
P.add(value)
with pytest.raises(ValueError):
P.add(bytes(33))
with pytest.raises(ValueError):
P.add(b'1' * 31)
with pytest.raises(TypeError):
P.add('')
def test_multiply(self):
priv = PrivateKey.from_random()
value = os.urandom(32)
priv2 = priv.multiply(value)
P = priv.public_key
P2 = P.multiply(value)
assert P2 == priv2.public_key
assert P == priv.public_key
@pytest.mark.parametrize("coin,WIF,hex_str,compressed", WIF_tests)
def test_multiply_preserves_attributes(self, coin, WIF, hex_str, compressed):
P = PrivateKey.from_WIF(WIF).public_key.multiply(one)
assert P.coin() is coin
assert P.is_compressed() is compressed
def test_multiply_bad(self):
priv = PrivateKey.from_random()
P = priv.public_key
with pytest.raises(ValueError):
P.multiply(bytes(32))
with pytest.raises(ValueError):
P.multiply(bytes(33))
with pytest.raises(ValueError):
P.multiply(b'1' * 31)
with pytest.raises(TypeError):
P.multiply('')
def test_combine_keys_none(self):
with pytest.raises(ValueError):
PublicKey.combine_keys([])
def test_combine_keys_self(self):
priv = PrivateKey.from_random()
P = priv.public_key
P2 = PublicKey.combine_keys([P])
assert P is not P2
assert P == P2
priv2 = priv.add(priv._secret)
assert PublicKey.combine_keys([P, P]) == priv2.public_key
@pytest.mark.parametrize("compressed,coin", (
(True, Bitcoin), (False, Bitcoin), (True, BitcoinTestnet), (False, BitcoinTestnet),
))
def test_combine_keys(self, compressed, coin):
priv_keys = [PrivateKey.from_random() for n in range(5)]
priv_keys[0]._compressed = compressed
priv_keys[0]._coin = coin
pub_keys = [priv_key.public_key for priv_key in priv_keys]
pk = priv_keys[0]
for n in range(1, len(priv_keys)):
pk = pk.add(priv_keys[n]._secret)
combined = PublicKey.combine_keys(pub_keys)
assert pk.public_key == combined
assert combined.coin() is coin
assert combined.is_compressed() is compressed
def test_combine_keys_bad(self):
priv = PrivateKey.from_random()
priv2 = PrivateKey(int_to_be_bytes(CURVE_ORDER - priv.to_int()))
with pytest.raises(ValueError):
PublicKey.combine_keys([priv.public_key, priv2.public_key])
def test_combine_keys_bad_intermediate(self):
priv = PrivateKey.from_random()
priv2 = PrivateKey(int_to_be_bytes(CURVE_ORDER - priv.to_int()))
# But combining with bad intermediate result but good final is fine
P = PublicKey.combine_keys([priv.public_key, priv2.public_key, priv.public_key])
assert P == priv.public_key
def test_verify_der_signature(self):
priv = PrivateKey.from_random()
message = b'BitcoinSV'
sig = priv.sign(message)
P = priv.public_key
assert P.verify_der_signature(sig, message)
assert P.verify_der_signature(sig, message, sha256)
assert P.verify_der_signature(sig, sha256(message), None)
assert not P.verify_der_signature(sig, message[:-1])
def test_verify_der_signature_bad(self):
priv = PrivateKey.from_random()
message = b'BitcoinSV'
sig_der = priv.sign(message)
sig_rec = priv.sign_recoverable(message)
P = priv.public_key
with pytest.raises(InvalidSignatureError):
P.verify_der_signature(sig_rec, message)
for n in (10, 20, 30, 40):
bad_der = bytearray(sig_der)
bad_der[n] ^= 0x10
try:
assert not P.verify_der_signature(bytes(bad_der), message)
except InvalidSignatureError:
pass
def test_verify_recoverable_signature(self):
priv = PrivateKey.from_random()
message = b'BitcoinSV'
sig = priv.sign_recoverable(message)
P = priv.public_key
assert P.verify_recoverable_signature(sig, message)
assert P.verify_recoverable_signature(sig, message, sha256)
assert P.verify_recoverable_signature(sig, sha256(message), None)
assert not P.verify_recoverable_signature(sig, message[:-1])
def test_verify_recoverable_signature_bad(self):
priv = PrivateKey.from_random()
message = b'BitcoinSV'
sig = priv.sign(message)
P = priv.public_key
# Bad recid
bad_sig = bytes([0x01]) * 64 + bytes([4])
with pytest.raises(InvalidSignatureError):
P.verify_recoverable_signature(bad_sig, message)
# Overflow
bad_sig = bytes([0xff]) * 64 + bytes([1])
with pytest.raises(InvalidSignatureError):
P.verify_recoverable_signature(bad_sig, message)
# Invalid sig
bad_sig = bytes([sig[0] ^ 1]) + sig[1:]
with pytest.raises(InvalidSignatureError):
P.verify_recoverable_signature(bad_sig, message)
def test_from_recoverable_signature(self):
priv = PrivateKey.from_random()
message = b'BitcoinSV'
rec_sig = priv.sign_recoverable(message)
pub = PublicKey.from_recoverable_signature(rec_sig, message)
assert priv.public_key == pub
assert pub.coin() is Bitcoin
def test_from_recoverable_signature_bad(self):
message = b'BitcoinSV'
with pytest.raises(InvalidSignatureError):
PublicKey.from_recoverable_signature(b'1' * 64, message)
def test_from_recoverable_signature_bad_r(self):
priv = PrivateKey.from_random()
message = b'BitcoinSV'
rec_sig = priv.sign_recoverable(message)
bad_sig = bytes(32) + rec_sig[32:]
with pytest.raises(InvalidSignatureError):
PublicKey.from_recoverable_signature(bad_sig, message)
def test_from_signed_message(self):
priv = PrivateKey.from_random()
P = priv.public_key
msg = b'BitcoinSV'
msg_sig = priv.sign_message(msg)
P2 = PublicKey.from_signed_message(msg_sig, msg)
assert P == P2
assert P2.coin() is Bitcoin
def test_from_signed_message_base64(self):
priv = PrivateKey.from_random()
message = b'BitcoinSV'
message_sig = priv.sign_message(message)
P1 = PublicKey.from_signed_message(message_sig, message)
P2 = PublicKey.from_signed_message(b64encode(message_sig).decode(), message)
assert P1 == P2 == priv.public_key
with pytest.raises(InvalidSignatureError):
PublicKey.from_signed_message('abcd%', message)
def test_from_signed_message_no_hasher(self):
priv = PrivateKey.from_random()
message = bytes(32)
message_sig = priv.sign_message(message)
PublicKey.from_signed_message(message_sig, message)
with pytest.raises(ValueError):
PublicKey.from_signed_message(message_sig, message, None)
@pytest.mark.parametrize("msg", (
b'BitcoinSV', 'BitcoinSV',
# Test longer messages are prefix-encoded correctly
        b'BitcoinSV' * 100,
))
def test_verify_message(self, msg):
priv = PrivateKey.from_random()
P = priv.public_key
address_comp = P.to_address()
address_uncomp = P.to_address(compressed=False)
assert address_comp != address_uncomp
base_msg = b'BitcoinSV'
msg_sig = priv.sign_message(msg)
msg_sig2 = bytearray(msg_sig)
msg_sig2[3] ^= 79
msg_sig2 = bytes(msg_sig2)
assert P.verify_message(msg_sig, msg)
assert PublicKey.verify_message_and_address(msg_sig, msg, address_comp)
assert PublicKey.verify_message_and_address(msg_sig, msg, address_uncomp)
msg_sig = priv.sign_message_to_base64(msg)
assert P.verify_message(msg_sig, msg)
assert PublicKey.verify_message_and_address(msg_sig, msg, address_comp)
assert PublicKey.verify_message_and_address(msg_sig, msg, address_uncomp)
assert not PublicKey.verify_message_and_address(msg_sig, msg,
'1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa')
assert not P.verify_message(msg_sig2, msg)
assert not PublicKey.verify_message_and_address(msg_sig2, msg, address_comp)
assert not PublicKey.verify_message_and_address(msg_sig2, msg, address_uncomp)
def test_verify_message_no_hasher(self):
priv = PrivateKey.from_random()
message = bytes(32)
message_sig = priv.sign_message(message)
priv.public_key.verify_message(message_sig, message)
with pytest.raises(ValueError):
priv.public_key.verify_message(message_sig, message, hasher=None)
def test_verify_message_sig_base64(self):
priv = PrivateKey.from_random()
P = priv.public_key
message = bytes(32)
message_sig = priv.sign_message(message)
P.verify_message(message_sig, message)
P.verify_message(b64encode(message_sig).decode(), message)
with pytest.raises(InvalidSignatureError):
P.verify_message('abcd%', message)
def test_verify_message_and_address_coin(self):
priv = PrivateKey.from_random()
P = priv.public_key
msg = b'BitcoinSV'
msg_sig = priv.sign_message(msg)
assert P.verify_message_and_address(msg_sig, msg, P.to_address(coin=Bitcoin))
assert P.verify_message_and_address(msg_sig, msg, P.to_address(coin=BitcoinTestnet))
def test_verify_message_and_address_bad(self):
priv1 = PrivateKey.from_random()
priv2 = PrivateKey.from_random()
P1, P2 = priv1.public_key, priv2.public_key
msg = b'BitcoinSV'
msg_sig = priv1.sign_message(msg)
assert P1.verify_message_and_address(msg_sig, msg, P1.to_address())
assert not P1.verify_message_and_address(msg_sig, msg, P2.to_address())
with pytest.raises(TypeError):
P1.verify_message_and_address(msg_sig, msg, b'foobar')
@pytest.mark.parametrize("hasher", (double_sha256, sha256))
def test_verify_message_and_address_hasher(self, hasher):
priv = PrivateKey.from_random()
P = priv.public_key
msg = b'BitcoinSV'
msg_sig = priv.sign_message(msg, hasher=hasher)
assert P.verify_message_and_address(msg_sig, msg, P.to_address(), hasher=hasher)
def test_verify_message_bad(self):
priv = PrivateKey.from_random()
P = priv.public_key
msg = b'BitcoinSV'
msg_sig = priv.sign_message(msg)
with pytest.raises(InvalidSignatureError):
P.verify_message(b'bar', msg)
with pytest.raises(InvalidSignatureError):
P.verify_message(msg_sig[:-1], msg)
msg_sig = bytearray(msg_sig)
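# Header-byte sanity check: the first byte of a recoverable message signature
# encodes the recovery id (27-30 uncompressed, 31-34 compressed), so 26 and 35
# sit just outside the valid 27..34 range exercised below.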
for n in (26, 35):
msg_sig[0] = n
with pytest.raises(InvalidSignatureError):
P.verify_message(bytes(msg_sig), msg)
def test_encrypt_message_and_to_base64(self, AES_impl):
bmsg = b'BitcoinSV'
# Test both compressed and uncompressed pubkeys
for msg in (bmsg, bmsg.decode()):
for compressed in (False, True):
priv = PrivateKey.from_random()
priv._compressed = compressed
P = priv.public_key
enc_msg = P.encrypt_message(msg)
assert isinstance(enc_msg, bytes)
assert priv.decrypt_message(enc_msg) == bmsg
# This tests the default magic of both functions is b'BIE1'
assert priv.decrypt_message(enc_msg, magic=b'BIE1') == bmsg
# Now base64
enc_msg = P.encrypt_message_to_base64(msg)
assert isinstance(enc_msg, str)
assert priv.decrypt_message(enc_msg) == bmsg
# This tests the default magic of both functions is b'BIE1'
assert priv.decrypt_message(enc_msg, magic=b'BIE1') == bmsg
def test_encrypt_message_magic(self, AES_impl):
priv = PrivateKey.from_random()
P = priv.public_key
RichardHufford"
player = axl.SecondByRichardHufford
expected_classifier = {
"memory_depth": float("inf"),
"stochastic": False,
"makes_use_of": set(),
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def test_strategy(self):
actions = [(C, C)] * 19 + [(D, C), (C, C), (C, C)]
self.versus_test(
axl.Cooperator(), expected_actions=actions, attrs={"streak_needed": 14}
)
actions = [(C, C)] * 19 + [(D, C), (C, C)]
actions += [
(C, C)
] # This is the first Cooperation that gets counted on the new streak
actions += [(C, C)] * 13 + [(D, C), (C, C), (C, C)]
self.versus_test(
axl.Cooperator(), expected_actions=actions, attrs={"streak_needed": 11}
)
opponent_actions = [C] * 20 + [D]
BoredCooperator = axl.MockPlayer(actions=opponent_actions)
actions = [(C, C)] * 19 + [(D, C), (C, D), (C, C)]
self.versus_test(
BoredCooperator, expected_actions=actions, attrs={"streak_needed": 31}
)
actions = [(C, D)] # "Disagreement"
actions += [(D, C)] # TFT. Disagreement
actions += [(C, C)] # TFT.
actions += [(C, D)] # TFT. Disagreement
actions += [(D, C)] # Three of last four are disagreements.
actions += [(C, C)] # TFT. Disagreement
actions += [(D, D)] # Three of last four are disagreements. Disagreement
actions += [(D, D)] # Three of last four are disagreements.
actions += [(D, D)] # Now there are 5/9 disagreements, so Defect.
self.versus_test(
axl.WinShiftLoseStay(),
expected_actions=actions,
attrs={"num_agreements": 5},
)
class TestYamachi(TestPlayer):
name = "<NAME>"
player = axl.SecondByYamachi
expected_classifier = {
"memory_depth": float("inf"),
"stochastic": False,
"makes_use_of": set(),
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def test_strategy(self):
actions = [(C, C)] * 100
self.versus_test(axl.Cooperator(), expected_actions=actions)
actions = [
(C, D)
] * 2 # Also Cooperate in first two moves (until we update `count_them_us_them`.)
actions += [
(C, D)
] # them_three_ago defaults to C, so that (C, C, *) gets updated, then (D, C, *) gets checked.
# It's actually impossible to Defect on the third move.
actions += [(D, D)] # (D, C, *) gets updated, then checked.
actions += [(C, D)] # (D, C, *) gets updated, but (D, D, *) checked.
actions += [(D, D)] * 30 # (D, D, *) gets updated and checked from here on.
self.versus_test(axl.Defector(), expected_actions=actions)
actions = [(C, C), (C, D)]
actions += [(C, C)] # Increment (C, C, C). Check (C, C, *). Cooperate.
# Reminder that first C is default value and last C is opponent's first move.
actions += [(C, D)] # Increment (C, C, D). Check (D, C, *) = 0. Cooperate.
actions += [(C, C)] # Increment (D, C, C). Check (C, C, *) = 0. Cooperate.
# There is one Defection and one Cooperation in this scenario,
# but the Cooperation was due to a default value only. We can see where this is going.
actions += [(C, D)] # Increment (C, C, D). Check (D, C, *) = 1. Cooperate.
actions += [(D, C)] # Increment (D, C, C). Check (C, C, *) = -1. Defect.
actions += [
(C, D)
] # Increment (C, C, D). Check (D, D, *) = 0 (New). Cooperate.
actions += [(D, C)] # Increment (D, D, C). Check (C, C, *) < 0. Defect.
actions += [(C, D)] # Increment (C, C, D). Check (D, D, *) > 0. Cooperate.
actions += [(D, C), (C, D)] * 15 # This pattern continues for a while.
actions += [
(D, C),
(D, D),
] * 30 # Defect from turn 41 on, since near 50% Defections.
self.versus_test(axl.Alternator(), expected_actions=actions)
# Rip-off is the most interesting interaction.
actions = [
(C, D),
(C, C),
(C, D),
(D, C),
(C, C),
(D, C),
(C, D),
(D, C),
(C, D),
(D, C),
(C, D),
(D, C),
(C, D),
(D, C),
(C, D),
(D, C),
(C, D),
(D, C),
(C, D),
(D, C),
(C, D),
(D, C),
(C, D),
(D, C),
(C, D),
(D, C),
(C, D),
(D, C),
(C, D),
(D, C),
(C, D),
(D, C),
(C, D),
(D, C),
(C, D),
(D, C),
(C, D),
(D, C),
(C, D),
(D, C),
]
my_dict = {
(C, C, C): 1,
(C, C, D): 18,
(C, D, C): 1,
(C, D, D): 0,
(D, C, C): 1,
(D, C, D): 0,
(D, D, C): 17,
(D, D, D): 0,
}
RipoffPlayer = axl.Ripoff()
self.versus_test(
RipoffPlayer,
expected_actions=actions,
attrs={"count_them_us_them": my_dict},
)
self.assertEqual(
RipoffPlayer.defections, 19
) # Next turn, `portion_defect` = 0.4756
# The pattern (C, D), (D, C) will continue indefinitely unless overridden.
actions += [(D, D)] # Next turn, `portion_defect` = 0.4881
actions += [(D, D)] # Next turn, `portion_defect` = 0.5
actions += [(D, D)] # Next turn, `portion_defect` = 0.5114
actions += [(D, D)] # Next turn, `portion_defect` = 0.5222
actions += [(D, D)] # Next turn, `portion_defect` = 0.5326
actions += [(D, D)] # Next turn, `portion_defect` = 0.5426
actions += [(D, D)] # Next turn, `portion_defect` = 0.5521
actions += [
(D, D),
(C, D),
(D, C),
(C, D),
] # Takes a turn to fall back into the cycle.
self.versus_test(axl.Ripoff(), expected_actions=actions)
class TestColbert(TestPlayer):
name = "<NAME> Colbert"
player = axl.SecondByColbert
expected_classifier = {
"memory_depth": 4,
"stochastic": False,
"makes_use_of": set(),
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def test_strategy(self):
actions = [(C, C)] * 5 + [(D, C)] + [(C, C)] * 30
self.versus_test(axl.Cooperator(), expected_actions=actions)
actions = [(C, D)] * 5 + [(D, D)] + [(C, D)] * 2
actions += [(D, D), (D, D), (C, D), (C, D)] * 20
self.versus_test(axl.Defector(), expected_actions=actions)
opponent_actions = [C] * 8 + [C, C, D, C, C, C, C, C]
OddBall = axl.MockPlayer(actions=opponent_actions)
actions = [(C, C)] * 5 + [(D, C)] + [(C, C)] * 4
actions += [(C, D)] + [(D, C), (D, C), (C, C), (C, C)] + [(C, C)]
self.versus_test(OddBall, expected_actions=actions)
class TestMikkelson(TestPlayer):
name = "<NAME>"
player = axl.SecondByMikkelson
expected_classifier = {
"memory_depth": float("inf"),
"stochastic": False,
"makes_use_of": set(),
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def test_strategy(self):
actions = [(C, C)] * 30
self.versus_test(
axl.Cooperator(), expected_actions=actions, attrs={"credit": 8}
)
actions = [(C, D), (C, D), (C, D), (C, D)]
self.versus_test(
axl.Defector(), expected_actions=actions, attrs={"credit": 1}
)
# Defect then reset to 4
actions += [(D, D)]
self.versus_test(
axl.Defector(), expected_actions=actions, attrs={"credit": 4}
)
# Repeat
actions += [(C, D), (D, D)] * 2
self.versus_test(
axl.Defector(), expected_actions=actions, attrs={"credit": 4}
)
# With ten turns passed, keep defecting now
actions += [(C, D), (D, D)]
self.versus_test(
axl.Defector(), expected_actions=actions, attrs={"credit": 0}
)
# With ten turns passed, keep defecting now
actions += [(D, D)] * 30
self.versus_test(
axl.Defector(), expected_actions=actions, attrs={"credit": -7}
)
actions = [(C, D), (C, D), (C, C)]
self.versus_test(
axl.Cycler("DDC"), expected_actions=actions, attrs={"credit": 3}
)
actions += [(C, D), (C, D)]
self.versus_test(
axl.Cycler("DDC"), expected_actions=actions, attrs={"credit": 2}
)
actions += [(D, C)]
self.versus_test(
axl.Cycler("DDC"), expected_actions=actions, attrs={"credit": 4}
)
actions += [(C, D)]
self.versus_test(
axl.Cycler("DDC"), expected_actions=actions, attrs={"credit": 5}
)
actions += [(C, D)]
self.versus_test(
axl.Cycler("DDC"), expected_actions=actions, attrs={"credit": 3}
)
opponent_actions = [C] * 100 + [D] * 10
Change_of_Heart = axl.MockPlayer(actions=opponent_actions)
actions = [(C, C)] * 100 + [(C, D)] * 4
self.versus_test(Change_of_Heart, expected_actions=actions, attrs={"credit": 2})
Change_of_Heart = axl.MockPlayer(actions=opponent_actions)
actions += [(C, D)] * 2
self.versus_test(
Change_of_Heart, expected_actions=actions, attrs={"credit": -2}
)
# Still Cooperate, because Defect rate is low
class TestRowsam(TestPlayer):
name = "<NAME>"
player = axl.SecondByRowsam
expected_classifier = {
"memory_depth": float("inf"),
"stochastic": False,
"makes_use_of": set("game"),
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def test_strategy(self):
# Should always cooperate with Cooperator
actions = [(C, C)] * 100
self.versus_test(axl.Cooperator(), expected_actions=actions)
# Against a Defector should eventually enter Defect mode
actions = [(C, D)] * 5
actions += [(D, D), (C, D), (D, D)] # Do a Coop-Def cycle
self.versus_test(axl.Defector(), expected_actions=actions, attrs={
"distrust_points": 5})
actions += [(C, D)] * 3 # Continue for now
actions += [(D, D)] * 100 # Now Defect mode
self.versus_test(axl.Defector(), expected_actions=actions, attrs={
"distrust_points": 10, "mode": "Defect"})
# Test specific score scenarios
# 5 Defects
opponent_actions = [D] * 5 + [C] * 100
map["tiles"][0]["img"])
):
if os.path.exists(
os.path.splitext(
urllib.parse.unquote(map["img"] or map["tiles"][0]["img"])
)[0]
+ args.jpeg
):
map["img"] = os.path.splitext(map["img"])[0] + args.jpeg
elif os.path.exists(
os.path.splitext(
urllib.parse.unquote(map["img"] or map["tiles"][0]["img"])
)[0]
+ ".jpg"
):
map["img"] = os.path.splitext(map["img"])[0] + ".jpg"
if os.path.exists(map["img"]):
randomok = True
if args.gui:
worker.outputLog("Generating cover image")
print("\rGenerating cover image", file=sys.stderr, end="")
if randomok:
with PIL.Image.open(map["img"] or map["tiles"][0]["img"]) as img:
if img.width <= img.height:
img = img.crop((0, 0, img.width, img.width))
else:
img = img.crop((0, 0, img.height, img.height))
if img.width > 1024:
img = img.resize((1024, 1024))
if args.jpeg == ".jpg" and img.mode in ("RGBA", "P"):
img = img.convert("RGB")
img.save(os.path.join(tempdir, "module_cover" + args.jpeg))
modimage.text = "module_cover" + args.jpeg
# write to file
sys.stderr.write("\033[K")
if args.gui:
worker.updateProgress(70)
if args.packdir:
worker.outputLog("Generating pack.xml")
else:
worker.outputLog("Generating module.xml")
print("\rWriting XML", file=sys.stderr, end="")
tree = ET.ElementTree(indent(module, 1))
tree.write(
os.path.join(
packdir if args.packdir else tempdir,
"pack.xml" if args.packdir else "module.xml",
),
xml_declaration=True,
short_empty_elements=False,
encoding="utf-8",
)
if "styles" in mod:
if not os.path.exists(os.path.join(tempdir, "assets")):
os.mkdir(os.path.join(tempdir, "assets"))
if not os.path.exists(os.path.join(tempdir, "assets", "css")):
os.mkdir(os.path.join(tempdir, "assets", "css"))
for style in mod["styles"]:
if os.path.exists(os.path.join(moduletmp, mod["name"], style)):
with open(
os.path.join(tempdir, "assets", "css", "custom.css"), "a"
) as f:
with open(os.path.join(moduletmp, mod["name"], style)) as css:
for l in css:
f.write(l)
if os.path.exists(os.path.join(moduletmp, mod["name"], "fonts")):
os.rename(
os.path.join(moduletmp, mod["name"], "fonts"),
os.path.join(tempdir, "assets", "fonts"),
)
if args.compendium and (len(items) + len(actors)) > 0:
if args.gui:
worker.updateProgress(75)
worker.outputLog("Generating compendium data")
def fixHTMLContent(text):
text = re.sub(
r'<a(.*?)data-entity="?(.*?)"? (.*?)data-id="?(.*?)"?( .*?)?>',
fixLink,
text,
)
text = re.sub(r"@(.*?)\[(.*?)\](?:\{(.*?)\})?", fixFTag, text)
text = re.sub(r"<h([0-9]).*?>(.*?)</h\1>", r"<b>\2</b>\n", text)
text = re.sub(r"<em.*?>(.*?)</em>", r"<i>\1</i>", text)
text = re.sub(r"<strong.*?>(.*?)</strong>", r"<b>\1</b>", text)
text = re.sub(
r"<blockquote.*?>(.*?)</blockquote>",
r"-------------\n\1-------------\n",
text,
)
text = re.sub(
r'<img(.*?)src="?(.*?)"?( .*?)?>', r'<a\1href="\2"\3>Image</a>', text
)
text = re.sub(r"<tr.*?><td.*?>(.*?)</td>", r"\1", text)
text = re.sub(r"<td.*?>(.*?)</td>", r" | \1", text)
text = re.sub(r"</tr>", "\n", text)
text = re.sub(r"</?p.*?>", "", text)
text = re.sub(r"<br.*?>", "\n", text)
text = re.sub(r"<hr.*?>", "------------------------\n", text)
text = re.sub(r"<!--.*?-->", "", text)
text = re.sub(
r"\[\[(?:/(?:gm)?r(?:oll)? )?(.*?)(?: ?# ?(.*?))?\]\]", fixRoll, text
)
text = re.sub(
r"<section .*?class=.secret..*?>(.*?)</section>.*",
r"\1",
text,
flags=re.S,
)
return html.unescape(text.strip())
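# Rough illustration of what fixHTMLContent does (input is made up, result derived
# from the substitutions above rather than taken from the source):
#   fixHTMLContent('<p>Deal <strong>1d6</strong> damage<br></p>')
#   -> 'Deal <b>1d6</b> damage'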
compendium = ET.Element("compendium")
os.mkdir(os.path.join(tempdir, "items"))
os.mkdir(os.path.join(tempdir, "spells"))
os.mkdir(os.path.join(tempdir, "monsters"))
itemnumber = 0
for i in items:
itemnumber += 1
if args.gui:
worker.updateProgress(
75 + (itemnumber / (len(items) + len(actors))) * 10
)
print(
"\rGenerating compendium [{}/{}]".format(
itemnumber, len(items) + len(actors)
),
file=sys.stderr,
end="",
)
if i["type"] in ["feat"]:
continue
d = i["data"]
if i["type"] == "spell":
spell = ET.SubElement(
compendium, "spell", {"id": str(uuid.uuid5(moduuid, i["_id"]))}
)
ET.SubElement(spell, "name").text = i["name"]
ET.SubElement(spell, "slug").text = slugify(i["name"])
ET.SubElement(spell, "level").text = str(d["level"])
ET.SubElement(spell, "school").text = (
schools[d["school"]] if d["school"] in schools else d["school"]
)
ET.SubElement(spell, "ritual").text = (
"YES" if d["components"]["ritual"] else "NO"
)
ET.SubElement(spell, "time").text = "{} {}".format(
d["activation"]["cost"], d["activation"]["type"]
)
ET.SubElement(spell, "range").text = "{} {}".format(
"{}/{}".format(d["range"]["value"], d["range"]["long"])
if d["range"]["long"]
else d["range"]["value"],
d["range"]["units"]
)
components = []
for component in d["components"].keys():
if component in ["value", "ritual", "concentration"]:
continue
elif d["components"][component]:
comp = component[0].upper()
if (
comp == "M"
and "value" in d["materials"]
and d["materials"]["value"]
):
if d["materials"]["consumed"]:
d["materials"]["value"] += ", which the spell consumes"
comp += " ({})".format(d["materials"]["value"])
components.append(comp)
ET.SubElement(spell, "components").text = ",".join(components)
ET.SubElement(spell, "duration").text = (
("Concentration" if d["components"]["concentration"] else "")
+ "Instantaneous"
if d["duration"]["units"] == "inst"
else "{} {}".format(d["duration"]["value"], d["duration"]["units"])
)
ET.SubElement(spell, "source").text = d["source"]
ET.SubElement(spell, "text").text = (
d["description"]["value"] + "\n<i>Source: " + d["source"] + "</i>"
)
continue
item = ET.SubElement(
compendium, "item", {"id": str(uuid.uuid5(moduuid, i["_id"]))}
)
ET.SubElement(item, "name").text = i["name"]
ET.SubElement(item, "slug").text = slugify(i["name"])
if "weight" in d and d["weight"]:
ET.SubElement(item, "weight").text = str(d["weight"])
if "rarity" in d and d["rarity"]:
ET.SubElement(item, "rarity").text = d["rarity"].title()
if "price" in d and d["price"]:
value = ET.SubElement(item, "value")
if d["price"] >= 100:
value.text = "{:g} gp".format(d["price"] / 100)
elif d["price"] >= 10:
value.text = "{:g} sp".format(d["price"] / 10)
else:
value.text = "{:g} cp".format(d["price"])
if i["type"] in ["consumable"]:
if d["consumableType"] == "potion":
ET.SubElement(item, "type").text = "P"
elif d["consumableType"] == "wand":
ET.SubElement(item, "type").text = "WD"
elif d["consumableType"] == "scroll":
ET.SubElement(item, "type").text = "SC"
elif d["consumableType"] in ["food", "trinket"]:
ET.SubElement(item, "type").text = "G"
elif d["consumableType"] == "ammo":
ET.SubElement(item, "type").text = "A"
else:
print("Dont know consumable:", d["consumableType"])
ET.SubElement(item, "type").text = "G"
elif i["type"] in ["equipment"]:
if d["armor"]["type"] in ["clothing", "light"]:
ET.SubElement(item, "type").text = "LA"
elif d["armor"]["type"] in ["medium"]:
ET.SubElement(item, "type").text = "MA"
elif d["armor"]["type"] in ["heavy"]:
ET.SubElement(item, "type").text = "HA"
elif d["armor"]["type"] in ["shield"]:
ET.SubElement(item, "type").text = "S"
elif d["armor"]["type"] in ["trinket"]:
ET.SubElement(item, "type").text = "G"
else:
print("Dont know armor type:", d["armor"]["type"])
ET.SubElement(item, "type").text = "AA"
if d["armor"]["value"]:
ET.SubElement(item, "ac").text = str(d["armor"]["value"])
elif i["type"] == "weapon":
if d["weaponType"] in ["simpleR", "martialR"]:
ET.SubElement(item, "type").text = "R"
elif d["weaponType"] in ["simpleM", "martialM"]:
ET.SubElement(item, "type").text = "M"
elif "staff" in d and d["staff"]:
ET.SubElement(item, "type").text = "ST"
else:
if d["weaponType"] not in ["natural"]:
print("Dont know weapon:", d["weaponType"])
ET.SubElement(item, "type").text = "WW"
props = []
for prop in d["properties"].keys():
if not d["properties"][prop]:
continue
if prop == "amm":
props.append("A")
if prop == "fin":
props.append("F")
if prop == "hvy":
props.append("H")
if prop == "lgt":
props.append("L")
if prop == "lod":
props.append("LD")
if prop == "rch":
props.append("R")
if prop == "spc":
props.append("S")
if prop == "thr":
props.append("T")
if prop == "two":
props.append("2H")
if prop == "ver":
props.append("V")
ET.SubElement(item, "property").text = ",".join(props)
if d["damage"]["parts"]:
ET.SubElement(item, "dmg1").text = re.sub(
r"[ ]?\+[ ]?@mod", r"", d["damage"]["parts"][0][0], re.I
)
if d["damage"]["parts"][0][1]:
ET.SubElement(item, "dmgType").text = d["damage"]["parts"][0][
1
][0].upper()
if d["damage"]["versatile"]:
ET.SubElement(item, "dmg2").text = re.sub(
r"\[\[a-z]*\]?[ ]?\+[ ]?(@mod)?", r"", d["damage"]["versatile"], re.I
)
if "range" in d:
ET.SubElement(item, "range").text = "{} {}".format(
"{}/{}".format(d["range"]["value"], d["range"]["long"])
if d["range"]["long"]
else d["range"]["value"],
d["range"]["units"]
)
elif i["type"] in ["loot", "backpack", "tool"]:
ET.SubElement(item, "type").text = "G"
else:
print("Dont know item type", i["type"])
ET.SubElement(item, "text").text = fixHTMLContent(d["description"]["value"])
if i["img"]:
i["img"] = urllib.parse.unquote(i["img"])
if i["img"] and os.path.exists(i["img"]):
ET.SubElement(item, "image").text = (
slugify(i["name"]) + "_" + os.path.basename(i["img"])
)
shutil.copy(
i["img"],
os.path.join(
tempdir,
"items",
slugify(i["name"]) + "_" + os.path.basename(i["img"]),
),
)
for a in actors:
itemnumber += 1
if args.gui:
worker.updateProgress(
75 + (itemnumber / (len(items) + len(actors))) * 10
)
print(
"\rGenerating compendium [{}/{}]".format(
itemnumber, len(items) + len(actors)
),
file=sys.stderr,
end="",
)
monster = ET.SubElement(
compendium, "monster", {"id": str(uuid.uuid5(moduuid, a["_id"]))}
)
d = a["data"]
ET.SubElement(monster, "name").text = a["name"]
ET.SubElement(monster, "slug").text = slugify(a["name"])
ET.SubElement(monster, "size").text = d["traits"]["size"][0].upper()
if "type" in d["details"]:
if type(d["details"]["type"]) == dict:
monstertype = d["details"]["type"]["value"]
if d["details"]["type"]["swarm"]:
monstertype = "swarm of {} {}s".format(d["details"]["type"]["swarm"].title(), monstertype)
if d["details"]["type"]["subtype"]:
monstertype += " ({})".format(d["details"]["type"]["subtype"])
ET.SubElement(monster, "type").text = d["details"]["type"]["custom"] or monstertype
else:
ET.SubElement(monster, "type").text = d["details"]["type"]
if "alignment" in d["details"]:
ET.SubElement(monster, "alignment").text = d["details"]["alignment"]
ET.SubElement(monster, "ac").text = str(d["attributes"]["ac"]["value"] if "value" in d["attributes"]["ac"] else d["attributes"]["ac"]["flat"])
if "formula" in d["attributes"]["hp"] and d["attributes"]["hp"]["formula"]:
ET.SubElement(monster, "hp").text = "{} ({})".format(
d["attributes"]["hp"]["value"], d["attributes"]["hp"]["formula"]
)
else:
ET.SubElement(monster, "hp").text = "{}".format(
d["attributes"]["hp"]["value"]
)
if "speed" in d["attributes"] and "_deprecated" not in d["attributes"]["speed"]:
if d["attributes"]["speed"]["special"]:
ET.SubElement(monster, "speed").text = (
d["attributes"]["speed"]["value"]
+ ", "
+ d["attributes"]["speed"]["special"]
)
else:
ET.SubElement(monster, "speed").text = d["attributes"]["speed"][
"value"
]
elif "movement" in d["attributes"]:
speed = []
m = d["attributes"]["movement"]
for k, v in m.items():
if not m[k]:
continue
if k == "walk":
speed.insert(0, "{} {}".format(m[k], m["units"]))
elif k != "units":
speed.append("{} {} {}".format(k, m[k], m["units"]))
ET.SubElement(monster, "speed").text = ", ".join(speed)
ET.SubElement(monster, "str").text = str(d["abilities"]["str"]["value"])
ET.SubElement(monster, "dex").text = str(d["abilities"]["dex"]["value"])
ET.SubElement(monster, "con").text = str(d["abilities"]["con"]["value"])
ET.SubElement(monster, "int").text = str(d["abilities"]["int"]["value"])
ET.SubElement(monster, "wis").text = str(d["abilities"]["wis"]["value"])
ET.SubElement(monster, "cha").text = str(d["abilities"]["cha"]["value"])
ET.SubElement(monster, "save").text = ", ".join(
[
"{} {:+d}".format(k.title(), v["save"])
for (k, v) in d["abilities"].items()
if "save" in v and (v["save"] != v["mod"] and v["proficient"])
]
)
ET.SubElement(monster, "skill").text = ", ".join(
[
"{} {:+d}".format(
skills[k],
v["total"]
if "total" in v
else v["mod"] + v["prof"]
if "prof" in v
else v["mod"],
)
for (k, v) in d["skills"].items()
if ("total" in v and v["mod"] != v["total"])
or (
"mod" in d["abilities"][v["ability"]] and "mod" in v
and v["mod"] != d["abilities"][v["ability"]]["mod"]
)
]
) if "skills" in d else ""
ET.SubElement(monster, "immune").text = "; ".join(
d["traits"]["di"]["value"]
) + (
" {}".format(d["traits"]["di"]["special"])
if "special" in d["traits"]["di"] and d["traits"]["di"]["special"]
else ""
)
ET.SubElement(monster, "vulnerable").text = "; ".join(
d["traits"]["dv"]["value"]
) + (
" {}".format(d["traits"]["dv"]["special"])
if "special" in d["traits"]["dv"] and d["traits"]["dv"]["special"]
else ""
)
ET.SubElement(monster, "resist").text = | |
table
Description: The table name to use
Type: str
########
Outputs
########
Variable: data
Description: fetched minimum of chosen_key in odbc query
Type: depend on column
"""
warnings.warn("get_min is deprecated, please use SQL in original", DeprecationWarning)
if key==None:
key=self.ts_chosen_key
if database==None:
database=self.database
if table==None:
table=self.ts_table
execution_string = "SELECT MIN({}) AS {} FROM {}.{};".format(
key, key, database, table)
min_data = pd.read_sql(execution_string, con=self.connection).iloc[0,0]
return min_data
def get_max_number_data(self, start, format_string,
database=None, table=None,
key=None):
"""
get the maximal number of rows in database
:param table_name: str : name of table
:param key: str : name of chosen key column
:param date_start:
:param date_end:
:param format_string:
:return:
"""
if database==None:
database=self.ts_database
if table==None:
table=self.ts_table
if key==None:
key=self.ts_chosen_key
if isinstance(start, str):
start = datetime.datetime.strptime(start, format_string)
if isinstance(start, datetime.datetime):
execution_string = "select count(*) from {}.{} where {} >= '{}';".format(
database, table,
key, start.strftime(format_string))
else:
execution_string = "select count(*) from {}.{} where {} >= {};".format(
database, table,
key, start)
number_of_data = pd.read_sql(execution_string, con=self.connection).iloc[0,0]
return number_of_data
def get_min_max(self, key, table):
"""get min and max of key"""
warnings.warn("get_min_max is deprecated, please use SQL in original", DeprecationWarning)
execution_string = "SELECT MIN({}) FROM {}".format(key, table)
minimum = self.execute(execution_string).fetchone()[0]
execution_string = "SELECT MAX({}) FROM {}".format(key, table)
maximum = self.execute(execution_string).fetchone()[0]
return minimum, maximum
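# A single round trip would also work here (sketch only, reusing the cursor API above):
#   minimum, maximum = self.execute(
#       "SELECT MIN({0}), MAX({0}) FROM {1}".format(key, table)
#   ).fetchone()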
def get_last_value_before(self, ids=None,
time_start=None,
time_end=None,
ts_database=None,
ts_table=None,
ts_time_column=None,
ts_value_column=None,
ts_itemid_column=None,
ts_quality_id_column=None,
ts_quality_id=None,
not_null=True,  # not used (only for copy+paste)
sort_by=None,
sort_order=None,
additional_sql=None):
"""
function: get_last_value_before
Description: get last value before time_start with various parameters
Attention: if the standard parameters are not set,
the function will take standard extra parameters
########
Inputs
########
Variable: ids
Description: single or list of ids, which identify the timeseries
Type: int, list, dict
########
Variable: time_start
Description: Start time of timeseries
Type: str
Example: time_start="2015-12-20 00:00:00"
Standard: time_start=(dt.now()-timedelta(hours=1)).strftime(self.time_format)
########
Variable: time_end
Description: End time of timeseries
Type: str
Example: time_end="2015-12-21 00:00:00"
Standard: time_end=dt.now().strftime(self.time_format)
########
Variable: ts_database
Description: The database name to use
Type: str
########
Variable: ts_table
Description: The table name to use
Type: str
########
Variable: ts_time_column
Description: The column name of timestamp
Type: str
########
Variable: ts_itemid_column
Description: The column name of ids, which identify the timeseries
Type: str
########
Variable: ts_value_column
Description: The column name of values of timeseries
Type: str
########
Variable: ts_quality_id_column
Description: The column name of quality_id
Type: str
########
Variable: ts_quality_id
Description: The standard value of quality_id
Type: int,str
########
Outputs
########
Variable: data
Description: fetched timeseries data of database query
Type: list
"""
"if some parameters are not handed over, take standard arguments"
(time_start, time_end, ts_database, ts_table, ts_time_column, ts_value_column, ts_itemid_column, _,
ts_quality_id_column, ts_quality_id,
time_format) = self._get_ts_standard(time_start=time_start,
time_end=time_end,
ts_database=ts_database,
ts_table=ts_table,
ts_time_column=ts_time_column,
ts_value_column=ts_value_column,
ts_itemid_column=ts_itemid_column,
ts_chosen_key="",
ts_quality_id_column=ts_quality_id_column,
ts_quality_id=ts_quality_id,
time_format=None)
(ids_keys, ids_values, ids_dict) = self._handle_ids(ids)
sort_by = self._handle_sort_by(sort_by)
# if sort_by == None:
# sort_by = self.ts_time_column
if additional_sql is not None:
additional_sql = "{} {}".format(additional_sql, 'LIMIT 1')
else:
additional_sql = " LIMIT 1"
if type(ids_keys) == int:
execution_string = self.__get_ts_string(ids=ids_keys,
time_start=None,
time_end=time_start,
ts_database=ts_database,
ts_table=ts_table,
ts_time_column=ts_time_column,
ts_itemid_column=ts_itemid_column,
ts_value_column=ts_value_column,
ts_quality_id_column=ts_quality_id_column,
ts_quality_id=ts_quality_id,
not_null=True,
sort_by=ts_time_column,
sort_order="DESC",
additional_sql=additional_sql)
data = self.execute_fetchall(execution_string, ts_time_column)
elif type(ids_keys) == list:
data = pd.DataFrame(columns=[ts_itemid_column,
ts_value_column])
for entry in ids_keys:
execution_string = self.__get_ts_string(ids=entry,
time_start=None,
time_end=time_start,
ts_database=ts_database,
ts_table=ts_table,
ts_time_column=ts_time_column,
ts_itemid_column=ts_itemid_column,
ts_value_column=ts_value_column,
ts_quality_id_column=ts_quality_id_column,
ts_quality_id=ts_quality_id,
not_null=True,
sort_by=ts_time_column,
sort_order="DESC",
additional_sql=additional_sql)
temp_data = self.execute_fetchall(execution_string, ts_time_column)
data = pd.concat([data, temp_data])
if sort_by:
if sort_order == None:
sort_order = "ASC"
if sort_order == "ASC":
ascending = True
elif sort_order == "DESC":
ascending = False
data.sort_index(ascending=ascending, inplace=True)
return data
"""custom made odbc commands"""
def get_timeseries(self, ids=None,
time_start=None,
time_end=None,
ts_database=None,
ts_table=None,
ts_time_column=None,
ts_itemid_column=None,
ts_value_column=None,
ts_quality_id_column=None,
ts_quality_id=None,
not_null=True,
sort_by=None,
sort_order=None,
additional_sql=None):
"""
function: get_timeseries
Description: get timeseries with various parameters
Attention: if the standard parameters are not set,
the function will take standard extra parameters
########
Inputs
########
Variable: ids
Description: single or list of ids, which identify the timeseries
Type: int, list
########
Variable: time_start
Description: Start time of timeseries
Type: str
Example: time_start="2015-12-20 00:00:00"
Standard: time_start=(dt.now()-timedelta(hours=1)).strftime(self.time_format)
########
Variable: time_end
Description: End time of timeseries
Type: str
Example: time_end="2015-12-21 00:00:00"
Standard: time_end=dt.now().strftime(self.time_format)
########
Variable: ts_database
Description: The database name to use
Type: str
########
Variable: ts_table
Description: The table name to use
Type: str
########
Variable: ts_time_column
Description: The column name of timestamp
Type: str
########
Variable: ts_itemid_column
Description: The column name of ids, which identify the timeseries
Type: str
########
Variable: ts_value_column
Description: The column name of values of timeseries
Type: str
########
Variable: ts_quality_id_column
Description: The column name of quality_id
Type: str
########
Variable: ts_quality_id
Description: The standard value of quality_id
Type: int,str
########
Outputs
########
Variable: data
Description: fetched timeseries data of odbc query
Type: list
"""
"if some parameters are not handed over, take standard arguments"
(time_start, time_end, ts_database, ts_table,
ts_time_column, ts_value_column, ts_itemid_column,
_, ts_quality_id_column, ts_quality_id,
time_format) = self._get_ts_standard(
time_start=time_start,
time_end=time_end,
ts_database=ts_database,
ts_table=ts_table,
ts_time_column=ts_time_column,
ts_value_column=ts_value_column,
ts_itemid_column=ts_itemid_column,
ts_chosen_key="",
ts_quality_id_column=ts_quality_id_column,
ts_quality_id=ts_quality_id,
time_format=None)
(ids_keys, ids_values, ids_dict) = self._handle_ids(ids)
sort_by = self._handle_sort_by(sort_by,
ts_time_column,
ts_itemid_column,
ts_value_column)
"""get the string for SQL"""
execution_string = self.__get_ts_string(ids=ids_keys,
time_start=time_start,
time_end=time_end,
ts_database=ts_database,
ts_table=ts_table,
ts_time_column=ts_time_column,
ts_itemid_column=ts_itemid_column,
ts_value_column=ts_value_column,
ts_quality_id_column=ts_quality_id_column,
ts_quality_id=ts_quality_id,
not_null=not_null,
sort_by=sort_by,
sort_order=sort_order,
additional_sql=additional_sql)
data = self.execute_fetchall(execution_string, ts_time_column)
return data
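# Usage sketch (illustrative only; `db`, the ids and the time window are made up):
#   db = ...  # an instance of this helper class with an open connection
#   data = db.get_timeseries(ids=[4201, 4202],
#                            time_start="2015-12-20 00:00:00",
#                            time_end="2015-12-21 00:00:00")
#   # `data` holds the fetched rows, indexed by the timestamp column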
def get_timeseries_df(self, ids=None,
time_start=None,
time_end=None,
ts_database=None,
ts_table=None,
ts_time_column=None,
ts_itemid_column=None,
ts_value_column=None,
ts_quality_id_column=None,
ts_quality_id=None,
not_null=True,
sort_by=None,
sort_order="ASC",
additional_sql=None,
use_query=False,
get_last_value_before=True,
replace_first_index=True):
"""
:param ids: dict, list, string, int with different ids
:param time_start: start time of sql query
:param time_end: end time of sql query
:param ts_database: database to use
:param ts_table: table to use
:param ts_time_column: time column of timeseries table
:param ts_itemid_column: itemid column of timeseries table
:param ts_value_column: value column of timeseries table
:param ts_quality_id_column: qualityid column of timeseries table
:param ts_quality_id: chosen qualityid
:param not_null: if chosen column != null
:param sort_by: column which will be sorted
:param sort_order: sort order ("ASC" or "DESC")
:param additional_sql: additional sql parameters
:param use_query: if query function of
:param get_last_value_before: if get_last_value_before function will be used
:param replace_first_index: if first index will be set to time_start with elements of get_last_value_before
:return: data
:rtype: pandas.DataFrame
"""
"""get standard parameters"""
(time_start, time_end, ts_database, ts_table,
ts_time_column, ts_value_column, ts_itemid_column,
_, ts_quality_id_column, ts_quality_id,
time_format) = self._get_ts_standard(
time_start=time_start,
time_end=time_end,
ts_database=ts_database,
ts_table=ts_table,
ts_time_column=ts_time_column,
ts_value_column=ts_value_column,
ts_itemid_column=ts_itemid_column,
ts_chosen_key="",
ts_quality_id_column=ts_quality_id_column,
ts_quality_id=ts_quality_id,
time_format=None)
(ids_keys, ids_values, ids_dict) = self._handle_ids(ids)
sort_by = self._handle_sort_by(sort_by,
ts_time_column,
ts_itemid_column,
ts_value_column)
if use_query:
raw_data = self.query(ids=ids_keys,
time_start=time_start,
time_end=time_end)
else:
raw_data = self.get_timeseries(ids=ids_keys,
time_start=time_start,
time_end=time_end,
sort_by="ts_time_column",
sort_order=sort_order)
data = self._ts_to_df_matrix(raw_data, ids=ids_dict)
if sort_order == "ASC":
data.sort_index(ascending=True, inplace=True)
data.fillna(method="ffill", inplace=True)
elif sort_order == "DESC":
data.sort_index(ascending=False, inplace=True)
data.fillna(method="bfill", inplace=True)
if get_last_value_before:
if use_query:
time_start_dt = dt.strptime(time_start, self.time_format)
year = time_start_dt.year
month = time_start_dt.month
t_table = "measurement{}{:02d}".format(year, month)
data2 = self.get_last_value_before(ids=ids_keys,
time_start=time_start,
ts_table=t_table)
if sort_by:
if sort_order == None:
sort_order = "ASC"
if sort_order == "ASC":
ascending = True
elif sort_order == "DESC":
ascending = False
data2.sort_index(ascending=ascending, inplace=True)
else:
data2 = self.get_last_value_before(ids=ids_keys,
time_start=time_start)
if sort_by:
if sort_order == None:
sort_order = "ASC"
if sort_order == "ASC":
ascending = True
elif sort_order == "DESC":
ascending = False
data2.sort_index(ascending=ascending, inplace=True)
if replace_first_index:
data2 = self.__df_set_ts_index(data2, time_start)
data2 = self._ts_to_df_matrix(data2, ids=ids)
data2.index = pd.to_datetime(data2.index)
data2.columns = ids_values
if sort_order == "ASC":
data = pd.concat([data2, data])
data.fillna(method="ffill", inplace=True)
elif sort_order == "DESC":
data = pd.concat([data, data2])
data.fillna(method="bfill", inplace=True)
# data.sort_index()
return data
def query(self, ids, time_start, time_end=None):
"""
:param ids: dict, list, string, int with different ids, which identify the timeseries
:param time_start: Start time of timeseries
:param time_end: End time of timeseries
:keyword engine: "pandas" or "odbc"
:return data: pandas.DataFrame with queried data
:rtype: pandas.DataFrame
"""
"""
function: query
Description: query for E.ON ERC main building with various parameters
Attention: the function will take standard extra parameters,
if not set, the function doesn't work
########
Inputs
########
Variable: ids
Description: single or list of ids, which identify the timeseries
Type: int, list
########
Variable: time_start
Description: Start time of timeseries
Type: str
Example: time_start="2015-12-20 00:00:00"
########
Variable: time_end
Description: End time of timeseries
Type: str
Example: time_end="2015-12-21 00:00:00"
########
Outputs
########
Variable: data
Description: fetched timeseries data of database query
<reponame>OverLordGoldDragon/dev_tg<gh_stars>1-10
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2022- <NAME>
#
# Distributed under the terms of the MIT License
# (see wavespin/__init__.py for details)
# -----------------------------------------------------------------------------
"""Convenience utilities."""
import numpy as np
import scipy.signal
import warnings
from scipy.fft import fft, ifft
from itertools import zip_longest, chain
from copy import deepcopy
def drop_batch_dim_jtfs(Scx, sample_idx=0):
"""Index into dim0 with `sample_idx` for every JTFS coefficient, and
drop that dimension.
Doesn't modify input:
- dict/list: new list/dict (with copied meta if applicable)
- array: new object but shared storage with original array (so original
variable reference points to unindexed array).
"""
fn = lambda x: x[sample_idx]
return _iterate_apply(Scx, fn)
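# e.g. `Scx_first = drop_batch_dim_jtfs(Scx, sample_idx=0)` keeps Scx's layout
# (dict / list / tuple / array) but with the batch dimension indexed away.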
def jtfs_to_numpy(Scx):
"""Convert PyTorch/TensorFlow tensors to numpy arrays, with meta copied,
and without affecting original data structures.
"""
B = ExtendedUnifiedBackend(Scx)
return _iterate_apply(Scx, B.numpy)
def _iterate_apply(Scx, fn):
def get_meta(s):
return {k: v for k, v in s.items() if not hasattr(v, 'ndim')}
if isinstance(Scx, dict):
out = {} # don't modify source dict
for pair in Scx:
if isinstance(Scx[pair], list):
out[pair] = []
for i, s in enumerate(Scx[pair]):
out[pair].append(get_meta(s))
out[pair][i]['coef'] = fn(s['coef'])
else:
out[pair] = fn(Scx[pair])
elif isinstance(Scx, list):
out = [] # don't modify source list
for s in Scx:
o = get_meta(s)
o['coef'] = fn(s['coef'])
out.append(o)
elif isinstance(Scx, tuple): # out_type=='array' && out_3D==True
out = (fn(Scx[0]), fn(Scx[1]))
elif hasattr(Scx, 'ndim'):
out = fn(Scx)
else:
raise ValueError(("unrecognized input type: {}; must be as returned by "
"`jtfs(x)`.").format(type(Scx)))
return out
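# `_iterate_apply` mirrors the input container (dict of lists, list, tuple, or
# array), applying `fn` only to the coefficient arrays while copying meta entries.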
def normalize(X, mean_axis=(1, 2), std_axis=(1, 2), C=None, mu=1, C_mult=None):
"""Log-normalize + (optionally) standardize coefficients for learning
algorithm suitability.
Is a modification of Eq. 10 of https://arxiv.org/pdf/2007.10926.pdf
For exact match (minus temporal global averaging), set
`mean_axis=std_axis=(0, 2)`.
Parameters
----------
X : tensor
Nonnegative tensor with dimensions `(samples, features, spatial)`.
If there's more than one `features` or `spatial` dimensions, flatten
before passing.
(Obtain tensor via e.g. `pack_coeffs_jtfs(Scx)`, or `out_type='array'`.)
mean_axis : tuple[int] / int / None
If not None, will zero-mean along the specified axes (applied after the log step).
std_axis : tuple[int] / int / None
If not None, will scale to unit variance along the specified axes (applied
after the log step and mean subtraction).
C : float / None
`log(1 + X * C / median)`.
Greater will bring more disparate values closer. Too great will equalize
too much, too low will have minimal effect.
Defaults to `5 / sparse_mean(abs(X / mu))`, which should yield moderate
contraction for a variety of signals. This was computed on a mixture
of random processes, with outliers, and may not generalize to all signals.
- `sparse_mean` takes mean over non-negligible points, aiding
consistency between representations. A scalogram with an extra
octave, for example, may capture nothing in the new octave,
while a simple mean would lower the output, attenuating existing
values.
mu : float / None
In case precomputed; See "Online computation".
`mu=None` will compute `mu` for per-channel normalization, while
`mu=1` essentially disables `mu` and preserves channels' relative scaling;
see "Relative scaling".
C_mult : float / None
Multiplies `C`. Useful if the default `C` compute scheme is appropriate
but needs adjusting. Defaults to `5` if `C` is None, else to `1`.
Returns
-------
Xnorm : tensor
Normalized `X`.
Relative scaling
----------------
Scaling `features` independently changes the relative norms between them.
- If a signal rarely has high frequencies and low are dominant, for example,
then post-normalization this nuance is lost and highs and lows are brought
to a common norm - which may be undesired.
- SNR is lowered, as low signal contents that are dominated by noise
or float inaccuracies are amplified.
- Convolutions over `features` dims are invalidated (as it's akin to
standardizing individual time steps in 1D convolution); e.g. if
normalizing on per-`n1` basis, then we can no longer do 2D convs
over the joint `(n1, time)` pairs.
- To keep convs valid, all spatial dims that are convolved over must be
standardized by the same factor - i.e. same `mean` and `std`. `rscaling`
also accounts for rescaling due to log.
Regardless, this "channel normalization" has been used with success in
various settings; above are but points worth noting.
To preserve relative scaling, set `mu=1`.
Online computation
------------------
Any computation with `axis` that includes `0` requires simultaneous access
to all samples. This poses a problem in two settings:
1. Insufficient RAM. The solution is to write an *equivalent* computation
that aggregates statistics one sample at a time. E.g. for `mu`:
Xsum = []
for x in dataset:
Xsum.append(B.sum(x, axis=-1, keepdims=True))
mu = B.median(B.vstack(Xsum), axis=0, keepdims=True)
2. Streaming / new samples. In this case we must reuse parameters computed
over e.g. entire train set.
Computations over all axes *except* `0` are done on per-sample basis, which
means not having to rely on other samples - but also an inability to do so
(i.e. to precompute and reuse params).
"""
# validate args & set defaults ###########################################
if X.ndim != 3:
raise ValueError("input must be 3D, `(samples, features, spatial)` - "
"got %s" % str(X.shape))
B = ExtendedUnifiedBackend(X)
if B.backend_name == 'tensorflow' and mu is None:
raise ValueError("mu=None with TensorFlow backend isn't supported, as "
"TF's `median` doesn't support axis args")
# check input values
if B.min(X) < 0:
warnings.warn("`X` must be non-negative; will take modulus.")
X = B.abs(X)
# convert axes to positive
axes = [mean_axis, std_axis]
for i, ax in enumerate(axes):
if ax is None:
continue
ax = ax if isinstance(ax, (list, tuple)) else [ax]
ax = list(ax)
for j, a in enumerate(ax):
if a < 0:
ax[j] = X.ndim + a
axes[i] = tuple(ax)
mean_axis, std_axis = axes
# check input dims
dim_ones = tuple(d for d in range(X.ndim) if X.shape[d] == 1)
if dim_ones != ():
def check_dims(g, name):
g = g if isinstance(g, (tuple, list)) else (g,)
if all(dim in dim_ones for dim in g):
raise ValueError("input dims cannot be `1` along same dims as "
"`{}` (gives NaNs); got X.shape == {}, "
"{} = {}".format(name, X.shape, name, mean_axis))
check_dims(mean_axis, 'mean_axis')
check_dims(std_axis, 'std_axis')
# check mu
if mu is None and 0 in dim_ones and 2 in dim_ones:
raise ValueError("input dims cannot be `1` along dims 0 and 2 "
"if `mu` is None (gives NaNs); "
"got X.shape == {}".format(X.shape))
# main transform #########################################################
if mu is None:
# spatial sum (integral)
Xsum = B.sum(X, axis=-1, keepdims=True)
# sample median
mu = B.median(Xsum, axis=0, keepdims=True)
def sparse_mean(x, div=100, iters=4):
"""Mean of non-negligible points"""
m = B.mean(x)
for _ in range(iters - 1):
m = B.mean(x[x > m / div])
return m
# rescale
Xnorm = X / mu
# contraction factor
if C_mult is None:
C_mult = 5 if C is None else 1
if C is None:
C = 1 / sparse_mean(B.abs(Xnorm), iters=4)
C *= C_mult
# log
Xnorm = B.log(1 + Xnorm * C)
# standardization ########################################################
if mean_axis is not None:
Xnorm -= B.mean(Xnorm, axis=mean_axis, keepdims=True)
if std_axis is not None:
Xnorm /= B.std(Xnorm, axis=std_axis, keepdims=True)
return Xnorm
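# Minimal usage sketch (NumPy input assumed; shapes are illustrative):
#   import numpy as np
#   X = np.abs(np.random.randn(10, 16, 128))          # (samples, features, spatial)
#   Xn = normalize(X, mean_axis=(1, 2), std_axis=(1, 2), mu=1)
#   # Xn is log-contracted, then zero-meaned / unit-varianced per sample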
def pack_coeffs_jtfs(Scx, meta, structure=1, sample_idx=None,
separate_lowpass=None, sampling_psi_fr=None, out_3D=None,
reverse_n1=False, debug=False, recursive=False):
"""Packs efficiently JTFS coefficients into one of valid 4D structures.
Parameters
----------
Scx : tensor/list/dict
JTFS output. Must have `out_type` 'dict:array' or 'dict:list',
and `average=True`.
meta : dict
JTFS meta.
structure : int / None
Structure to pack `Scx` into (see "Structures" below), integer 1 to 5.
Will pack into a structure even if not suitable for convolution (as
determined by JTFS parameters); see "Structures" if convs are relevant.
- If can pack into one structure, can pack into any other (1 to 5).
- 6 to 9 aren't implemented since they're what's already returned
as output.
- This method is only needed for 3D or 4D convolutions, for which
only structure=5 with `out_3D=True` and `aligned=True` is fully valid
(see below); 1D convolutions can be done on any JTFS with
`average=True`, and 2D on any `out_3D=True`.
sample_idx : int / None
Index of sample in batched input to pack. If None (default), will
pack all samples.
Returns 5D
"""Pre-commit tests."""
from textwrap import dedent
import pytest
from testfixtures import compare
from nitpick.constants import PRE_COMMIT_CONFIG_YAML, SETUP_CFG
from nitpick.plugins.pre_commit import PreCommitHook
from nitpick.violations import Fuss
from tests.helpers import NBSP, ProjectMock
def test_pre_commit_has_no_configuration(tmp_path):
"""No errors should be raised if pre-commit is not referenced in any style file.
Also the file should not be deleted unless explicitly asked.
"""
ProjectMock(tmp_path).style("").pre_commit("").api_check_then_fix()
def test_pre_commit_referenced_in_style(tmp_path):
"""Only check files if they have configured styles."""
ProjectMock(tmp_path).style(
"""
[".pre-commit-config.yaml"]
fail_fast = true
"""
).pre_commit("").api_check_then_fix(Fuss(False, PRE_COMMIT_CONFIG_YAML, 331, " doesn't have the 'repos' root key"))
def test_suggest_initial_contents(tmp_path):
"""Suggest initial contents for missing pre-commit config file."""
ProjectMock(tmp_path).named_style(
"isort",
'''
["setup.cfg".isort]
line_length = 120
skip = ".tox,build"
known_first_party = "tests"
# The configuration below is needed for compatibility with black.
# https://github.com/python/black#how-black-wraps-lines
# https://github.com/PyCQA/isort#multi-line-output-modes
multi_line_output = 3
include_trailing_comma = true
force_grid_wrap = 0
combine_as_imports = true
[[".pre-commit-config.yaml".repos]]
yaml = """
- repo: https://github.com/PyCQA/isort
rev: 5.8.0
hooks:
- id: isort
"""
''',
).named_style(
"black",
'''
["pyproject.toml".tool.black]
line-length = 120
[[".pre-commit-config.yaml".repos]]
yaml = """
- repo: https://github.com/psf/black
rev: 21.5b2
hooks:
- id: black
args: [--safe, --quiet]
- repo: https://github.com/asottile/blacken-docs
rev: v1.10.0
hooks:
- id: blacken-docs
additional_dependencies: [black==21.5b2]
"""
# TODO The toml library has issues loading arrays with multiline strings:
# https://github.com/uiri/toml/issues/123
# https://github.com/uiri/toml/issues/230
# If they are fixed one day, remove this 'yaml' key and use only a 'repos' list with a single element:
#[".pre-commit-config.yaml"]
#repos = ["""
#<YAML goes here>
#"""]
''',
).pyproject_toml(
"""
[tool.nitpick]
style = ["isort", "black"]
"""
).api_check_then_fix(
Fuss(
False,
PRE_COMMIT_CONFIG_YAML,
331,
" was not found. Create it with this content:",
"""
repos:
- repo: https://github.com/PyCQA/isort
rev: 5.8.0
hooks:
- id: isort
- repo: https://github.com/psf/black
rev: 21.5b2
hooks:
- id: black
args: [--safe, --quiet]
- repo: https://github.com/asottile/blacken-docs
rev: v1.10.0
hooks:
- id: blacken-docs
additional_dependencies: [black==21.5b2]
""",
),
partial_names=[PRE_COMMIT_CONFIG_YAML],
)
def test_no_yaml_key(tmp_path):
"""Test an invalid repo config."""
ProjectMock(tmp_path).style(
'''
[[".pre-commit-config.yaml".repos]]
missing_yaml_key = """
- repo: https://github.com/PyCQA/isort
rev: 5.8.0
hooks:
- id: isort
"""
'''
).api_check_then_fix(
Fuss(
False,
PRE_COMMIT_CONFIG_YAML,
331,
" was not found. Create it with this content:",
"""
repos: []
""",
)
)
def test_root_values_on_missing_file(tmp_path):
"""Test values on the root of the config file when it's missing."""
ProjectMock(tmp_path).style(
"""
[".pre-commit-config.yaml"]
bla_bla = "oh yeah"
fail_fast = true
whatever = "1"
"""
).api_check_then_fix(
Fuss(
False,
PRE_COMMIT_CONFIG_YAML,
331,
" was not found. Create it with this content:",
"""
bla_bla: oh yeah
fail_fast: true
whatever: '1'
""",
)
)
def test_root_values_on_existing_file(tmp_path):
"""Test values on the root of the config file when there is a file."""
ProjectMock(tmp_path).style(
"""
[".pre-commit-config.yaml"]
fail_fast = true
blabla = "what"
something = true
another_thing = "yep"
"""
).pre_commit(
"""
repos:
- hooks:
- id: whatever
something: false
another_thing: "nope"
"""
).api_check_then_fix(
Fuss(
False,
PRE_COMMIT_CONFIG_YAML,
338,
" has missing values:",
"""
blabla: what
fail_fast: true
""",
),
Fuss(
False,
PRE_COMMIT_CONFIG_YAML,
339,
" has different values. Use this:",
"""
another_thing: yep
something: true
""",
),
)
def test_missing_repos(tmp_path):
"""Test missing repos on file."""
ProjectMock(tmp_path).style(
"""
[".pre-commit-config.yaml"]
fail_fast = true
"""
).pre_commit(
"""
grepos:
- hooks:
- id: whatever
"""
).api_check_then_fix(
Fuss(False, PRE_COMMIT_CONFIG_YAML, 331, " doesn't have the 'repos' root key")
)
def test_missing_repo_key(tmp_path):
"""Test missing repo key on the style file."""
ProjectMock(tmp_path).style(
"""
[[".pre-commit-config.yaml".repos]]
grepo = "glocal"
"""
).pre_commit(
"""
repos:
- hooks:
- id: whatever
"""
).api_check_then_fix(
Fuss(False, PRE_COMMIT_CONFIG_YAML, 332, ": style file is missing 'repo' key in repo #0")
)
def test_repo_does_not_exist(tmp_path):
"""Test repo does not exist on the pre-commit file."""
ProjectMock(tmp_path).style(
"""
[[".pre-commit-config.yaml".repos]]
repo = "local"
"""
).pre_commit(
"""
repos:
- hooks:
- id: whatever
"""
).api_check_then_fix(
Fuss(False, PRE_COMMIT_CONFIG_YAML, 333, ": repo 'local' does not exist under 'repos'")
)
def test_missing_hooks_in_repo(tmp_path):
"""Test missing hooks in repo."""
ProjectMock(tmp_path).style(
"""
[[".pre-commit-config.yaml".repos]]
repo = "whatever"
"""
).pre_commit(
"""
repos:
- repo: whatever
"""
).api_check_then_fix(
Fuss(False, PRE_COMMIT_CONFIG_YAML, 334, ": missing 'hooks' in repo 'whatever'")
)
def test_style_missing_hooks_in_repo(tmp_path):
"""Test style file is missing hooks in repo."""
ProjectMock(tmp_path).style(
"""
[[".pre-commit-config.yaml".repos]]
repo = "another"
"""
).pre_commit(
"""
repos:
- repo: another
hooks:
- id: isort
"""
).api_check_then_fix(
Fuss(False, PRE_COMMIT_CONFIG_YAML, 335, ": style file is missing 'hooks' in repo 'another'")
)
def test_style_missing_id_in_hook(tmp_path):
"""Test style file is missing id in hook."""
ProjectMock(tmp_path).style(
f'''
[[".pre-commit-config.yaml".repos]]
repo = "another"
hooks = """
- name: isort
entry: isort -sp {SETUP_CFG}
"""
'''
).pre_commit(
"""
repos:
- repo: another
hooks:
- id: isort
"""
).api_check_then_fix(
Fuss(
False,
PRE_COMMIT_CONFIG_YAML,
336,
": style file is missing 'id' in hook:",
f"""
{NBSP*4}name: isort
{NBSP*4}entry: isort -sp {SETUP_CFG}
""",
)
)
def test_missing_hook_with_id(tmp_path):
"""Test missing hook with specific id."""
ProjectMock(tmp_path).style(
'''
[[".pre-commit-config.yaml".repos]]
repo = "other"
hooks = """
- id: black
name: black
entry: black
"""
'''
).pre_commit(
"""
repos:
- repo: other
hooks:
- id: isort
"""
).api_check_then_fix(
Fuss(
False,
PRE_COMMIT_CONFIG_YAML,
337,
": missing hook with id 'black':",
f"""
{NBSP * 2}- id: black
{NBSP * 2} name: black
{NBSP * 2} entry: black
""",
)
)
def test_get_all_hooks_from():
"""Test if the get_all_hooks_from() method will split the YAML block in hooks and copy the repo info for each."""
data = """
- repo: https://github.com/user/repo
rev: v0.4.5
hooks:
- id: first
additional_dependencies: [package==1.0.0]
- id: second
args: [1, 2, 3]
- id: third
- id: fourth
args: [some, here]
additional_dependencies: [another>=2.0.3]
"""
rv = PreCommitHook.get_all_hooks_from(dedent(data))
def assert_hook_yaml(key, yaml_string):
expected = rv["https://github.com/user/repo_" + key].yaml.reformatted
actual = yaml_string
compare(dedent(actual).strip(), dedent(expected).strip())
assert_hook_yaml(
"first",
"""
- repo: https://github.com/user/repo
rev: v0.4.5
hooks:
- id: first
additional_dependencies: [package==1.0.0]
""",
)
assert_hook_yaml(
"second",
"""
- repo: https://github.com/user/repo
rev: v0.4.5
hooks:
- id: second
args: [1, 2, 3]
""",
)
assert_hook_yaml(
"third",
"""
- repo: https://github.com/user/repo
rev: v0.4.5
hooks:
- id: third
""",
)
assert_hook_yaml(
"fourth",
"""
- repo: https://github.com/user/repo
rev: v0.4.5
hooks:
- id: fourth
args: [some, here]
additional_dependencies: [another>=2.0.3]
""",
)
def test_missing_different_values(tmp_path):
"""Test missing and different values on the hooks."""
ProjectMock(tmp_path).named_style(
"root",
'''
[[".pre-commit-config.yaml".repos]]
yaml = """
- repo: https://github.com/user/repo
rev: 1.2.3
hooks:
- id: my-hook
args: [--expected, arguments]
"""
''',
).named_style(
"mypy",
'''
# https://mypy.readthedocs.io/en/latest/config_file.html
["setup.cfg".mypy]
ignore_missing_imports = true
# Do not follow imports (except for ones found in typeshed)
follow_imports = "skip"
# Treat Optional per PEP 484
strict_optional = true
# Ensure all execution paths are returning
warn_no_return = true
# Lint-style cleanliness for typing
warn_redundant_casts = true
warn_unused_ignores = true
[[".pre-commit-config.yaml".repos]]
yaml = """
- repo: https://github.com/pre-commit/mirrors-mypy
rev: v0.812
hooks:
- id: mypy
"""
''',
).named_style(
"pre-commit/python",
'''
[[".pre-commit-config.yaml".repos]]
yaml = """
- repo: https://github.com/pre-commit/pygrep-hooks
rev: v1.8.0
hooks:
- id: python-check-blanket-noqa
- id: python-check-mock-methods
- id: python-no-eval
- id: python-no-log-warn
- id: rst-backticks
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.0.1
hooks:
- id: debug-statements
- repo: https://github.com/asottile/pyupgrade
hooks:
- id: pyupgrade
"""
''',
).named_style(
"pre-commit/bash",
'''
[[".pre-commit-config.yaml".repos]]
yaml = """
- repo: https://github.com/openstack/bashate
rev: 2.0.0
hooks:
- id: bashate
"""
''',
).pyproject_toml(
"""
[tool.nitpick]
style = ["root", "mypy", "pre-commit/python", "pre-commit/bash"]
"""
).pre_commit(
"""
repos:
- repo: https://github.com/pre-commit/pygrep-hooks
rev: v1.1.0
hooks:
- id: python-check-blanket-noqa
- id: missing-hook-in-this-position
- id: python-no-eval
- id: python-no-log-warn
- id: rst-backticks
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.0.1
hooks:
- id: debug-statements
- repo: https://github.com/asottile/pyupgrade
rev: v2.16.0
hooks:
- id: pyupgrade
- repo: https://github.com/openstack/bashate
rev: 0.5.0
hooks:
- id: extra-hook-before-should-be-ignored
- id: bashate
args: [extra, arguments, should, --not, --throw, errors]
- id: extra-hook-after-should-be-ignored
- repo: https://github.com/user/repo
rev: 1.2.3
hooks:
- id: my-hook
args: [--different, args, --should, throw, errors]
"""
).api_check_then_fix(
Fuss(
False,
PRE_COMMIT_CONFIG_YAML,
332,
": hook 'mypy' not found. Use this:",
f"""
{NBSP * 2}- repo: https://github.com/pre-commit/mirrors-mypy
rev: v0.812
hooks:
- id: mypy
""",
),
Fuss(
False,
PRE_COMMIT_CONFIG_YAML,
332,
": hook 'python-check-mock-methods' not found. Use this:",
f"""
{NBSP * 2}- repo: https://github.com/pre-commit/pygrep-hooks
rev: v1.8.0
hooks:
- id: python-check-mock-methods
""",
),
Fuss(
False,
PRE_COMMIT_CONFIG_YAML,
339,
": hook 'bashate' (rev: 0.5.0) has different values. Use this:",
"rev: 2.0.0",
),
Fuss(
False,
PRE_COMMIT_CONFIG_YAML,
339,
": hook 'python-check-blanket-noqa' (rev: v1.1.0) has different values. Use this:",
"rev: v1.8.0",
),
Fuss(
False,
PRE_COMMIT_CONFIG_YAML,
339,
": hook 'python-no-eval' (rev: v1.1.0) has different values. Use this:",
"rev: v1.8.0",
),
Fuss(
False,
PRE_COMMIT_CONFIG_YAML,
339,
": hook 'python-no-log-warn' (rev: v1.1.0) has different values. Use this:",
"rev: v1.8.0",
),
Fuss(
False,
PRE_COMMIT_CONFIG_YAML,
339,
": hook 'my-hook' (rev: | |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from argparse import Namespace
import datetime, openpyxl as xl, os
import code
import operator, collections
import re
import unicodecsv as csv
import math
# Django imports
from django.core.management.base import BaseCommand, CommandError
from django.db import models , connection
from django.utils import timezone
# Local imports
import backend.models as back
import contacts.models as cont
import utils
import report_utils
from utils.xl import xl_add_header_row , xl_style_current_row , make_column , bold_font
class Command(BaseCommand):
help = 'Parse and import SMS data bank'
def add_arguments(self,parser):
# code.interact(local=locals())
subparsers = parser.add_subparsers(help='make reports')
# The cmd argument is required for django.core.management.base.CommandParser
print_parser = subparsers.add_parser('print',cmd=parser.cmd,help='report send time statistics')
print_parser.add_argument('-t','--times',action='store_true',default=False,help='print send times')
print_parser.add_argument('-f','--facilities',action='store_true',default=False,help='print registered totals per facility')
print_parser.add_argument('-c','--validation-codes',action='store_true',default=False,help='print validation stats')
print_parser.add_argument('-m','--messages',action='store_true',default=False,help='print message statistics')
print_parser.add_argument('-a','--all',action='store_true',default=False,help='all report options')
print_parser.add_argument('-o','--hours',action='store_true',default=False,help='print hist of message hours')
print_parser.add_argument('-i','--hiv',action='store_true',default=False,help='print hiv messaging status')
print_parser.add_argument('-l','--language',action='store_true',default=False,help='print language histogram')
print_parser.add_argument('-s','--status',action='store_true',default=False,help='print status histogram')
print_parser.add_argument('-e','--enrollment',action='store_true',default=False,help='print enrollment by site')
print_parser.add_argument('-d','--delivery',action='store_true',default=False,help='print delivery statistics')
print_parser.add_argument('-x', '--success-times', action='store_true', default=False, help='print success times report')
print_parser.add_argument('-u', '--message-status', default=None, const='all',
choices=('day','week','cur_week','month','year','all'),nargs='?', help='print message status')
print_parser.add_argument('--delivery-source',action='store_true',default=False,help='print delivery source statistics')
print_parser.add_argument('--topics',action='store_true',default=False,help='incoming message topics')
print_parser.add_argument('--msg-counts',action='store_true',default=False,help='print counts by auto type')
print_parser.add_argument('--sim-count', action='store_true', default=False, help='print information on sim count')
print_parser.add_argument('--weeks',default=5,type=int,help='message history weeks (default 5)')
print_parser.set_defaults(action='print_stats')
xlsx_parser = subparsers.add_parser('xlsx',cmd=parser.cmd,help='create xlsx reports')
xlsx_parser.add_argument('-t','--visit',action='store_true',default=False,help='create visit report')
xlsx_parser.add_argument('-d','--detail',action='store_true',default=False,help='create detail report')
xlsx_parser.add_argument('-a','--all',action='store_true',default=False,help='create all reports')
xlsx_parser.add_argument('-i','--interaction',action='store_true',default=False,help='create participant interaction report')
xlsx_parser.add_argument('-m','--messages',action='store_true',default=False,help='create system message dump')
xlsx_parser.add_argument('-n','--anonymous',action='store_true',default=False,help='create anonymous message dump')
        xlsx_parser.add_argument('-w','--weekly',action='store_true',default=False,help='create weekly stats report')
xlsx_parser.add_argument('-c','--conversations',action='store_true',default=False,help='create conversations report')
xlsx_parser.add_argument('-s','--miss-streak',action='store_true',default=False,help='create miss streak report')
xlsx_parser.add_argument('--dir',default='ignore',help='directory to save report in')
xlsx_parser.add_argument('args',nargs='*',help='extra arguments in key:value pairs')
xlsx_parser.set_defaults(action='make_xlsx')
csv_parser = subparsers.add_parser('csv',cmd=parser.cmd,help='create csv reports')
csv_parser.add_argument('--dir',default='ignore',help='directory to save csv in')
csv_parser.add_argument('name',help='csv report type',
choices=(
'hiv_messaging','enrollment','messages','edd','delivery', 'participant_week',
'sae','visits','msg_dump','hiv_statuschange','participant_dump','connection_info',
'sms_status','languages',
)
)
csv_parser.set_defaults(action='make_csv_name')
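        # Example invocations (an illustrative sketch; the management command is assumed to be
        # registered as "reports" -- substitute whatever name this module is installed under):
        #   python manage.py reports print --all
        #   python manage.py reports xlsx --visit --dir ignore
        #   python manage.py reports csv enrollment --dir ignore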
def handle(self,*args,**options):
start_time = datetime.datetime.now()
self.stdout.write( 'Reports Action: {}'.format(options['action']) )
self.printed = False
self.options = options
self.options['args'] = args
getattr(self,options['action'])()
time_delta = (datetime.datetime.now() - start_time).total_seconds() / 60
print "Quries: {} Min: {}".format( len(connection.queries), time_delta )
########################################
# Commands
########################################
def print_stats(self):
if self.options['facilities'] or self.options['all']:
self.participants_by_facility()
if self.options['times'] or self.options['all']:
self.send_times()
if self.options['status'] or self.options['all']:
self.status_breakdown()
if self.options['validation_codes'] or self.options['all']:
self.validation_stats()
if self.options['messages'] or self.options['all']:
self.message_stats()
if self.options['hiv'] or self.options['all']:
self.hiv_messaging()
if self.options['hours']:
self.message_hours()
if self.options['language']:
self.print_languages()
if self.options['enrollment']:
self.print_enrollment()
if self.options['delivery']:
self.print_delivery_stats()
if self.options['delivery_source'] and not self.options['delivery']:
self.print_delivery_source()
if self.options['topics']:
self.print_report('msg_topics')
if self.options['msg_counts']:
self.print_report('msg_counts')
if self.options['message_status'] is not None:
self.print_message_status()
if self.options['success_times']:
self.print_success_times()
if self.options['sim_count']:
self.print_sim_counts()
def print_report(self,report):
        report_module = getattr(report_utils,report)
        report_module.print_report(self)
# SEC::XLSX Helper Functions
def make_xlsx(self):
kwargs = {'mode':'meta'}
if self.options['args']:
args = self.options['args']
kwargs.update({arg.split(':')[0]:arg.split(':')[1] for arg in args})
workbook_columns = {}
if self.options['visit'] or self.options['all']:
workbook_columns['visit'] = visit_columns
if self.options['detail'] or self.options['all']:
workbook_columns['detail'] = detail_columns
if self.options['interaction']:
workbook_columns['interaction'] = interaction_columns
interaction_columns.queryset = make_interaction_columns()
if self.options['messages']:
# workbook_columns['messages'] = system_message_columns
# system_message_columns.queryset = make_system_message_columns()
make_message_wb(**kwargs)
if self.options['weekly']:
make_weekly_wb()
if self.options['anonymous']:
make_anonymous_wb()
if self.options['conversations']:
make_conversations_wb()
if self.options['miss_streak']:
make_miss_streak_count_wb()
for name , columns in workbook_columns.items():
wb = xl.workbook.Workbook()
today = datetime.date.today()
file_name = today.strftime('mWaChX_{}_%Y-%m-%d.xlsx').format(name)
xlsx_path_out = os.path.join(self.options['dir'],file_name)
self.stdout.write( "Making xlsx file {}".format(xlsx_path_out) )
if hasattr(columns,'facility_sheet'):
make_facility_worksheet(columns,wb.active,'ahero')
make_facility_worksheet(columns,wb.create_sheet(),'bondo')
make_facility_worksheet(columns,wb.create_sheet(),'mathare')
make_facility_worksheet(columns,wb.create_sheet(),'siaya')
make_facility_worksheet(columns,wb.create_sheet(),'rachuonyo')
make_facility_worksheet(columns,wb.create_sheet(),'riruta')
else:
make_worksheet(columns,wb.active,columns.queryset)
wb.save(xlsx_path_out)
# SEC::Start CSV Functions
def make_csv_name(self):
file_path = getattr(self,'make_{}_csv'.format(self.options['name']))()
print "Done:" , file_path
########################################
# Start Print Functions
########################################
def send_times(self):
self.print_header("Participant Send Times")
c_all = cont.Contact.objects_no_link.all().order_by('send_day','send_time')
time_counts = c_all.filter(study_group='two-way').values('send_day','send_time') \
.annotate(count=models.Count('send_day'))
times, day , counts = {} ,0 , [0,0,0]
day_lookup = ['Mon','Tue','Wed','Thu','Fri','Sat','Sun']
time_map = {8:0,13:1,20:2}
for c in time_counts:
if c['send_day'] == day:
counts[time_map[c['send_time']]] = c['count']
else:
times[day] = counts
day = c['send_day']
counts = [0,0,0]
counts[time_map[c['send_time']]] = c['count']
times[day] = counts
totals = [0,0,0]
for i in range(7):
t = times.get(i,[0,0,0])
totals = [t1+t2 for t1,t2 in zip(totals,t)]
self.stdout.write( "{} {} {}".format(day_lookup[i],t,sum(t)) )
self.stdout.write( "Tot {} {}".format(totals,sum(totals)) )
def participants_by_facility(self):
self.print_header("Participant By Facility")
group_counts = cont.Contact.objects.values('facility','study_group') \
.annotate(count=models.Count('study_id',distinct=True))
        # Pivot Group Counts
counts = collections.defaultdict(GroupRowCount)
for g in group_counts:
counts[g['facility']][g['study_group']] = g['count']
# Print Group Counts
self.stdout.write( "{:^12}{:^12}{:^12}{:^12}{:^12}".format("","Control","One-Way","Two-Way","Total") )
total_row = GroupRowCount()
for facility, row in counts.items():
self.stdout.write( "{:^12}{:^12}{:^12}{:^12}{:^12}".format(
facility.capitalize(), row['control'], row['one-way'], row['two-way'], row.total())
)
total_row += row
self.stdout.write( "{:^12}{:^12}{:^12}{:^12}{:^12}".format(
"Total", total_row['control'], total_row['one-way'], total_row['two-way'], total_row.total() )
)
def validation_stats(self):
self.print_header('Validation Stats')
c_all = cont.Contact.objects_no_link.all()
stats = collections.OrderedDict( ( ('< 1h',0) , ('< 1d',0) ,('> 1d',0) , ('None',0) ) )
for c in c_all:
seconds = c.validation_delta()
if seconds is None:
stats['None'] += 1
elif seconds <= 3600:
stats['< 1h'] += 1
elif seconds <= 86400:
stats['< 1d'] += 1
elif seconds > 86400:
stats['> 1d'] += 1
else:
stats['None'] += 1
counts = dict( c_all.values_list('is_validated').annotate(count=models.Count('is_validated')) )
total = sum(counts.values())
self.stdout.write( "Total: {} Valididated: {} ({:0.3f}) Not-Validated: {} ({:0.3f})\n".format(
total , counts[True] , counts[True] / float(total) , counts[False] , counts[False] / float(total)
) )
for key , count in stats.items():
self.stdout.write( "\t{}\t{} ({:0.3f})".format(key,count, count/float(total) ) )
def message_stats(self):
self.print_header('Message Statistics (system-participant-nurse)')
# Get messages grouped by facility, system and outgoing
m_all = cont.Message.objects.all()
group_counts = m_all.order_by().values(
'contact__facility','contact__study_group','is_system','is_outgoing'
).annotate(count=models.Count('contact__facility'))
        # Pivot Group Counts based on facility
counts = collections.defaultdict(MessageRow)
for g in group_counts:
facility = g['contact__facility']
if facility is None:
continue
study_group = g['contact__study_group']
sender = 'system'
if not g['is_system']:
sender = 'nurse' if g['is_outgoing'] else 'participant'
counts[facility][study_group][sender] = g['count']
# Print Message Totals Table
self.stdout.write( "{:^10}{:^18}{:^18}{:^18}{:^18}".format("","Control","One-Way","Two-Way","Total") )
total_row = MessageRow()
for facility , row in counts.items():
total_row += row
row['two-way'].replies = m_all.filter(parent__isnull=False,contact__facility=facility).count()
self.stdout.write( '{:<10}{} {} ({})'.format(facility.capitalize(),row,row.total(),row.total().total() ) )
none_count = m_all.filter(contact__isnull=True).count()
total_count = total_row.total()
total_row['two-way'].replies = m_all.filter(parent__isnull=False).count()
self.stdout.write( '{:<10}{} {} ({})'.format('Total',total_row,total_count,sum(total_count) ) )
self.stdout.write( '{:<10}{:04d} ({})'.format('None',none_count,none_count+sum(total_count)) )
# Print last 5 weeks of messaging
self.stdout.write('')
self.print_messages(self.options['weeks'])
def print_messages(self,weeks=None):
# Get all two-way messages
m_all = cont.Message.objects.filter(contact__study_group='two-way')
# Get start date
study_start_date = timezone.make_aware(datetime.datetime(2015,11,23))
now = timezone.now()
weeks_start_date = timezone.make_aware(
datetime.datetime(now.year,now.month,now.day) - datetime.timedelta(days=now.weekday())
) # Last Sunday
start_date = study_start_date
if weeks is not None and weeks_start_date > study_start_date:
start_date = weeks_start_date - datetime.timedelta(days=weeks*7)
total_row = MessageRowItem()
while start_date < now:
end_date = start_date + datetime.timedelta(days=7)
m_range = m_all.filter(created__range=(start_date,end_date))
row = MessageRowItem()
row['system'] = m_range.filter(is_system=True).count()
row['participant'] = m_range.filter(is_system=False,is_outgoing=False).count()
row['nurse'] = m_range.filter(is_system=False,is_outgoing=True).count()
row.replies = m_range.filter(parent__isnull=False).count()
total_row += row
self.stdout.write( '{} {} ({})'.format(start_date.strftime('%Y-%m-%d'),row,sum(row) ) )
start_date = end_date
self.stdout.write( "Total {} ({})".format(total_row,sum(total_row)) )
def message_hours(self):
self.print_header('Histogram of message send hour (two-way only)')
messages , hour_counts = {} , {}
messages['p'] = cont.Message.objects.filter(is_outgoing=False,contact__study_group='two-way')
messages['s'] = cont.Message.objects.filter(is_outgoing=True,is_system=True,contact__study_group='two-way')
messages['n'] = cont.Message.objects.filter(is_outgoing=True,is_system=False,contact__study_group='two-way')
for k in messages.keys():
hours = [0 for _ in range(24)]
for m in messages[k]:
hours[m.created.hour] += 1
hour_counts[k] = hours
print " C S N"
for h in range(24):
print "{0:<5}{1:<5}{2:<5}{3:<5}".format((h+3)%24,hour_counts['p'][h],hour_counts['s'][h],hour_counts['n'][h])
print " {0:<5}{1:<5}{2:<5}".format(*map(sum,[hour_counts[k] for k in ('p','s','n')]))
def hiv_messaging(self):
self.print_header('HIV Messaging Preference (none-initiated-system)')
hiv_messaging_groups = cont.Contact.objects.order_by().values('facility','study_group','hiv_messaging') \
.annotate(count=models.Count('study_id',distinct=True))
        # Pivot Group Counts
group_counts = collections.defaultdict(HivRowCount)
for g in hiv_messaging_groups:
group_counts[g['facility']][g['study_group']][g['hiv_messaging']] = g['count']
# Print Group Counts
self.stdout.write( "{:^12}{:^12}{:^12}{:^12}{:^12}".format("","Control","One-Way","Two-Way","Total") )
total_row = HivRowCount()
for facility, row in group_counts.items():
self.stdout.write( "{0:^12}{1[control]:^12}{1[one-way]:^12}{1[two-way]:^12}{2:^12}".format(
facility.capitalize(), row, row.total()
) )
total_row += row
self.stdout.write( "{0:^12}{1[control]:^12}{1[one-way]:^12}{1[two-way]:^12} {2:^12}".format(
"Total", total_row, total_row.total()
) )
def print_languages(self):
self.print_header('Language of Messages (participant,nurse)')
message_groups = cont.Message.objects.order_by().filter(contact__study_group='two-way',is_system=False)\
.prefetch_related('contact').values('languages','contact__language','is_outgoing')\
.exclude(languages='').annotate(count=models.Count('id',distinct=True))
        # Pivot Group Counts
language_counts = collections.defaultdict(LanguageMessageRow)
for g in message_groups:
language_str = ','.join( sorted( s[0] if s!= 'sheng' else 'h' for s in g['languages'].split(';') ) )
# language_counts[g['languages']][g['contact__language']][g['is_outgoing']] += g['count']
language_counts[language_str][g['contact__language']][g['is_outgoing']] += g['count']
# Print Group Counts
self.stdout.write( "{:^12}{:^12}{:^12}{:^12}{:^12}".format("","English","Swahili","Luo","Total") )
total_row = LanguageMessageRow()
for language, row in language_counts.items():
self.stdout.write( "{0:^12}{1[english]:^12}{1[swahili]:^12}{1[luo]:^12}{2:^12}".format(
language, row, row.total()
) )
total_row += row
self.stdout.write( "{0:^12}{1[english]:^12}{1[swahili]:^12}{1[luo]:^12}{2:^12}".format(
"Total", total_row, total_row.total()
) )
def status_breakdown(self):
self.print_header('Participant Status (control,one-way,two-way)')
status_groups = cont.Contact.objects.order_by().values('facility','status','study_group')\
.annotate(count=models.Count('study_id',distinct=True))
        # Pivot Group Counts
status_counts = collections.defaultdict(StatusRow)
for g in status_groups:
status_counts[g['facility']][g['status']][g['study_group']] = g['count']
# Print Group Counts
self.stdout.write( StatusRow.header() )
total_row = StatusRow()
for facility, row in status_counts.items():
self.stdout.write( row.row_str(facility) )
total_row += row
self.stdout.write( total_row.row_str("Total") )
def print_delivery_stats(self):
self.print_header('Participant Delivery Stats')
today = datetime.date.today()
c_all = cont.Contact.objects.all()
edd = c_all.filter(status='pregnant').order_by('due_date')
post = edd.filter(due_date__lt=today)
self.stdout.write( 'Found {:d} pregnant participants with {:d} post edd'.format(
edd.count(), post.count()
) )
future_edd = edd.order_by("-due_date")
self.stdout.write( 'Furthest from EDD')
for p in future_edd[:5]:
self.stdout.write( "\t{0.study_id} {0.due_date} {0.study_group} (weeks {1:.0f})".format(
p, p.delta_days() / 7
) )
self.stdout.write( '\n')
self.stdout.write( 'Furthest past EDD')
for p in edd[:5]:
self.stdout.write( "\t{0.study_id} {0.due_date} {0.study_group} (weeks {1:.0f})".format(
p, p.delta_days() / 7
) )
self.stdout.write( '\n')
# Calculate EDD to Delivery Date offset delta
dd = c_all.filter(delivery_date__isnull=False).order_by('delivery_date')
self.stdout.write( 'Found {:d} post-partum participants'.format(dd.count()) )
        self.stdout.write( 'Furthest from delivery
        we use a two-sample test. There are two variants: when the two samples come from different groups, it is
        known as an Independent (unpaired) Samples T-Test. On the other hand, when the two samples come from the
        same group, we call it a Dependent (paired) Samples T-Test. A t-test is applicable to two samples only.
Null & Alternate hypothesis:
- :math:`H_0`: Means between 2 samples are the same
- :math:`H_1`: Means between 2 samples are not the same
Assumptions in T-Test:
- Residuals (experimental error) are normally distributed (Shapiro Wilks Test)
- Homogeneity of variances (variances are equal between treatment groups) (Levene or Bartlett Test)
- Observations are sampled independently from each other
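        The first two assumptions can be sanity-checked before running the test, for example with scipy
        (an illustrative sketch; ``y1`` and ``y2`` stand for the two sample arrays):
        >>> from scipy import stats
        >>> stats.shapiro(y1)      # normality of each sample            # doctest: +SKIP
        >>> stats.levene(y1, y2)   # homogeneity of variances            # doctest: +SKIP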
Parameters
----------
data : object
Dataframe to acquire the population mean and standard deviation for one-sample t-tests
var : string
Column name from dataframe for t-tests
y1 : array-like
One-dimensional array-like object (list, numpy array, pandas DataFrame or pandas Series) containing
the observed sample values.
y2 : array-like, optional
One-dimensional array-like object (list, numpy array, pandas DataFrame or pandas Series) containing
the observed sample values. Not necessary to include when performing one-sample t-tests.
group : array-like or None
The corresponding group vector denoting group sample membership. Will return :code:`None` if not passed.
var_equal : bool, optional
If True, the two samples are assumed to have equal variances and Student's t-test is performed.
Defaults to False, which performs Welch's t-test for unequal sample variances.
paired : bool, optional
If True, performs a paired t-test.
Raises
------
ValueError
If :code:`paired` is True and a second sample, :code:`y2` is not passed.
ValueError
If :code:`paired` is True and the number of sample observations in :code:`y1` and :code:`y2` are not equal.
Notes
-----
        Welch's t-test is an adaptation of Student's t-test and is more reliable when the
        sample variances and sizes are unequal. The test still depends on the assumption of
the underlying population distributions being normally distributed.
Welch's t test is defined as:
.. math::
t = \frac{\bar{X_1} - \bar{X_2}}{\sqrt{\frac{s_{1}^{2}}{N_1} + \frac{s_{2}^{2}}{N_2}}}
where:
:math:`\bar{X}` is the sample mean, :math:`s^2` is the sample variance, :math:`n` is the sample size
If the :code:`var_equal` argument is True, Student's t-test is used, which assumes the two samples
have equal variance. The t statistic is computed as:
.. math::
            t = \frac{\bar{X}_1 - \bar{X}_2}{s_p \sqrt{\frac{1}{n_1} + \frac{1}{n_2}}}
where:
.. math::
            s_p = \sqrt{\frac{(n_1 - 1)s^2_{X_1} + (n_2 - 1)s^2_{X_2}}{n_1 + n_2 - 2}}
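        Both variants are available in scipy for cross-checking (a sketch; the sample arrays are placeholders):
        :code:`ttest_ind(y1, y2, equal_var=True)` computes Student's pooled test, while
        :code:`ttest_ind(y1, y2, equal_var=False)` computes Welch's test.
        >>> from scipy.stats import ttest_ind
        >>> ttest_ind(y1, y2, equal_var=False)  # Welch's test  # doctest: +SKIP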
Examples
--------
Similar to other inference methods, there are generally two ways of performing a t-test. The first is to pass
a group vector with the :code:`group` parameter and the corresponding observation vector as below.
The data used in this example is a subset of the professor salary dataset found in Fox and
Weisberg (2011).
>>> professor_discipline = ['B', 'B', 'B', 'B', 'B',
... 'A', 'A', 'A', 'A', 'A']
>>> professor_salary = [139750, 173200, 79750, 11500, 141500,
... 103450, 124750, 137000, 89565, 102580]
>>> ttest = t_test(professor_salary, group=professor_discipline)
>>> print(ttest)
{'Sample 1 Mean': 111469.0,
'Sample 2 Mean': 109140.0,
'p-value': 0.9342936060799869,
't-statistic': 0.08695024086399619,
'test description': "Two-Sample Welch's t-test"}
The other approach is to pass each group sample vector similar to the below.
>>> sal_a = [139750, 173200, 79750, 11500, 141500]
>>> sal_b = [103450, 124750, 137000, 89565, 102580]
>>> ttest2 = t_test(sal_a, sal_b)
        >>> print(ttest2)
{'Sample 1 Mean': 109140.0,
'Sample 2 Mean': 111469.0,
'p-value': 0.9342936060799869,
't-statistic': -0.08695024086399619,
'test description': "Two-Sample Welch's t-test"}
Returns
-------
sample_statistics : dict
Dictionary contains the statistical analysis on t-tests
"""
self.pop_mean = data[var].mean()
self.pop_std = data[var].std()
self.group = group
self.paired = paired
if self.paired and y2 is None:
self.log.error("Second sample is missing for paired T-Tests ...")
if var_equal:
self.method = "Student's T-Test"
self.var_equal = var_equal
else:
self.method = "Welch's T-Test"
self.var_equal = False
if self.paired == False and y2 is None:
test_description = "One Sample T-Test"
sample_1 = data.loc[(data[group]==y1) & (data[var].notnull())]
self.sample_1_mean = sample_1[var].mean()
self.sample_1_std = sample_1[var].std()
self.ttest, self.p_value = ttest_1samp(sample_1[var].values, popmean=self.pop_mean)
            if self.p_value > Config.ANALYSIS_CONFIG["TEST_ALPHA"]:
self.info = "Accept null hypothesis that the means are equal between sample and population ... \
Interpretation: The P-value obtained from 1-Sample T-Test analysis is not significant (P>0.05), \
and therefore, we conclude that there are no significant differences between samples."
else:
self.info = "Reject null hypothesis that the means are equal between sample and population ... \
Interpretation: The P-value obtained from 1-Sample T-Test analysis is significant (P<0.05), \
and therefore, we conclude that there are significant differences between samples."
self.sample_statistics = {
"Test Description" : "One-Sample T-Test",
"No. of Observations" : int(len(sample_1)),
"Population Mean" : self.pop_mean,
"Sample Mean" : self.sample_1_mean,
"P-Value" : self.p_value,
"T-Statistic" : self.ttest,
"Test Results" : self.info
}
elif self.paired == True and y2 is not None:
sample_1 = data.loc[(data[group]==y1) & (data[group].notnull())]
sample_1 = sample_1.loc[sample_1[var].notnull()]
self.sample_1_mean = sample_1[var].mean()
self.sample_1_std = sample_1[var].std()
sample_2 = data.loc[(data[group]==y2) & (data[group].notnull())]
sample_2 = sample_2.loc[sample_2[var].notnull()]
self.sample_2_mean = sample_2[var].mean()
self.sample_2_std = sample_2[var].std()
            # Assumption: down-sample both groups to a common size so the observations can be compared pairwise
            sample_size = min(len(sample_1), len(sample_2))
            sample_1, sample_2 = sample_1.sample(n=sample_size), sample_2.sample(n=sample_size)
            if len(sample_1) != len(sample_2):
                self.log.error("Paired samples must have the same number of observations ...")
            # NOTE: scipy's ttest_rel is the dependent (paired) test; ttest_ind treats the samples as independent
            self.ttest, self.p_value = ttest_ind(sample_1[var], sample_2[var])
            if self.p_value > Config.ANALYSIS_CONFIG["TEST_ALPHA"]:
self.info = "Accept null hypothesis that the means are equal between sample and population ... \
Interpretation: The P-value obtained from 2-Sample T-Test analysis is not significant (P>0.05), \
and therefore, we conclude that there are no significant differences between samples."
else:
self.info = "Reject null hypothesis that the means are equal between samples ... \
Interpretation: The P-value obtained from 2-Sample T-Test analysis is significant (P<0.05), \
and therefore, we conclude that there are significant differences between samples."
self.sample_statistics = {
"Test Description" : "One-Sample T-Test",
"No. of Observations" : int(len(sample_1)),
"Sample 1 Mean" : self.sample_1_mean,
"Sample 2 Mean" : self.sample_2_mean,
"P-Value" : self.p_value,
"T-Statistic" : self.ttest,
"Test Results" : self.info
}
return
def anova(self, data, *args, y_var, type="one", var_equal=True):
"""Analysis of variance on one independent variable
        One-Way ANOVA is used to compare the means of two or more independent (unrelated) groups using the F-distribution.
        The null hypothesis is that the group means are equal, so a significant result means that at least one group mean differs.
        How ANOVA works:
        - Check sample sizes: ideally an equal number of observations in each group
        - Calculate the Mean Square (MS) for each group (Sum of Squares of Group / DOF); DOF is the degrees of freedom of the samples
        - Calculate the Mean Square Error (MSE) (Sum of Squares of Error / DOF of residuals)
        - Calculate the F-Value (MS of Group / Mean Square Error (MSE))
Null & Alternate hypothesis:
- :math:`H_0`: Groups means are equal (no variation in means of groups)
        - :math:`H_1`: At least one group mean is different from the others
Assumptions in ANOVA:
- Residuals (experimental error) are normally distributed (Shapiro Wilks Test)
- Homogeneity of variances (variances are equal between treatment groups) (Levene or Bartlett Test)
- Observations are sampled independently from each other
Example
-------
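        A minimal sketch of intended usage (assuming a dataframe ``df`` with a numeric column ``score``
        and a categorical column ``treatment``; the column names and the instance name ``analysis`` are
        illustrative, not values from this project):
        >>> analysis.anova(df, "treatment", y_var="score", type="one")  # doctest: +SKIP
        >>> analysis.anova_table                                        # doctest: +SKIP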
Return
------
"""
        # The grouping column(s) are passed positionally via *args
        factors = list(args)
        sample_data = data[[y_var] + factors]
        if type == "one":
            anova_model = ols('{y} ~ C({x})'.format(y=y_var, x=factors[0]), data=sample_data).fit()
            self.anova_table = sm.stats.anova_lm(anova_model, typ=1)
            if self.anova_table["PR(>F)"][0] > Config.ANALYSIS_CONFIG["TEST_ALPHA"]:
self.info = "Accept null hypothesis that the means are equal between samples ... \
Interpretation: The P-value obtained from One-Way ANOVA is not significant (P>0.05), \
and therefore, we conclude that there are no significant differences between samples."
else:
self.info = "Reject null hypothesis that the means are equal between samples ... \
Interpretation: The P-value obtained from One-Way ANOVA is significant (P<0.05), \
and therefore, we conclude that there are significant differences between samples."
elif type == "two":
anova_model = ols('{y} ~ C({x})'.format(y=y_var, x=var), data=sample_data).fit()
self.anova_table = sm.stats.anova_lm(anova_model, typ=1)
if self.anova_table["PR(>F)"][0] > self.ANALYSIS_CONFIG["TEST_ALPHA"]:
self.info = "Accept null hypothesis that the means are equal between samples ... \
                Interpretation: The P-value obtained from Two-Way ANOVA is not significant (P>0.05), \
                and therefore, we conclude that there are no significant differences between samples."
<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 31 13:41:32 2019
@author: s146959
"""
# ========================================================================== #
# ========================================================================== #
from __future__ import absolute_import, with_statement, \
division, print_function, unicode_literals
# ========================================================================== #
# ========================================================================== #
import numpy as _np
import os as _os
import matplotlib.pyplot as _plt
#import scipy.signal.correlate as xcorr
from scipy import signal as _sig
from FFT.fft_analysis import fftanal, ccf
from pybaseutils.plt_utils import savefig
from FFT.fft_analysis import butter_lowpass
from FFT.notch_filter import iirnotch
_plt.close("all")
# =============================== #
datafolder = _os.path.abspath(_os.path.join('..','..','..','..', 'Workshop'))
#datafolder = _os.path.abspath(_os.path.join('W://','HJ','Data'))
#datafolder = _os.path.join('/homea','weir','bin')
print(datafolder)
cmPerGHz = 1
Fs = 1e6
minFreq = 10e3
#minFreq = 15e3
#minFreq = 7.5e3
#minFreq = 0.1e3
fLPF=False
f0 = False
sepshots = False # separate the shots into different figures
backgroundsubtract = True # use the method with _np.real(cc-ccbg)
#backgroundsubtract = False # automatically set ccbg = 0.0, no background subtraction
keepphase = True # preserve the phase, convert to phasor notation then subtract amplitude of background
#keepphase = False # don't worry about the phase (assume small complex comp. of bg), just subtract off the whole background spectrum
#keepphase only matters if we use oldstylesubtract = False
oldstylesubtract = True # reproduce results from ECE paper and Creely, etc. (background subtract coherence within frequency limits)
#oldstylesubtract = False # more "Physics based" (background subtract noise spectra as measured pre-plasma start-up)
Bvid=0.5e6
Bif=200e6
#windowoverlap=0.5
# ============================================================== #
# ============================================================== #
fils = []
freqs = []
freq_ref = 60.0 # [GHz]
intb = [10e3, 100e3] # good
#intb = [40e3, 110e3] # good
bg = [400e3, 500e6] # background for coherence subtraction - automatically selected with fallback
#freq_ref = 60.0 # [GHz]
#fils = ['CECE.69769','CECE.69770','CECE.69771','CECE.69772','CECE.69773','CECE.69777']
#freqs = [13.075, 13.075, 13.085, 13.095, 13.105, 13.08]
#freqs = [4.0*freq+8.0 for freq in freqs]
##tb=[0.15,0.25] # background data
#tbg = 0.25 # background
#04-02-18 fix 1
#fils = ['CECE.69757','CECE.69758','CECE.69759','CECE.69760','CECE.69761','CECE.69762']
#freqs = [13.075, 13.095, 13.115, 13.055, 13.085, 13.045]
#freqs = [4.0*freq+8.0 for freq in freqs]
#tb=[0.32,0.40] # background data
#tbg = 0.27 # background
#07-02-18 fix 1
#fils = ['CECE.69884','CECE.69885','CECE.69886','CECE.69887','CECE.69888','CECE.69889','CECE.69890']
#freqs = [13.075, 13.062, 13.085, 13.050, 13.037, 13.025, 13.012]
#freqs = [4.0*freq+8.0 for freq in freqs]
#tb=[0.32,0.40] # background data
#tbg = 0.27 # background
#tb = [0.285, 0.315]
##intb = [10e3, 200e3]
#intb = [10e3, 275e3] # good
##intb = [10e3, 400e3]
##bg = [200e3, 1e6] # background for coherence subtraction - automatically selected with fallback
#bg = [275e3, 1e6] # background for coherence subtraction - automatically selected with fallback
##bg = [400e3, 1e6] # background for coherence subtraction - automatically selected with fallback
#Bvid = intb[1]
#txtstr = 'jan17_fix4'
#fils = ['CECE.65642','CECE.65643','CECE.65644','CECE.65645','CECE.65646','CECE.65647']
#freqs = [68.0, 68.3, 68.2, 68.1, 68.15, 68.05]
#
#fils.extend(['CECE.65648','CECE.65649','CECE.65650','CECE.65651','CECE.65652'])
#freqs.extend([67.95, 68.25, 68.125, 67.90, 67.80])
#tbg = 0.17
##tb = [0.192, 0.370] # a little bit of saturation at beginning
#tb = [0.20, 0.370] # a little bit of saturation at beginning
##tb = [0.20, 0.250] #
##tb = [0.25, 0.30] # we need to check the log for this shot to figure out timing
##tb = [0.32, 0.37] # we need to check the log for this shot to figure out timing
##tb=[0.3,0.39]
#
##intb = [15e3, 400e3] # original
##bg = [400e3, 1e6] # background for coherence subtraction - automatically selected with fallback
#intb = [375e3, 500e3] # much better! with Gavin's method of background subtraction
#bg = [0.0e3, 375.0e3] # background for coherence subtraction - automatically selected with fallback
## January 17th, 2017
#txtstr = 'jan17_fix1'
#freq_ref = 68.0 # [GHz]
#fils = ['CECE.65624','CECE.65625']
#freqs = [68.3, 68.3]
#tbg = 0.17 # background
#tb = [0.20, 0.37] #time bounds
##intb = [10e3, 275e3] # good
##bg = [275e3, 1e6] # background for coherence subtraction - automatically selected with fallback
#intb = [375e3, 500e3] # good
#bg = [10e3, 375e6] # background for coherence subtraction - automatically selected with fallback
##Bvid = intb[1]
#txtstr = 'jan17_fix2'
freq_ref = 68.0 # [GHz]
fils = ['CECE.65626','CECE.65627','CECE.65628','CECE.65629']
freqs = [68.0, 68.2, 68.1, 68.1]
fils.extend(['CECE.65630','CECE.65631','CECE.65632','CECE.65633','CECE.65634'])
freqs.extend([68.15, 68.05, 67.95, 67.90, 68.125])
tb=[0.37,0.395] # time bounds
tbg = 0.17 # background
#txtstr = 'jan17_fix3'
#freq_ref = 68.0 # [GHz]
#fils = ['CECE.65638','CECE.65639','CECE.65640','CECE.65641']
#freqs = [68.3, 68.3, 68.3, 68.3]
#txtstr = 'Jan 27, ECH: 107 kW'
#fils = ['CECE.65947','CECE.65948','CECE.65949','CECE.65950']
#freqs = [68.3, 68.3, 68.3, 68.3]
#tbg = 0.180 # background
#tb = [0.21, 0.315]
#
#txtstr = 'Jan. 27, ECH: 174 kW'
#fils = ['CECE.65953','CECE.65954','CECE.65955','CECE.65956','CECE.65957','CECE.65958']
#freqs = [68.3, 68.3, 68.3, 68.3, 68.3, 68.3]
#tbg = 0.170 # background
#tb = [0.23, 0.30]
#
#txtstr = 'Jan. 27, ECH: 236 kW'
#fils = ['CECE.65961','CECE.65962','CECE.65963','CECE.65964','CECE.65965']
#freqs = [68.3, 68.3, 68.3, 68.3, 68.3]
#tbg = 0.170 # background
#tb = [0.25, 0.34]
#
#txtstr = 'Jan. 27 ECH: 301 kW'
#fils = ['CECE.65968','CECE.65969','CECE.65971','CECE.65973']
#freqs = [68.3, 68.3, 68.3, 68.3]
#tbg = 0.170 # background
#tb = [0.25, 0.34]
#txtstr = 'Date unknown' #very quiescent data
#fils = ['CECE.99103','CECE.99104','CECE.99105','CECE.99106','CECE.99107',
# 'CECE.99108','CECE.99109','CECE.99110','CECE.99111','CECE.99112',
# 'CECE.99113','CECE.99114']
#freqs = [68.3, 68.2, 68.2, 68.1, 68.0, 68.15, 68.05, 67.95, 67.90, 68.125, 68.25, 68.80]
#tbg = 0.20 # background
#tb = [0.20, 0.40]
## october, 2015 - fix 1
#fils = ['CECE.60342','CECE.60343','CECE.60344']
#freqs = [67.400, 67.400, 67.400]
#tbg = 0.175
#tb = [0.24, 0.315]
#
### october, 2015 - fix 2
##fils = ['CECE.60349','CECE.60350','CECE.60351']
##freqs = [67.400, 67.400, 67.400]
##tbg = 0.175
##tb = [0.22, 0.350]
#
#intb = [15e3, 400e3] # original
#bg = [400e3, 1e6] # background for coherence subtraction - automatically selected with fallback
##intb = [375e3, 500e3] # much better! with Gavin's method of background subtraction
##bg = [0.0e3, 375.0e3] # background for coherence subtraction - automatically selected with fallback
# ============================================================== #
# ============================================================== #
if f0:
# Make a band rejection / Notch filter for MHD or electronic noise
f0 = 80.48e3 # [Hz], frequency to reject
Q = 20.0 # Quality factor of digital filter
w0 = f0/(0.5*Fs) # Normalized frequency
# Design the notch
#b, a = _sig.iirnotch(w0, Q) # scipy 0.19.1
b, a = iirnotch(w0, Q) # other
# Frequency response
w, h = _sig.freqz(b,a)
freq = w*Fs/(2.0*_np.pi) # Frequency axis
if fLPF:
fLPF = max((min((2*intb[1], 400e3)), intb[1])) # [Hz], frequency to reject
Bvid = fLPF
# lpf_order = 1 # Order of low pass filter
lpf_order = 3 # Order of low pass filter
# lpf_order = 6 # Order of low pass filter
w0 = f0/(0.5*Fs) # Normalized frequency
# Design the LPF
blpf, alpf = butter_lowpass(fLPF, 0.5*Fs, order=lpf_order)
# blpf, alpf = _sig.butter(lpf_order, fLPF/(0.5*Fs), btype='low', analog=False)
# Frequency response
w, h = _sig.freqz(blpf,alpf)
freq = w*Fs/(2.0*_np.pi) # Frequency axis
nfils = len(fils)
freqs = _np.asarray(freqs[0:nfils], dtype=_np.float64)
Tefluct = []
sigmaTe = []
CC2 = []
_Pxx, _Pyy, _Pxy = 0.0, 0.0, 0.0
ylims = [0,0]
for ii in range(nfils):
filn = _os.path.abspath(_os.path.join(datafolder, fils[ii]))
print(filn)
_, shotno = filn.split('.')
tt, tmpRF, tmpIF = \
_np.loadtxt(filn, dtype=_np.float64, unpack=True, usecols=(0,1,2))
tt = 1e-3*tt.copy()
_plt.figure("raw data")
_plt.plot(tt, tmpRF)
_plt.plot(tt, tmpIF)
if tbg>(tb[1]-tb[0]):
dt = tb[1]-tb[0]
# background signal part
bgRF = tmpRF[_np.where((tt>tbg-dt)*(tt<tbg))].copy()
bgIF = tmpIF[_np.where((tt>tbg-dt)*(tt<tbg))].copy()
ttbg = tt[_np.where((tt>tbg-dt)*(tt<tbg))].copy()
else:
# background signal part
bgRF = tmpRF[_np.where(tt<tbg)].copy()
bgIF = tmpIF[_np.where(tt<tbg)].copy()
ttbg = tt[_np.where(tt<tbg)].copy()
# end if
# signal part
tt_tb=[_np.where(tt>=tb[0])[0][0],_np.where(tt>=tb[1])[0][0]]
tt=tt[tt_tb[0]:tt_tb[1]+1].copy()
tmpRF=tmpRF[tt_tb[0]:tt_tb[1]+1].copy()
tmpIF=tmpIF[tt_tb[0]:tt_tb[1]+1].copy()
_plt.axvline(x=tt[0])
_plt.axvline(x=tt[-1])
tmpRF -= _np.mean(tmpRF)
tmpIF -= _np.mean(tmpIF)
bgRF -= _np.mean(bgRF)
bgIF -= _np.mean(bgIF)
if fLPF:
tmpRF = _sig.filtfilt(blpf, alpf, tmpRF.copy())
tmpIF = _sig.filtfilt(blpf, alpf, tmpIF.copy())
bgRF = _sig.filtfilt(blpf, alpf, bgRF.copy())
bgIF = _sig.filtfilt(blpf, alpf, bgIF.copy())
if f0:
# Apply a zero-phase digital filter to both signals
tmpRF = _sig.filtfilt(b, a, tmpRF.copy()) # padding with zeros
tmpIF = _sig.filtfilt(b, a, tmpIF.copy()) # padding with zeros
bgRF = _sig.filtfilt(b, a, bgRF.copy()) # padding with zeros
bgIF = _sig.filtfilt(b, a, bgIF.copy()) # padding with zeros
if tt[1]-tt[0]!=tt[2]-tt[1]:
tt2=_np.linspace(tt[0],tt[-1],len(tt),endpoint=True)
tmpRF=_np.interp(_np.asarray(tt2,dtype=float), tt, tmpRF.copy())
tmpIF=_np.interp(_np.asarray(tt2,dtype=float), tt, tmpIF.copy())
tt=tt2.copy()
tt2 = _np.linspace(ttbg[0], ttbg[-1], len(ttbg), endpoint=True)
bgRF=_np.interp(_np.asarray(tt2,dtype=float), ttbg, bgRF.copy())
bgIF=_np.interp(_np.asarray(tt2,dtype=float), ttbg, bgIF.copy())
ttbg=tt2.copy()
# end if
# fs=1/(((tt[len(tt)-1]-tt[0])/len(tt)))
# SignalTime=tt[tt_tb][1]-tt[tt_tb][0] # you've already truncated the signal into time
SignalTime=tt[-1]-tt[0]
sig_anal = fftanal(tt.copy(), tmpRF.copy(), tmpIF.copy(), windowfunction='hanning',
onesided=True, minFreq=minFreq, plotit=False)
sig_anal.fftpwelch()
bg_anal = fftanal(ttbg.copy(), bgRF.copy(), bgIF.copy(), windowfunction='hanning',
onesided=True, minFreq=minFreq, plotit=False)
# onesided=True, Navr=sig_anal.Navr, plotit=False)
bg_anal.fftpwelch()
nwins = sig_anal.nwins
fs = sig_anal.Fs
fr = fs/float(nwins)
Navr = sig_anal.Navr
freq = sig_anal.freq.copy()
Pxy = sig_anal.Pxy.copy()
Pxx = sig_anal.Pxx.copy()
Pyy = sig_anal.Pyy.copy()
siglags = sig_anal.fftinfo.lags.copy()
sigcorrcoef = sig_anal.fftinfo.corrcoef.copy()
MaxFreq=fs/2.
MinFreq=2.0*fs/nwins #2*fr
print('Maximal frequency: '+str(MaxFreq)+' Hz')
print('Minimal frequency: '+str(MinFreq)+' Hz')
# integration frequencies
f1=max((MinFreq, intb[0]))
f2=min((MaxFreq, intb[1]))
if1 = _np.where(freq>=f1)[0]
if2 = _np.where(freq>=f2)[0]
if1 = 0 if len(if1) == 0 else if1[0]
if2 = len(freq) if len(if2) == 0 else if2[0]
ifreqs = _np.asarray(range(if1, if2), dtype=int)
# =================== #
cc=Pxy/_np.sqrt(Pxx.real*Pyy.real)
if backgroundsubtract:
if oldstylesubtract:
# background subtraction indices / frequencies
ibg1 = _np.where(freq>=min((MaxFreq,bg[0])))[0]
ibg2 = _np.where(freq>=bg[1])[0]
ibg1 = 0 if len(ibg1) == 0 else ibg1[0]
ibg2 = -1 if len(ibg2) == 0 else ibg2[0]
# ibg1 = _np.where(10*_np.log10(_np.abs(Pxy))<-85)[0][0]
# ibg2 = _np.where(10*_np.log10(_np.abs(Pxy))<-85)[0][-1]
if freq[ibg1]<f2:
df = int( (min((bg[1],freq[-1]))-bg[0])//(freq[1]-freq[0]))
ibg1 = _np.where(freq>=min((MaxFreq,bg[0])))[0]
ibg1 = ibg2-df if len(ibg1)==0 else ibg1[0]
{
"user-agent": "APIMATIC 2.0",
"accept": "application/json",
"X-Auth-Token": Configuration.x_auth_token,
"X-Auth-Token": Configuration.x_auth_token
}
# Prepare the API call.
http_request = self.http_client.post(query_url, headers=headers)
# Invoke the API call to fetch the response.
response = self.http_client.execute_as_string(http_request);
# Endpoint error handling using HTTP status codes.
if response.status_code == 403:
raise APIException("User not authorized to perform the operation", 403, response.raw_body)
elif response.status_code == 404:
raise APIException("Resource not found", 404, response.raw_body)
# Global error handling using HTTP status codes.
self.validate_response(response)
return response.raw_body
def delete_cart(self,
account_number,
cart_id):
"""Does a DELETE request to /accounts/{account_number}/carts/{cart_id}.
Delete a cart
Args:
account_number (string): Account number
cart_id (string): Cart Id
Returns:
void: Response from the API.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# The base uri for api requests
query_builder = Configuration.BASE_URI
# Prepare query string for API call
query_builder += "/accounts/{account_number}/carts/{cart_id}"
# Process optional template parameters
query_builder = APIHelper.append_url_with_template_parameters(query_builder, {
"account_number": account_number,
"cart_id": cart_id
})
# Validate and preprocess url
query_url = APIHelper.clean_url(query_builder)
# Prepare headers
headers = {
"user-agent": "APIMATIC 2.0",
"X-Auth-Token": Configuration.x_auth_token,
"X-Auth-Token": Configuration.x_auth_token
}
# Prepare the API call.
http_request = self.http_client.delete(query_url, headers=headers)
# Invoke the API call to fetch the response.
response = self.http_client.execute_as_string(http_request);
# Endpoint error handling using HTTP status codes.
if response.status_code == 401:
raise APIException("You are not authenticated", 401, response.raw_body)
elif response.status_code == 403:
raise APIException("This action needs a valid WSSE header", 403, response.raw_body)
elif response.status_code == 404:
raise APIException("Resource not found", 404, response.raw_body)
# Global error handling using HTTP status codes.
self.validate_response(response)
def get_cart(self,
account_number,
cart_id):
"""Does a GET request to /accounts/{account_number}/carts/{cart_id}.
Allow clients to get a specific cart.
Args:
account_number (string): Account number
cart_id (string): Cart id
Returns:
mixed: Response from the API.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# The base uri for api requests
query_builder = Configuration.BASE_URI
# Prepare query string for API call
query_builder += "/accounts/{account_number}/carts/{cart_id}"
# Process optional template parameters
query_builder = APIHelper.append_url_with_template_parameters(query_builder, {
"account_number": account_number,
"cart_id": cart_id
})
# Validate and preprocess url
query_url = APIHelper.clean_url(query_builder)
# Prepare headers
headers = {
"user-agent": "APIMATIC 2.0",
"accept": "application/json",
"X-Auth-Token": Configuration.x_auth_token,
"X-Auth-Token": Configuration.x_auth_token
}
# Prepare the API call.
http_request = self.http_client.get(query_url, headers=headers)
# Invoke the API call to fetch the response.
response = self.http_client.execute_as_string(http_request);
# Endpoint error handling using HTTP status codes.
if response.status_code == 401:
raise APIException("You are not authenticated", 401, response.raw_body)
elif response.status_code == 403:
raise APIException("This action needs a valid WSSE header", 403, response.raw_body)
elif response.status_code == 404:
raise APIException("Resource not found", 404, response.raw_body)
# Global error handling using HTTP status codes.
self.validate_response(response)
return response.raw_body
def create_cart_checkout(self,
account_number,
cart_id,
cart_checkout_form):
"""Does a POST request to /accounts/{account_number}/carts/{cart_id}/checkout.
Checkout a cart and create an order
Args:
account_number (string): Account Number
cart_id (string): Cart Id
cart_checkout_form (CartCheckoutForm): TODO: type description
here.
Returns:
mixed: Response from the API.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# The base uri for api requests
query_builder = Configuration.BASE_URI
# Prepare query string for API call
query_builder += "/accounts/{account_number}/carts/{cart_id}/checkout"
# Process optional template parameters
query_builder = APIHelper.append_url_with_template_parameters(query_builder, {
"account_number": account_number,
"cart_id": cart_id
})
# Validate and preprocess url
query_url = APIHelper.clean_url(query_builder)
# Prepare headers
headers = {
"user-agent": "APIMATIC 2.0",
"accept": "application/json",
"content-type": "application/json; charset=utf-8",
"X-Auth-Token": Configuration.x_auth_token,
"X-Auth-Token": Configuration.x_auth_token
}
# Prepare the API call.
http_request = self.http_client.post(query_url, headers=headers, parameters=APIHelper.json_serialize(cart_checkout_form))
# Invoke the API call to fetch the response.
response = self.http_client.execute_as_string(http_request);
# Endpoint error handling using HTTP status codes.
if response.status_code == 403:
raise APIException("User not authorized to perform the operation", 403, response.raw_body)
elif response.status_code == 404:
raise APIException("Resource not found", 404, response.raw_body)
# Global error handling using HTTP status codes.
self.validate_response(response)
return response.raw_body
def delete_items(self,
account_number,
cart_id):
"""Does a DELETE request to /accounts/{account_number}/carts/{cart_id}/items.
Delete all carts items
Args:
account_number (string): Account Number
cart_id (int): Cart Id
Returns:
string: Response from the API.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# The base uri for api requests
query_builder = Configuration.BASE_URI
# Prepare query string for API call
query_builder += "/accounts/{account_number}/carts/{cart_id}/items"
# Process optional template parameters
query_builder = APIHelper.append_url_with_template_parameters(query_builder, {
"account_number": account_number,
"cart_id": cart_id
})
# Validate and preprocess url
query_url = APIHelper.clean_url(query_builder)
# Prepare headers
headers = {
"user-agent": "APIMATIC 2.0",
"X-Auth-Token": Configuration.x_auth_token,
"X-Auth-Token": Configuration.x_auth_token
}
# Prepare the API call.
http_request = self.http_client.delete(query_url, headers=headers)
# Invoke the API call to fetch the response.
response = self.http_client.execute_as_string(http_request);
# Endpoint error handling using HTTP status codes.
if response.status_code == 401:
raise APIException("You are not authenticated", 401, response.raw_body)
elif response.status_code == 403:
raise APIException("This action needs a valid WSSE header", 403, response.raw_body)
elif response.status_code == 404:
raise APIException("Resource not found", 404, response.raw_body)
# Global error handling using HTTP status codes.
self.validate_response(response)
return str(response.raw_body)
def get_items(self,
account_number,
cart_id,
page=None,
limit=None):
"""Does a GET request to /accounts/{account_number}/carts/{cart_id}/items.
Allow clients to get the list of cart items.
Args:
account_number (string): Account Number
cart_id (int): Cart Id
page (int, optional): Zero based offset index for the results.
e.g. 0 would start at the first result and 10 would start at
the eleventh result.
limit (int, optional): Maximum number of results to return in the
response.
Returns:
mixed: Response from the API.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
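        Example (illustrative only; the controller instance name and argument values below are
        assumptions, not part of the generated SDK):
            items_json = carts_controller.get_items("ACC-12345", 42, page=0, limit=25)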
"""
# The base uri for api requests
query_builder = Configuration.BASE_URI
# Prepare query string for API call
query_builder += "/accounts/{account_number}/carts/{cart_id}/items"
# Process optional template parameters
query_builder = APIHelper.append_url_with_template_parameters(query_builder, {
"account_number": account_number,
"cart_id": cart_id
})
# Process optional query parameters
query_parameters = {
"page": page,
"limit": limit
}
# Validate and preprocess url
query_url = APIHelper.clean_url(query_builder)
# Prepare headers
headers = {
"user-agent": "APIMATIC 2.0",
"accept": "application/json",
"X-Auth-Token": Configuration.x_auth_token,
"X-Auth-Token": Configuration.x_auth_token
}
# Prepare the API call.
http_request = self.http_client.get(query_url, headers=headers, query_parameters=query_parameters)
# Invoke the API call to fetch the response.
response = self.http_client.execute_as_string(http_request);
# Endpoint error handling using HTTP status codes.
if response.status_code == 401:
raise APIException("You are not authenticated", 401, response.raw_body)
elif response.status_code == 403:
raise APIException("This action needs a valid WSSE header", 403, response.raw_body)
elif response.status_code == 404:
raise APIException("Resource not found", 404, response.raw_body)
# Global error handling using HTTP status codes.
self.validate_response(response)
return response.raw_body
def create_items(self,
account_number,
cart_id,
item_form):
"""Does a POST request to /accounts/{account_number}/carts/{cart_id}/items.
TODO: type endpoint description here.
Args:
account_number (string): Account Number
cart_id (int): Cart Id
item_form (ItemForm): TODO: type description here.
Returns:
mixed: Response from the API.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# The base uri for api requests
query_builder = Configuration.BASE_URI
# Prepare query string for API call
query_builder += "/accounts/{account_number}/carts/{cart_id}/items"
# Process optional template parameters
query_builder = APIHelper.append_url_with_template_parameters(query_builder, {
"account_number": account_number,
"cart_id": cart_id
})
# Validate and preprocess url
query_url = APIHelper.clean_url(query_builder)
# Prepare headers
headers = {
"user-agent": "APIMATIC 2.0",
"accept": "application/json",
"content-type": "application/json; charset=utf-8",
"X-Auth-Token": Configuration.x_auth_token,
"X-Auth-Token": Configuration.x_auth_token
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import datetime
import os
from typing import Any, Callable, Dict, List, Optional, Union
import uuid
import airflow
from airflow.exceptions import AirflowException
if airflow.__version__ > "2.0":
from airflow.hooks.base import BaseHook
else:
from airflow.hooks.base_hook import BaseHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
import great_expectations as ge
from great_expectations.checkpoint import LegacyCheckpoint
from great_expectations.checkpoint.types.checkpoint_result import CheckpointResult
from great_expectations.data_context.types.base import (
DataContextConfig,
GCSStoreBackendDefaults,
)
from great_expectations.data_context import BaseDataContext
class GreatExpectationsOperator(BaseOperator):
"""
An operator to leverage Great Expectations as a task in your Airflow DAG.
Current list of expectations types:
https://docs.greatexpectations.io/en/latest/reference/glossary_of_expectations.html
How to create expectations files:
https://docs.greatexpectations.io/en/latest/guides/tutorials/how_to_create_expectations.html
:param run_name: Identifies the validation run (defaults to timestamp if not specified)
:type run_name: Optional[str]
:param data_context_root_dir: Path of the great_expectations directory
:type data_context_root_dir: Optional[str]
:param data_context: A great_expectations `DataContext` object
:type data_context: Optional[BaseDataContext]
:param expectation_suite_name: The name of the Expectation Suite to use for validation
:type expectation_suite_name: Optional[str]
:param batch_kwargs: The batch_kwargs to use for validation
:type batch_kwargs: Optional[dict]
:param assets_to_validate: A list of dictionaries of batch_kwargs + Expectation Suites to use for validation
:type assets_to_validate: Optional[list[dict]]
:param checkpoint_name: A Checkpoint name to use for validation
:type checkpoint_name: Optional[str]
:param validation_operator_name: name of a Great Expectations validation operator, defaults to action_list_operator
:type validation_operator_name: Optional[str]
:param fail_task_on_validation_failure: Fail the Airflow task if the Great Expectation validation fails
    :type fail_task_on_validation_failure: Optional[bool]
:param validation_failure_callback: Called when the Great Expectations validation fails
:type validation_failure_callback: Callable[[CheckpointResult], None]
:param **kwargs: kwargs
:type **kwargs: Optional[dict]
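    Example (a minimal sketch; the task id, paths and suite name below are illustrative
    assumptions, not values shipped with this package)::
        validate_data = GreatExpectationsOperator(
            task_id="validate_data",
            data_context_root_dir="/usr/local/airflow/great_expectations",
            expectation_suite_name="my_suite",
            batch_kwargs={"path": "/data/my_table.csv", "datasource": "my_datasource"},
        )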
"""
ui_color = "#AFEEEE"
ui_fgcolor = "#000000"
template_fields = (
"checkpoint_name",
"batch_kwargs",
"assets_to_validate",
"data_context_root_dir",
)
@apply_defaults
def __init__(
self,
*,
run_name: Optional[str] = None,
data_context_root_dir: Optional[Union[str, bytes, os.PathLike]] = None,
data_context: Optional[BaseDataContext] = None,
expectation_suite_name: Optional[str] = None,
batch_kwargs: Optional[Dict] = None,
assets_to_validate: Optional[List[Dict]] = None,
checkpoint_name: Optional[str] = None,
validation_operator_name: Optional[str] = None,
fail_task_on_validation_failure: Optional[bool] = True,
validation_failure_callback: Optional[
Callable[[CheckpointResult], None]
] = None,
**kwargs
):
super().__init__(**kwargs)
self.run_name: Optional[str] = run_name
# Check that only one of the arguments is passed to set a data context (or none)
if data_context_root_dir and data_context:
raise ValueError(
"Only one of data_context_root_dir or data_context can be specified."
)
self.data_context_root_dir: Optional[str] = data_context_root_dir
self.data_context: Optional[BaseDataContext] = data_context
# Check that only the correct args to validate are passed
# this doesn't cover the case where only one of expectation_suite_name or batch_kwargs is specified
# along with one of the others, but I'm ok with just giving precedence to the correct one
if (
sum(
bool(x)
for x in [
(expectation_suite_name and batch_kwargs),
assets_to_validate,
checkpoint_name,
]
)
!= 1
):
raise ValueError(
"Exactly one of expectation_suite_name + batch_kwargs, "
"assets_to_validate, or checkpoint_name is required to run validation."
)
self.expectation_suite_name: Optional[str] = expectation_suite_name
self.batch_kwargs: Optional[Dict] = batch_kwargs
self.assets_to_validate: Optional[List[Dict]] = assets_to_validate
self.checkpoint_name: Optional[str] = checkpoint_name
self.validation_operator_name: Optional[str] = validation_operator_name
self.fail_task_on_validation_failure = fail_task_on_validation_failure
self.validation_failure_callback = validation_failure_callback
def create_data_context(self) -> BaseDataContext:
"""Create and return the :class:`~ge.data_context.DataContext` to be used
during validation.
Subclasses should override this to provide custom logic around creating a
`DataContext`. This is called at task execution time, which defers connecting
to the meta database and allows for the use of templated variables.
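        A minimal override sketch (the bucket and project names below are illustrative assumptions)::
            class GcsGreatExpectationsOperator(GreatExpectationsOperator):
                def create_data_context(self):
                    project_config = DataContextConfig(
                        store_backend_defaults=GCSStoreBackendDefaults(
                            default_bucket_name="my-ge-artifacts-bucket",
                            default_project_name="my-gcp-project",
                        )
                    )
                    return BaseDataContext(project_config=project_config)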
"""
if self.data_context_root_dir:
return ge.data_context.DataContext(
context_root_dir=self.data_context_root_dir
)
else:
return ge.data_context.DataContext()
def execute(self, context: Any) -> CheckpointResult:
self.log.info("Ensuring data context exists...")
if not self.data_context:
self.log.info("Data context does not exist, creating now.")
self.data_context: Optional[BaseDataContext] = self.create_data_context()
self.log.info("Running validation with Great Expectations...")
batches_to_validate = []
if self.batch_kwargs and self.expectation_suite_name:
batch = {
"batch_kwargs": self.batch_kwargs,
"expectation_suite_names": [self.expectation_suite_name],
}
batches_to_validate.append(batch)
elif self.checkpoint_name:
checkpoint = self.data_context.get_checkpoint(self.checkpoint_name)
for batch in checkpoint.batches:
batch_kwargs = batch["batch_kwargs"]
for suite_name in batch["expectation_suite_names"]:
batch = {
"batch_kwargs": batch_kwargs,
"expectation_suite_names": [suite_name],
}
batches_to_validate.append(batch)
elif self.assets_to_validate:
for asset in self.assets_to_validate:
batch = {
"batch_kwargs": asset["batch_kwargs"],
"expectation_suite_names": [asset["expectation_suite_name"]],
}
batches_to_validate.append(batch)
result = LegacyCheckpoint(
name="_temp_checkpoint",
data_context=self.data_context,
validation_operator_name=self.validation_operator_name,
batches=batches_to_validate,
).run(run_name=self.run_name)
self.handle_result(result)
return result
def handle_result(self, result: CheckpointResult) -> None:
"""Handle the given validation result.
If the validation failed, this method will:
- call :attr:`~validation_failure_callback`, if set
- raise an :exc:`airflow.exceptions.AirflowException`, if
:attr:`~fail_task_on_validation_failure` is `True`, otherwise, log a warning
message
If the validation succeeded, this method will simply log an info message.
:param result: The validation result
:type result: CheckpointResult
"""
if not result["success"]:
if self.validation_failure_callback:
self.validation_failure_callback(result)
if self.fail_task_on_validation_failure:
raise AirflowException("Validation with Great Expectations failed.")
else:
self.log.warning(
"Validation with Great Expectations failed. "
"Continuing DAG execution because "
"fail_task_on_validation_failure is set to False."
)
else:
self.log.info("Validation with Great Expectations successful.")
class GreatExpectationsBigQueryOperator(GreatExpectationsOperator):
"""
An operator that allows you to use Great Expectations to validate data Expectations
against a BigQuery table or the result of a SQL query.
The Expectations need to be stored in a JSON file sitting in an accessible GCS
bucket. The validation results are output to GCS in both JSON and HTML formats.
:param gcp_project: The GCP project of the bucket holding the Great Expectations
artifacts.
:type gcp_project: str
:param gcs_bucket: GCS bucket holding the Great Expectations artifacts.
:type gcs_bucket: str
:param gcs_expectations_prefix: GCS prefix where the Expectations file can be
found. For example, "ge/expectations".
:type gcs_expectations_prefix: str
:param gcs_validations_prefix: GCS prefix where the validation output files should
be saved. For example, "ge/expectations".
:type gcs_validations_prefix: str
:param gcs_datadocs_prefix: GCS prefix where the validation datadocs files should
be saved. For example, "ge/expectations".
:type gcs_datadocs_prefix: str
:param query: The SQL query that defines the set of data to be validated. If the
query parameter is filled in then the `table` parameter cannot be.
:type query: Optional[str]
:param table: The name of the BigQuery table with the data to be validated. If the
table parameter is filled in then the `query` parameter cannot be.
:type table: Optional[str]
:param bq_dataset_name: The name of the BigQuery data set where any temp tables
will be created that are needed as part of the GE validation process.
:type bq_dataset_name: str
:param bigquery_conn_id: ID of the connection with the credentials info needed to
connect to BigQuery.
:type bigquery_conn_id: str
"""
ui_color = "#AFEEEE"
ui_fgcolor = "#000000"
template_fields = GreatExpectationsOperator.template_fields + (
"bq_dataset_name",
"gcp_project",
"gcs_bucket",
)
@apply_defaults
def __init__(
self,
*,
gcp_project: str,
gcs_bucket: str,
gcs_expectations_prefix: str,
gcs_validations_prefix: str,
gcs_datadocs_prefix: str,
query: Optional[str] = None,
table: Optional[str] = None,
bq_dataset_name: str,
bigquery_conn_id: str = "bigquery_default",
**kwargs
):
self.query: Optional[str] = query
self.table: Optional[str] = table
self.bigquery_conn_id = bigquery_conn_id
self.bq_dataset_name = bq_dataset_name
self.gcp_project = gcp_project
self.gcs_bucket = gcs_bucket
self.gcs_expectations_prefix = gcs_expectations_prefix
self.gcs_validations_prefix = gcs_validations_prefix
self.gcs_datadocs_prefix = gcs_datadocs_prefix
super().__init__(batch_kwargs=self.get_batch_kwargs(), **kwargs)
def create_data_context(self) -> BaseDataContext:
"""Create and return the `DataContext` with a BigQuery `DataSource`."""
# Get the credentials information for the BigQuery data source from the BigQuery
# Airflow connection
conn = BaseHook.get_connection(self.bigquery_conn_id)
connection_json = conn.extra_dejson
credentials_path = connection_json.get("extra__google_cloud_platform__key_path")
data_context_config = DataContextConfig(
config_version=2,
datasources={
"bq_datasource": {
"credentials": {
"url": "bigquery://"
+ self.gcp_project
+ "/"
+ self.bq_dataset_name
+ "?credentials_path="
+ credentials_path
},
"class_name": "SqlAlchemyDatasource",
"module_name": "great_expectations.datasource",
"data_asset_type": {
"module_name": "great_expectations.dataset",
"class_name": "SqlAlchemyDataset",
},
}
},
store_backend_defaults=GCSStoreBackendDefaults(
default_bucket_name=self.gcs_bucket,
default_project_name=self.gcp_project,
validations_store_prefix=self.gcs_validations_prefix,
expectations_store_prefix=self.gcs_expectations_prefix,
data_docs_prefix=self.gcs_datadocs_prefix,
),
)
return BaseDataContext(project_config=data_context_config)
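# For reference, the "credentials" block above resolves to a SQLAlchemy URL of the
# form (placeholder values):
#   bigquery://<gcp_project>/<bq_dataset_name>?credentials_path=<key_path from the Airflow connection>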
def get_batch_kwargs(self) -> Dict:
# Tell GE where to fetch the batch of data to be validated.
batch_kwargs = {
"datasource": "bq_datasource",
}
# Check that only one of the arguments is passed to set a data context (or none)
if self.query and self.table:
raise ValueError("Only one of query or table can be specified.")
if self.query:
batch_kwargs["query"] = self.query
batch_kwargs["data_asset_name"] = self.bq_dataset_name
batch_kwargs["bigquery_temp_table"] = self.get_temp_table_name(
"ge_" + datetime.datetime.now().strftime("%Y%m%d") + "_", 10
)
elif self.table:
batch_kwargs["table"] = self.table
batch_kwargs["data_asset_name"] = | |
#Plot the achieved values of each observed sample
rects2 = plt.bar(index, house_selling_model(chunks, r)[1], bar_width,alpha=opacity,color='black',label='Achieved')
#Plot the minimum values of each observed sample
rects1 = plt.bar(index + bar_width, house_selling_model(chunks, r)[0], bar_width, alpha=opacity,color='darkred',label='Optimal')
#Label
plt.xlabel('Stops', size = 50)
plt.ylabel('Load Values', size = 50)
plt.title('Loads in each Run with N = {} for House Selling Model'.format(chunks), size = 60)
plt.xticks(index + (bar_width/2), tuple(range(1,n_groups_house+1)))
plt.xticks(fontsize= 30)
plt.yticks(fontsize= 30)
plt.xlim([0-bar_width/2,index.size])
plt.legend(prop={'size': 25})
plt.savefig('house_selling_figures/hs_{}_'.format(chunks) + time.strftime("%Y-%m-%d %H%M%S") + '.png')
plt.figure(figsize=(30,25))
# Times Plot
#Plot the achieved values of each observed sample
rects2 = plt.bar(index, house_selling_model(chunks, r)[5], bar_width,alpha=opacity,color='darkblue',label='Time instance difference from optimal')
plt.xlabel('Stops', size = 50)
plt.ylabel('Time Instances', size = 50)
plt.title('Times in each Run with N = {} for House Selling Model'.format(chunks), size = 60)
plt.xticks(index, tuple(range(1,n_groups_house+1)))
plt.xticks(fontsize= 30)
plt.yticks(fontsize= 30)
plt.xlim([0-bar_width/2,index.size])
plt.legend(prop={'size': 25})
plt.plot()
plt.savefig('house_selling_figures/hs_times_{}_'.format(chunks) + time.strftime("%Y-%m-%d %H%M%S") + '.png')
#Display the dataframe
runs_data = {'Run': list(range(1,len(house_selling_model(chunks, r)[0])+1)),'Optimal': house_selling_model(chunks, r)[0],'Load when Offloading': house_selling_model(chunks, r)[1],
'Load Difference': house_selling_model(chunks, r)[2],}
runs_frame = pd.DataFrame(runs_data, columns = ['Run','Optimal','Load when Offloading', 'Load Difference'])
runs_frame.index += 1
display(runs_frame)
runs_frame.to_csv('house_selling_figures/dataframes/hs_data_{}_'.format(chunks) + time.strftime("%Y-%m-%d %H%M%S") + '.csv')
# # RANDOM AND SECRETARY MODELS Vs OPTIMAL
# In[8]:
#Plot the different models (random(P) for different probabilities and secretary model) to compare with the optimal for each model
#Set the rpb_model(eg. rpb_200) and the secretary model(eg. secretary model(200))
def avg_loads_by_stop(rpb_model, secretary_model, house_selling_model):
fig, ax = plt.subplots(1, 1,figsize=(30,25))
bar_width = 0.4
opacity = 0.8
optimal_means = [np.mean(rpb_model[0][0]),np.mean(rpb_model[1][0]),np.mean(rpb_model[2][0]),np.mean(rpb_model[3][0]),
np.mean(rpb_model[4][0]), np.mean(secretary_model[0]), np.mean(house_selling_model[0])]
achieved_means = [np.mean(rpb_model[0][1]),np.mean(rpb_model[1][1]),np.mean(rpb_model[2][1]),np.mean(rpb_model[3][1]),
np.mean(rpb_model[4][1]),np.mean(secretary_model[1]), np.mean(house_selling_model[1])]
all_means = np.array([np.mean(rpb_model[0][0]), np.mean(rpb_model[0][1]),np.mean(rpb_model[1][1]),np.mean(rpb_model[2][1]),np.mean(rpb_model[3][1]),
np.mean(rpb_model[4][1]),np.mean(secretary_model[1]), np.mean(house_selling_model[1])])
comparison = all_means - all_means[0][None]
comp = list(comparison)
comp.pop(0)
#Plot the achieved values of each observed sample
rects2 = plt.bar(np.arange(8) + bar_width, all_means, bar_width,alpha=opacity,color = '#99ccff',label='Means')
rects2[0].set_color('g')
#Label
x_ticks_labels = ['Optimal', 'Random(P = 0.05)','Random(P = 0.1)','Random(P = 0.2)','Random(P = 0.3)','Random(P = 0.5)', 'Secretary', 'House Selling']
plt.xlabel('Models', size = 40)
plt.ylabel('Load Values', size = 40)
plt.title('Avg Loads by Stop for each Model for selected Chunk', size=50)
plt.xticks(np.arange(8) + bar_width/2, ('Optimal','Random(P = 0.05)','Random(P = 0.1)','Random(P = 0.2)','Random(P = 0.3)','Random(P = 0.5)', 'Secretary', 'House Selling'), rotation=35)
plt.xticks(fontsize= 35)
plt.yticks(fontsize= 35)
# for p in ax.patches:
# ax.annotate("%.2f" % p.get_height(), (p.get_x() + p.get_width() / 2., p.get_height()),
# ha='center', va='center', rotation=0, xytext=(0, 20), textcoords='offset points')
plt.legend(prop={'size': 25})
plt.savefig('averages/Averages for chosen N_' + time.strftime("%Y-%m-%d %H%M%S") + '.png')
#Display the dataframe
runs_data = {'Model': ['Random(P = 0.05)','Random(P = 0.1)','Random(P = 0.2)','Random(P = 0.3)','Random(P = 0.5)', 'Secretary', 'House Selling'],
'Optimal Means': optimal_means,
'Offloading Means': achieved_means,
'Mean Load Difference': comp} #np.array(achieved_means) - np.array(optimal_means)}
runs_frame = pd.DataFrame(runs_data, columns = ['Model','Optimal Means',
'Offloading Means',
'Mean Load Difference'])
runs_frame.index += 1
fig = plt.figure(figsize=(20,20))
ax1 = plt.subplot(111)
ret = ax1.bar(runs_frame['Model'], runs_frame['Mean Load Difference'], color = '#99ccff')
ret[np.where(runs_frame['Mean Load Difference'] == runs_frame['Mean Load Difference'].min())[0][0]].set_color('#404040')
plt.xticks(fontsize= 30, rotation = 90)
plt.yticks(fontsize= 30)
plt.xlabel('Models', size = 40)
plt.ylabel('Load Difference', size = 40)
plt.title('Load Mean Differences', size = 50)
for p in ax1.patches:
ax1.annotate("%.2f" % p.get_height(), (p.get_x() + p.get_width() / 2., p.get_height()),
ha='center', va='center', rotation=0, xytext=(0, 20), textcoords='offset points')
plt.savefig('averages/Best_Model_' + time.strftime("%Y-%m-%d %H%M%S") + '.png')
display(runs_frame)
runs_frame.to_csv('averages/dataframes/averages_data_' + time.strftime("%Y-%m-%d %H%M%S") + '.csv')
# In[9]:
#These are the Random(P) probability models
#ALL COMBINATIONS OF CHUNKS AND PROBABILITIES
#RUN THESE BEFORE PROCEEDING TO ANALYSIS
# rpb_20 = [random_prob_model(20, 0.05),
# random_prob_model(20, 0.1),
# random_prob_model(20, 0.2),
# random_prob_model(20, 0.3),
# random_prob_model(20, 0.5)]
# rpb_50 = [random_prob_model(50, 0.05),
# random_prob_model(50, 0.1),
# random_prob_model(50, 0.2),
# random_prob_model(50, 0.3),
# random_prob_model(50, 0.5)]
# rpb_80 = [random_prob_model(80, 0.05),
# random_prob_model(80, 0.1),
# random_prob_model(80, 0.2),
# random_prob_model(80, 0.3),
# random_prob_model(80, 0.5)]
# rpb_100 = [random_prob_model(100, 0.05),
# random_prob_model(100, 0.1),
# random_prob_model(100, 0.2),
# random_prob_model(100, 0.3),
# random_prob_model(100, 0.5)]
# rpb_150 = [random_prob_model(150, 0.05),
# random_prob_model(150, 0.1),
# random_prob_model(150, 0.2),
# random_prob_model(150, 0.3),
# random_prob_model(150, 0.5)]
# rpb_200 = [random_prob_model(200, 0.05),
# random_prob_model(200, 0.1),
# random_prob_model(200, 0.2),
# random_prob_model(200, 0.3),
# random_prob_model(200, 0.5)]
#EXAMPLES, run the models by changing the chunk number (eg. rpb_200) and the square bracket value for the Probability
#Probabilities
# 0 = 0.05
# 1 = 0.1
# 2 = 0.2
# 3 = 0.3
# 4 = 0.5
#Chunks N
# 20,50,80,100,150,200
#For the House Selling model define chunks and the factor r (eg r = 0.1)
# #MODELS
# randomP_simulation_run(rpb_200[1],200)
# secretary_simulation_run(200)
# house_selling_simulation_run(200, 0)
# avg_loads_by_stop(rpb_200, secretary_model(200), house_selling_model(200, 0.015))
# 50 0.052
# 100 0.064
# 150 0.046
# 200 0.015
# In[13]:
def main():
user_input = str(input("Please enter the name of the .csv file you want to view: "))
print(file(user_input))
#Generate the dataset for the Random(P) Model
rpb_20 = [random_prob_model(20, 0.05),random_prob_model(20, 0.1),random_prob_model(20, 0.2),random_prob_model(20, 0.3),random_prob_model(20, 0.5)]
rpb_50 = [random_prob_model(50, 0.05),random_prob_model(50, 0.1),random_prob_model(50, 0.2),random_prob_model(50, 0.3),random_prob_model(50, 0.5)]
rpb_80 = [random_prob_model(80, 0.05),random_prob_model(80, 0.1),random_prob_model(80, 0.2),random_prob_model(80, 0.3),random_prob_model(80, 0.5)]
rpb_100 = [random_prob_model(100, 0.05),random_prob_model(100, 0.1),random_prob_model(100, 0.2),random_prob_model(100, 0.3),random_prob_model(100, 0.5)]
rpb_150 = [random_prob_model(150, 0.05),random_prob_model(150, 0.1),random_prob_model(150, 0.2),random_prob_model(150, 0.3),random_prob_model(150, 0.5)]
rpb_200 = [random_prob_model(200, 0.05),random_prob_model(200, 0.1),random_prob_model(200, 0.2),random_prob_model(200, 0.3),random_prob_model(200, 0.5)]
loop = True
while(loop):
selection = str(input("You can choose from:\n 1 = Random(P) Model\n 2 = Secretary Model\n 3 = House Selling Model\n 4 = Average of Models\nEnter your selection: "))
if selection == '1':
chunk_selection = int(input("Please enter the number of chunks you want to analyze. You can choose from [20,50,80,100,150,200]: "))
if chunk_selection == 20:
probability_selection = int(input("Please enter the probability you want.\n\nYou can choose from:\n 0 = 0.05\n 1 = 0.1\n 2 = 0.2\n 3 = 0.3\n 4 = 0.5\n\nEnter your selection: "))
randomP_simulation_run(rpb_20[probability_selection], chunk_selection)
if chunk_selection == 50:
probability_selection = int(input("Please enter the probability you want.\n\n\nYou can choose from:\n 0 = 0.05\n 1 = 0.1\n 2 = 0.2\n 3 = 0.3\n 4 = 0.5\n\nEnter your selection: "))
randomP_simulation_run(rpb_50[probability_selection], chunk_selection)
if chunk_selection == 80:
probability_selection = int(input("Please enter the probability you want.\n\n\nYou can choose from:\n 0 = 0.05\n 1 = 0.1\n 2 = 0.2\n 3 = 0.3\n 4 = 0.5\n\nEnter your selection: "))
randomP_simulation_run(rpb_80[probability_selection], chunk_selection)
if chunk_selection == 100:
probability_selection = int(input("Please enter the probability you want.\n\n\nYou can choose from:\n 0 = 0.05\n 1 = 0.1\n 2 = 0.2\n 3 = 0.3\n 4 = 0.5\n\nEnter your selection: "))
randomP_simulation_run(rpb_100[probability_selection], chunk_selection)
if chunk_selection == 150:
probability_selection = int(input("Please enter the probability you want.\n\n\nYou can choose from:\n 0 = 0.05\n 1 = 0.1\n 2 = 0.2\n 3 = 0.3\n 4 = 0.5\n\nEnter your selection: "))
randomP_simulation_run(rpb_150[probability_selection], chunk_selection)
if chunk_selection == 200:
probability_selection = int(input("Please enter the probability you want.\n\n\nYou can choose from:\n 0 = 0.05\n 1 = 0.1\n 2 = 0.2\n 3 = 0.3\n 4 = 0.5\n\nEnter your selection: "))
randomP_simulation_run(rpb_200[probability_selection], chunk_selection)
print("\nYour result figures have been saved. You can view them in the /randomp_figures/ folder!\n\n")
elif selection == '2':
chunk_selection = int(input("Please enter the number of chunks you want to analyze. You can choose from [20,50,80,100,150,200]: "))
secretary_simulation_run(chunk_selection)
print("\nYour result figures have been saved. You can view them in the /secretary_figures/ folder!\n\n")
elif selection == '3':
chunk_selection = int(input("Please enter the number of chunks you want to analyze. You can choose from [20,50,80,100,150,200]: "))
r_factor = float(input("Please enter the R factor you want to use: "))
house_selling_simulation_run(chunk_selection,r_factor)
print("\nYour result figures have been saved. You can view them in the /house_selling_figures/ folder!\nDataframe .csv is in the /dataframes/ folder\n\n")
elif selection == '4':
chunk_selection = int(input("Please enter the number of chunks you want to analyze. You can choose from [20,50,80,100,150,200]: "))
if chunk_selection == 20:
r_factor = float(input("Please enter the R factor you want to use: "))
avg_loads_by_stop(rpb_20, secretary_model(chunk_selection), house_selling_model(chunk_selection, r_factor))
if chunk_selection == 50:
r_factor = float(input("Please enter the R factor you want to use: "))
avg_loads_by_stop(rpb_50, secretary_model(chunk_selection), house_selling_model(chunk_selection, r_factor))
if chunk_selection == 80:
r_factor = float(input("Please enter the R factor you want to use: "))
avg_loads_by_stop(rpb_80, secretary_model(chunk_selection), house_selling_model(chunk_selection, r_factor))
if chunk_selection == 100:
r_factor = float(input("Please enter the R factor you want to use: "))
avg_loads_by_stop(rpb_100, secretary_model(chunk_selection), house_selling_model(chunk_selection, r_factor))
if chunk_selection == 150:
r_factor = float(input("Please enter the R factor you want to use: "))
avg_loads_by_stop(rpb_150, secretary_model(chunk_selection), house_selling_model(chunk_selection, r_factor))
if chunk_selection == 200:
r_factor = float(input("Please enter the R factor you want to use: "))
avg_loads_by_stop(rpb_200, secretary_model(chunk_selection), house_selling_model(chunk_selection, r_factor))
print("\nYour result figures have been saved. You can view them in the /averages/ folder!\nDataframe .csv is in the /dataframes/ folder\n\n")
else:
print("Error! Please enter a valid selection!\n")
repeat = str(input("Do you want to repeat? If not type 'exit' or 'N' to go back. Else enter 'Y' to continue: "))
while (repeat != 'Y' and repeat != 'N' and repeat != 'exit' ):
print("Sorry! I didn't | |
# Repository: adgilbert/med-seg
import logging
import os
from abc import abstractmethod
from typing import Callable, Tuple
import cv2
import numpy as np
import pandas as pd
import scipy.stats as st
import torch
from scipy.spatial.distance import cosine as cosine_dist
from scipy.spatial.distance import directed_hausdorff
from torch.nn.functional import one_hot
from seg_utils.post_process_masks import get_largest_contour
from seg_utils.utils import convert_binary_output_to_classes, clean_dict
from .Curvature.single_mask_processing import Mask2Contour
try:
from surface_distance.metrics import compute_surface_distances, compute_average_surface_distance
except ImportError:
print("Could not import surface distance metrics. Install from https://github.com/deepmind/surface-distance if "
"surface distance metrics will be used.")
def show_img(img):
import matplotlib.pyplot as plt
plt.imshow(img)
plt.show()
def convert_to_classes(inp): return torch.argmax(inp, dim=1) # convert an input array to class values
def check_input_format(output, target):
""" check input format of outpt and target
output: BxCxHxW tensor where C is the number of classes
target: Bx1xHxW tensor where every entry is an int in the range [0, C-1]
"""
try:
assert len(output.shape) == 4
assert len(target.shape) == 4
assert target.shape[1] == 1
assert torch.max(target) <= output.shape[1] - 1
except AssertionError:
raise ValueError(f"Shape error: \nOutput should be [B, C, H, W], found {output.shape} "
f"\nTarget should be [B, 1, H, W], found {target.shape}. "
f"\nMax target should be <= C-1, found {torch.max(target)}")
class SliceMerger(object):
def __init__(self, merge: tuple):
""" merge is a tuple giving the slices along dimension 1 to merge"""
assert len(merge) > 1, "must provide at least two slices to merge"
self.merge = merge
def __call__(self, output: torch.Tensor, target: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
""" merges all slices given by merge in dimension 1 using max
output and target should """
check_input_format(output, target)
# create copies and detach from computation graph
output = output.clone().detach()
target = target.clone().detach()
slices = np.arange(output.shape[1]) # all slices
new_slice = self.merge[0] # save res in this slice
output[:, new_slice] = torch.max(output[:, self.merge], 1)[0] # just want max_vals
for s in slices:
if s in self.merge:
target[target == s] = new_slice # merge vals in target
else:
subtract_val = sum(s > self.merge[1:]) # subtract one for every removed slice
target[target == s] -= subtract_val
keep_slices = [s for s in slices if s not in self.merge or s == new_slice]
output = output[:, keep_slices]
return output, target
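# Worked example (added for illustration): with 4 classes and merge=(1, 2), the output
# channels [0, 1, 2, 3] become [0, max(1, 2), 3] and only channels [0, 1, 3] are kept;
# in the target, labels 1 and 2 both map to 1 and label 3 is shifted down to 2, so
# downstream metrics see 3 classes.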
def get_intersection_and_sums(output: torch.Tensor, target: torch.Tensor) -> tuple:
"""
Calculates the intersection between output and target as well as the individual sums of each
Args:
output: BxCxHxW tensor where C is the number of classes
target: Bx1xHxW tensor where every entry is an int in the range [0, C-1]
Returns:
intersection: area of intersection between output and target [B, C] array
output_sum: sum of output for each class [B, C] array
target_sum: sum of target for each class [B, C] array
"""
check_input_format(output, target)
num_classes = output.shape[1]
# convert output and target to [C, B*H*W] tensors where every value is one-hot encoded and C is classes
# for output have to first find chosen class then convert to one_hot
output = one_hot(convert_to_classes(output), num_classes).type(torch.int).permute(3, 0, 1, 2).view(num_classes, -1)
target = one_hot(target.squeeze(1), num_classes).type(torch.int).permute(3, 0, 1, 2).contiguous().view(num_classes,
-1)
assert output.shape == target.shape, f"output/target shape mismatch after processing ({output.shape}!={target.shape}"
intersection = (output * target).sum(dim=1)
output_sum = output.sum(dim=1)
target_sum = target.sum(dim=1)
return intersection, output_sum, target_sum
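# The per-class sums above are what the metric classes below reduce to a score. As a
# sketch (notation only), with intersection I, output_sum |A| and target_sum |B| for
# one class:
#   dice = 2 * I / (|A| + |B|)       # see DiceScore.process_single
#   iou  = I / (|A| + |B| - I)       # see IoU.process_single
# e.g. I = 50, |A| = 60, |B| = 70 gives dice = 100/130 ~ 0.77 and iou = 50/80 = 0.625.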
class MetricBase:
def __init__(self, out_type: str = "segs", modifier: Callable = None, lower_is_better: bool = False,
requires_target=True, calculate_during_train=True):
"""
Abstract base class which all metrics should inherit from
:param out_type: what key to use in output/target dictionary
:param modifier: a callable function which should take (output, target) as input and return output/target
:param lower_is_better: decides whether or lower or higher value of the metric is better
"""
self.best = {"train": None}
self.phase = "train"
self.results = list()
self._type = out_type # which key in output/target dict to use
self.target_name = out_type
self.lower_is_better = lower_is_better
self.modifier = modifier
self.requires_target = requires_target
self.calculate_during_train = calculate_during_train
@property
def type(self):
return self._type
def set_target(self, target_name: str):
self.target_name = target_name
def reset_target(self):
self.target_name = self._type
def check_best(self, res):
if res is not None:
if self.best[self.phase] is None or (not self.lower_is_better and res >= self.best[self.phase]) or \
(self.lower_is_better and res <= self.best[self.phase]):
self.best[self.phase] = res
return True
return False
def epoch_reset(self, phase):
# reset results for next epoch
self.results = list()
self.phase = phase
if phase not in self.best:
self.best[phase] = None
def reduce(self, method="median"):
assert method in dir(self), f"reduction method {method} not found"
if len(self.results) > 0:
return self.__getattribute__(method)()
else:
logging.debug("0 results found. Returning None")
return None
def absmean(self):
return np.mean(abs(np.array(self.results)))
def mean(self):
mean = np.mean(np.array(self.results))
if np.isnan(mean):
logging.warning("Found NAN in results, using nanmean")
mean = np.nanmean(np.array(self.results))
return mean
def median(self):
med = np.median(np.array(self.results))
if np.isnan(med):
logging.warning("Found NAN in results, using nanmedian")
med = np.nanmedian(np.array(self.results))
return med
def absmedian(self):
return np.median(abs(np.array(self.results)))
def std(self):
std = np.std(np.array(self.results))
if np.isnan(std):
logging.warning("Found NAN in results, using nanstd")
std = np.nanstd(np.array(self.results))
return std
def ci_95(self):
res = np.array(self.results)
return st.t.interval(0.95, len(res) - 1, loc=np.mean(res), scale=st.sem(res))
def ci_low(self):
return self.ci_95()[0]
def ci_high(self):
return self.ci_95()[1]
def median_absolute_deviation(self):
""" defined as median(|X_i - median(X)|)"""
med = self.median() # will handle nan and print warning.
res = np.array(self.results)
res = res[~np.isnan(res)]
return np.median(abs(res - med))
def set_best(self, phase, val):
""" used for setting the best value when restoring"""
self.best[phase] = val
@abstractmethod
def process_single(self, output, target=None):
# implement here the actual metric, should return a val to add to self.results
# a return value of None indicates that something went wrong in the processing. This class will ignore and
# continue, but the subclass should log a warning message.
pass
def __call__(self, outputs: dict, targets: dict = None, confidence=None):
if self.requires_target and (targets is None or self._type not in targets):
return []
if not self.calculate_during_train and self.phase == "train":
return []
self.batch_results = list()
if confidence is None:
confidence = [True] * outputs["segs"].shape[0]
if targets is None or self._type not in targets:
for o, c in zip(outputs[self._type], confidence):
if c and o is not None:
o = o.unsqueeze(0)
res = self.process_single(o)
if res is not None:
self.results.append(res)
self.batch_results.append(res)
else:
self.batch_results.append(None)
else:
for o, t, c in zip(outputs[self._type], targets[self._type], confidence):
if c and o is not None:
o, t = o.unsqueeze(0), t.unsqueeze(0)
if self.modifier is not None:
o, t = self.modifier(o, t)
res = self.process_single(o, t)
if res is not None:
self.results.append(res)
self.batch_results.append(res)
else:
self.batch_results.append(None)
return self.batch_results
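# Typical flow for a metric (sketch of how the pieces above fit together): construct
# once, call on each batch with the outputs/targets dicts keyed by "segs", then reduce
# at the end of the epoch, e.g.
#   dice = DiceScore()
#   dice(outputs, targets)            # appends one value per confident, valid sample
#   epoch_dice = dice.reduce("mean")
#   dice.epoch_reset("val")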
class IoU(MetricBase):
""" wrapper around kornia to keep everything in one place
Matching cross-entropy loss format, output is a [B, C, H, W] float tensor where C is number of classes
target is a [B, H, W] integer tensor where every entry is in the range [0, C-1]
Function first converts output to match target and then uses kornia to compute mean_iou
"""
def __init__(self, num_classes: int, include_classes=None):
MetricBase.__init__(self, out_type="segs")
self.num_classes = num_classes
self.include_classes = include_classes # which classes to include
def process_single(self, output: torch.Tensor, target: torch.Tensor = None) -> float:
intersection, output_sum, target_sum = get_intersection_and_sums(output, target)
iou = intersection.type(torch.float) / (output_sum + target_sum - intersection).type(torch.float)
if self.include_classes is None:
return torch.mean(iou[1:]) # return mean of non-background classes.
else:
raise NotImplementedError("add handling for specific include classes")
class DiceScore(MetricBase):
def __init__(self, include_classes: list = None, modifier: Callable = None):
"""
dices score for a segmentation result
Args:
include_classes: (list) which classes to include in the output, default is [1:] excluding background
modifier: (Callable) an optional function to apply to output before calculations
"""
MetricBase.__init__(self, out_type="segs", modifier=modifier)
self.include_classes = include_classes # which classes to include in the DiceScore
def process_single(self, output: torch.Tensor, target: torch.Tensor = None) -> float:
"""
Processes a single example
Args:
output: BxCxHxW tensor where C is the number of classes
target: Bx1xHxW tensor where every entry is an int in the range [0, C-1]
Returns: (float) the mean dice score
"""
if output.shape[1] == 1:
output = convert_binary_output_to_classes(output)
intersection, output_sum, target_sum = get_intersection_and_sums(output, target)
dice = 2 * intersection.type(torch.float) / (output_sum + target_sum).type(torch.float)
if self.include_classes is None:
return torch.mean(dice[1:]) # same as iou, return mean of values for non-background classes
else:
return torch.mean(dice[self.include_classes]) # return mean of values for designated classes
class Bias(MetricBase):
def __init__(self, include_classes: list = None, modifier: Callable
#!/usr/bin/env python
from collections import deque
from geometry_msgs.msg import Pose, PoseStamped, TwistStamped
import math
import rospy
from scipy.spatial import KDTree
from std_msgs.msg import Int32
from styx_msgs.msg import Lane, Waypoint
import sys
import tf
'''
This node will publish waypoints from the car's current position to some `x`
distance ahead.
'''
# Constants
# Number of waypoints we will publish (reduced as suggested by "alangordon" in
# the Udacity Self-Driving Car Slack on 04/05/2018, 8:28 AM,
# https://carnd.slack.com/archives/C6NVDVAQ3/p1522909695000091, accessed
# 04/05/2018)
LOOKAHEAD_WPS = 50
# Undefined waypoint index
WP_UNDEFINED = -1
# Update rate of the main loop (in Hz)
UPDATE_RATE = 30
class WaypointUpdater(object):
def __init__(self):
""" Initialize the waypoint updater node:
- Subscribe to relevant topics
- Initialize member variables
- Publish 'final_waypoints' topic
"""
rospy.init_node('waypoint_updater')
# Set start time of the node
self.start_time = rospy.Time.now().to_sec()
# Subscribe to 'current pose' topic
# (Current ego position)
rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
self.ego_pose = PoseStamped()
# Subscribe to 'base_waypoints' topic
# (List of track waypoints; will only be sent once)
rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
self.waypoints = None
self.waypoint_velocities = []
self.num_waypoints = 0
self.is_init = False
# From Udacity SDC-ND Programming a Real Self-Driving Car
# Project Walkthrough (Term 3)
self.waypoint_tree = None
# Subscribe to 'traffic_waypoint' topic
# (Index of waypoint closest to the next red traffic light. If the next
# traffic light is not red, 'traffic_waypoint' is expected to be -1)
rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_cb)
self.wp_traffic_light = WP_UNDEFINED
# Subscribe to 'obstacle_waypoint' topic
# (Index of waypoint closest to the next obstacle. If there is no
# obstacle ahead, 'obstacle_waypoint' is expected to be -1)
rospy.Subscriber('/obstacle_waypoint', Int32, self.obstacle_cb)
self.wp_obstacle = WP_UNDEFINED
# Subscribe to 'current_velocity' topic
rospy.Subscriber('/current_velocity', TwistStamped, self.velocity_cb)
self.current_velocity = 0.0
# Publish waypoints ahead of the vehicle
# (Starting with the waypoint just ahead of the vehicle)
self.final_waypoints_pub = rospy.Publisher('final_waypoints', Lane,
queue_size = 1)
self.final_waypoints = Lane()
for i in range(LOOKAHEAD_WPS):
self.final_waypoints.waypoints.append(Waypoint())
# Reset parameters
self.decel_max = 0.0
self.accel_max = 0.0
self.velocity_max = 0.0
# Start node
self.loop()
def loop(self):
"""Main loop (publish waypoints at a fixed rate)
(adapted from Udacity SDC-ND Programming a Real Self-Driving Car
Project Walkthrough (Term 3))
"""
rate = rospy.Rate(UPDATE_RATE)
while not rospy.is_shutdown():
if (self.is_init):
self.publish_waypoints()
rate.sleep()
def publish_waypoints(self):
""" Publishes new waypoints for the waypoint follower node (starting
with the next waypoint for the ego vehicle).
"""
# Get start position
ego_pos = self.ego_pose.pose.position
# Find waypoint closest to ego vehicle
# (adapted from Udacity SDC-ND Programming a Real Self-Driving Car
# Project Walkthrough (Term 3))
closest_id = self.waypoint_tree.query([ego_pos.x, ego_pos.y], 1)[1]
# Determine vehicle yaw (from quaternion representation)
# (adapted from https://answers.ros.org/question/69754/quaternion-
# transformations-in-python/?answer=69799#post-id-69799,
# accessed 03/26/2018)
ego_orient = self.ego_pose.pose.orientation
q = (ego_orient.x, ego_orient.y, ego_orient.z, ego_orient.w)
euler_angles = tf.transformations.euler_from_quaternion(q)
ego_yaw = euler_angles[2]
# Check if the closest waypoint is in front or behind of ego
# vehicle (consider only 2D (x, y) projection)
# (adapted from Udacity Path Planning Project (Starter Code) and
# code posted by <NAME> in the Udacity Self-Driving Car Slack on
# 08/13/2017, 5:10 AM, https://carnd.slack.com/archives/C5ZS5SBA8/
# p1502593836232659, accessed: 03/10/2018).
first_id = closest_id
pos_closest = self.waypoints[closest_id].pose.pose.position
heading = math.atan2(pos_closest.y - ego_pos.y,
pos_closest.x - ego_pos.x)
heading = math.fmod(heading + 2 * math.pi, 2 * math.pi)
ego_yaw = math.fmod(ego_yaw + 2 * math.pi, 2 * math.pi)
angle = math.fabs(ego_yaw - heading)
if (angle > math.pi):
angle = 2 * math.pi - angle;
if (angle > math.pi / 2):
# Waypoint is behind vehicle --> select next
first_id = (first_id + 1) % self.num_waypoints
# Create list of next waypoints (consider track wrap-around)
# (update waypoint velocities in the process)
self.final_waypoints.header.stamp = self.ego_pose.header.stamp
self.calculate_velocities(self.final_waypoints.waypoints, first_id,
self.current_velocity, ego_pos)
# Publish next waypoints
self.final_waypoints_pub.publish(self.final_waypoints)
def calculate_velocities(self, waypoints, first_id, last_velocity,
last_position):
""" Set velocities for next waypoints for ego vehicle
Arguments:
waypoints -- Next waypoints for the ego vehicle (expected size:
LOOKAHEAD_WPS)
first_id -- ID (absolute) of next waypoint for ego vehicle
last_velocity -- Current velocity of ego vehicle
last_position -- Current position of ego vehicle
"""
stop_id = self.get_stop_point(waypoints, first_id)
for i in xrange(LOOKAHEAD_WPS):
# Set waypoint
wp_id = (first_id + i) % self.num_waypoints
waypoints[i] = self.waypoints[wp_id]
# After stop point?
if stop_id != WP_UNDEFINED and i >= stop_id:
waypoints[i].twist.twist.linear.x = 0.0
continue
# Get distance between last position and waypoint
wp_position = waypoints[i].pose.pose.position
dist = self.distance(last_position, wp_position)
# Determine next velocity
# (adapted from waypoint_loader.py)
wp_velocity = self.waypoint_velocities[wp_id]
velocity_diff = wp_velocity - last_velocity
if velocity_diff < 0.0:
# Decelerate
min_velocity = max(0.0, last_velocity
- math.sqrt(2 * self.decel_max * dist))
wp_velocity = max(min_velocity, wp_velocity)
elif velocity_diff > 0.0:
# Accelerate
max_velocity = min(self.velocity_max,
last_velocity + math.sqrt(2 *
self.accel_max * dist))
wp_velocity = min(max_velocity, wp_velocity)
# Consider stop point
if stop_id != WP_UNDEFINED:
dist = self.distance_path(waypoints, i, stop_id)
v_decel = math.sqrt(self.decel_max / 2 * dist)
if v_decel < 1.0:
v_decel = 0.0
wp_velocity = min(wp_velocity, v_decel)
# Set waypoint velocity
waypoints[i].twist.twist.linear.x = wp_velocity
# Next (consider track wrap-around)
last_velocity = wp_velocity
last_position = wp_position
def get_stop_point(self, waypoints, first_id):
""" Check if next traffic light/obstacle is in range of next set of
waypoints for ego vehicle
Arguments:
waypoints -- Next waypoints for the ego vehicle (expected size:
LOOKAHEAD_WPS)
first_id -- ID (absolute) of next waypoint for ego vehicle
Return:
ID of stopping point in set of waypoints
"""
# Make IDs relative
obstacle_id = self.waypoint_in_range(self.wp_obstacle, first_id)
traffic_light_id = self.waypoint_in_range(self.wp_traffic_light,
first_id)
# Stop?
stop_id = obstacle_id
if (traffic_light_id != WP_UNDEFINED and
(stop_id == WP_UNDEFINED or traffic_light_id < stop_id)):
stop_id = traffic_light_id
return stop_id
def waypoint_in_range(self, wp_id, first_id):
""" Check if a given waypoint (defined through its absolute ID) is in
range of the list of next waypoints (where the first waypoint is
defined through 'first_id')
Arguments:
wp_id -- Waypoint ID (absolute)
first_id -- ID of first waypoint in 'waypoints' (absolute)
Return:
Relative position of the waypoint in the list of waypoints (when
the waypoint is in range). WP_UNDEFINED, otherwise.
"""
if wp_id == WP_UNDEFINED:
return WP_UNDEFINED
# Make ID relative
if wp_id < first_id:
wp_id = self.num_waypoints + wp_id - first_id
else:
wp_id = wp_id - first_id
# Request a full stop a few waypoints before the stop line
# (to prevent driving over the stop line (e.g. due to latency from the
# controllers, node update rates, etc.) at which point the traffic
# light will not be detected in front of the car anymore)
#wp_id = (wp_id - 4) if wp_id > 3 else 0
wp_id = (wp_id - 2) if wp_id > 1 else 0
# Is the waypoint in range?
if wp_id >= LOOKAHEAD_WPS:
return WP_UNDEFINED
return wp_id
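# Worked example (added for illustration): with self.num_waypoints = 100, first_id = 98
# and a stop line at absolute waypoint 1, the relative index is 100 + 1 - 98 = 3; after
# the two-waypoint safety margin it becomes 1, which is < LOOKAHEAD_WPS, so the stop
# point is reported at position 1 of the published waypoint list.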
def pose_cb(self, ego_pose):
""" Callback function for ego vehicle pose (position, orientation)
updates.
Arguments:
ego_pose: Current ego pose
"""
self.ego_pose = ego_pose
def waypoints_cb(self, waypoints):
""" Receives a list of track waypoints and stores them internally
Arguments:
waypoints -- List of track waypoints
"""
if self.waypoints:
return
self.waypoints = waypoints.waypoints
self.num_waypoints = len(self.waypoints)
for wp in waypoints.waypoints:
self.waypoint_velocities.append(wp.twist.twist.linear.x)
# Adapted from Udacity SDC-ND Programming a Real Self-Driving Car
# Project Walkthrough (Term 3)
waypoints_2d = [[wp.pose.pose.position.x, wp.pose.pose.position.y]\
for wp in self.waypoints]
self.waypoint_tree = KDTree(waypoints_2d)
# Get limits
self.decel_max = -rospy.get_param('/dbw_node/decel_limit')
self.accel_max = rospy.get_param('/dbw_node/accel_limit')
self.velocity_max = rospy.get_param('/waypoint_loader/velocity') / 3.6
# Mark node as ready
self.is_init = True
def traffic_cb(self, wp_traffic_light):
""" Receives the index of the waypoint that corresponds to the
stopline of the next red traffic light. An index of 'WP_UNDEFINED'
signals that no red traffic light (TL) is ahead (or close by)
Arguments:
wp_traffic_light -- Index of waypoint close to next TL stopline
"""
self.wp_traffic_light = wp_traffic_light.data
#if self.wp_traffic_light != WP_UNDEFINED:
# self.check_waypoint_id(self.waypoints, self.wp_traffic_light)
def obstacle_cb(self, wp_obstacle):
""" Receives the index of the waypoint that corresponds to the next
obstacle. An index of 'WP_UNDEFINED' signals that no obstacle is
ahead (or close by)
Arguments:
wp_obstacle -- Index of waypoint close to next obstacle
"""
self.wp_obstacle = wp_obstacle.data
format != b"tagdone" and (
line.startswith(b"Date: ") or line.startswith(b"date: ")
):
date = line[6:]
format = b"tag"
elif format == b"tag" and line == b"":
# when looking for tags (subject: from: etc) they
# end once you find a blank line in the source
format = b"tagdone"
elif message or line:
message.append(line)
comments.append(line)
eatdiff(message)
eatdiff(comments)
# Remember the exact starting line of the patch diffs before consuming
# empty lines, for external use by TortoiseHg and others
self.diffstartline = len(comments)
eatempty(message)
eatempty(comments)
# make sure message isn't empty
if format and format.startswith(b"tag") and subject:
message.insert(0, subject)
self.message = message
self.comments = comments
self.user = user
self.date = date
self.parent = parent
# nodeid and branch are for external use by TortoiseHg and others
self.nodeid = nodeid
self.branch = branch
self.haspatch = diffstart > 1
self.plainmode = (
plainmode
or b'# HG changeset patch' not in self.comments
and any(
c.startswith(b'Date: ') or c.startswith(b'From: ')
for c in self.comments
)
)
def setuser(self, user):
try:
inserthgheader(self.comments, b'# User ', user)
except ValueError:
if self.plainmode:
insertplainheader(self.comments, b'From', user)
else:
tmp = [b'# HG changeset patch', b'# User ' + user]
self.comments = tmp + self.comments
self.user = user
def setdate(self, date):
try:
inserthgheader(self.comments, b'# Date ', date)
except ValueError:
if self.plainmode:
insertplainheader(self.comments, b'Date', date)
else:
tmp = [b'# HG changeset patch', b'# Date ' + date]
self.comments = tmp + self.comments
self.date = date
def setparent(self, parent):
try:
inserthgheader(self.comments, b'# Parent ', parent)
except ValueError:
if not self.plainmode:
tmp = [b'# HG changeset patch', b'# Parent ' + parent]
self.comments = tmp + self.comments
self.parent = parent
def setmessage(self, message):
if self.comments:
self._delmsg()
self.message = [message]
if message:
if self.plainmode and self.comments and self.comments[-1]:
self.comments.append(b'')
self.comments.append(message)
def __bytes__(self):
s = b'\n'.join(self.comments).rstrip()
if not s:
return b''
return s + b'\n\n'
__str__ = encoding.strmethod(__bytes__)
def _delmsg(self):
"""Remove existing message, keeping the rest of the comments fields.
If comments contains 'subject: ', message will prepend
the field and a blank line."""
if self.message:
subj = b'subject: ' + self.message[0].lower()
for i in pycompat.xrange(len(self.comments)):
if subj == self.comments[i].lower():
del self.comments[i]
self.message = self.message[2:]
break
ci = 0
for mi in self.message:
while mi != self.comments[ci]:
ci += 1
del self.comments[ci]
def newcommit(repo, phase, *args, **kwargs):
"""helper dedicated to ensure a commit respect mq.secret setting
It should be used instead of repo.commit inside the mq source for operation
creating new changeset.
"""
repo = repo.unfiltered()
if phase is None:
if repo.ui.configbool(b'mq', b'secret'):
phase = phases.secret
overrides = {(b'ui', b'allowemptycommit'): True}
if phase is not None:
overrides[(b'phases', b'new-commit')] = phase
with repo.ui.configoverride(overrides, b'mq'):
repo.ui.setconfig(b'ui', b'allowemptycommit', True)
return repo.commit(*args, **kwargs)
class AbortNoCleanup(error.Abort):
pass
class queue(object):
def __init__(self, ui, baseui, path, patchdir=None):
self.basepath = path
try:
with open(os.path.join(path, b'patches.queue'), 'rb') as fh:
cur = fh.read().rstrip()
if not cur:
curpath = os.path.join(path, b'patches')
else:
curpath = os.path.join(path, b'patches-' + cur)
except IOError:
curpath = os.path.join(path, b'patches')
self.path = patchdir or curpath
self.opener = vfsmod.vfs(self.path)
self.ui = ui
self.baseui = baseui
self.applieddirty = False
self.seriesdirty = False
self.added = []
self.seriespath = b"series"
self.statuspath = b"status"
self.guardspath = b"guards"
self.activeguards = None
self.guardsdirty = False
# Handle mq.git as a bool with extended values
gitmode = ui.config(b'mq', b'git').lower()
boolmode = stringutil.parsebool(gitmode)
if boolmode is not None:
if boolmode:
gitmode = b'yes'
else:
gitmode = b'no'
self.gitmode = gitmode
# deprecated config: mq.plain
self.plainmode = ui.configbool(b'mq', b'plain')
self.checkapplied = True
@util.propertycache
def applied(self):
def parselines(lines):
for l in lines:
entry = l.split(b':', 1)
if len(entry) > 1:
n, name = entry
yield statusentry(bin(n), name)
elif l.strip():
self.ui.warn(
_(b'malformated mq status line: %s\n')
% stringutil.pprint(entry)
)
# else we ignore empty lines
try:
lines = self.opener.read(self.statuspath).splitlines()
return list(parselines(lines))
except IOError as e:
if e.errno == errno.ENOENT:
return []
raise
@util.propertycache
def fullseries(self):
try:
return self.opener.read(self.seriespath).splitlines()
except IOError as e:
if e.errno == errno.ENOENT:
return []
raise
@util.propertycache
def series(self):
self.parseseries()
return self.series
@util.propertycache
def seriesguards(self):
self.parseseries()
return self.seriesguards
def invalidate(self):
for a in 'applied fullseries series seriesguards'.split():
if a in self.__dict__:
delattr(self, a)
self.applieddirty = False
self.seriesdirty = False
self.guardsdirty = False
self.activeguards = None
def diffopts(self, opts=None, patchfn=None, plain=False):
"""Return diff options tweaked for this mq use, possibly upgrading to
git format, and possibly plain and without lossy options."""
diffopts = patchmod.difffeatureopts(
self.ui,
opts,
git=True,
whitespace=not plain,
formatchanging=not plain,
)
if self.gitmode == b'auto':
diffopts.upgrade = True
elif self.gitmode == b'keep':
pass
elif self.gitmode in (b'yes', b'no'):
diffopts.git = self.gitmode == b'yes'
else:
raise error.Abort(
_(b'mq.git option can be auto/keep/yes/no got %s')
% self.gitmode
)
if patchfn:
diffopts = self.patchopts(diffopts, patchfn)
return diffopts
def patchopts(self, diffopts, *patches):
"""Return a copy of input diff options with git set to true if
referenced patch is a git patch and should be preserved as such.
"""
diffopts = diffopts.copy()
if not diffopts.git and self.gitmode == b'keep':
for patchfn in patches:
patchf = self.opener(patchfn, b'r')
# if the patch was a git patch, refresh it as a git patch
diffopts.git = any(
line.startswith(b'diff --git') for line in patchf
)
patchf.close()
return diffopts
def join(self, *p):
return os.path.join(self.path, *p)
def findseries(self, patch):
def matchpatch(l):
l = l.split(b'#', 1)[0]
return l.strip() == patch
for index, l in enumerate(self.fullseries):
if matchpatch(l):
return index
return None
guard_re = re.compile(br'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
def parseseries(self):
self.series = []
self.seriesguards = []
for l in self.fullseries:
h = l.find(b'#')
if h == -1:
patch = l
comment = b''
elif h == 0:
continue
else:
patch = l[:h]
comment = l[h:]
patch = patch.strip()
if patch:
if patch in self.series:
raise error.Abort(
_(b'%s appears more than once in %s')
% (patch, self.join(self.seriespath))
)
self.series.append(patch)
self.seriesguards.append(self.guard_re.findall(comment))
def checkguard(self, guard):
if not guard:
return _(b'guard cannot be an empty string')
bad_chars = b'# \t\r\n\f'
first = guard[0]
if first in b'-+':
return _(b'guard %r starts with invalid character: %r') % (
guard,
first,
)
for c in bad_chars:
if c in guard:
return _(b'invalid character in guard %r: %r') % (guard, c)
def setactive(self, guards):
for guard in guards:
bad = self.checkguard(guard)
if bad:
raise error.Abort(bad)
guards = sorted(set(guards))
self.ui.debug(b'active guards: %s\n' % b' '.join(guards))
self.activeguards = guards
self.guardsdirty = True
def active(self):
if self.activeguards is None:
self.activeguards = []
try:
guards = self.opener.read(self.guardspath).split()
except IOError as err:
if err.errno != errno.ENOENT:
raise
guards = []
for i, guard in enumerate(guards):
bad = self.checkguard(guard)
if bad:
self.ui.warn(
b'%s:%d: %s\n'
% (self.join(self.guardspath), i + 1, bad)
)
else:
self.activeguards.append(guard)
return self.activeguards
def setguards(self, idx, guards):
for g in guards:
if len(g) < 2:
raise error.Abort(_(b'guard %r too short') % g)
if g[0] not in b'-+':
raise error.Abort(_(b'guard %r starts with invalid char') % g)
bad = self.checkguard(g[1:])
if bad:
raise error.Abort(bad)
drop = self.guard_re.sub(b'', self.fullseries[idx])
self.fullseries[idx] = drop + b''.join([b' #' + g for g in guards])
self.parseseries()
self.seriesdirty = True
def pushable(self, idx):
if isinstance(idx, bytes):
idx = self.series.index(idx)
patchguards = self.seriesguards[idx]
if not patchguards:
return True, None
guards = self.active()
exactneg = [
g for g in patchguards if g.startswith(b'-') and g[1:] in guards
]
if exactneg:
return False, stringutil.pprint(exactneg[0])
pos = [g for g in patchguards if g.startswith(b'+')]
exactpos = [g for g in pos if g[1:] in guards]
if pos:
if exactpos:
return True, stringutil.pprint(exactpos[0])
return False, b' '.join([stringutil.pprint(p) for p in pos])
return True, b''
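# In short: a patch guarded with "-foo" is skipped whenever guard "foo" is active, a
# patch carrying only "+bar" guards is pushable only while one of them is active, and a
# patch with no guards is always pushable.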
def explainpushable(self, idx, all_patches=False):
if all_patches:
write = self.ui.write
else:
write = self.ui.warn
if all_patches or self.ui.verbose:
if isinstance(idx, bytes):
idx = self.series.index(idx)
pushable, why = self.pushable(idx)
if all_patches and pushable:
if why is None:
write(
_(b'allowing %s - no guards in effect\n')
% self.series[idx]
)
else:
if not why:
write(
_(b'allowing %s - no matching negative guards\n')
% self.series[idx]
)
else:
write(
_(b'allowing %s - guarded by %s\n')
% (self.series[idx], why)
)
if not pushable:
if why:
write(
_(b'skipping %s - guarded by %s\n')
% (self.series[idx], why)
)
- I11i
if 47 - 47: oO0o . i1IIi * I1ii11iIi11i % OOooOOo % IiII / Oo0Ooo
if ( len ( self . registered_rlocs ) == 0 ) : self . registered = False
return
if 39 - 39: i11iIiiIii . OOooOOo + Oo0Ooo
if 92 - 92: O0 * Oo0Ooo / o0oOOo0O0Ooo % OoO0O00
def merge_rles_in_site_eid ( self ) :
if 87 - 87: OoooooooOO / I11i . O0
if 77 - 77: OOooOOo + oO0o * iIii1I11I1II1 / oO0o / OOooOOo . i11iIiiIii
if 92 - 92: Oo0Ooo . o0oOOo0O0Ooo % OoooooooOO * i11iIiiIii * OoO0O00 * o0oOOo0O0Ooo
if 48 - 48: iII111i * I1ii11iIi11i * oO0o % O0 . OoO0O00
i1I1I = { }
for ii1I1i11 in self . registered_rlocs :
if ( ii1I1i11 . rle == None ) : continue
for i1ooOoO in ii1I1i11 . rle . rle_nodes :
I1Iii1I = i1ooOoO . address . print_address_no_iid ( )
i1I1I [ I1Iii1I ] = i1ooOoO . address
if 37 - 37: I11i . O0 - Oo0Ooo % iII111i
break
if 11 - 11: I11i % OoooooooOO
if 96 - 96: i11iIiiIii * O0 + iIii1I11I1II1 . I11i * IiII + I1Ii111
if 84 - 84: I1ii11iIi11i / o0oOOo0O0Ooo * II111iiii . i11iIiiIii
if 68 - 68: OOooOOo . ooOoO0o / OOooOOo + i1IIi / I1IiiI
if 80 - 80: Oo0Ooo + Oo0Ooo + oO0o % i1IIi / ooOoO0o
self . merge_rlocs_in_site_eid ( )
if 24 - 24: i11iIiiIii - ooOoO0o * iII111i - Ii1I . iIii1I11I1II1 . I1IiiI
if 81 - 81: OoOoOO00 * OoOoOO00 + OOooOOo . I11i - oO0o
if 85 - 85: O0 * I1IiiI . Oo0Ooo - IiII
if 84 - 84: I1Ii111 . iIii1I11I1II1 . O0 * I1ii11iIi11i
if 59 - 59: i1IIi . o0oOOo0O0Ooo . Oo0Ooo * I1Ii111 + OoooooooOO
if 11 - 11: I11i * ooOoO0o % iIii1I11I1II1 - O0
if 68 - 68: ooOoO0o * OoooooooOO - OoooooooOO
if 59 - 59: Ii1I / I11i / I1Ii111 + IiII * I1ii11iIi11i
iIOOoo0 = [ ]
for ii1I1i11 in self . registered_rlocs :
if ( self . registered_rlocs . index ( ii1I1i11 ) == 0 ) :
iIOOoo0 . append ( ii1I1i11 )
continue
if 17 - 17: I1ii11iIi11i + OoooooooOO / iIii1I11I1II1 . II111iiii + Oo0Ooo
if ( ii1I1i11 . rle == None ) : iIOOoo0 . append ( ii1I1i11 )
if 7 - 7: O0 - I1ii11iIi11i - iIii1I11I1II1
self . registered_rlocs = iIOOoo0
if 96 - 96: OoOoOO00 . I1IiiI . I11i * OoooooooOO + OoooooooOO * O0
if 90 - 90: I11i + I1ii11iIi11i + OoooooooOO + OoOoOO00 + IiII / iII111i
if 75 - 75: i11iIiiIii
if 27 - 27: I11i - IiII - I1Ii111
if 90 - 90: OoO0O00 . oO0o * O0 / I11i % O0 + I1Ii111
if 48 - 48: iIii1I11I1II1 . i11iIiiIii / OoooooooOO . i1IIi . o0oOOo0O0Ooo
if 84 - 84: Ii1I
O0OOOO0000O = lisp_rle ( "" )
oO0o00000o = { }
O0O00O = None
for IIII in self . individual_registrations . values ( ) :
if ( IIII . registered == False ) : continue
iIiIiiIII1I1 = IIII . registered_rlocs [ 0 ] . rle
if ( iIiIiiIII1I1 == None ) : continue
if 8 - 8: i11iIiiIii * i1IIi . Oo0Ooo + I11i * I11i . OoOoOO00
O0O00O = IIII . registered_rlocs [ 0 ] . rloc_name
for ooOOo0O in iIiIiiIII1I1 . rle_nodes :
I1Iii1I = ooOOo0O . address . print_address_no_iid ( )
if ( oO0o00000o . has_key ( I1Iii1I ) ) : break
if 37 - 37: Ii1I * O0 - I1Ii111
i1ooOoO = lisp_rle_node ( )
i1ooOoO . address . copy_address ( ooOOo0O . address )
i1ooOoO . level = ooOOo0O . level
i1ooOoO . rloc_name = O0O00O
O0OOOO0000O . rle_nodes . append ( i1ooOoO )
oO0o00000o [ I1Iii1I ] = ooOOo0O . address
if 33 - 33: iIii1I11I1II1 . I11i
if 63 - 63: oO0o - iII111i
if 13 - 13: I1Ii111 / i1IIi % OoooooooOO / I11i
if 66 - 66: I1Ii111 % o0oOOo0O0Ooo . iII111i . ooOoO0o + OOooOOo * II111iiii
if 33 - 33: oO0o
if 64 - 64: OoO0O00 % Oo0Ooo % I11i . iII111i % I1IiiI
if ( len ( O0OOOO0000O . rle_nodes ) == 0 ) : O0OOOO0000O = None
if ( len ( self . registered_rlocs ) != 0 ) :
self . registered_rlocs [ 0 ] . rle = O0OOOO0000O
if ( O0O00O ) : self . registered_rlocs [ 0 ] . rloc_name = None
if 50 - 50: i1IIi + ooOoO0o - iIii1I11I1II1
if 45 - 45: OoooooooOO / o0oOOo0O0Ooo / iII111i
if 72 - 72: I1Ii111
if 94 - 94: ooOoO0o . IiII - Ii1I + I1ii11iIi11i / ooOoO0o
if 10 - 10: ooOoO0o . OOooOOo * O0 % II111iiii
if ( i1I1I . keys ( ) == oO0o00000o . keys ( ) ) : return ( False )
if 12 - 12: oO0o + I1IiiI * Oo0Ooo - iII111i
lprint ( "{} {} from {} to {}" . format ( green ( self . print_eid_tuple ( ) , False ) , bold ( "RLE change" , False ) ,
# i11iIiiIii / OoO0O00 % I1Ii111 * OoOoOO00 . o0oOOo0O0Ooo / ooOoO0o
i1I1I . keys ( ) , oO0o00000o . keys ( ) ) )
if 100 - 100: I1IiiI
return ( True )
if 27 - 27: OoOoOO00 * O0 - I11i
if 98 - 98: OoOoOO00 % I1ii11iIi11i / OoOoOO00 % o0oOOo0O0Ooo / I1ii11iIi11i
def add_cache ( self ) :
if ( self . group . is_null ( ) ) :
lisp_sites_by_eid . add_cache ( self . eid , self )
else :
iiiI1iI11i1i1 = lisp_sites_by_eid . lookup_cache ( self . group , True )
if ( iiiI1iI11i1i1 == None ) :
iiiI1iI11i1i1 = lisp_site_eid ( self . site )
iiiI1iI11i1i1 . eid . copy_address ( self . group )
iiiI1iI11i1i1 . group . copy_address ( self . group )
lisp_sites_by_eid . add_cache ( self . group , iiiI1iI11i1i1 )
if 21 - 21: I1IiiI * IiII - Oo0Ooo % ooOoO0o * i1IIi
if 23 - 23: I11i * II111iiii + OoooooooOO . i1IIi + OoO0O00 + OoOoOO00
if 52 - 52: iII111i * OoOoOO00
if 80 - 80: I1Ii111 / IiII * o0oOOo0O0Ooo - OoOoOO00 / iIii1I11I1II1
if 38 - 38: II111iiii / I11i + IiII % OoooooooOO
iiiI1iI11i1i1 . parent_for_more_specifics = self . parent_for_more_specifics
if 27 - 27: OoOoOO00 * OoO0O00 * OOooOOo % I1IiiI * o0oOOo0O0Ooo + I1ii11iIi11i
if ( self . eid . is_null ( ) ) : self . eid . make_default_route ( iiiI1iI11i1i1 . group )
iiiI1iI11i1i1 . add_source_entry ( self )
if 73 - 73: i1IIi
if 52 - 52: IiII / i11iIiiIii * O0
if 67 - 67: OOooOOo / I11i - I1Ii111 % i11iIiiIii
def delete_cache ( self ) :
if ( self . group . is_null ( ) ) :
lisp_sites_by_eid . delete_cache ( self . eid )
else :
iiiI1iI11i1i1 = lisp_sites_by_eid . lookup_cache ( self . group , True )
if ( iiiI1iI11i1i1 == None ) : return
if 3 - 3: oO0o + iII111i + OOooOOo
IIII = iiiI1iI11i1i1 . lookup_source_cache ( self . eid , True )
if ( IIII == None ) : return
if 54 - 54: i11iIiiIii + OoO0O00 - IiII - iII111i / I11i
if ( iiiI1iI11i1i1 . source_cache == None ) : return
if 85 - 85: OOooOOo * OOooOOo * I1Ii111 - ooOoO0o . O0 % iII111i
iiiI1iI11i1i1 . source_cache . delete_cache ( self . eid )
if ( iiiI1iI11i1i1 . source_cache . cache_size ( ) == 0 ) :
lisp_sites_by_eid . delete_cache ( self . group )
#!/usr/bin/env python3
# pymmFunctions.py
'''
This is a set of functions used by pymm scripts
This file is organized into 4 main sections:
* CONFIG CHECK STUFF
* PYMM ADMIN / LOGGING STUFF
* FILE CHECK STUFF
* SYSTEM / ENVIRONMENT STUFF
'''
import configparser
from datetime import date
import glob
import hashlib
import json
import os
import platform
import re
import subprocess
import sys
import shutil
import time
# nonstandard libraries:
import Levenshtein
# local modules:
try:
import dbReporters
import loggers
import makeMetadata
import MySQLqueries
import directoryScanner
except:
from . import dbReporters
from . import loggers
from . import makeMetadata
from . import MySQLqueries
from . import directoryScanner
################################################################
#
# CONFIG CHECK STUFF
#
def read_config():
pymmDirectory = os.path.dirname(os.path.abspath(__file__))
configPath = os.path.join(pymmDirectory,'pymmconfig','config.ini')
if not os.path.isfile(configPath):
print('''
CONFIGURATION PROBLEM:\n
YOU HAVE NOT YET SET CONFIG.INI OR IT IS MISSING.\n
RUN pymmconfig.py TO CREATE CONFIG.INI AND CHOOSE YOUR DESIRED SETTINGS.\n
NOW EXITING.
''')
sys.exit()
    config = configparser.ConfigParser()  # SafeConfigParser is deprecated
config.read(configPath)
return config
def check_missing_ingest_paths(pymmConfig):
requiredPaths = {
'outdir_ingestsip':'the ingestSip.py output path',
'aip_staging':'the AIP storage path',
'resourcespace_deliver':'the resourcespace output path'
}
missingPaths = 0
for path in requiredPaths.items():
if not os.path.isdir(pymmConfig['paths'][path[0]]):
missingPaths += 1
print('''
CONFIGURATION PROBLEM:
You have not yet set a valid directory for '{}.' Please run pymmConfig.py,
edit the config file directly,
or use '--{}' to set {}.
HINT: Check that the filepath is entered correctly.
'''.format(path[0],path[0],path[1])
)
if missingPaths > 0:
print("\nYou are missing some required file paths and we have to quit. Sorry.")
sys.exit()
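
# Illustrative sketch (not called anywhere in this module) of how the two
# config helpers above are usually chained at script startup; the
# 'skipChecks' flag is hypothetical and exists only for this example.
def _example_config_startup(skipChecks=False):
    pymmConfig = read_config()
    if not skipChecks:
        check_missing_ingest_paths(pymmConfig)
    return pymmConfig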
#
# END CONFIG CHECK STUFF
#
################################################################
################################################################
#
# PYMM ADMIN / LOGGING STUFF
#
# have to import dbAccess after init config to avoid circular error
try:
import dbAccess
except:
from . import dbAccess
def cleanup_package(CurrentIngest,pathForDeletion,reason,outcome=None):
# print(pathForDeletion)
inputType = CurrentIngest.InputObject.inputType
dontDelete = False
if reason == "ABORTING":
status = 'ABORTING'
event = 'ingestion end'
if not outcome:
outcome = (
"Something went critically wrong... "
"The ingest process was aborted."
"\n{}\nand its contents have been deleted.".format(
pathForDeletion
)
)
# put the things back
try:
if inputType == 'file':
_object = [
thing.path for thing in
os.scandir(CurrentIngest.packageObjectDir)
if thing.is_file()
][0]
if os.path.isfile(CurrentIngest.InputObject.inputPath):
pass
else:
shutil.move(
_object,
CurrentIngest.InputObject.inputParent
)
else:
if not os.path.isdir(CurrentIngest.InputObject.inputPath):
os.mkdir(CurrentIngest.InputObject.inputPath)
for _object in os.scandir(CurrentIngest.packageObjectDir):
if _object.name not in ('resourcespace','prores'):
shutil.move(
_object.path,
CurrentIngest.InputObject.inputPath
)
except:
dontDelete = True
outcome = ("COULD NOT REPLACE ORIGINAL COPIES!! \
NOT DELETING {}!".format(pathForDeletion))
print(outcome)
elif reason == 'done':
status = 'OK'
event = 'deletion'
outcome = (
"Deleting original copies "
"of object at {}".format(pathForDeletion)
)
if os.path.isdir(pathForDeletion) and dontDelete == False:
try:
shutil.rmtree(pathForDeletion)
except:
outcome = (
"Could not delete the package at "
+pathForDeletion+
". Try deleting it manually?"
)
print(outcome)
CurrentIngest.caller = 'pymmFunctions.cleanup_package()'
loggers.end_log(
CurrentIngest,
event,
outcome,
status
)
def validate_SIP_structure(SIPpath):
'''
Check that all the top-level stuff expected in a package exists.
Don't go too deep...
Current expected structure is:
UUID/
UUID/
metadata/
objects_manifest_UUID_iso8601.txt
objectCanonicalName_pbcore.xml
logs/
ingestLog.txt
ffmpeglog
rsyncLog
objects/
masterobject1_framemd5.md5
masterobject2_framemd5.md5
masterobject1_mediainfo.xml
masterobject2_mediainfo.xml
resourcespace/
resourcespace_mediainfo.xml
objects/
masterobject1
masterobject2
resourcespace/
resourcespace1
resourcespace2
# (changed this 7/16/18) hashdeep_manifest_UUID_iso8601.txt
'''
structureValidated = True
status = "OK"
_UUID = os.path.basename(SIPpath)
# define the directories to check
ingestDir = os.path.join(SIPpath,_UUID)
metadataDir = os.path.join(ingestDir,'metadata')
logDir = os.path.join(metadataDir,'logs')
objectMetadataDir = os.path.join(metadataDir,'objects')
objectDir = os.path.join(ingestDir,'objects')
dirs = [ingestDir,metadataDir,logDir,objectMetadataDir,objectDir]
reasonsFailed = []
# check that they exist
# I should log the non-existence of any of these
# maybe rename the SIP to FAILED-UUID?
for thing in dirs:
if not os.path.isdir(thing):
structureValidated = False
failure = "missing {}".format(os.path.basename(thing))
reasonsFailed.append(failure)
print(failure)
# use glob to search for the existence of
# 1) hashdeep manifest
# 2) pbcore xml file
objectManifestPattern = os.path.join(
metadataDir,
'objects_manifest_*'
)
manifest = glob.glob(objectManifestPattern)
if manifest == []:
failure = "missing a hashdeep manifest for the SIP object directory"
reasonsFailed.append(failure)
print(failure)
structureValidated = False
pbcorePattern = os.path.join(metadataDir,'*_pbcore.xml')
pbcore = glob.glob(pbcorePattern)
if pbcore == []:
failure = "missing a pbcore xml description for the object"
reasonsFailed.append(failure)
print(failure)
structureValidated = False
if structureValidated:
outcome = "SIP validated against expected structure"
else:
outcome = "SIP failed to validate for these reasons:\n~ {}\n".format(
"\n~ ".join(reasonsFailed)
)
return structureValidated,outcome
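
# Minimal usage sketch for the validator above; the SIP path is a made-up
# example, not a real package location.
def _example_validate_sip(sipPath='/tmp/ingest_staging/imaginary-uuid'):
    structureOK, outcome = validate_SIP_structure(sipPath)
    if not structureOK:
        print(outcome)
    return structureOK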
def database_connection(user):
connection = dbAccess.DB(user)
try:
connection.connect()
return connection
except:
print("DB connection problem...")
return False
def do_query(connection,sql,*args):
'''
must be passed an open mysql.connector.connection.MySQLConnection object
'''
cursor = connection.query(sql,*args)
return cursor
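
# Illustrative sketch of combining the two helpers above. The user name and
# SQL are hypothetical, and the object returned by do_query() depends on the
# dbAccess wrapper's query() implementation.
def _example_db_roundtrip(user='pymm'):
    connection = database_connection(user)
    if not connection:
        return None
    cursor = do_query(connection, "SELECT NOW();")
    return cursor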
def parse_pbcore_xml(pbcoreFile):
pbcoreString = ''
with open(pbcoreFile,'r') as f:
for line in f:
pbcoreString += line
return pbcoreString
#
# END PYMM ADMIN / LOGGING STUFF
#
################################################################
################################################################
#
# FILE CHECK STUFF
#
def is_video(inputPath):
# Look for a video stream with codec_type == 'video'
ffprobe = [
'ffprobe',
'-v','error',
'-i',inputPath,
'-print_format','json',
'-show_streams',
'-select_streams','v'
]
try:
probe = subprocess.run(ffprobe,stdout=subprocess.PIPE)
out = probe.stdout.decode('utf-8')
output = json.loads(out)
try:
codec_type = output['streams'][0]['codec_type']
if codec_type == 'video':
if not any([x in output['streams'][0]['codec_name'] for x in ('jpeg','jpg','png')]):
return True
else:
return False
except:
return False
except:
return False
def is_audio(inputPath):
print("THIS ISN'T A VIDEO FILE\n"
'maybe this is an audio file?')
# DO THE SAME AS ABOVE BUT codec_type == 'audio'
ffprobe = [
'ffprobe',
'-v','error',
'-i',inputPath,
'-print_format','json',
'-show_streams',
'-select_streams','a'
]
try:
probe = subprocess.run(ffprobe,stdout=subprocess.PIPE)
out = probe.stdout.decode('utf-8')
output = json.loads(out)
try:
codec_type = output['streams'][0]['codec_type']
if codec_type == 'audio':
print("This appears to be an audio file!")
return True
except:
print("THIS DOESN'T SMELL LIKE AN AUDIO FILE EITHER")
# print(output)
return False
except:
print("INVALID FILE INPUT, NOT AUDIO EITHER")
return False
def is_av(inputPath):
'''
run tests for video, then audio, then DPX seq, then give up.
@FIXME - this should return a more verbose/useful
explanation of failed tests.
    Currently is_av() returns a type string ('VIDEO', 'AUDIO', 'DPX'),
    False, or None, and callers mostly treat that value as truthy/falsy.
'''
_is_video = is_video(inputPath)
_is_audio = False
_is_dpx = False
_is_dpx_av = False
if _is_video == True:
return 'VIDEO'
else:
_is_audio = is_audio(inputPath)
if _is_audio:
return 'AUDIO'
else:
if os.path.isdir(inputPath):
# if it's a folder, see if it's a DPX sequence
try:
# test for a valid folder structure
_is_dpx,details = directoryScanner.main(inputPath)
print(_is_dpx)
print(details)
except:
print('error scanning input!')
return False
if _is_dpx:
if _is_dpx == 'dpx':
print('THIS IS AN IMAGE SEQUENCE!')
return 'DPX'
else:
# if it passes the folder structure, run
# mediainfo to check for dpx contents
status, failedDirs = test_sequence_reel_dir(inputPath)
if status == True:
print('THIS IS AN IMAGE SEQUENCE!')
return 'DPX'
else:
print(
'ERROR: check out this list of '
'problem directories: {}'.format(failedDirs)
)
return False
else:
return None
else:
return None
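
# Sketch of how callers typically branch on the value returned by is_av();
# the print statements stand in for real handlers, which are not part of
# this module.
def _example_dispatch_av(inputPath):
    avStatus = is_av(inputPath)
    if avStatus == 'VIDEO':
        print('treat as video: '+inputPath)
    elif avStatus == 'AUDIO':
        print('treat as audio: '+inputPath)
    elif avStatus == 'DPX':
        print('treat as image sequence: '+inputPath)
    else:
        print('not recognizably AV: '+inputPath)
    return avStatus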
def test_sequence_reel_dir(reelPath):
'''
Take a directory that should contain only a wav file
and a corresponding directory with an image sequence in it.
If there's a problem with one or more of the directories return
it/them in a list.
'''
failedDirs = []
failures = 0
for item in os.scandir(reelPath):
if item.name == 'documentation':
break
if item.is_dir():
print(item.path)
if item.name.lower() == 'dpx':
_is_dpx = is_dpx_sequence(item.path)
if not _is_dpx:
failedDirs.append(item.path)
failures += 1
else:
failedDirs.append(item.path)
failures += 1
if failures > 0:
return False, failedDirs
else:
return True, failedDirs
def is_dpx_sequence(inputPath):
'''
run mediainfo on the 'dpx' folder
'''
_is_dpx_av = False
try:
        fmt = get_mediainfo_value(inputPath,'General','Format')
        # check whether mediainfo reports a DPX or directory format
        if any(x in fmt.lower() for x in ('dpx','directory')):
_is_dpx_av = True
else:
pass
except:
_is_dpx_av = False
return _is_dpx_av
def check_policy(ingestType,inputPath):
print('do policy check stuff')
policyStatus = "result of check against mediaconch policy"
return policyStatus
def dir_or_file(inputPath):
if os.path.isdir(inputPath):
return 'dir'
elif os.path.isfile(inputPath):
return 'file'
else:
return False
def get_base(inputPath,base='basename'):
bases = {'basename':'','baseMinusExtension':'','ext_original':''}
if not base in bases.keys():
return "_:(_"
else:
try:
basename = os.path.basename(inputPath)
bases['basename'] = basename
baseAndExt = os.path.splitext(basename)
baseMinusExtension = baseAndExt[0]
bases['baseMinusExtension'] = baseMinusExtension
ext_original = baseAndExt[1]
bases['ext_original'] = ext_original
return bases[base]
except:
print("error getting basename")
return "_:(_"
def abspath_list(directory):
paths = []
for filename in os.listdir(directory):
path = os.path.abspath(os.path.join(directory, filename))
# path = path.replace(' ','\\ ')
paths.append(path)
return paths
def check_dir_filename_distances(directory):
'''
Check a directory to be ingested for wildly divergent filenames.
We will currently only want to allow single-level directories of
files that represent parts of a whole and thus have fairly
similar filenames.
'''
_list = abspath_list(directory)
names = []
for name in _list:
if os.path.isfile(name):
if not os.path.basename(name).startswith('.'):
names.append(name)
median = Levenshtein.median(names)
# print(median)
outliers = 0 # start a counter for the number of files that diverge from the median name
outlierList = [] # and list them
for name in names:
distance = Levenshtein.distance(median,name)
# print(distance)
if distance > 15:
outliers += 1
outlierList.append(name)
return outliers,outlierList
def check_for_outliers(inputPath):
'''
Use distance check function to approve/deny
viability of directory ingest.
'''
goodNames = True
outliers, outlierList = check_dir_filename_distances(inputPath)
if outliers > 0:
outlierListString = '\n'.join(outlierList)
warning = (
"Hey, there are {} files that seem like they might not belong"\
" in the input directory. If you think this is incorrect, check"\
" the filenames below. Maybe make them more consistent.\n"\
"Here's a list of possible outliers:\n{}".format(
str(outliers),
outlierListString
)
)
        print(warning)
        return False,outlierList
else:
return True,None
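
# Worked illustration of the Levenshtein median/distance idea used by
# check_dir_filename_distances(); the filenames are invented and the same
# python-Levenshtein package imported above is assumed.
def _example_outlier_distances():
    names = [
        'reel1_of_3.mov',
        'reel2_of_3.mov',
        'reel3_of_3.mov',
        'totally_unrelated_scan_final_v2.tif'
    ]
    median = Levenshtein.median(names)
    # the unrelated name sits far from the median and would be flagged
    return {name: Levenshtein.distance(median, name) for name in names}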
def list_files(_input):
'''
Take in an absolute path of a directory and return a list of the paths
for everything in it.
'''
if os.path.isdir(_input):
source_list = []
        for _file in os.listdir(_input):
            _path = os.path.join(_input,_file)
            # check the joined path, not the bare name, so subdirectories are
            # detected regardless of the current working directory
            if os.path.isdir(_path) and not _file.lower() == 'documentation':
                print("you have unexpected subdirectories!")
            else:
                source_list.append(_path)
source_list.sort()
return source_list
else:
print("you're trying to list files but the input is a file. go away.")
# sys.exit()
pass
def get_temp_id(_string):
'''
Generate a hash of a string (i.e., of an input path) that can be used
to produce a *locally* unique temporary ID during the ingestSIP process.
For convenience (?) I'm cutting it down to 10 digits.
example: ingestSIP -i 'foo.mov' --> tempID = a8bcd6d073
where:
sha256 = a8bcd6d073c91f6132f6d64674ecaf658a33c4aedde4046b0b7bf64e9c723073
'''
pathHash = hashlib.sha256(_string.encode()).hexdigest()
tempID = pathHash[:10]
return tempID
def rename_dir(_dir,newName):
if os.path.isdir(_dir):
path = os.path.dirname(_dir)
newPath = os.path.join(path,newName)
try:
newDir = os.rename(_dir,newPath)
return newPath
except OSError as e:
print("OOPS: {}".format(e))
else:
print("{} is not a directory so go away.".format(_dir))
def convert_millis(milli):
'''
Lifted directly from IFIscripts. Written by <NAME>.
Requires an integer that is the number of milliseconds.
For example mediainfo returns '00:51:58.824' as a string '3118.824'
so you gotta remove the period, convert to integer, and parse here.
Accepts milliseconds and returns this value as HH:MM:SS.NNN
'''
# get the number of seconds and milliseconds
a = datetime.timedelta(milliseconds=milli)
# convert to a handy string that looks like '0:51:58.824000'
# so we can check for milliseconds present
b = str(a)
    # no milliseconds are present if there is no remainder. We need milliseconds!
if len(b) == 7:
b += '.000000'
# convert seconds-based tuple to H:M:S:ms tuple
timestamp = datetime.datetime.strptime(b, "%H:%M:%S.%f").time()
# turn that into a string like '0:51:58.824000'
c = str(timestamp)
if len(c) == 8:
c += '.000000'
# trim off the unneeded zeros
return str(c)[:-3]
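
# Illustrative sketch of feeding a mediainfo-style duration string (seconds
# with a decimal point, e.g. '3118.824') into convert_millis() as described
# in its docstring.
def _example_duration_to_timestamp(durationString='3118.824'):
    milliseconds = int(durationString.replace('.',''))
    return convert_millis(milliseconds)  # -> '00:51:58.824'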
def get_audio_sample_rate(inputPath):
# get the sample rate for an audio file
_type = 'Audio'
fieldName = 'SamplingRate'
rate = get_mediainfo_value(
inputPath,
_type,
fieldName
)
return rate
def get_encoded_date(inputPath):
encodedDate = get_mediainfo_value(
inputPath,
'General',
'Encoded_Date'
)
return encodedDate
def get_mediainfo_value(inputPath,_type,fieldName):
'''
inspired by IFIscripts and StackOverflow answer by <NAME>.
    Note: you don't
import os
import obspy
import pickle
import numpy as np
import scipy.integrate as ig
import matplotlib.pyplot as plt
from matplotlib.ticker import StrMethodFormatter, NullFormatter
from mtspec import mtspec
from . import utils as ut
from . import config as cfg
# VARIABLES READ FROM CONFIG
SUPPORTED_SAVE_METHODS = ['pickle']
BW_METHOD=cfg.SPECTRAL["BW_METHOD"]
PLOT_COLUMNS = cfg.SPECTRAL["PLOT_COLUMNS"]
BINNING_PARAMS = cfg.SPECTRAL["BIN_PARS"]
BIN = True
SCALE_PARSEVAL = cfg.SPECTRAL["SCALE_PARSEVAL"]
ROTATE_NOISE = cfg.SPECTRAL["ROTATE_NOISE"]
ROT_METHOD = cfg.SPECTRAL["ROT_METHOD"]
ROT_PARS = cfg.SPECTRAL['ROT_PARS']
SNR_TOLERENCE = cfg.SPECTRAL["SNR_TOLERENCE"]
MIN_POINTS = cfg.SPECTRAL["MIN_POINTS"]
ASSERT_BANDWIDTHS = cfg.SPECTRAL["ASSERT_BANDWIDTHS"]
SBANDS = cfg.SPECTRAL["S_BANDS"]
# classes
class Spectrum(object):
"""
Spectrum class.
"""
freq=np.array([])
amp=np.array([])
meta={}
id = " "
kind = " "
event = " "
freq_lims = np.array([0.,0.])
__tr=obspy.Trace(np.array([]))
bamp=np.array([])
bfreq=np.array([])
def __init__(self, kind, tr=None, **kwargs):
# if a trace is passed assume it needs to be converted to frequency.
if tr is not None:
self.__set_metadata_from_trace(tr, kind)
self.__calc_spectra(**kwargs)
self.psd_to_amp()
self.__bin_spectrum(**BINNING_PARAMS)
def psd_to_amp(self):
"""
Converts Power Spectral Density (PSD) to spectral amplitude.
amp = [PSD*fs*len(PSD)]^0.5
fs is sampling rate in Hz
"""
# self.amp = np.sqrt(
# self.amp*self.meta['delta']*len(self.amp))
# if self.bamp.size > 0:
# self.bamp = np.sqrt(
# self.bamp*self.meta['delta']*len(self.amp))
self.amp = np.sqrt(
(self.amp*len(self.freq))/self.meta['sampling_rate'])
if self.bamp.size > 0:
self.bamp = np.sqrt(
(self.bamp*len(self.freq))/self.meta['sampling_rate'])
def amp_to_psd(self):
"""
        Converts spectral amplitude back to Power Spectral Density (PSD).
        PSD = amp^2 / (fs*len(amp))
        fs is sampling rate in Hz
"""
self.amp = np.power(self.amp, 2) / (
self.meta['sampling_rate'] * len(self.amp))
if self.bamp.size > 0:
self.bamp = np.power(self.bamp, 2) / (
self.meta['sampling_rate'] * len(self.bamp))
def quick_vis(self, **kwargs):
fig, ax = plt.subplots(1,1)
ax.set_title("Event Id: {}".format(self.event))
ax.loglog(self.freq, self.amp, label=self.id, **kwargs)
ax.legend()
ax.set_xlabel('freq [Hz]')
ax.set_ylabel('spectral amp')
def integrate(self):
self.amp /= (2*np.pi*self.freq)
self.bamp /= (2*np.pi*self.bfreq)
def differentiate(self):
self.amp *= (2*np.pi*self.freq)
self.bamp *= (2*np.pi*self.bfreq)
def __set_metadata_from_trace(self, tr, kind):
self.__tr = tr.copy() # make a copy so you dont delete original
self.meta = self.__sanitise_trace_meta(dict(self.__tr.stats))
self.id = self.__tr.id
self.kind = kind
try:
self.event = str(self.meta['otime'])
except KeyError:
self.event = None
def __calc_spectra(self, **kwargs):
amp, freq = mtspec(self.__tr.data, self.meta['delta'], 3, **kwargs)
del self.__tr
# forget the 0 frequency, probably just noise anyway
self.amp, self.freq = amp[1:], freq[1:]
def __sanitise_trace_meta(self, m):
nm = {}
for k, v in m.items():
if k not in ['processing', 'sac', 'calib', '__format']:
if type(v) not in [float, int, str, np.float64, np.float32]:
# print(k, type(v))
nm.update({k:str(v)})
else:
nm.update({k:v})
return nm
def __bin_spectrum(self, smin=0.001, smax=200, bins=101):
# define the range of bins to use to average amplitudes and smooth spectrum
space = np.logspace(np.log10(smin), np.log10(smax), bins)
# initialise numpy arrays
bamps = np.zeros(int(len(space)-1)); bfreqs = np.zeros(int(len(space)-1));
# iterate through bins to find mean log-amplitude and bin center (log space)
for i, bbb in enumerate(zip(space[:-1], space[1:])):
bb, bf = bbb
a = 10**np.log10(self.amp[(self.freq>=bb)&(self.freq<=bf)]).mean()
bamps[i] = a;
bfreqs[i] = 10**(np.mean([np.log10(bb), np.log10(bf)]))
# remove nan values
self.bfreq = bfreqs[np.logical_not(np.isnan(bamps))]
self.bamp = bamps[np.logical_not(np.isnan(bamps))]
self.BAMP = bamps
self.BFREQ = bfreqs
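
# Standalone sketch of the log-spaced binning performed in
# Spectrum.__bin_spectrum() above: amplitudes are averaged in log10 space
# inside logarithmically spaced frequency bins. Plain numpy arrays are used
# here instead of a Spectrum instance.
def _log_bin_sketch(freq, amp, smin=0.001, smax=200, bins=101):
    edges = np.logspace(np.log10(smin), np.log10(smax), bins)
    bfreq, bamp = [], []
    for lo, hi in zip(edges[:-1], edges[1:]):
        sel = (freq >= lo) & (freq <= hi)
        if not sel.any():
            continue
        bamp.append(10**np.log10(amp[sel]).mean())
        bfreq.append(10**np.mean([np.log10(lo), np.log10(hi)]))
    return np.array(bfreq), np.array(bamp)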
class Signal(Spectrum):
"""
Signal is a subclass of spectrum intended to compute the spectrum of a signal
trace.
"""
# Signal class has an additional model attributes with the model params
# and a model function
model = None
pass_snr = True
ubfreqs = np.array([])
def __init__(self, tr=None, **kwargs):
super().__init__('signal', tr=tr, **kwargs)
def set_model(self, model):
self.model = model
def get_model(self):
return self.model
def set_ubfreqs(self, ubfreqs):
self.ubfreqs = ubfreqs
def get_ubfreqs(self):
return self.ubfreqs
def set_pass_snr(self, p):
self.pass_snr = p
def get_pass_snr(self):
return self.pass_snr
class Noise(Spectrum):
"""
Noise is a subclass of spectrum intended to compute the spectrum of a noise
trace.
"""
def __init__(self, tr=None, **kwargs):
super().__init__('noise', tr=tr, **kwargs)
class SNP(object):
"""
Lower level container class to associate signal and noise spectrum objects.
"""
signal = None
noise = None
bsnr = np.array([0.])
event = " "
ubfreqs = np.array([])
itrpn = True
ROTATED = False
def __init__(self, signal, noise, interpolate_noise=True):
self.__check_ids(signal, noise)
self.signal = signal
self.noise = noise
self.pair = (self.signal, self.noise)
self.__set_metadata(interpolate_noise)
if SCALE_PARSEVAL:
self.__scale_noise_parseval()
if self.intrp:
self.__interp_noise_to_signal()
self.__get_snr()
def integrate(self):
for s in self.pair:
s.integrate()
# must recalculate usable frequency-bandwidth
if self.intrp:
self.__get_snr()
def differentiate(self):
for s in self.pair:
s.differentiate()
# must recalculate usable frequency-bandwidth
if self.intrp:
self.__get_snr()
def psd_to_amp(self):
for s in self.pair:
s.psd_to_amp()
def amp_to_psd(self):
for s in self.pair:
s.amp_to_psd()
@property
def bsnr(self):
return self._bsnr
@bsnr.setter
def bsnr(self, arr):
# assert type(arr) is type(np.array())
self._bsnr = arr
def __scale_noise_parseval(self):
self.noise.amp *= np.sqrt(len(self.signal.amp)/len(self.noise.amp))
self.noise.bamp *= np.sqrt(len(self.signal.amp)/len(self.noise.amp))
def __rotate_noise(self):
if ROT_METHOD == 1:
self.noise.bamp, th1, th2 = ut.rotate_noise_full(
self.noise.bfreq, self.noise.bamp, self.signal.bamp,
ret_angle=True, **ROT_PARS)
if th1==0 or th2==0:
print("th1={}, th2={}".format(th1, th2))
print("rotation failed for {}".format(self.signal.id))
self.noise.amp = ut.rotate_noise_full(
self.noise.freq, self.noise.amp, self.signal.amp,
th1=th1, th2=th2, **ROT_PARS)
if ROT_METHOD == 2:
rot = ut.non_lin_boost_noise_func(self.noise.bfreq,
self.noise.bamp, self.signal.bamp, **ROT_PARS)
self.noise.bamp *= rot
self.noise.amp *= np.interp(self.noise.freq, self.noise.bfreq, rot)
def __calc_bsnr(self):
if ROTATE_NOISE and self.ROTATED == False:
self.ROTATED = True
self.__rotate_noise()
# set bsnr to the object
self.bsnr=self.signal.bamp/self.noise.bamp
def __get_snr(self):
self.__calc_bsnr()
self.__find_bsnr_limits()
self.__update_lims_to_meta()
if ASSERT_BANDWIDTHS:
self.__assert_bandwidths_test()
def __assert_bandwidths_test(self):
mns = np.zeros(len(SBANDS))
for i, bws in enumerate(SBANDS):
inds = np.where((self.signal.freq >=bws[0]) &
(self.signal.freq < bws[1]))[0]
mns[i] = np.mean(self.signal.amp[inds])/np.mean(self.noise.amp[inds])
if np.any(mns < SNR_TOLERENCE):
self.signal.set_pass_snr(False)
def __update_lims_to_meta(self):
if self.signal.ubfreqs.size > 0:
self.signal.meta['lower-f-bound'] = self.signal.ubfreqs[0]
self.signal.meta['upper-f-bound'] = self.signal.ubfreqs[1]
else:
self.signal.meta['lower-f-bound'] = None
self.signal.meta['upper-f-bound'] = None
self.signal.meta["pass_snr"] = self.signal.pass_snr
def quick_vis(self, ax=None):
if ax is None:
fig, ax = plt.subplots(1,1)
else:
ret=True
ax.set_title("Event Id: {}".format(self.event))
ax.loglog(self.noise.freq, self.noise.amp, 'b--',label='noise')
ax.loglog(self.signal.freq, self.signal.amp, 'k', label=self.signal.id)
if self.signal.model is not None:
if self.signal.model.result is not None:
ax.loglog(self.signal.model.mod_freq,
10**self.signal.model.result.best_fit, color='green',
label='best fit model')
if self.ubfreqs.size > 0:
if self.signal.pass_snr:
for lim in self.ubfreqs:
ax.vlines(lim,
np.min([self.noise.amp.min(), self.signal.amp.min()]),
np.max([self.noise.amp.max(), self.signal.amp.max()]),
color='r', linestyles='dashed')
else:
ax.set_title("SNR TEST FAILED")
ax.xaxis.set_major_formatter(StrMethodFormatter('{x:.2f}'))
ax.xaxis.set_minor_formatter(NullFormatter())
ax.legend()
ax.set_xlabel('freq [Hz]')
ax.set_ylabel('spectral amp')
# if ret:
# return ax
def __set_metadata(self, intrp):
# global setting
self.intrp = intrp
# exposing these attributes to the highest level *lazyprogrammer*
self.event = self.signal.event
self.id = self.signal.id
def __find_bsnr_limits(self):
"""
Find the upper and lower frequncy limits of the bandwidth measure of
signal-to-noise.
"""
blw = np.where(self.bsnr>=SNR_TOLERENCE)[0]
if blw.size <= MIN_POINTS:
self.signal.set_pass_snr(False)
else:
if BW_METHOD==1:
self.set_ubfreqs(self.find_optimal_signal_bandwidth(
self.signal.bfreq, self.bsnr, SNR_TOLERENCE))
if BW_METHOD==2:
self.set_ubfreqs(self.find_optimal_signal_bandwidth_2())
def set_ubfreqs(self, ubfreqs):
self.ubfreqs = ubfreqs
self.signal.set_ubfreqs(ubfreqs)
def find_optimal_signal_bandwidth(self, freq, bsnr, bsnr_thresh, pctl=0.99, plot=False):
"""
        Attempts to find the largest signal bandwidth above an arbitrary
        signal-to-noise threshold. The SNR curve is first mapped onto the
        range [-1, 1] by subtracting the SNR threshold and taking the sign;
        the cumulative integral of that sign function is then used to locate
        the lower and upper frequency limits (see the toy sketch after this
        class).
"""
inte = ig.cumtrapz((np.sign(bsnr-bsnr_thresh)))
inte /= inte.max()
inte[inte<=0] = -1
fh = np.abs(inte-pctl).argmin() - 1
fl = np.abs(inte-(1-pctl)).argmin()
tryCount=0
while (fl >= fh) or fl==0:
inte[fl] = 1
fl = np.abs(inte+1-pctl).argmin()
tryCount += 1
if tryCount == 3:
print('WARNING: {} is too noisy.'.format(self.id))
self.signal.set_pass_snr(False)
break
# if fl > 1:
# fl -= 2
if not plot:
if fh-fl < 3:
self.signal.set_pass_snr(False)
return np.array([freq[fl], freq[fh]])
else:
import matplotlib.pyplot as plt
plt.plot(freq, np.sign(bsnr-bsnr_thresh), color='grey',
label='sign(bsnr-bsnr limit)')
plt.plot(freq[1:], inte, color='k', lw=2,
label='int[sign(bsnr-bsnr limit)]')
plt.vlines(freq[fl], inte.min(), inte.max(), linestyles='dashed',
label='{}% & {}%'.format(100 -int(pctl*100), int(pctl*100)))
plt.vlines(freq[fh], inte.min(), inte.max(), linestyles='dashed', color='g')
plt.title('ID:{}, low f:{:.2f}, high f:{:.2f}'.format(str(self.id),
freq[fl], freq[fh]))
plt.legend()
plt.ylabel("arb. units")
plt.xlabel("freq [Hz]")
def find_optimal_signal_bandwidth_2(self, plot=False):
# get freq and ratio function
f = self.signal.bfreq; a = self.bsnr
# get index of freqs > peak bsnr and < peak bsnr
indsgt = np.where(f>f[a==a.max()])
indslt = np.where(f<f[a==a.max()])
# get those freqs
fh = f[indsgt]; fl = f[indslt]
try:
ufl = fh[np.where(a[indsgt]-SNR_TOLERENCE<=0)[0]-1][0]
lfl = fl[np.where(a[indslt]-SNR_TOLERENCE<=0)[0]+1][-1]
except IndexError as msg:
print(msg)
print('-'*20)
print("Doesn't meet at one end")
self.signal.pass_snr = False
return np.array([])
if not plot:
return np.array([lfl, ufl])
else:
            plt.loglog(f, a, label=str(self.id))
plt.hlines(SNR_TOLERENCE, f.min(), f.max())
plt.vlines(f[a==a.max()], a.min(), a.max())
plt.vlines(fh[np.where(a[indsgt]-SNR_TOLERENCE<=0)[0]-1][0], a.min()*2, a.max()/2)
plt.vlines(fl[np.where(a[indslt]-SNR_TOLERENCE<=0)[0]+1][-1], a.min()*2, a.max()/2)
def __check_ids(self, signal, noise):
if signal.id.upper() != noise.id.upper():
raise ValueError(
"ID mismatch between signal: {} and noise: ".format(
signal.id, noise.id))
if signal.kind.lower() == noise.kind.lower():
raise ValueError(
"Cannot pair similar spectrum kinds: {} with {}".format(
signal.kind, noise.kind))
def __interp_noise_to_signal(self):
self.noise.amp = np.interp(
self.signal.freq, self.noise.freq, self.noise.amp)
#self.noise.diff_freq = self.noise.freq[np.where(self.noise.freq <= self.signal.freq.min())]
self.noise.freq = self.signal.freq.copy()
self.noise._Spectrum__bin_spectrum(**BINNING_PARAMS) # need to recalc bins after interp.
def __str__(self):
return 'SNP(id:{}, event:{})'.format(self.id, self.event)
def __repr__(self):
return 'SNP(id:' + self.id + ', event:' + self.event + ')'
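
# Toy illustration of the sign-integral trick described in
# SNP.find_optimal_signal_bandwidth(): bsnr - threshold is mapped to +/-1,
# its cumulative integral is normalised, and the low/high percentiles of
# that curve bracket the usable band. The SNR values are invented and the
# clean-up steps of the real method are omitted.
def _bandwidth_sketch(threshold=3.0, pctl=0.99):
    freq = np.logspace(-1, 2, 50)
    bsnr = 10*np.exp(-((np.log10(freq) - 0.5)**2))  # fake SNR bump
    inte = ig.cumtrapz(np.sign(bsnr - threshold))
    inte /= inte.max()
    f_lo = freq[np.abs(inte - (1 - pctl)).argmin()]
    f_hi = freq[np.abs(inte - pctl).argmin()]
    return f_lo, f_hi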
class Spectra(object):
global PLOT_COLUMNS
"""
Higher order container class for a group of SNP objects from a single event.
"""
sorter=lambda x: x.signal.meta['repi']
group = dict()
event = None
def __init__(self, group=None):
if group is not None:
self.__check_group(group)
self.__set_group_dict(group)
| |
#!/usr/bin/env python
"""
NAME:
sparser.py
SYNOPSIS:
sparser.py [options] filename
DESCRIPTION:
The sparser.py script is a Specified PARSER. It is unique (as far as I can
tell) because it doesn't care about the delimiter(s). The user specifies
what is expected, and the order, for each line of text. All of the heavy
lifting is handled by pyparsing (http://pyparsing.sf.net).
OPTIONS:
-h,--help this message
-v,--version version
-d,--debug turn on debug messages
EXAMPLES:
1. As standalone
sparser.py myfile
2. As library
import sparser
...
#Copyright (C) 2006 <NAME> <EMAIL>
#
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 675 Mass Ave, Cambridge, MA 02139, USA.
"""
#===imports======================
import sys
import os
import getopt
import re
import gzip
from pyparsing import *
#===globals======================
modname = "sparser"
__version__ = "0.1"
#--option args--
debug_p = 0
#opt_b=None #string arg, default is undefined
#---positional args, default is empty---
pargs = []
#---other---
#===utilities====================
def msg(txt):
"""Send message to stdout."""
sys.stdout.write(txt)
sys.stdout.flush()
def debug(ftn, txt):
"""Used for debugging."""
if debug_p:
sys.stdout.write("%s.%s:%s\n" % (modname, ftn, txt))
sys.stdout.flush()
def fatal(ftn, txt):
"""If can't continue."""
msg = "%s.%s:FATAL:%s\n" % (modname, ftn, txt)
raise SystemExit(msg)
def usage():
"""Prints the docstring."""
print(__doc__)
#====================================
class ToInteger(TokenConverter):
"""Converter to make token into an integer."""
def postParse( self, instring, loc, tokenlist ):
return int(tokenlist[0])
class ToFloat(TokenConverter):
"""Converter to make token into a float."""
def postParse( self, instring, loc, tokenlist ):
return float(tokenlist[0])
class ParseFileLineByLine:
"""
Bring data from text files into a program, optionally parsing each line
according to specifications in a parse definition file.
ParseFileLineByLine instances can be used like normal file objects (i.e. by
calling readline(), readlines(), and write()), but can also be used as
sequences of lines in for-loops.
ParseFileLineByLine objects also handle compression transparently. i.e. it
is possible to read lines from a compressed text file as if it were not
compressed. Compression is deduced from the file name suffixes '.Z'
(compress/uncompress), '.gz' (gzip/gunzip), and '.bz2' (bzip2).
The parse definition file name is developed based on the input file name.
If the input file name is 'basename.ext', then the definition file is
'basename_def.ext'. If a definition file specific to the input file is not
found, then the program searches for the file 'sparse.def' which would be
the definition file for all files in that directory without a file specific
definition file.
Finally, ParseFileLineByLine objects accept file names that start with '~'
or '~user' to indicate a home directory, as well as URLs (for reading
only).
Constructor:
ParseFileLineByLine(|filename|, |mode|='"r"'), where |filename| is the name
of the file (or a URL) and |mode| is one of '"r"' (read), '"w"' (write) or
'"a"' (append, not supported for .Z files).
"""
def __init__(self, filename, mode = 'r'):
"""Opens input file, and if available the definition file. If the
definition file is available __init__ will then create some pyparsing
helper variables. """
if mode not in ['r', 'w', 'a']:
raise IOError(0, 'Illegal mode: ' + repr(mode))
        if filename.find(':/') > 1: # URL
if mode == 'w':
raise IOError("can't write to a URL")
import urllib.request, urllib.parse, urllib.error
self.file = urllib.request.urlopen(filename)
else:
filename = os.path.expanduser(filename)
if mode == 'r' or mode == 'a':
if not os.path.exists(filename):
raise IOError(2, 'No such file or directory: ' + filename)
filen, file_extension = os.path.splitext(filename)
command_dict = {
('.Z', 'r'):
"self.file = os.popen('uncompress -c ' + filename, mode)",
('.gz', 'r'):
"self.file = gzip.GzipFile(filename, 'rb')",
('.bz2', 'r'):
"self.file = os.popen('bzip2 -dc ' + filename, mode)",
('.Z', 'w'):
"self.file = os.popen('compress > ' + filename, mode)",
('.gz', 'w'):
"self.file = gzip.GzipFile(filename, 'wb')",
('.bz2', 'w'):
"self.file = os.popen('bzip2 > ' + filename, mode)",
('.Z', 'a'):
"raise IOError, (0, 'Can\'t append to .Z files')",
('.gz', 'a'):
"self.file = gzip.GzipFile(filename, 'ab')",
('.bz2', 'a'):
"raise IOError, (0, 'Can\'t append to .bz2 files')",
}
exec(command_dict.get((file_extension, mode),
'self.file = open(filename, mode)'))
self.grammar = None
# Try to find a parse ('*_def.ext') definition file. First try to find
# a file specific parse definition file, then look for 'sparse.def'
# that would be the definition file for all files within the directory.
# The definition file is pure Python. The one variable that needs to
# be specified is 'parse'. The 'parse' variable is a list of tuples
# defining the name, type, and because it is a list, the order of
# variables on each line in the data file. The variable name is a
# string, the type variable is defined as integer, real, and qString.
# parse = [
# ('year', integer),
# ('month', integer),
# ('day', integer),
# ('value', real),
# ]
definition_file_one = filen + "_def" + file_extension
definition_file_two = os.path.dirname(filen) + os.sep + "sparse.def"
if os.path.exists(definition_file_one):
self.parsedef = definition_file_one
elif os.path.exists(definition_file_two):
self.parsedef = definition_file_two
else:
self.parsedef = None
return None
# Create some handy pyparsing constructs. I kept 'decimal_sep' so that
# could easily change to parse if the decimal separator is a ",".
decimal_sep = "."
sign = oneOf("+ -")
# part of printables without decimal_sep, +, -
        special_chars = '!"#$%&\'()*,./:;<=>?@[\\]^_`{|}~'.replace(
            decimal_sep, "")
integer = ToInteger(
Combine(Optional(sign) +
Word(nums))).setName("integer")
positive_integer = ToInteger(
Combine(Optional("+") +
Word(nums))).setName("integer")
negative_integer = ToInteger(
Combine("-" +
Word(nums))).setName("integer")
real = ToFloat(
Combine(Optional(sign) +
Word(nums) +
decimal_sep +
Optional(Word(nums)) +
Optional(oneOf("E e") +
Word(nums)))).setName("real")
positive_real = ToFloat(
Combine(Optional("+") +
Word(nums) +
decimal_sep +
Optional(Word(nums)) +
Optional(oneOf("E e") +
Word(nums)))).setName("real")
negative_real = ToFloat(
Combine("-" +
Word(nums) +
decimal_sep +
Optional(Word(nums)) +
Optional(oneOf("E e") +
Word(nums)))).setName("real")
qString = ( sglQuotedString | dblQuotedString ).setName("qString")
# add other characters we should skip over between interesting fields
integer_junk = Optional(
Suppress(
Word(alphas +
special_chars +
decimal_sep))).setName("integer_junk")
real_junk = Optional(
Suppress(
Word(alphas +
special_chars))).setName("real_junk")
qString_junk = SkipTo(qString).setName("qString_junk")
        # Now that 'integer', 'real', and 'qString' have been assigned I can
        # execute the definition file (exec needs an explicit namespace in
        # Python 3 so that the 'parse' variable defined there is visible here).
        defs = {'integer': integer, 'real': real, 'qString': qString}
        exec(compile(open(self.parsedef).read(), self.parsedef, 'exec'), defs)
        parse = defs['parse']
# Build the grammar, combination of the 'integer', 'real, 'qString',
# and '*_junk' variables assigned above in the order specified in the
# definition file.
grammar = []
for nam, expr in parse:
grammar.append( eval(expr.name + "_junk"))
grammar.append( expr.setResultsName(nam) )
self.grammar = And( grammar[1:] + [restOfLine] )
def __del__(self):
"""Delete (close) the file wrapper."""
self.close()
def __getitem__(self, item):
"""Used in 'for line in fp:' idiom."""
line = self.readline()
if not line:
raise IndexError
return line
def readline(self):
"""Reads (and optionally parses) a single line."""
line = self.file.readline()
if self.grammar and line:
try:
return self.grammar.parseString(line).asDict()
except ParseException:
return self.readline()
else:
return line
def readlines(self):
"""Returns a list of all lines (optionally parsed) in the file."""
if self.grammar:
tot = []
# Used this way instead of a 'for' loop against
# self.file.readlines() so that there wasn't two copies of the file
# in memory.
while 1:
line = self.file.readline()
if not line:
break
tot.append(line)
return tot
return self.file.readlines()
def write(self, data):
"""Write to a file."""
self.file.write(data)
def writelines(self, list):
"""Write a list to a file. Each item in the list is a line in the
file.
"""
for line in list:
self.file.write(line)
def close(self):
"""Close the file."""
self.file.close()
def flush(self):
"""Flush in memory contents to file."""
self.file.flush()
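
# Illustrative sketch of a parse definition file and its use; the file names
# and field layout are hypothetical. A 'rain_def.txt' placed next to
# 'rain.txt' (or a shared 'sparse.def') would make the loop below yield a
# dict per parsed line.
#
# contents of rain_def.txt:
# parse = [
#     ('year', integer),
#     ('month', integer),
#     ('day', integer),
#     ('value', real),
# ]
def example_usage(filename='rain.txt'):
    fp = ParseFileLineByLine(filename)
    return [line for line in fp if isinstance(line, dict)]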
#=============================
def main(pargs):
"""This should only be used for testing. The primary mode of operation is
as an imported library.
"""
input_file = sys.argv[1]
fp = ParseFileLineByLine(input_file)
for i in fp:
print(i)
#-------------------------
if __name__ == '__main__':
ftn | |
"""
Copyright (c) 2018-2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging as log
import numpy as np
from extensions.middle.PartialInfer import PartialInfer
from extensions.middle.TensorIterator_utils import delete_selects_from
from extensions.ops.TensorIterator_ops import TensorIteratorCondition, TensorIteratorBackEdge
from extensions.ops.elementwise import Mul
from mo.graph.graph import Graph
from mo.middle.replacement import MiddleReplacementPattern
from mo.ops.const import Const
def make_nodes_1D(nodes: list):
"""
Reshape every node from nodes from 0D to 1D (nodes should have shape attribute).
"""
for node in nodes:
assert node.shape is None or len(node.shape) == 0
node.shape = np.array([1], dtype=np.int64)
if node.value is not None:
node.value = np.reshape(node.value, node.shape)
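
# Conceptual sketch of the 0D -> 1D reshape performed by make_nodes_1D(),
# using a SimpleNamespace stand-in instead of a real mo.graph.graph Node
# (only the 'shape' and 'value' attributes touched above are mimicked).
def _demo_make_nodes_1D():
    from types import SimpleNamespace
    node = SimpleNamespace(shape=np.array([], dtype=np.int64), value=np.array(5))
    make_nodes_1D([node])
    return node.shape, node.value  # -> (array([1]), array([5]))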
def looking_for_op_in_list(nodes: list, op: str):
for node in nodes:
if node.has_valid('op') and node.op == op:
return node
return None
class LoopConditionMatcher(MiddleReplacementPattern):
"""
This pattern match condition for TensorIterator in while loops in TF.
The structure of pattern without Data nodes between ops. Every node is named as op attribute of this node
(data nodes is marked by (data)):
Const----
|
v
Const -> Enter -> Merge ---------------------> Switch -> Identity -> Add -> NextIteration
| ^
---> Less ----| |
^ | |
Maximum -> Minimum -> Enter-| | |
^ v |
Shape -> StridedSlice -> Enter -| LogicalAnd --> LoopCond (data)
v ^ |
---> Less ----| |
| v
Const -> Enter -> Merge ---------------------> Switch -> Identity -> Add -> NextIteration
^
|
Const----
"""
enabled = True
graph_condition = [lambda graph: graph.graph['is_cyclic']]
def run_after(self):
return []
def run_before(self):
from extensions.middle.TensorIteratorMerge import TensorIteratorMerge
return [TensorIteratorMerge]
@staticmethod
def pattern():
log.debug('+++++++++++++++ ConditionMatching ++++++++++++++++')
return dict(
nodes=[
('Enter_1_less', dict(kind='op', op='Enter')),
('Strided_slice', dict(kind='op', op='StridedSlice')),
('Strided_slice_data', dict(kind='data')),
('Enter_1_less_data', dict(kind='data')),
('Less_1', dict(kind='op', op='Less')),
('Merge_1', dict(kind='op', op='Merge')),
('Merge_1_data', dict(kind='data')),
('Less_1_data', dict(kind='data')),
('Less_2', dict(kind='op', op='Less')),
('Merge_2', dict(kind='op', op='Merge')),
('Merge_2_data', dict(kind='data')),
('Less_2_data', dict(kind='data')),
('Enter_2_less', dict(kind='op', op='Enter')),
('Enter_2_less_data', dict(kind='data')),
('minimum_data', dict(kind='data')),
('and', dict(kind='op', op='LogicalAnd')),
('and_data', dict(kind='data')),
('loop_cond', dict(kind='op', op='LoopCond')),
('loop_cond_data', dict(kind='data')),
('init_1', dict(kind='op', op='Const')),
('init_1_data', dict(kind='data')),
('Enter_1', dict(kind='op', op='Enter')),
('Enter_1_data', dict(kind='data')),
('init_2', dict(kind='op', op='Const')),
('init_2_data', dict(kind='data')),
('Enter_2', dict(kind='op', op='Enter')),
('Enter_2_data', dict(kind='data')),
('Switch_1', dict(kind='op', op='Switch')),
('Switch_1_data', dict(kind='data')),
('Identity_1', dict(kind='op', op='Identity')),
('Identity_1_data', dict(kind='data')),
('add_1', dict(kind='op', op='Add')),
('add_1_y', dict(kind='op', op='Const')),
('add_1_y_data', dict(kind='data')),
('add_1_data', dict(kind='data')),
('NextIteration_1', dict(kind='op', op='NextIteration')),
('Switch_2', dict(kind='op', op='Switch')),
('Switch_2_data', dict(kind='data')),
('Identity_2', dict(kind='op', op='Identity')),
('Identity_2_data', dict(kind='data')),
('add_2', dict(kind='op', op='Add')),
('add_2_y', dict(kind='op', op='Const')),
('add_2_y_data', dict(kind='data')),
('add_2_data', dict(kind='data')),
('NextIteration_2', dict(kind='op', op='NextIteration')),
],
edges=[
('Strided_slice', 'Strided_slice_data'),
('Strided_slice_data', 'Enter_1_less'),
('Enter_1_less', 'Enter_1_less_data'),
('Enter_1_less_data', 'Less_1'),
('Less_1', 'Less_1_data'),
('Less_1_data', 'and'),
('and', 'and_data'),
('and_data', 'loop_cond'),
('loop_cond', 'loop_cond_data'),
('loop_cond_data', 'Switch_1'),
('loop_cond_data', 'Switch_2'),
('init_1', 'init_1_data'),
('init_1_data', 'Enter_1'),
('Enter_1', 'Enter_1_data'),
('Enter_1_data', 'Merge_1'),
('Merge_1', 'Merge_1_data'),
('Merge_1_data', 'Less_1'),
('Merge_1_data', 'Switch_1'),
('Switch_1', 'Switch_1_data'),
('Switch_1_data', 'Identity_1'),
('Identity_1', 'Identity_1_data'),
('Identity_1_data', 'add_1'),
('add_1_y', 'add_1_y_data'),
('add_1_y_data', 'add_1'),
('add_1', 'add_1_data'),
('add_1_data', 'NextIteration_1'),
('Merge_2_data', 'Switch_2'),
('Switch_2', 'Switch_2_data'),
('Switch_2_data', 'Identity_2'),
('Identity_2', 'Identity_2_data'),
('Identity_2_data', 'add_2'),
('add_2_y', 'add_2_y_data'),
('add_2_y_data', 'add_2'),
('add_2', 'add_2_data'),
('add_2_data', 'NextIteration_2'),
('minimum_data', 'Enter_2_less'),
('Enter_2_less', 'Enter_2_less_data'),
('Enter_2_less_data', 'Less_2'),
('init_2', 'init_2_data'),
('init_2_data', 'Enter_2'),
('Enter_2', 'Enter_2_data'),
('Enter_2_data', 'Merge_2'),
('Merge_2', 'Merge_2_data'),
('Merge_2_data', 'Less_2'),
('Less_2', 'Less_2_data'),
('Less_2_data', 'and'),
],
)
@staticmethod
def looking_for_iteration_counter(graph: Graph, match: dict):
types = ['TensorIteratorInput', 'TensorIteratorOutput']
candidates = np.array([match['Identity_1_data'], match['Identity_2_data']])
results = np.array([False for i in range(len(candidates))])
for i, candidat in enumerate(candidates):
for node in candidat.out_nodes():
if node['op'] in types:
results[i] = True
assert not np.all(results)
assert sum(results) == 1
return candidates[results == True][0]
@staticmethod
def check_dynamic_seq_len(graph: Graph, match: dict):
"""
Cycle is dynamic if at least one of the boundaries isn't constant OR this boundaries is different from tensor
shape.
"""
dynamic_seq_len = match['Enter_1_less_data'].value is None or match['Enter_2_less_data'].value is None or \
not np.array_equal(match['Enter_1_less_data'].value, match['Enter_2_less_data'].value)
return dynamic_seq_len
def replace_pattern(self, graph: Graph, match: dict):
log.debug('================== ConditionFind ===============')
# init_1
init_1 = match['init_1_data'].value
assert init_1 is not None
init_1 = int(init_1)
# init_2
init_2 = match['init_2_data'].value
assert init_2 is not None
init_2 = int(init_2)
# step_1
assert match['add_1_y_data'].value is not None
step_1 = int(match['add_1_y_data'].value)
# step_2
assert match['add_2_y_data'].value is not None
step_2 = int(match['add_2_y_data'].value)
dynamic_seq_len = self.check_dynamic_seq_len(graph, match)
# Create condition node and delete all useless nodes from condition pattern
loop_condition = match['loop_cond_data']
iterator_data = self.looking_for_iteration_counter(graph, match)
condition_attrs = dict(time=dict(init=init_2, step=step_2), iter=dict(init=init_1, step=step_1),
name=match['loop_cond'].name + '/TensorIteratorCondition_')
condition = TensorIteratorCondition(graph, attrs=condition_attrs)
condition_data = condition.create_node_with_data(inputs=[match['Strided_slice_data'], match['minimum_data']],
data_nodes=[loop_condition, iterator_data])
safe_nodes = ['loop_cond_data', 'Identity_1_data', 'Identity_2_data', 'Strided_slice', 'Strided_slice_data',
'minimum', 'minimum_data']
identity_ops = [n.op for n in iterator_data.out_nodes()]
if 'GreaterEqual' in identity_ops:
greater_equal_id = [n.id for n in iterator_data.out_nodes() if n.op == 'GreaterEqual'][0]
if dynamic_seq_len:
# Add BackEdge for time iterator node
backedge = TensorIteratorBackEdge(graph, dict(name='/TimeIterator/TensorIteratorBackEdge_'))
backedge_data = backedge.create_node_with_data(inputs=[match['init_2_data'], match['add_2_data'],
condition_data[0]],)
graph.remove_edge(match['add_2'].in_node(0).id, match['add_2'].id)
graph.add_edge(backedge_data.id, match['add_2'].id, **{'in': 0})
graph.remove_edge(iterator_data.id, greater_equal_id)
graph.add_edge(backedge_data.id, greater_equal_id, **{'in': 0})
# nodes for time iterator
safe_nodes += ['init_2_data', 'init_2', 'Identity_2_data', 'add_2_data', 'add_2', 'add_2_y', 'add_2_y_data']
# Manually reshape all iterator nodes (for time) from 0D to 1D
iterator_data_nodes = [backedge_data, match['add_2_data'], match['add_2_y_data'], match['add_2_y'],
match['init_2_data'], match['init_2']]
make_nodes_1D(iterator_data_nodes)
else:
# Delete Selects from this cycle to make it not dynamic:
greater_equal_idxs = [n.id for n in iterator_data.out_nodes() if n.op == 'GreaterEqual']
delete_selects_from(graph, greater_equal_idxs)
# Delete useless nodes
nodes_for_remove = []
for node in match.keys():
if node not in safe_nodes:
nodes_for_remove.append(match[node].id)
graph.remove_nodes_from(nodes_for_remove)
class SimpleConditionMatcher(MiddleReplacementPattern):
enabled = True
graph_condition = [lambda graph: graph.graph['is_cyclic']]
def run_after(self):
return [LoopConditionMatcher]
def run_before(self):
from extensions.middle.TensorIteratorMerge import TensorIteratorMerge
return [TensorIteratorMerge]
@staticmethod
def pattern():
log.debug('+++++++++++++++ SimpleConditionMatching ++++++++++++++++')
return dict(
nodes=[
('Enter_1_less', dict(kind='op', op='Enter')),
('Strided_slice', dict(kind='op', op='StridedSlice')),
('Strided_slice_data', dict(kind='data')),
('Enter_1_less_data', dict(kind='data')),
('Less_1', dict(kind='op', op='Less')),
('Merge_1', dict(kind='op', op='Merge')),
('Merge_1_data', dict(kind='data')),
('Less_1_data', dict(kind='data')),
('loop_cond', dict(kind='op', op='LoopCond')),
('loop_cond_data', dict(kind='data')),
('init_1', dict(kind='op', op='Const')),
('init_1_data', dict(kind='data')),
('Enter_1', dict(kind='op', op='Enter')),
('Enter_1_data', dict(kind='data')),
('Switch_1', dict(kind='op', op='Switch')),
('Switch_1_data', dict(kind='data')),
('Identity_1', dict(kind='op', op='Identity')),
('Identity_1_data', dict(kind='data')),
('add_1', dict(kind='op', op='Add')),
('add_1_y', dict(kind='op', op='Const')),
('add_1_y_data', dict(kind='data')),
('add_1_data', dict(kind='data')),
('NextIteration_1', dict(kind='op', op='NextIteration')),
],
edges=[
('Strided_slice', 'Strided_slice_data'),
('Strided_slice_data', 'Enter_1_less'),
('Enter_1_less', 'Enter_1_less_data'),
('Enter_1_less_data', 'Less_1'),
('Less_1', 'Less_1_data'),
('Less_1_data', 'loop_cond'),
('loop_cond', 'loop_cond_data'),
('loop_cond_data', 'Switch_1'),
('init_1', 'init_1_data'),
('init_1_data', 'Enter_1'),
('Enter_1', 'Enter_1_data'),
('Enter_1_data', 'Merge_1'),
('Merge_1', 'Merge_1_data'),
('Merge_1_data', 'Less_1'),
('Merge_1_data', 'Switch_1'),
('Switch_1', 'Switch_1_data'),
('Switch_1_data', 'Identity_1'),
('Identity_1', 'Identity_1_data'),
('Identity_1_data', 'add_1'),
('add_1_y', 'add_1_y_data'),
('add_1_y_data', 'add_1'),
('add_1', 'add_1_data'),
('add_1_data', 'NextIteration_1'),
],
)
@staticmethod
def replace_pattern(graph: Graph, match: dict):
log.debug('================== SimpleConditionFind ===============')
# init_1
init_1 = match['init_1_data'].value
assert init_1 is not None
init_1 = int(init_1)
# step_1
assert match['add_1_y_data'].value is not None
step_1 = int(match['add_1_y_data'].value)
match['loop_cond_data'].value = None
# Create condition node and delete all useless nodes from condition pattern
condition_attrs = dict(iter=dict(init=init_1, step=step_1),
name=match['loop_cond'].name + '/TensorIteratorCondition_')
condition = TensorIteratorCondition(graph, attrs=condition_attrs)
condition.create_node_with_data(inputs=[match['Strided_slice_data']],
data_nodes=[match['loop_cond_data'], match['Identity_1_data']])
# Delete useless nodes
safe_nodes = ['loop_cond_data', 'Identity_1_data', 'Strided_slice', 'Strided_slice_data']
nodes_for_remove = []
for node in match.keys():
if node not in safe_nodes:
nodes_for_remove.append(match[node].id)
graph.remove_nodes_from(nodes_for_remove)
class DynamicDecoderConditionMatcher(MiddleReplacementPattern):
"""
This pattern match condition for dynamic decoder and create TensorIteratorCondition node instead of it.
"""
enabled = True
graph_condition = [lambda graph: graph.graph['is_cyclic']]
def run_after(self):
return [SimpleConditionMatcher]
def run_before(self):
from extensions.middle.TensorIteratorMerge import TensorIteratorMerge
return [TensorIteratorMerge]
@staticmethod
def pattern():
log.debug('+++++++++++++++ DynamicDecoderConditionMatching ++++++++++++++++')
return dict(
nodes=[
('loop_cond', dict(kind='op', op='LoopCond')),
('loop_cond_data', dict(kind='data')),
('logical_not', dict(kind='op', op='Not')),
('logical_not_data', dict(kind='data')),
('all', dict(kind='op', op='ReduceAnd')),
('all_data', dict(kind='data')),
('Merge_16', dict(kind='op', op='Merge')),
('merge_16_data', dict(kind='data')),
('NextIteration_16', dict(kind='op', op='NextIteration')),
('nextIteration_data', dict(kind='data')),
('Switch', dict(kind='op', op='Switch')),
('switch_data', dict(kind='data')),
('Identity', dict(kind='op', op='Identity')),
('identity_data', dict(kind='data')),
('add', dict(kind='op', op='Add')),
('add_data', dict(kind='data')),
('Less_enter', dict(kind='op', op='Enter')),
('Less_enter_data', dict(kind='data')),
('And', dict(kind='op', op='LogicalAnd')),
('And_data', dict(kind='data')),
('Less', dict(kind='op', op='Less')),
('Less_data', dict(kind='data')),
('TensorIteratorOutput', dict(kind='op', op='TensorIteratorOutput')),
('TensorIteratorOutput_1', dict(kind='op', op='TensorIteratorOutput')),
],
edges=[
('NextIteration_16', 'nextIteration_data'),
('nextIteration_data', 'Merge_16'),
('Merge_16', 'merge_16_data'),
('merge_16_data', 'all'),
('all', 'all_data'),
('all_data', 'logical_not'),
('logical_not', 'logical_not_data'),
('Less_enter', 'Less_enter_data'),
('Less_enter_data', 'Less'),
('Less', 'Less_data'),
('Less_data', 'And'),
('logical_not_data', 'And'),
('And', 'And_data'),
('And_data', 'loop_cond'),
('loop_cond', 'loop_cond_data'),
('loop_cond_data', 'Switch'),
('Switch', 'switch_data'),
('switch_data', 'Identity'),
('Identity', 'identity_data'),
('identity_data', 'add'),
('add', 'add_data'),
('identity_data', 'TensorIteratorOutput'),
('identity_data', 'TensorIteratorOutput_1'),
],
)
@staticmethod
def replace_pattern(graph: Graph, match: dict):
"""
Create condition node and delete all useless nodes (like Switch/Merge/Identity) from condition pattern
"""
log.debug('================== DynamicDecoderConditionFind ==================')
# Create and connect condition node for dynamic decoder in TF
        loop_condition = match['loop_cond_data']
iterator_data = match['identity_data']
condition_attrs = dict(name=match['loop_cond'].name + '/TensorIteratorCondition_')
condition = TensorIteratorCondition(graph, attrs=condition_attrs)
condition.create_node_with_data(inputs=[match['Less_enter'].in_node()],
                                        data_nodes=[loop_condition, iterator_data])
|