raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method symbol_assign_disk_pool_ownership" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'system_id' is set
if ('system_id' not in params) or (params['system_id'] is None):
raise ValueError("Missing the required parameter `system_id` when calling `symbol_assign_disk_pool_ownership`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `symbol_assign_disk_pool_ownership`")
resource_path = '/storage-systems/{system-id}/symbol/assignDiskPoolOwnership'.replace('{format}', 'json')
path_params = {}
if 'system_id' in params:
path_params['system-id'] = params['system_id']
query_params = {}
if 'controller' in params:
query_params['controller'] = params['controller']
if 'verbose_error_response' in params:
query_params['verboseErrorResponse'] = params['verbose_error_response']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def symbol_assign_drives_as_hot_spares(self, system_id, body, **kwargs):
"""
This procedure causes the controller to automatically assign the specified number of drives as hot spares, in addition to any previously assigned hot spares.
Documented return codes: ok, illegalParam, noHeap, noSparesAssigned, someSparesAssigned, tryAlternate.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.symbol_assign_drives_as_hot_spares(system_id, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str system_id: The unique identifier of the storage-system. This may be the id or the WWN. (required)
:param int body: The number of new hot spare drives to be added to the array's pool of hot spares. (required)
:param str controller: Controller selection
:param bool verbose_error_response:
:return: str
If the method is called asynchronously,
returns the request thread.
:raises: ValueError
If the required params are not provided or if the response data format is unknown.
TypeError:
When the data type of response data is different from what we are expecting
ApiException:
            Occurs when we get an HTTP error code (422 and above).
"""
all_params = ['system_id', 'body', 'controller', 'verbose_error_response']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method symbol_assign_drives_as_hot_spares" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'system_id' is set
if ('system_id' not in params) or (params['system_id'] is None):
raise ValueError("Missing the required parameter `system_id` when calling `symbol_assign_drives_as_hot_spares`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `symbol_assign_drives_as_hot_spares`")
resource_path = '/storage-systems/{system-id}/symbol/assignDrivesAsHotSpares'.replace('{format}', 'json')
path_params = {}
if 'system_id' in params:
path_params['system-id'] = params['system_id']
query_params = {}
if 'controller' in params:
query_params['controller'] = params['controller']
if 'verbose_error_response' in params:
query_params['verboseErrorResponse'] = params['verbose_error_response']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def symbol_assign_specific_drives_as_hot_spares(self, system_id, body, **kwargs):
"""
Instructs the SYMbol Server's controller to create hot spare drives out of the given drives.
Documented return codes: ok, illegalParam, noHeap, noSparesAssigned, someSparesAssigned, tryAlternate, sparesSmallUnassigned.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.symbol_assign_specific_drives_as_hot_spares(system_id, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str system_id: The unique identifier of the storage-system. This may be the id or the WWN. (required)
:param DriveRefList body: A list of drive reference values, which identifies the drives to be assigned as hot spares. (required)
:param str controller: Controller selection
:param bool verbose_error_response:
:return: str
If the method is called asynchronously,
returns the request thread.
:raises: ValueError
If the required params are not provided or if the response data format is unknown.
TypeError:
When the data type of response data is different from what we are expecting
ApiException:
            Occurs when we get an HTTP error code (422 and above).
"""
all_params = ['system_id', 'body', 'controller', 'verbose_error_response']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method symbol_assign_specific_drives_as_hot_spares" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'system_id' is set
if ('system_id' not in params) or (params['system_id'] is None):
raise ValueError("Missing the required parameter `system_id` when calling `symbol_assign_specific_drives_as_hot_spares`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `symbol_assign_specific_drives_as_hot_spares`")
resource_path = '/storage-systems/{system-id}/symbol/assignSpecificDrivesAsHotSpares'.replace('{format}', 'json')
path_params = {}
if 'system_id' in params:
path_params['system-id'] = params['system_id']
query_params = {}
if 'controller' in params:
query_params['controller'] = params['controller']
if 'verbose_error_response' in params:
query_params['verboseErrorResponse'] = params['verbose_error_response']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def symbol_assign_volume_group_ownership(self, system_id, body, **kwargs):
"""
Instructs the SYMbol Server's controller to transfer ownership of a volume group and its associated volumes to another controller.
Documented return codes: ok, noHeap, notDualActive, tryAlternate, internalError, invalidRequest, iconFailure, cacheSyncFailure, invalidControllerref, invalidVolumegroupref, modesenseError, controllerInServiceMode.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.symbol_assign_volume_group_ownership(system_id, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str system_id: The unique identifier of the storage-system. This may be the id or the WWN. (required)
:param VolumeGroupOwnershipUpdateDescriptor body: A descriptor that specifies the volume group being modified, and the controller that is to take ownership of the volume group, and thus all volumes defined on the volume group. (required)
:param str controller: Controller selection
:param bool verbose_error_response:
:return: str
If the method is called asynchronously,
returns the request thread.
:raises: ValueError
If the required params are not provided or if the response data format is unknown.
TypeError:
When the data type of response data is different from what we are expecting
ApiException:
            Occurs when we get an HTTP error code (422 and above).
"""
all_params = ['system_id', 'body', 'controller', 'verbose_error_response']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method symbol_assign_volume_group_ownership" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'system_id' is set
if ('system_id' not in params) or (params['system_id'] is None):
raise ValueError("Missing the required parameter `system_id` when calling `symbol_assign_volume_group_ownership`")
        # verify the
    ## The fifth and sixth are alpha3.
energy_alpha_simulation = compute_energy_array(data, "alpha", "end", m_a)
z_alpha = data["alpha_z_end"]
# Loop over all slices (i.e. cells in the z direction)
for slice_number in range(1, size_z):
## Kinetic energy in the lab frame before fusion
E_kinetic_com_before = E_com[slice_number]
## Total (kinetic + mass) energy in the lab frame after
## proton + boron 11 -> alpha + beryllium 8
E_total_com_after = E_kinetic_com_before + E_fusion + (m_a + m_be)*scc.c**2
## Corresponding momentum norm squared of alpha1/beryllium
p_sq_after = E_com_to_p_sq_com(m_a, m_be, E_total_com_after)
## Corresponding kinetic energy for alpha1
energy_alpha1_theory = p_sq_to_kinetic_energy(p_sq_after, m_a)
## Corresponding kinetic energy for beryllium
energy_beryllium_theory = p_sq_to_kinetic_energy(p_sq_after, m_be)
## Corresponding kinetic energy for alpha2 + alpha3 after beryllium decay
energy_alpha2_plus_3_theory = energy_beryllium_theory + E_decay
## Compute the theoretical maximum and minimum energy of alpha2 and alpha3. This
## calculation is done nonrelativistically, by noting that the maximum (minimum) energy
## corresponds to an alpha emitted exactly in the (opposite) direction of the beryllium
## in the center of mass frame. This calculation involves solving a polynomial equation of
## order 2 in p_alpha23.
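        ## A minimal sketch of that quadratic, in the notation used just below (everything
        ## nonrelativistic): write p_Be = sqrt(p_sq_after) for the beryllium momentum and
        ## E_23 = energy_alpha2_plus_3_theory. Collinear emission gives
        ##     p_2 + p_3 = p_Be                               (momentum conservation)
        ##     p_2**2/(2*m_a) + p_3**2/(2*m_a) = E_23         (energy conservation)
        ## Substituting p_3 = p_Be - p_2 yields
        ##     2*p_2**2 - 2*p_Be*p_2 + (p_Be**2 - 2*m_a*E_23) = 0,
        ## whose roots 0.5*(p_Be +/- sqrt(4*m_a*E_23 - p_Be**2)) are the two expressions below.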
max_p_alpha23 = 0.5*(np.sqrt(p_sq_after) + \
np.sqrt(4*m_a*energy_alpha2_plus_3_theory - p_sq_after))
min_p_alpha23 = 0.5*(np.sqrt(p_sq_after) - \
np.sqrt(4*m_a*energy_alpha2_plus_3_theory - p_sq_after))
max_energy_alpha23 = max_p_alpha23**2/(2.*m_a)
min_energy_alpha23 = min_p_alpha23**2/(2.*m_a)
## Get the energy of all alphas in the slice
energy_alpha_slice = energy_alpha_simulation[(z_alpha >= slice_number)* \
(z_alpha < (slice_number + 1))]
## Energy of alphas1 (here, first macroparticle of each fusion event) in the slice
energy_alpha1_simulation = energy_alpha_slice[::6]
## Energy of alphas2 (here, third macroparticle of each fusion event) in the slice
energy_alpha2_simulation = energy_alpha_slice[2::6]
## Energy of alphas3 (here, fifth macroparticle of each fusion event) in the slice
energy_alpha3_simulation = energy_alpha_slice[4::6]
assert(np.all(is_close(energy_alpha1_simulation, energy_alpha1_theory, rtol=5.e-8)))
assert(is_close(np.amax(energy_alpha2_simulation), max_energy_alpha23, rtol=1.e-2))
assert(is_close(np.amin(energy_alpha2_simulation), min_energy_alpha23, rtol=1.e-2))
assert(is_close(np.amax(energy_alpha3_simulation), max_energy_alpha23, rtol=1.e-2))
assert(is_close(np.amin(energy_alpha3_simulation), min_energy_alpha23, rtol=1.e-2))
def check_initial_energy2(data):
## In WarpX, the initial momentum of the alphas is computed assuming that the fusion process
## takes place in two steps:
## (1): proton + boron 11 -> alpha + beryllium 8
## (2): beryllium 8 -> alpha + alpha
## The alpha generated in the first step (labeled alpha1) generally has a different initial
## energy distribution than the alphas generated in the second step (labeled alpha2 and
## alpha3).
## In the second test, we are in the boron rest frame. In this case, the momentum of each alpha
## follows a continuous distribution within a given range. In this function, we verify that
## this range is as expected by comparing the maximum and minimum energy of the obtained
## macroparticles to the theoretical maximum and minimum. Be aware that the range for alpha1
## is not the same as the range for alpha2 and alpha3 (typically alpha1 particles will carry
## more energy).
    ## Note that in the simulations, 6 macroparticles are generated for each fusion event.
## The first and second macroparticles are alpha1 particles. The third and fourth are alpha2.
## The fifth and sixth are alpha3.
energy_alpha_simulation = compute_energy_array(data, "alpha", "end", m_a)
z_alpha = data["alpha_z_end"]
# Loop over all slices (i.e. cells in the z direction)
for slice_number in range(1, size_z):
        ## For simplicity, all the calculations in this function are done nonrelativistically
## Proton kinetic energy in the lab frame before fusion
E_proton_nonrelativistic = Energy_step*slice_number**2
## Corresponding square norm of proton momentum
p_proton_sq = 2.*scc.m_p*E_proton_nonrelativistic
## Kinetic energy in the lab frame after
## proton + boron 11 -> alpha + beryllium 8
E_after_fusion = E_proton_nonrelativistic + E_fusion
## Compute the theoretical maximum and minimum energy of alpha1 in the lab frame. This
## calculation is done by noting that the maximum (minimum) energy corresponds to an alpha
## emitted exactly in the (opposite) direction of the proton in the lab frame. This
## calculation involves solving a polynomial equation of order 2 in p_alpha1.
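        ## A minimal sketch of that quadratic, in the notation used just below: write
        ## p_p = sqrt(p_proton_sq) and E = E_after_fusion. Collinear emission gives
        ##     p_alpha1 + p_Be = p_p                              (momentum conservation)
        ##     p_alpha1**2/(2*m_a) + p_Be**2/(2*m_be) = E         (energy conservation)
        ## Substituting p_Be = p_p - p_alpha1 and multiplying through by 2*m_a yields
        ##     (1 + m_a/m_be)*p_alpha1**2 - 2*(m_a/m_be)*p_p*p_alpha1 + (m_a/m_be)*p_p**2 - 2*m_a*E = 0,
        ## whose two roots are exactly max_p_alpha1 and min_p_alpha1 below.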
max_p_alpha1 = (m_a/m_be*np.sqrt(p_proton_sq) + \
np.sqrt(-m_a/m_be*p_proton_sq + 2.*E_after_fusion*m_a*(m_a/m_be + 1.))) / \
(m_a/m_be + 1.)
min_p_alpha1 = (m_a/m_be*np.sqrt(p_proton_sq) - \
np.sqrt(-m_a/m_be*p_proton_sq + 2.*E_after_fusion*m_a*(m_a/m_be + 1.))) / \
(m_a/m_be + 1.)
max_energy_alpha1 = max_p_alpha1**2/(2*m_a)
min_energy_alpha1 = min_p_alpha1**2/(2*m_a)
## Corresponding max/min kinetic energy of Beryllium in the lab frame
max_E_beryllium = E_after_fusion - min_energy_alpha1
min_E_beryllium = E_after_fusion - max_energy_alpha1
## Corresponding max/min momentum square of Beryllium in the lab frame
max_p_sq_beryllium = 2.*m_be*max_E_beryllium
min_p_sq_beryllium = 2.*m_be*min_E_beryllium
## Corresponding max/min kinetic energy in the lab frame for alpha2 + alpha3 after
## Beryllium decay
max_energy_alpha2_plus_3 = max_E_beryllium + E_decay
min_energy_alpha2_plus_3 = min_E_beryllium + E_decay
## Compute the theoretical maximum and minimum energy of alpha2 and alpha3 in the lab
## frame. This calculation is done by noting that the maximum (minimum) energy corresponds
## to an alpha emitted exactly in the (opposite) direction of a beryllium with energy
## max_E_beryllium (min_E_beryllium). This calculation involves solving a polynomial
## equation of order 2 in p_alpha23.
max_p_alpha23 = 0.5*(np.sqrt(max_p_sq_beryllium) + \
np.sqrt(4*m_a*max_energy_alpha2_plus_3 - max_p_sq_beryllium))
min_p_alpha23 = 0.5*(np.sqrt(min_p_sq_beryllium) - \
np.sqrt(4*m_a*min_energy_alpha2_plus_3 - min_p_sq_beryllium))
max_energy_alpha23 = max_p_alpha23**2/(2*m_a)
min_energy_alpha23 = min_p_alpha23**2/(2*m_a)
## Get the energy of all alphas in the slice
energy_alpha_slice = energy_alpha_simulation[(z_alpha >= slice_number)* \
(z_alpha < (slice_number + 1))]
## Energy of alphas1 (here, first macroparticle of each fusion event) in the slice
energy_alpha1_simulation = energy_alpha_slice[::6]
## Energy of alphas2 (here, third macroparticle of each fusion event) in the slice
energy_alpha2_simulation = energy_alpha_slice[2::6]
## Energy of alphas3 (here, fifth macroparticle of each fusion event) in the slice
energy_alpha3_simulation = energy_alpha_slice[4::6]
assert(is_close(np.amax(energy_alpha1_simulation), max_energy_alpha1, rtol=1.e-2))
assert(is_close(np.amin(energy_alpha1_simulation), min_energy_alpha1, rtol=1.e-2))
## Tolerance is quite high below because we don't have a lot of alphas to produce good
## statistics and an event like alpha1 emitted exactly in direction of proton & alpha2
## emitted exactly in direction opposite to Beryllium is somewhat rare.
assert(is_close(np.amax(energy_alpha2_simulation), max_energy_alpha23, rtol=2.5e-1))
assert(is_close(np.amin(energy_alpha2_simulation), min_energy_alpha23, rtol=2.5e-1))
assert(is_close(np.amax(energy_alpha3_simulation), max_energy_alpha23, rtol=2.5e-1))
assert(is_close(np.amin(energy_alpha3_simulation), min_energy_alpha23, rtol=2.5e-1))
def check_xy_isotropy(data):
## Checks that the alpha particles are emitted isotropically in x and y
average_px_sq = np.average(data["alpha_px_end"]*data["alpha_px_end"])
average_py_sq = np.average(data["alpha_py_end"]*data["alpha_py_end"])
average_pz_sq = np.average(data["alpha_pz_end"]*data["alpha_pz_end"])
assert(is_close(average_px_sq, average_py_sq, rtol = 5.e-2))
assert(average_pz_sq > average_px_sq)
assert(average_pz_sq > average_py_sq)
def sigmav_thermal_fit_lowE_nonresonant(T):
## Temperature T is in keV
## Returns the nonresonant average of cross section multiplied by relative velocity in m^3/s,
## in the range T <= 70 keV, as described by equation 9 of <NAME> and <NAME>,
## Nuclear Fusion, 40, 865 (2000).
E0 = (E_Gamow_keV/4.)**(1./3.) * T**(2./3.)
DE0 = 4.*np.sqrt(T*E0/3.)
C0 = 197.*1.e3
C1 = 0.24*1.e3
C2 = 2.31e-4*1.e3
tau = 3.*E0/T
Seff = C0*(1.+5./(12.*tau)) + C1*(E0+35./36.*T) + C2*(E0**2 + 89./36.*E0*T)
## nonresonant sigma times vrel, in barn meter per second
sigmav_nr_bmps = np.sqrt(2*T*keV_to_Joule/m_reduced) * DE0*Seff/T**2 * np.exp(-tau)
## Return result in cubic meter per second
return sigmav_nr_bmps*barn_to_square_meter
def sigmav_thermal_fit_lowE_resonant(T):
## Temperature T is in keV
## Returns the resonant average of cross section multiplied by relative velocity in m^3/s,
## in the range T <= 70 keV, as described by equation 11 of <NAME> and <NAME>,
## Nuclear Fusion, 40, 865 (2000).
return 5.41e-21 * np.exp(-148./T) / T**(3./2.)
def sigmav_thermal_fit_lowE(T):
## Temperature T is in keV
## Returns the average of cross section multiplied by relative velocity in m^3/s, using the
## fits described in section 3.1 of <NAME> and <NAME>, Nuclear Fusion, 40, 865 (2000).
## The fits are valid for T <= 70 keV.
return sigmav_thermal_fit_lowE_nonresonant(T) + sigmav_thermal_fit_lowE_resonant(T)
def expected_alpha_thermal(T, proton_density, boron_density, dV, dt):
## Computes the expected number of produced alpha particles when the protons and borons follow
## a Maxwellian distribution with a temperature T, in keV. This uses the thermal fits described
## in <NAME> and <NAME>, Nuclear Fusion, 40, 865 (2000).
## The fit used here is only valid in the range T <= 70 keV.
assert((T >=0) and (T<=70))
sigma_times_vrel = sigmav_thermal_fit_lowE(T)
## Factor 3 is here because each fusion event produces 3 alphas.
return 3.*proton_density*boron_density*sigma_times_vrel*dV*dt
def check_thermal_alpha_yield(data):
## Checks that the number of alpha particles in test3 is as expected
Temperature = 44. # keV
    proton_density = 1.e28
from __future__ import division
import sys
import os
sys.path.append(os.path.abspath("."))
sys.dont_write_bytecode = True
__author__ = "COSAL"
from mos.search.cacher import tfidf_cache
from mos.search_store.code_store import CodeStore, ASTDistanceStore, ExecStore
from mos.search_store.elastic_store import ContextStore
from mos.blocks.contest_meta_block import ContestMeta
from mos.search import multi_objective as mo
from utils import logger, cache, stat
from joblib import Parallel, delayed
import multiprocessing
import properties
LOGGER = logger.get_logger(os.path.basename(__file__.split(".")[0]))
def get_code_store():
return CodeStore()
def get_ast_distance_store():
return ASTDistanceStore()
def get_exec_store():
return ExecStore()
def get_elastic_store():
return ContextStore(index_name=ContextStore.CONTEXT_INDEX)
def make_search_query(language, tokens, token_key):
return {
"bool": {
"should": {
"match": {
token_key: " ".join(tokens)
}
},
"must": {
"match": {"language": language}
}
}
}
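# For illustration (hypothetical inputs): make_search_query("java", ["sort", "merge"], "contextualTokens")
# builds a bool query whose "should" clause matches the joined tokens against the token field and whose
# "must" clause pins the language, i.e.
#   {"bool": {"should": {"match": {"contextualTokens": "sort merge"}},
#             "must": {"match": {"language": "java"}}}}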
def contextual_search(language, tokens, token_key, search_limit):
search_query = make_search_query(language, tokens, token_key)
search_results = {}
for i, hit in enumerate(get_elastic_store().search(search_query, search_limit)["hits"]["hits"]):
search_results[hit["_source"]["uid"]] = {
"uid": hit["_source"]["uid"],
"tokens": hit["_source"][token_key],
"score": hit["_score"],
}
return search_results
def ast_search(source_uid, search_limit):
search_results = {}
similar_nodes = get_ast_distance_store().get_most_similar_nodes(source_uid, search_limit)
if not similar_nodes:
return search_results
for node in similar_nodes:
target_uid = node["uid2"] if node["uid1"] == source_uid else node["uid1"]
search_results[target_uid] = {
"uid": target_uid,
"score": node["distance"]
}
return search_results
def semantic_search(source_file_path, language, search_limit):
search_results = {}
exec_store = get_exec_store()
n_similar_nodes = exec_store.count_exec_distances_for_file(source_file_path)
if n_similar_nodes == 0:
return search_results
similar_nodes = exec_store.get_exec_distances_for_file(source_file_path)
source_file_path = cache.get_repo_local_path(source_file_path)
query_snippets = {}
result_files = {}
for similar_node in similar_nodes:
if similar_node["file1"] == source_file_path:
q_snippet = similar_node["method1"]
file_result = result_files.get(similar_node["file2"], {})
if q_snippet not in file_result or file_result[q_snippet]["score"] < similar_node["score"]:
file_result[q_snippet] = similar_node
result_files[similar_node["file2"]] = file_result
elif similar_node["file2"] == source_file_path:
q_snippet = similar_node["method2"]
file_result = result_files.get(similar_node["file1"], {})
if q_snippet not in file_result or file_result[q_snippet]["score"] < similar_node["score"]:
file_result[q_snippet] = similar_node
result_files[similar_node["file1"]] = file_result
search_results = {}
for target_file, file_result in result_files.items():
target_id = get_code_store().get_code_block_id(target_file, language)
if not target_id:
continue
score = 0.0
for q_snippet, similar_node in file_result.items():
score += similar_node["score"]
search_results[target_id] = {
"uid": target_id,
"score": score
}
return search_results
def merge_search_results(search_results, defaults):
search_fields = search_results.keys()
all_keys = set()
for field in search_fields:
all_keys = all_keys.union(search_results[field].keys())
clean_results = {}
partial_results = {}
for uid in all_keys:
result = {"uid": uid}
all_present = True
for field in search_fields:
if uid in search_results[field]:
result[field] = search_results[field][uid]["score"]
else:
all_present = False
result[field] = defaults[field]
if all_present:
clean_results[uid] = result
else:
partial_results[uid] = result
return clean_results, partial_results
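# For illustration (hypothetical scores): with
#   search_results = {"ast": {"u1": {"score": 0.1}},
#                     "context": {"u1": {"score": 7.2}, "u2": {"score": 3.0}}}
# and defaults = {"ast": float("inf"), "context": float("-inf")},
# "u1" lands in clean_results (it has a score from every field) while "u2" lands in
# partial_results, its missing "ast" entry filled from defaults.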
def get_top_tokens(tokens):
all_top_tokens = tfidf_cache.load_top_tfidf_tokens()
return list(set(tokens).intersection(all_top_tokens))
def get_contest_details(uid):
cb = get_code_store().fetch_code_block(uid)
contest_meta = cb.get("contestMeta", None)
if contest_meta:
return ContestMeta.from_bson(contest_meta)
return None
def get_contest_meta(search_results):
contests_meta = []
for search_result in search_results:
contests_meta.append(get_contest_details(search_result["uid"]))
return contests_meta
def search_submission(code_block_db, search_params=["ast", "contextualTokens", "semantic"], search_limit=1000):
"""
:param code_block_db: Instance of code block db
:param search_params: List of params to search. Options - [ast, contextualTokens/tfidfTokens]
:param search_limit: Limit of search. Optional. Defaults to 1000
:return: List of search results
"""
defaults = {}
mo_keys = []
mo_betters = {}
mo_preferences = {"ast": 1, "context": 2}
search_results = {}
if "ast" in search_params:
search_results["ast"] = ast_search(code_block_db["uid"], search_limit)
mo_keys.append("ast")
mo_betters["ast"] = mo.less
defaults["ast"] = float("inf")
if "contextualTokens" in search_params or "tfidfTokens" in search_params:
tokens = get_top_tokens(code_block_db["contextualTokens"])
token_key = "contextualTokens"
if "tfidfTokens" in search_params:
tokens = get_top_tokens(tokens)
token_key = "tfidfTokens"
search_results["context"] = contextual_search(properties.LANGUAGE_JAVA, tokens, token_key, search_limit)
mo_keys.append("context")
mo_betters["context"] = mo.more
defaults["context"] = float("-inf")
if "semantic" in search_params:
search_results["semantic"] = semantic_search(code_block_db["sourceFile"], properties.LANGUAGE_JAVA, search_limit)
mo_keys.append("semantic")
mo_betters["ast"] = mo.more
defaults["ast"] = float("-inf")
clean_results, partial_results = merge_search_results(search_results, defaults)
results = clean_results if len(clean_results) > 0 else partial_results
sorted_results = mo.nsga(results, keys=mo_keys, betters=mo_betters, preferences=mo_preferences)
return sorted_results
def rank_results(query_contest_meta, contests_meta):
query_contest_key = query_contest_meta.make_key()
best_rank = None
matches = []
for i, contest_meta in enumerate(contests_meta):
if best_rank is not None and i >= 12:
break
result_contest_key = contest_meta.make_key()
if best_rank is None and query_contest_key == result_contest_key:
best_rank = i + 1
if i < 12 and query_contest_key == result_contest_key:
matches.append(i + 1)
rank_meta = {
"best_rank": best_rank,
"matches": matches
}
return rank_meta
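# For illustration (hypothetical inputs): if the query's problem key first reappears at positions 2
# and 7 of contests_meta (0-based), rank_results returns {"best_rank": 3, "matches": [3, 8]},
# i.e. ranks are reported 1-based and only matches within the first 12 results are collected.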
def get_meta_file(file_prefix):
return os.path.join(properties.CONFIG.CODE_HOME, "meta_results",
properties.CONFIG.get_dataset(), "search", "%s.pkl" % file_prefix)
def run_block(code_block_db, search_params, search_limit, index, n_code_blocks, save_n_top=20):
uid = code_block_db["uid"]
if index % 1 == 0:
LOGGER.info("Processing document %d/%d ... " % (index + 1, n_code_blocks))
ast_computed_count = get_ast_distance_store().count_nodes_for_uid(uid)
if ast_computed_count == 0:
if index % 10 == 0:
LOGGER.info("@COSAL: Looks like ast comparison has not been performed for this node.!!")
return None
if code_block_db.get("contestMeta", None) is None:
return None
query_contest_meta = ContestMeta.from_bson(code_block_db["contestMeta"])
search_results = search_submission(code_block_db, search_params, search_limit)
contests_meta = get_contest_meta(search_results)
rank_meta = rank_results(query_contest_meta, contests_meta)
# if len(search_results) > save_n_top:
# search_results = search_results[:save_n_top]
data_results = {
"uid": uid,
# "search_results": search_results,
"rank_meta": rank_meta
}
return data_results
def runner_parallel(search_params, save_file_prefix, search_limit=1000):
LOGGER.info("# Running submission search for python documents for search_params: '%s' .... " % ','.join(search_params))
index = 0
n_code_blocks = get_code_store().count_code_blocks(language=properties.LANGUAGE_PYTHON)
test_limit = None # TODO. Remove this
save_n_top = 20
code_blocks = get_code_store().get_random_code_blocks(language=properties.LANGUAGE_PYTHON, limit=30)
# cleaned = {}
# for index, code_block_db in enumerate(code_blocks):
# node = run_block(code_block_db, search_params, search_limit, n_code_blocks, save_n_top)
# print(123)
# if node is None:
# print(456)
# continue
# print(789)
# cleaned[node["uid"]] = node
nodes = Parallel(n_jobs=8)(delayed(run_block)(code_block_db, search_params, search_limit, index, n_code_blocks, save_n_top)
for index, code_block_db in enumerate(code_blocks))
cleaned = {}
for node in nodes:
if node is None:
continue
cleaned[node["uid"]] = node
pkl_file = get_meta_file(save_file_prefix)
cache.save_pickle(pkl_file, cleaned)
# def runner(search_params, search_limit=1000, file_prefix="ast_context"):
# index = 0
# n_code_blocks = get_code_store().count_code_blocks(language=properties.LANGUAGE_PYTHON)
# data_results = {}
# for code_block_db in get_code_store().get_code_blocks(properties.LANGUAGE_PYTHON):
# # for code_block_db in [get_code_store().fetch_code_block("e466d35a4a8e4403b027ba840053ea56")]:
# uid = code_block_db["uid"]
# index += 1
# if index % 1 == 0:
# LOGGER.info("Processing document %d/%d ... " % (index, n_code_blocks))
# ast_computed_count = get_ast_distance_store().count_nodes_for_uid(uid)
# if ast_computed_count == 0:
# LOGGER.info("@COSAL: Looks like ast comparison has not been performed for this node.!!")
# continue
# if code_block_db.get("contestMeta", None) is None:
# continue
# query_contest_meta = ContestMeta.from_bson(code_block_db["contestMeta"])
# search_results = search_submission(code_block_db, search_params, search_limit)
# rank_meta = rank_results(query_contest_meta, search_results)
# data_results[uid] = {
# "search_results": search_results,
# "rank_meta": rank_meta
# }
# if len(data_results) == 1000:
# break
# pkl_file = get_meta_file(file_prefix)
# cache.save_pickle(pkl_file, data_results)
def get_problem_counts(db_code_blocks):
pc = {}
for cb in db_code_blocks:
key = ContestMeta.from_bson(cb["contestMeta"]).make_key()
if key not in pc:
pc[key] = 1
else:
pc[key] += 1
return pc
def analyze_pkl(file_prefix):
meta_data = cache.load_pickle(get_meta_file(file_prefix))
best_ranks = []
all_matches_in_top_10 = []
n_top_10 = 0
tp, fp, fn = 0, 0, 0
py_seen, java_seen = set(), set()
for uid, data_result in meta_data.items():
res_key = ContestMeta.from_bson(get_code_store().fetch_code_block(uid)["contestMeta"]).make_key()
py_seen.add(res_key)
rank_meta = data_result["rank_meta"]
best_rank = rank_meta["best_rank"]
if best_rank == 1:
java_seen.add(res_key)
if best_rank <= 1:
# Predicted rank as 1
tp += 1
elif best_rank <= 10:
# Predicted rank in top 10
fp += 1
elif best_rank > 10:
# Not predicted in top 10 hence failed to rank
fn += 1
best_ranks.append(rank_meta["best_rank"])
        matches_in_top_10 = len([m for m in rank_meta["matches"] if m <= 10])  # rank_meta stores the matching ranks, not a precomputed count
if matches_in_top_10 > 0:
n_top_10 += 1
all_matches_in_top_10.append(matches_in_top_10)
prec = tp / (tp + fp)
print("Precision : %.4f" % prec)
## Recall
java_cbs = get_code_store().get_code_blocks(language=properties.LANGUAGE_JAVA)
java_problem_counts = get_problem_counts(java_cbs)
r_total = 0
r_fn = 0
for key in py_seen:
r_total += java_problem_counts[key]
for key in py_seen - java_seen:
r_fn += java_problem_counts[key]
rec = (r_total - r_fn) / r_total
print("Recall : %.4f" % rec)
f1 = 2 * prec * rec / (prec + rec)
print("F1 : %.4f" % f1)
# print("## N top 10 ratio: %d/%d " % (n_top_10, len(meta_data)))
# print("### Best Rank Summary:")
# best_rank_stats = stat.Stat(best_ranks)
# print(best_rank_stats.report())
# print("### All Matches in top-10 Summary:")
# all_matches_in_top_10_stats = stat.Stat(all_matches_in_top_10)
# print(all_matches_in_top_10_stats.report())
def analyze_search_pkl(file_prefix):
print("# Processing %s ... " % file_prefix)
def ranks_less_than_k(ranks, k):
cnt = 0
for r in ranks:
if r <= k:
cnt += 1
else:
break
return cnt
meta_data = cache.load_pickle(get_meta_file(file_prefix))
n = len(meta_data)
rr = []
p_1, p_3, p_5, p_10 = [], [], [], []
r_1, r_3, r_5, r_10 = [], [], [], []
for uid, data_result in meta_data.items():
rank_meta = data_result["rank_meta"]
best_rank = rank_meta["best_rank"]
matches = sorted(rank_meta["matches"])
if matches:
rr.append(1/matches[0])
else:
rr.append(0)
r_1.append(1 if matches and matches[0] <= 1 else 0)
r_3.append(1 if matches and matches[0] <= 3 else 0)
r_5.append(1 if matches and matches[0] <= 5 else 0)
r_10.append(1 if matches and matches[0] <= 10 else 0)
p_1.append(ranks_less_than_k(matches, 1) / 1)
p_3.append(ranks_less_than_k(matches, 3) / 3)
p_5.append(ranks_less_than_k(matches, 5) / 5)
p_10.append(ranks_less_than_k(matches, 10) / 10)
print("```")
print("MRR : %0.4f\n" % (sum(rr) / n))
print("Precision@1 : %0.4f" % (sum(p_1) / n))
print("Precision@3 : %0.4f" % (sum(p_3) / n))
print("Precision@5 : %0.4f" % (sum(p_5) / n))
print("Precision@10: %0.4f\n" % (sum(p_10) / n))
print("Recall@1 : %0.4f" % (sum(r_1) / n))
print("Recall@3 : %0.4f" | |
        for x in range(21, 30):
self.slice5.add_module(str(x), vgg_pretrained_features[x])
if not requires_grad:
for param in self.parameters():
param.requires_grad = False
def forward(self, X):
h_relu1 = self.slice1(X)
h_relu2 = self.slice2(h_relu1)
h_relu3 = self.slice3(h_relu2)
h_relu4 = self.slice4(h_relu3)
h_relu5 = self.slice5(h_relu4)
out = [h_relu1, h_relu2, h_relu3, h_relu4, h_relu5]
return out
class VGGLoss(nn.Module):
def __init__(self):
super(VGGLoss, self).__init__()
self.vgg = VGG19()
self.gram = Gram()
if torch.cuda.is_available():
self.vgg.cuda()
self.gram.cuda()
self.vgg.eval()
set_requires_grad(self.vgg, False)
self.L1Loss = nn.L1Loss()
self.criterion2 = nn.MSELoss()
self.weights = [1.0 / 32, 1.0 / 16, 1.0 / 8, 1.0 , 1.0]
def forward(self, x, y):
# x_vgg, y_vgg = self.vgg(x), self.vgg(y)
contentloss = 0
styleloss = 0
x_vgg = self.vgg(x)
with torch.no_grad():
y_vgg = self.vgg(y)
for i in range(len(x_vgg)):
styleloss += self.weights[i] * self.criterion2(self.gram(x_vgg[i]), self.gram(y_vgg[i].detach()))
contentloss = self.L1Loss(x_vgg[3], y_vgg[3].detach())
allloss = self.L1Loss(x,y) + contentloss + 100 * styleloss
return allloss
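# A minimal usage sketch (tensor names are assumed, not from this file): given generated and
# target batches `fake` and `real` of shape (N, 3, H, W) on the same device,
#     criterion = VGGLoss()
#     loss = criterion(fake, real)
# combines an L1 pixel term, an L1 content term on the 4th VGG feature map, and a
# Gram-matrix style term weighted by 100, exactly as computed in forward() above.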
# Defines the generator that consists of Resnet blocks between a few
# downsampling/upsampling operations.
# Code and idea originally from <NAME>'s architecture.
# https://github.com/jcjohnson/fast-neural-style/
class ResnetGenerator(nn.Module):
"""Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations.
We adapt Torch code and idea from <NAME>'s neural style transfer project(https://github.com/jcjohnson/fast-neural-style)
"""
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect', no_antialias=False, no_antialias_up=False, opt=None):
"""Construct a Resnet-based generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers
n_blocks (int) -- the number of ResNet blocks
padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero
"""
assert(n_blocks >= 0)
super(ResnetGenerator, self).__init__()
self.opt = opt
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = [nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
norm_layer(ngf),
nn.ReLU(True)]
n_downsampling = 2
for i in range(n_downsampling): # add downsampling layers
mult = 2 ** i
if(no_antialias):
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True)]
else:
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=1, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True),
Downsample(ngf * mult * 2)]
mult = 2 ** n_downsampling
for i in range(n_blocks): # add ResNet blocks
model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
for i in range(n_downsampling): # add upsampling layers
mult = 2 ** (n_downsampling - i)
if no_antialias_up:
model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=2,
padding=1, output_padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
else:
model += [Upsample(ngf * mult),
nn.Conv2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=1,
padding=1, # output_padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
model += [nn.ReflectionPad2d(3)]
model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
model += [nn.Tanh()]
self.model = nn.Sequential(*model)
def forward(self, input, layers=[], encode_only=False):
if -1 in layers:
layers.append(len(self.model))
if len(layers) > 0:
feat = input
feats = []
for layer_id, layer in enumerate(self.model):
# print(layer_id, layer)
feat = layer(feat)
if layer_id in layers:
# print("%d: adding the output of %s %d" % (layer_id, layer.__class__.__name__, feat.size(1)))
feats.append(feat)
else:
# print("%d: skipping %s %d" % (layer_id, layer.__class__.__name__, feat.size(1)))
pass
if layer_id == layers[-1] and encode_only:
# print('encoder only return features')
return feats # return intermediate features alone; stop in the last layers
return feat, feats # return both output and intermediate features
else:
"""Standard forward"""
fake = self.model(input)
return fake
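# A minimal usage sketch (names and layer indices are assumed, not taken from this file):
#     netG = ResnetGenerator(3, 3, ngf=64, n_blocks=9)
#     fake = netG(real)                                        # standard forward pass
#     feats = netG(real, layers=[0, 4, 8, 12, 16], encode_only=True)
# With encode_only=True and a non-empty `layers` list, forward() above returns only the
# requested intermediate activations instead of the translated image.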
class ResnetDecoder(nn.Module):
"""Resnet-based decoder that consists of a few Resnet blocks + a few upsampling operations.
"""
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect', no_antialias=False):
"""Construct a Resnet-based decoder
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers
n_blocks (int) -- the number of ResNet blocks
padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero
"""
assert(n_blocks >= 0)
super(ResnetDecoder, self).__init__()
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = []
n_downsampling = 2
mult = 2 ** n_downsampling
for i in range(n_blocks): # add ResNet blocks
model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
for i in range(n_downsampling): # add upsampling layers
mult = 2 ** (n_downsampling - i)
if(no_antialias):
model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=2,
padding=1, output_padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
else:
model += [Upsample(ngf * mult),
nn.Conv2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=1,
padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
model += [nn.ReflectionPad2d(3)]
model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
model += [nn.Tanh()]
self.model = nn.Sequential(*model)
def forward(self, input):
"""Standard forward"""
return self.model(input)
class ResnetEncoder(nn.Module):
"""Resnet-based encoder that consists of a few downsampling + several Resnet blocks
"""
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect', no_antialias=False):
"""Construct a Resnet-based encoder
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers
n_blocks (int) -- the number of ResNet blocks
padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero
"""
assert(n_blocks >= 0)
super(ResnetEncoder, self).__init__()
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = [nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
norm_layer(ngf),
nn.ReLU(True)]
n_downsampling = 2
for i in range(n_downsampling): # add downsampling layers
mult = 2 ** i
if(no_antialias):
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True)]
else:
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=1, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True),
Downsample(ngf * mult * 2)]
mult = 2 ** n_downsampling
for i in range(n_blocks): # add ResNet blocks
model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
self.model = nn.Sequential(*model)
def forward(self, input):
"""Standard forward"""
return self.model(input)
class ResnetBlock(nn.Module):
"""Define a Resnet block"""
def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
"""Initialize the Resnet block
A resnet block is a conv block with skip connections
We construct a conv block with build_conv_block function,
and implement skip connections in <forward> function.
Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
"""
super(ResnetBlock, self).__init__()
self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)
def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
"""Construct a convolutional block.
Parameters:
dim (int) -- the number of channels in the conv layer.
padding_type (str) -- the name of padding layer: reflect | replicate | zero
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers.
use_bias (bool) -- if the conv layer uses bias or not
Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))
"""
conv_block = []
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]
if use_dropout:
conv_block += [nn.Dropout(0.5)]
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]
return nn.Sequential(*conv_block)
def forward(self, x):
"""Forward function (with skip connections)"""
out = x + self.conv_block(x) # add skip connections
return out
class MobileResnetBlock(nn.Module):
def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
super(MobileResnetBlock, self).__init__()
self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)
def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
conv_block = []
        p = 0
target, source, env):
"""
In python 3, and in some of our tests, sys.stdout is
a String io object, and it takes unicode strings only
This code assumes s is a regular string.
"""
sys.stdout.write(s + "\n")
def __call__(self, target, source, env,
exitstatfunc=_null,
presub=_null,
show=_null,
execute=_null,
chdir=_null,
executor=None):
if not is_List(target):
target = [target]
if not is_List(source):
source = [source]
if presub is _null:
presub = self.presub
if presub is _null:
presub = print_actions_presub
if exitstatfunc is _null: exitstatfunc = self.exitstatfunc
if show is _null: show = print_actions
if execute is _null: execute = execute_actions
if chdir is _null: chdir = self.chdir
save_cwd = None
if chdir:
save_cwd = os.getcwd()
try:
chdir = str(chdir.get_abspath())
except AttributeError:
if not is_String(chdir):
if executor:
chdir = str(executor.batches[0].targets[0].dir)
else:
chdir = str(target[0].dir)
if presub:
if executor:
target = executor.get_all_targets()
source = executor.get_all_sources()
t = ' and '.join(map(str, target))
l = '\n '.join(self.presub_lines(env))
out = "Building %s with action:\n %s\n" % (t, l)
sys.stdout.write(out)
cmd = None
if show and self.strfunction:
if executor:
target = executor.get_all_targets()
source = executor.get_all_sources()
try:
cmd = self.strfunction(target, source, env, executor)
except TypeError:
cmd = self.strfunction(target, source, env)
if cmd:
if chdir:
cmd = ('os.chdir(%s)\n' % repr(chdir)) + cmd
try:
get = env.get
except AttributeError:
print_func = self.print_cmd_line
else:
print_func = get('PRINT_CMD_LINE_FUNC')
if not print_func:
print_func = self.print_cmd_line
print_func(cmd, target, source, env)
stat = 0
if execute:
if chdir:
os.chdir(chdir)
try:
stat = self.execute(target, source, env, executor=executor)
if isinstance(stat, SCons.Errors.BuildError):
s = exitstatfunc(stat.status)
if s:
stat.status = s
else:
stat = s
else:
stat = exitstatfunc(stat)
finally:
if save_cwd:
os.chdir(save_cwd)
if cmd and save_cwd:
print_func('os.chdir(%s)' % repr(save_cwd), target, source, env)
return stat
def _string_from_cmd_list(cmd_list):
"""Takes a list of command line arguments and returns a pretty
representation for printing."""
cl = []
for arg in map(str, cmd_list):
if ' ' in arg or '\t' in arg:
arg = '"' + arg + '"'
cl.append(arg)
return ' '.join(cl)
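# For illustration: _string_from_cmd_list(['gcc', '-o', 'my prog', 'main.c'])
# returns 'gcc -o "my prog" main.c', quoting only the argument that contains whitespace.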
default_ENV = None
def get_default_ENV(env):
"""
A fiddlin' little function that has an 'import SCons.Environment' which
can't be moved to the top level without creating an import loop. Since
this import creates a local variable named 'SCons', it blocks access to
the global variable, so we move it here to prevent complaints about local
variables being used uninitialized.
"""
global default_ENV
try:
return env['ENV']
except KeyError:
if not default_ENV:
import SCons.Environment
# This is a hideously expensive way to get a default shell
# environment. What it really should do is run the platform
# setup to get the default ENV. Fortunately, it's incredibly
# rare for an Environment not to have a shell environment, so
# we're not going to worry about it overmuch.
default_ENV = SCons.Environment.Environment()['ENV']
return default_ENV
def _subproc(scons_env, cmd, error = 'ignore', **kw):
"""Do common setup for a subprocess.Popen() call
This function is still in draft mode. We're going to need something like
it in the long run as more and more places use subprocess, but I'm sure
it'll have to be tweaked to get the full desired functionality.
one special arg (so far?), 'error', to tell what to do with exceptions.
"""
# allow std{in,out,err} to be "'devnull'". This is like
# subprocess.DEVNULL, which does not exist for Py2. Use the
# subprocess one if possible.
# Clean this up when Py2 support is dropped
try:
from subprocess import DEVNULL
except ImportError:
DEVNULL = None
for stream in 'stdin', 'stdout', 'stderr':
io = kw.get(stream)
if is_String(io) and io == 'devnull':
if DEVNULL:
kw[stream] = DEVNULL
else:
kw[stream] = open(os.devnull, "r+")
# Figure out what shell environment to use
ENV = kw.get('env', None)
if ENV is None: ENV = get_default_ENV(scons_env)
# Ensure that the ENV values are all strings:
new_env = {}
for key, value in ENV.items():
if is_List(value):
# If the value is a list, then we assume it is a path list,
# because that's a pretty common list-like value to stick
# in an environment variable:
value = SCons.Util.flatten_sequence(value)
new_env[key] = os.pathsep.join(map(str, value))
else:
# It's either a string or something else. If it's a string,
# we still want to call str() because it might be a *Unicode*
# string, which makes subprocess.Popen() gag. If it isn't a
# string or a list, then we just coerce it to a string, which
# is the proper way to handle Dir and File instances and will
# produce something reasonable for just about everything else:
new_env[key] = str(value)
kw['env'] = new_env
try:
pobj = subprocess.Popen(cmd, **kw)
except EnvironmentError as e:
if error == 'raise': raise
# return a dummy Popen instance that only returns error
class dummyPopen(object):
def __init__(self, e): self.exception = e
def communicate(self, input=None): return ('', '')
def wait(self): return -self.exception.errno
stdin = None
class f(object):
def read(self): return ''
def readline(self): return ''
def __iter__(self): return iter(())
stdout = stderr = f()
pobj = dummyPopen(e)
finally:
# clean up open file handles stored in parent's kw
for k, v in kw.items():
if inspect.ismethod(getattr(v, 'close', None)):
v.close()
return pobj
class CommandAction(_ActionAction):
"""Class for command-execution actions."""
def __init__(self, cmd, **kw):
# Cmd can actually be a list or a single item; if it's a
# single item it should be the command string to execute; if a
# list then it should be the words of the command string to
# execute. Only a single command should be executed by this
# object; lists of commands should be handled by embedding
# these objects in a ListAction object (which the Action()
# factory above does). cmd will be passed to
# Environment.subst_list() for substituting environment
# variables.
if SCons.Debug.track_instances: logInstanceCreation(self, 'Action.CommandAction')
_ActionAction.__init__(self, **kw)
if is_List(cmd):
if [c for c in cmd if is_List(c)]:
raise TypeError("CommandAction should be given only "
"a single command")
self.cmd_list = cmd
def __str__(self):
if is_List(self.cmd_list):
return ' '.join(map(str, self.cmd_list))
return str(self.cmd_list)
def process(self, target, source, env, executor=None):
if executor:
result = env.subst_list(self.cmd_list, 0, executor=executor)
else:
result = env.subst_list(self.cmd_list, 0, target, source)
silent = None
ignore = None
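        # Peel leading modifier characters off the command word: '@' suppresses echoing
        # the command line (silent) and '-' ignores a nonzero exit status, mirroring the
        # conventions used by make.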
while True:
try: c = result[0][0][0]
except IndexError: c = None
if c == '@': silent = 1
elif c == '-': ignore = 1
else: break
result[0][0] = result[0][0][1:]
try:
if not result[0][0]:
result[0] = result[0][1:]
except IndexError:
pass
return result, ignore, silent
def strfunction(self, target, source, env, executor=None):
if self.cmdstr is None:
return None
if self.cmdstr is not _null:
from SCons.Subst import SUBST_RAW
if executor:
c = env.subst(self.cmdstr, SUBST_RAW, executor=executor)
else:
c = env.subst(self.cmdstr, SUBST_RAW, target, source)
if c:
return c
cmd_list, ignore, silent = self.process(target, source, env, executor)
if silent:
return ''
return _string_from_cmd_list(cmd_list[0])
def execute(self, target, source, env, executor=None):
"""Execute a command action.
This will handle lists of commands as well as individual commands,
because construction variable substitution may turn a single
"command" into a list. This means that this class can actually
handle lists of commands, even though that's not how we use it
externally.
"""
escape_list = SCons.Subst.escape_list
flatten_sequence = SCons.Util.flatten_sequence
try:
shell = env['SHELL']
except KeyError:
raise SCons.Errors.UserError('Missing SHELL construction variable.')
try:
spawn = env['SPAWN']
except KeyError:
raise SCons.Errors.UserError('Missing SPAWN construction variable.')
else:
if is_String(spawn):
spawn = env.subst(spawn, raw=1, conv=lambda x: x)
escape = env.get('ESCAPE', lambda x: x)
ENV = get_default_ENV(env)
# Ensure that the ENV values are all strings:
for key, value in ENV.items():
if not is_String(value):
if is_List(value):
# If the value is a list, then we assume it is a
# path list, because that's a pretty common list-like
# value to stick in an environment variable:
value = flatten_sequence(value)
ENV[key] = os.pathsep.join(map(str, value))
else:
# If it isn't a string or a list, then we just coerce
# it to a string, which is the proper way to handle
# Dir and File instances and will produce something
# reasonable for just about everything else:
ENV[key] = str(value)
"""Python3 Steven
common probability distributions
"""
import os
import numpy as np
import matplotlib.pyplot as plt
# from sympy import integrate
# import math
# from scipy.special import gamma,beta,factorial
# from permutationAndCom import permut,combinat
from distributions import *
from plotCommon import plotSub, scatterSub
#'''''''''''''''''''''''''''''''''''start plot distribution''''''''''''''''''''#
#---------------------------Discrete distribution------------------------------#
imgSavePath=r'..\images\\'
def testDiscrete_uniform_distribution(i=0):
N = 4
x = np.arange(0,N)
ax = plt.subplot(1,1,1)
plt.title('Discrete_uniform_distribution')
scatterSub(x, Discrete_uniform_distribution(x,N=N), ax,label='N=4')
N = 6
x = np.arange(0,N)
scatterSub(x, Discrete_uniform_distribution(x,N=N), ax,label='N=6')
N = 8
x = np.arange(0,N)
scatterSub(x, Discrete_uniform_distribution(x,N=N), ax,label='N=8')
#plt.xlim(0, 6)
plt.ylim(0, 0.35)
plt.savefig(imgSavePath + f'dsitribution{i}.png')
plt.show()
def testBinomial_distribution(i):
ax = plt.subplot(1,1,1)
plt.title('Binomial_distribution')
N = 15
x = np.arange(N)
plotSub(x, Binomial_distribution(N=N,p=0.2), ax, marker='.',label='N=15,p=0.2')
plotSub(x, Binomial_distribution(N=N,p=0.6), ax,marker='.',label='N=15,p=0.6')
N = 20
x = np.arange(N)
plotSub(x, Binomial_distribution(N=N,p=0.6), ax,marker='.',label='N=20,p=0.6')
plt.savefig(os.path.join(imgSavePath, f'dsitribution{i}.png'))
plt.show()
def testPoisson_distribution(i):
ax = plt.subplot(1,1,1)
plt.title('Poisson_distribution')
N=20
x = np.arange(N)
plotSub(x, Poisson_distribution(N=N), ax,marker='.',label='N=20,lam=1')
#scatterSub(x, Poisson_distribution(N=N), ax,label='N=20,lam=1')
plotSub(x, Poisson_distribution(N=N,lam=2), ax,marker='.',label='N=20,lam=2')
#scatterSub(x, Poisson_distribution(N=N,lam=2), ax,label='N=20,lam=2')
plotSub(x, Poisson_distribution(N=N,lam=4), ax,marker='.',label='N=20,lam=4')
#scatterSub(x, Poisson_distribution(N=N,lam=4), ax,label='N=20,lam=4')
plt.savefig(os.path.join(imgSavePath, f'dsitribution{i}.png'))
plt.show()
def testGeometric_distribution(i):
ax = plt.subplot(1,1,1)
plt.title('Geometric_distribution')
N = 15
x = np.arange(N)
plotSub(x, Geometric_distribution(N=N,p=0.2), ax,marker='.',label='N=20,p=0.2')
plotSub(x, Geometric_distribution(N=N,p=0.5), ax,marker='.',label='N=20,p=0.6')
plotSub(x, Geometric_distribution(N=N,p=0.8), ax,marker='.',label='N=25,p=0.6')
plt.savefig(os.path.join(imgSavePath, f'dsitribution{i}.png'))
plt.show()
def testHypergeometric_distribution(i):
ax = plt.subplot(1,1,1)
plt.title('Hypergeometric_distribution')
n = 100
x = np.arange(n)
plotSub(x, Hypergeometric_distribution(N=500,K=50,n=n), ax,marker='.',label='N=500,K=50,n=100')
n = 200
x = np.arange(n)
plotSub(x, Hypergeometric_distribution(N=500,K=60,n=n), ax,marker='.',label='N=500,K=60,n=200')
n = 300
x = np.arange(n)
plotSub(x, Hypergeometric_distribution(N=500,K=70,n=n), ax,marker='.',label='N=500,K=70,n=300')
plt.xlim(0, 65)
plt.savefig(os.path.join(imgSavePath, f'dsitribution{i}.png'))
plt.show()
def testZipfsLaw(i):
ax = plt.subplot(1,1,1)
plt.title('ZipfsLaw')
N=10
x = np.arange(N)
plotSub(x, ZipfsLaw(), ax,marker='.',label='N=10, s=1')
plotSub(x, ZipfsLaw(N=10, s=2), ax,marker='.',label='N=10, s=2')
plotSub(x, ZipfsLaw(N=10, s=3), ax,marker='.',label='N=10, s=3')
plotSub(x, ZipfsLaw(N=10, s=4), ax,marker='.',label='N=10, s=4')
#N=20
#plotSub(x, ZipfsLaw(N=20, s=2), ax,label='N=20, s=2')
plt.yscale("log")
plt.savefig(os.path.join(imgSavePath, f'dsitribution{i}.png'))
plt.show()
def testBeta_binomial_distribution(i):
ax = plt.subplot(1,1,1)
plt.title('Beta_binomial_distribution')
N=20
x = np.arange(N)
plotSub(x, Beta_binomial_distribution(N=N), ax,marker='.',label='alpha=0.2,bta=0.25')
plotSub(x, Beta_binomial_distribution(N=N,alpha=0.7,bta=2), ax,marker='.',label='alpha=0.7,bta=2')
plotSub(x, Beta_binomial_distribution(N=N,alpha=2,bta=2), ax,marker='.',label='alpha=2,bta=2')
plotSub(x, Beta_binomial_distribution(N=N,alpha=600,bta=400), ax,marker='.',label='alpha=600,bta=400')
plt.savefig(os.path.join(imgSavePath, f'dsitribution{i}.png'))
plt.show()
def testLogarithmic_distribution(i):
ax = plt.subplot(1,1,1)
plt.title('Logarithmic_distribution')
N=10
x = np.arange(N)
plotSub(x, Logarithmic_distribution(N=N), ax,marker='.',label='p=0.33')
plotSub(x, Logarithmic_distribution(N=N,p=0.66), ax,marker='.',label='p=0.66')
plotSub(x, Logarithmic_distribution(N=N,p=0.99), ax,marker='.',label='p=0.99')
plt.savefig(os.path.join(imgSavePath, f'dsitribution{i}.png'))
plt.show()
def testConway_Maxwell_Poisson_distribution(i):
ax = plt.subplot(1,1,1)
plt.title('Conway_Maxwell_Poisson_distribution')
N=20
x = np.arange(N)
plotSub(x, Conway_Maxwell_Poisson_distribution(N=N), ax,marker='.',label='lam=1, v=1.5')
plotSub(x, Conway_Maxwell_Poisson_distribution(N=N,lam=3, v=1.1), ax,marker='.',label='lam=3, v=1.1')
plotSub(x, Conway_Maxwell_Poisson_distribution(N=N,lam=5, v=0.7), ax,marker='.',label='lam=5, v=0.7')
plt.savefig(os.path.join(imgSavePath, f'dsitribution{i}.png'))
plt.show()
def testSkellam_distribution(i):
ax = plt.subplot(1,1,1)
plt.title('Skellam_distribution')
N=16
x = np.arange(N+1)-N/2
plotSub(x, Skellam_distribution(N=N), ax,marker='.',label='u1=1,u2=1')
plotSub(x, Skellam_distribution(N=N,u1=2,u2=2), ax,marker='.',label='u1=2,u2=2')
plotSub(x, Skellam_distribution(N=N,u1=3,u2=3), ax,marker='.',label='u1=3,u2=3')
plotSub(x, Skellam_distribution(N=N,u1=1,u2=3), ax,marker='.',label='u1=1,u2=3')
plt.savefig(os.path.join(imgSavePath, f'dsitribution{i}.png'))
plt.show()
def testYule_Simon_distribution(i):
ax = plt.subplot(1,1,1)
plt.title('Yule_Simon_distribution')
N=20
x = np.arange(N)+1
plotSub(x, Yule_Simon_distribution(N=N), ax,marker='.',label='ru=0.25')
plotSub(x, Yule_Simon_distribution(N=N,ru=0.5), ax,marker='.',label='ru=0.5')
plotSub(x, Yule_Simon_distribution(N=N,ru=1), ax,marker='.',label='ru=1')
plotSub(x, Yule_Simon_distribution(N=N,ru=2), ax,marker='.',label='ru=2')
plotSub(x, Yule_Simon_distribution(N=N,ru=4), ax,marker='.',label='ru=4')
plt.yscale("log")
plt.savefig(os.path.join(imgSavePath, f'dsitribution{i}.png'))
plt.show()
def testZeta_distribution(i):
ax = plt.subplot(1,1,1)
plt.title('Zeta_distribution')
N=16
x = np.arange(N)+1
plotSub(x, Zeta_distribution(N=N), ax,marker='.',label='s=2')
plotSub(x, Zeta_distribution(N=N,s=3), ax,marker='.',label='s=3')
plotSub(x, Zeta_distribution(N=N,s=4), ax,marker='.',label='s=4')
plotSub(x, Zeta_distribution(N=N,s=5), ax,marker='.',label='s=5')
plt.yscale("log")
plt.savefig(os.path.join(imgSavePath, f'dsitribution{i}.png'))
plt.show()
#---------------------------Continuous distribution------------------------------#
def testUniform_distribution(i):
x = np.linspace(1.0, 3.0, 10)
ax = plt.subplot(1,1,1)
plt.title('Uniform_distribution')
plotSub(x, Uniform_distribution(len(x)), ax,label='a=1,b=3')
x = np.linspace(2.0, 3.0, 10)
plotSub(x, Uniform_distribution(len(x),a=2,b=3), ax,label='a=2,b=3')
x = np.linspace(1.0, 4.0, 10)
plotSub(x, Uniform_distribution(len(x),a=1,b=4), ax,label='a=1,b=4')
#plt.xlim(0, 4)
plt.ylim(0, 1.2)
plt.savefig(os.path.join(imgSavePath, f'dsitribution{i}.png'))
plt.show()
def testNormalDistribution(i):
x = np.linspace(-5.0, 5.0, 30)
ax = plt.subplot(1,1,1)
plt.title('Normal Distribution')
plotSub(x, NormalDistribution(x), ax,label='u=0,delta=1')
plotSub(x, NormalDistribution(x,u=-1,delta=0.5), ax,label='u=-1,delta=0.5')
plotSub(x, NormalDistribution(x,u=1,delta=2), ax,label='u=1,delta=2')
plt.savefig(os.path.join(imgSavePath, f'dsitribution{i}.png'))
plt.show()
def testCauchy(i):
x = np.linspace(-5.0, 5.0, 100)
ax = plt.subplot(1,1,1)
plt.title('Cauchy Distribution')
#plotSub(x, Cauchy_pdf(x), ax,label='Cauchy')
plotSub(x, Cauchy_pdf(x,x0=0,scaler=0.75), ax,label='x0=0,scaler=0.75')
plotSub(x, Cauchy_pdf(x,x0=0,scaler=1), ax,label='x0=0,scaler=1')
#plotSub(x, NormalDistribution(x), ax,label='Normal')
plotSub(x, Cauchy_pdf(x,x0=0,scaler=2), ax,label='x0=0,scaler=2')
plotSub(x, Cauchy_pdf(x,x0=-2,scaler=1), ax,label='x0=-2,scaler=1')
plt.savefig(os.path.join(imgSavePath, f'dsitribution{i}.png'))
plt.show()
def testLaplace_distribution(i):
x = np.linspace(-15.0, 15.0, 100)
ax = plt.subplot(1,1,1)
plt.title('Laplace_distribution')
plotSub(x, Laplace_distribution(x), ax,label='u=0,b=1')
plotSub(x, Laplace_distribution(x,u=0,b=2), ax,label='u=0,b=2')
plotSub(x, Laplace_distribution(x,u=0,b=4), ax,label='u=0,b=4')
plotSub(x, Laplace_distribution(x,u=-5,b=4), ax,label='u=-5,b=4')
plt.savefig(os.path.join(imgSavePath, f'dsitribution{i}.png'))
plt.show()
def testLogistic_distribution(i):
x = np.linspace(-5.0, 20.0, 100)
ax = plt.subplot(1,1,1)
plt.title('Logistic_distribution')
plotSub(x, Logistic_distribution(x), ax,label='u=5,s=1')
plotSub(x, Logistic_distribution(x,u=5,s=2), ax,label='u=5,s=2')
plotSub(x, Logistic_distribution(x,u=9,s=3), ax,label='u=9,s=3')
plotSub(x, Logistic_distribution(x,u=9,s=4), ax,label='u=9,s=4')
plotSub(x, Logistic_distribution(x,u=6,s=2), ax,label='u=6,s=2')
plotSub(x, Logistic_distribution(x,u=2,s=1), ax,label='u=2,s=1')
plt.savefig(os.path.join(imgSavePath, f'dsitribution{i}.png'))
plt.show()
def testLog_normal_distribution(i):
x = np.linspace(0, 3.0, 100)
ax = plt.subplot(1,1,1)
plt.title('Log_normal_distribution')
plotSub(x, Log_normal_distribution(x), ax,label='delta=1.0')
plotSub(x, Log_normal_distribution(x,delta=0.25), ax,label='delta=0.25')
plotSub(x, Log_normal_distribution(x,delta=0.5), ax,label='delta=0.5')
#plotSub(x, Log_normal_distribution(x,delta=1.25), ax,label='delta=1.25')
plt.savefig(os.path.join(imgSavePath, f'dsitribution{i}.png'))
plt.show()
def testWeibull_distribution(i):
x = np.linspace(0, 2.5, 100)
ax = plt.subplot(1,1,1)
plt.title('Weibull_distribution')
plotSub(x, Weibull_distribution(x,lamda=1,k=0.5), ax,label='lamda=1,k=0.5')
plotSub(x, Weibull_distribution(x,lamda=1,k=1), ax,label='lamda=1,k=1')
plotSub(x, Weibull_distribution(x,lamda=1,k=1.5), ax,label='lamda=1,k=1.5')
plotSub(x, Weibull_distribution(x,lamda=1,k=5), ax,label='lamda=1,k=5')
plt.savefig(os.path.join(imgSavePath, f'dsitribution{i}.png'))
plt.show()
def testPareto_distribution(i):
x = np.linspace(0, 5, 100)
ax = plt.subplot(1,1,1)
plt.title('Pareto_distribution')
plotSub(x, Pareto_distribution(x,alpha=1), ax,label='alpha=1')
plotSub(x, Pareto_distribution(x,alpha=2), ax,label='alpha=2')
plotSub(x, Pareto_distribution(x,alpha=3), ax,label='alpha=3')
plotSub(x, Pareto_distribution(x,alpha=1,Xm=2), ax,label='alpha=1,Xm=2')
plt.savefig(os.path.join(imgSavePath, f'dsitribution{i}.png'))
plt.show()
def testRayleigh_distribution(i):
x = np.linspace(0, 12, 100)
ax = plt.subplot(1,1,1)
plt.title('Rayleigh_distribution')
plotSub(x, Rayleigh_distribution(x,delta=1), ax,label='delta=1')
plotSub(x, Rayleigh_distribution(x,delta=2), ax,label='delta=2')
plotSub(x, Rayleigh_distribution(x,delta=3), ax,label='delta=3')
plotSub(x, Rayleigh_distribution(x,delta=4), ax,label='delta=4')
plt.savefig(os.path.join(imgSavePath, f'dsitribution{i}.png'))
plt.show()
def testGamma_distribution(i):
x = np.linspace(0, 20, 100)
ax = plt.subplot(1,1,1)
plt.title('Gamma_distribution')
plotSub(x, Gamma_distribution(x,k=2.0,theta=2.0), ax,label='k=2.0,theta=2.0')
plotSub(x, Gamma_distribution(x,k=3.0,theta=2.0), ax,label='k=3.0,theta=2.0')
plotSub(x, Gamma_distribution(x,k=5.0,theta=1.0), ax,label='k=5.0,theta=1.0')
plotSub(x, Gamma_distribution(x,k=9.0,theta=0.5), ax,label='k=9.0,theta=0.5')
plotSub(x, Gamma_distribution(x,k=7.5,theta=1.0), ax,label='k=7.5,theta=1.0')
plotSub(x, Gamma_distribution(x,k=0.5,theta=1.0), ax,label='k=0.5,theta=1.0')
plt.savefig(os.path.join(imgSavePath, f'dsitribution{i}.png'))
plt.show()
def testStudentT_distribution(i): #float("inf") +00 , float("-inf") -00
x = np.linspace(-5, 5, 100)
ax = plt.subplot(1,1,1)
plt.title('StudentT_distribution')
plotSub(x, StudentT_distribution(x), ax,label='v=1')
plotSub(x, StudentT_distribution(x,v=2), ax,label='v=2')
plotSub(x, StudentT_distribution(x,v=5), ax,label='v=5')
plotSub(x, StudentT_distribution(x,v=float('inf')), ax,label='v=+00')
plt.savefig(os.path.join(imgSavePath, f'dsitribution{i}.png'))
plt.show()
def testBeta_distribution(i):
x = np.linspace(0, 1, 100)
ax = plt.subplot(1,1,1)
plt.title('Beta_distribution')
plotSub(x, Beta_distribution(x), ax,label='alpha=0.5,bta=0.5')
plotSub(x, Beta_distribution(x,alpha=5,bta=1), ax,label='alpha=5,bta=1')
plotSub(x, Beta_distribution(x,alpha=1,bta=3), ax,label='alpha=1,bta=3')
plotSub(x, Beta_distribution(x,alpha=2,bta=2), ax,label='alpha=2,bta=2')
plotSub(x, Beta_distribution(x,alpha=2,bta=5), ax,label='alpha=2,bta=5')
plt.savefig(os.path.join(imgSavePath, f'dsitribution{i}.png'))
plt.show()
def testGeneralized_logistic_distribution(i):
x = np.linspace(-25.0, 8.0, 200)
ax = plt.subplot(1,1,1)
plt.title('Generalized_logistic_distribution')
plotSub(x, Generalized_logistic_distribution(x), ax,label='alpha=1.0')
plotSub(x, Generalized_logistic_distribution(x,alpha=0.5), ax,label='alpha=0.5')
plotSub(x, Generalized_logistic_distribution(x,alpha=0.2), ax,label='alpha=0.2')
plt.savefig(os.path.join(imgSavePath, f'dsitribution{i}.png'))
plt.show()
def testGumbel_distribution(i):
x = np.linspace(-5.0, 20.0, 100)
ax = plt.subplot(1,1,1)
plt.title('Gumbel_distribution')
plotSub(x, Gumbel_distribution(x), ax,label='u=0.5,belta=2.0')
plotSub(x, Gumbel_distribution(x,u=1.0,belta=2.0), ax,label='u=1.0,belta=2.0')
plotSub(x, Gumbel_distribution(x,u=1.5,belta=3.0),ax,label='u=1.5,belta=3.0')
plotSub(x, Gumbel_distribution(x,u=3.0,belta=4.0),ax,label='u=3.0,belta=4.0')
plt.savefig(os.path.join(imgSavePath, f'dsitribution{i}.png'))
plt.show()
def testChi_distribution(i):
x = np.linspace(0, 4.0, 100)
ax = plt.subplot(1,1,1)
plt.title('Chi_distribution')
plotSub(x, Chi_distribution(x), ax,label='k=1')
plotSub(x, Chi_distribution(x,k=2), ax,label='k=2')
plotSub(x, Chi_distribution(x,k=3),ax,label='k=3')
plotSub(x, Chi_distribution(x,k=4),ax,label='k=4')
plt.savefig(os.path.join(imgSavePath, f'dsitribution{i}.png'))
plt.show()
def testErlang_distribution(i):
x = np.linspace(0, 20.0, 100)
ax = plt.subplot(1,1,1)
plt.title('Erlang_distribution')
plotSub(x, Erlang_distribution(x,k=1,u=2.0), ax,label='k=1,u=2.0')
plotSub(x, Erlang_distribution(x,k=2,u=2.0),ax,label='k=2,u=2.0')
plotSub(x, Erlang_distribution(x,k=3,u=2.0),ax,label='k=3,u=2.0')
plotSub(x, Erlang_distribution(x,k=5,u=1.0), ax,label='k=5,u=1.0')
plotSub(x, Erlang_distribution(x,k=7,u=0.5),ax,label='k=7,u=0.5')
plotSub(x, Erlang_distribution(x,k=9,u=1.0),ax,label='k=9,u=1.0')
plotSub(x, Erlang_distribution(x,k=1,u=1.0),ax,label='k=1,u=1.0')
plt.savefig(os.path.join(imgSavePath, f'dsitribution{i}.png'))
plt.show()
def testExponential_distribution(i):
x = np.linspace(0, 4.0, 100)
ax = plt.subplot(1,1,1)
plt.title('Exponential_distribution')
plotSub(x, Exponential_distribution(x), ax,label='lam=1')
plotSub(x, Exponential_distribution(x,lam=0.5), ax,label='lam=0.5')
plotSub(x, Exponential_distribution(x,lam=1.5), ax,label='lam=1.5')
plt.savefig(os.path.join(imgSavePath, f'dsitribution{i}.png'))
plt.show()
def testBoltzmann_distribution(i):
x = np.linspace(0, 20.0, 100)
ax = plt.subplot(1,1,1)
plt.title('Boltzmann_distribution')
plotSub(x, Boltzmann_distribution(x), ax,label='a=1')
plotSub(x, Boltzmann_distribution(x,a=2), ax,label='a=2')
plotSub(x, Boltzmann_distribution(x,a=5), ax,label='a=5')
plt.savefig(os.path.join(imgSavePath, f'dsitribution{i}.png'))
plt.show()
def testVon_Mises_distribution(i):
x = np.linspace(-np.pi, np.pi, 100)
ax = plt.subplot(1,1,1)
plt.title('Von_Mises_distribution')
plotSub(x, Von_Mises_distribution(x), ax,label='u=0,k=0')
plotSub(x, Von_Mises_distribution(x,u=0,k=0.5), ax,label='u=0,k=0.5')
plotSub(x, Von_Mises_distribution(x,u=0,k=1), ax,label='u=0,k=1')
plotSub(x, Von_Mises_distribution(x,u=0,k=2), ax,label='u=0,k=2')
plotSub(x, Von_Mises_distribution(x,u=0,k=4), ax,label='u=0,k=4')
#plotSub(x, Von_Mises_distribution(x,u=1,k=8), ax,label='u=1,k=8')
ax.xaxis.set_major_locator(plt.MultipleLocator(np.pi))
ax.xaxis.set_minor_locator(plt.MultipleLocator(np.pi / 2))
# label the major ticks as integer multiples of pi before saving the figure
ax.xaxis.set_major_formatter(plt.FuncFormatter(lambda val, pos: r'%d $\pi$' % int(round(val / np.pi))))
plt.savefig(os.path.join(imgSavePath, f'dsitribution{i}.png'))
plt.show()
def testLogit_normal_distribution(i):
x = np.linspace(0, 1.0, 100)
ax = plt.subplot(1,1,1)
plt.title('Logit_normal_distribution')
plotSub(x, Logit_normal_distribution(x), ax,label='deta=0.2,u=0')
plotSub(x, Logit_normal_distribution(x,deta=0.3,u=0), ax,label='deta=0.3,u=0')
plotSub(x, Logit_normal_distribution(x,deta=0.5,u=0), ax,label='deta=0.5,u=0')
plotSub(x, Logit_normal_distribution(x,deta=1.0,u=0), ax,label='deta=1.0,u=0')
plotSub(x, Logit_normal_distribution(x,deta=1.5,u=0), ax,label='deta=1.5,u=0')
plotSub(x, Logit_normal_distribution(x,deta=3.0,u=0), ax,label='deta=3.0,u=0')
plt.savefig(os.path.join(imgSavePath, f'dsitribution{i}.png'))
plt.show()
def testIrwin_Hall_distribution(i):
total=100
ax = plt.subplot(1,1,1)
plt.title('Irwin_Hall_distribution')
n=1
x = np.linspace(0, n, total)
plotSub(x, Irwin_Hall_distribution(x), ax,label='n=1')
n=2
x = np.linspace(0, n, total)
#print('x=',x)
#print('res=',Irwin_Hall_distribution(x,n=2))
plotSub(x, Irwin_Hall_distribution(x,n=2), ax,label='n=2')
n=4
x = np.linspace(0, n, total)
plotSub(x, Irwin_Hall_distribution(x,n=4), ax,label='n=4')
n=8
x = np.linspace(0, n, total)
plotSub(x, Irwin_Hall_distribution(x,n=8), ax,label='n=8')
n=16
x = np.linspace(0, n, total)
plotSub(x, Irwin_Hall_distribution(x,n=16), ax,label='n=16')
plt.savefig(os.path.join(imgSavePath, f'dsitribution{i}.png'))
plt.show()
def testBates_distribution(i):
ax = plt.subplot(1,1,1)
plt.title('Bates_distribution')
x = np.linspace(0, 1, 100)
plotSub(x, Bates_distribution(x), ax,label='n=1')
plotSub(x, Bates_distribution(x,n=2), ax,label='n=2')
plotSub(x, Bates_distribution(x,n=3), ax,label='n=3')
plotSub(x, Bates_distribution(x,n=10), ax,label='n=10')
plotSub(x, Bates_distribution(x,n=20), ax,label='n=20')
plt.savefig(os.path.join(imgSavePath, f'dsitribution{i}.png'))
plt.show()
def testKumaraswamy_distribution(i):
ax = plt.subplot(1,1,1)
plt.title('Kumaraswamy_distribution')
x = np.linspace(0, 1, 100)
plotSub(x, Kumaraswamy_distribution(x), ax,label='a=0.5,b=0.5')
plotSub(x, Kumaraswamy_distribution(x,a=5.0,b=1.0), ax,label='a=5.0,b=1.0')
plotSub(x, Kumaraswamy_distribution(x,a=1.0,b=3.0), ax,label='a=1.0,b=3.0')
plotSub(x, Kumaraswamy_distribution(x,a=2.0,b=2.0), ax,label='a=2.0,b=2.0')
plotSub(x, Kumaraswamy_distribution(x,a=2.0,b=5.0), ax,label='a=2.0,b=5.0')
plt.savefig(os.path.join(imgSavePath, f'dsitribution{i}.png'))
plt.show()
def testPERT_distribution(i):
ax = plt.subplot(1,1,1)
plt.title('PERT_distribution')
a=0
b=10
c=100
x = np.linspace(a, c, 100)
plotSub(x, PERT_distribution(x), ax,label='a=0,b=10,c=100')
plotSub(x, PERT_distribution(x,a=0,b=50,c=100), ax,label='a=0,b=50,c=100')
plotSub(x, PERT_distribution(x,a=0,b=70,c=100), ax,label='a=0,b=70,c=100')
plotSub(x, PERT_distribution(x,a=0,b=90,c=100), ax,label='a=0,b=90,c=100')
plt.savefig(os.path.join(imgSavePath, f'dsitribution{i}.png'))
plt.show()
def testReciprocal_distribution(i):
ax = plt.subplot(1,1,1)
plt.title('Reciprocal_distribution')
a=1
b=4
x = np.linspace(a, b, 100)
plotSub(x, Reciprocal_distribution(x,a=a,b=b), ax,label='a=1,b=4')
a=2
b=6
x = np.linspace(a, b, 100)
plotSub(x, Reciprocal_distribution(x,a=a,b=b), ax,label='a=2,b=6')
a=1
b=5
x = np.linspace(a, b, 100)
plotSub(x, Reciprocal_distribution(x,a=a,b=b), ax,label='a=1,b=5')
plt.savefig(os.path.join(imgSavePath, f'dsitribution{i}.png'))
plt.show()
def testTriangular_distribution(i):
ax = plt.subplot(1,1,1)
plt.title('Triangular_distribution')
x = np.linspace(0, 7, 200)
plotSub(x, Triangular_distribution(x), ax,label='a=1,c=3,b=4')
plotSub(x, Triangular_distribution(x,a=1,c=3,b=6), ax,label='a=1,c=3,b=6')
plt.savefig(os.path.join(imgSavePath, f'dsitribution{i}.png'))
plt.show()
def testTrapezoidal_distribution(i):
ax = plt.subplot(1,1,1)
plt.title('Trapezoidal_distribution')
a=-3
d=0
x = np.linspace(a, d, 100)
plotSub(x, Trapezoidal_distribution(x,a=a,b=-2,c=-1,d=d), ax,label='a=-3,b=-2,c=-1,d=0')
a=1
d=5
x = np.linspace(a, d, 100)
plotSub(x, Trapezoidal_distribution(x,a=a,b=2,c=4.5,d=d), ax,label='a=1,b=2,c=4.5,d=5')
a=-2
d=2
x = np.linspace(a, d, 100)
plotSub(x, Trapezoidal_distribution(x,a=-2,b=0,c=1,d=2), ax,label='a=-2,b=0,c=1,d=2')
plt.savefig(os.path.join(imgSavePath, f'dsitribution{i}.png'))
plt.show()
def testWigner_semicircle_distribution(i):
ax = plt.subplot(1,1,1)
plt.title('Wigner_semicircle_distribution')
r = 0.25
x = np.linspace(-r, r, 100)
plotSub(x, Wigner_semicircle_distribution(x,r=r), ax,label='r=0.25')
r = 0.5
x = np.linspace(-r, r, 100)
plotSub(x, Wigner_semicircle_distribution(x,r=r), ax,label='r=0.5')
r = 1.0
x = np.linspace(-r, r, 100)
plotSub(x, Wigner_semicircle_distribution(x,r=r), ax,label='r=1.0')
r = 2.0
x = np.linspace(-r, r, 100)
plotSub(x, Wigner_semicircle_distribution(x,r=r), ax,label='r=2.0')
r = 3.0
x = np.linspace(-r, r, 100)
plotSub(x, Wigner_semicircle_distribution(x,r=r), ax,label='r=3.0')
plt.savefig(os.path.join(imgSavePath, f'dsitribution{i}.png'))
plt.show()
def testF_distribution(i):
ax = plt.subplot(1,1,1)
plt.title('F_distribution')
x = np.linspace(0, 5, 100)
plotSub(x, F_distribution(x), ax,label='d1=1,d2=1')
plotSub(x, F_distribution(x,d1=2,d2=1), ax,label='d1=2,d2=1')
plotSub(x, F_distribution(x,d1=5,d2=2), ax,label='d1=5,d2=2')
plotSub(x, F_distribution(x,d1=10,d2=1), ax,label='d1=10,d2=1')
plotSub(x, F_distribution(x,d1=20,d2=20), ax,label='d1=20,d2=20')
plt.savefig(os.path.join(imgSavePath, f'dsitribution{i}.png'))
plt.show()
def testLandau_distribution(i):
ax = plt.subplot(1,1,1)
plt.title('Landau_distribution')
x = np.linspace(-5, 15, 200)
y = [ Landau_distribution(_, u=0, c=1) for _ in x ]
plotSub(x, y, ax,label='u=0,c=1')
y = | |
title, year, imdb, tvdb, season,
episode, tvshowtitle, localtvshowtitle, aliases, premiered, i[0], i[1]))
s = [i[0] + (i[1],) for i in zip(sourceDict, threads)]
s = [(i[3].getName(), i[0], i[2]) for i in s]
mainsourceDict = [i[0] for i in s if i[2] == 0]
sourcelabelDict = dict([(i[0], i[1].upper()) for i in s])
[i.start() for i in threads]
string1 = control.lang(32404).encode('utf-8')
string2 = control.lang(32405).encode('utf-8')
string3 = control.lang(32406).encode('utf-8')
string4 = control.lang(32601).encode('utf-8')
string5 = control.lang(32602).encode('utf-8')
string6 = control.lang(32606).encode('utf-8')
string7 = control.lang(32607).encode('utf-8')
try:
timeout = int(control.setting('scrapers.timeout.1'))
except:
timeout = 30  # assumed fallback so `timeout` is always defined below
quality = control.setting('hosts.quality')
if quality == '':
quality = '0'
line1 = line2 = line3 = ""
pre_emp = control.setting('preemptive.termination')
pre_emp_limit = control.setting('preemptive.limit')
source_4k = d_source_4k = 0
source_1080 = d_source_1080 = 0
source_720 = d_source_720 = 0
source_sd = d_source_sd = 0
total = d_total = 0
debrid_list = debrid.debrid_resolvers
debrid_status = debrid.status()
debrid_only = control.setting('debrid.only')
total_format = '[COLOR %s][B]%s[/B][/COLOR]'
pdiag_format = ' 4K: %s | 1080p: %s | 720p: %s | SD: %s | %s: %s'.split('|')
pdiag_bg_format = '4K:%s(%s)|1080p:%s(%s)|720p:%s(%s)|SD:%s(%s)|T:%s(%s)'.split('|')
for i in range(0, 4 * timeout):
if str(pre_emp) == 'true':
if quality in ['0']:
if (source_4k + d_source_4k) >= int(pre_emp_limit): break
elif quality in ['1']:
if (source_1080 + d_source_1080) >= int(pre_emp_limit): break
elif quality in ['2']:
if (source_720 + d_source_720) >= int(pre_emp_limit): break
elif quality in ['3']:
if (source_sd + d_source_sd) >= int(pre_emp_limit): break
else:
if (source_sd + d_source_sd) >= int(pre_emp_limit): break
try:
if xbmc.abortRequested is True:
return sys.exit()
try:
if progressDialog.iscanceled():
break
except:
pass
if len(self.sources) > 0:
if quality in ['0']:
source_4k = len([e for e in self.sources if e['quality'] == '4K' and e['debridonly'] is False])
source_1080 = len([e for e in self.sources if e['quality'] in ['1440p','1080p'] and e['debridonly'] is False])
source_720 = len([e for e in self.sources if e['quality'] in ['720p','HD'] and e['debridonly'] is False])
source_sd = len([e for e in self.sources if e['quality'] == 'SD' and e['debridonly'] is False])
elif quality in ['1']:
source_1080 = len([e for e in self.sources if e['quality'] in ['1440p','1080p'] and e['debridonly'] is False])
source_720 = len([e for e in self.sources if e['quality'] in ['720p','HD'] and e['debridonly'] is False])
source_sd = len([e for e in self.sources if e['quality'] == 'SD' and e['debridonly'] is False])
elif quality in ['2']:
source_1080 = len([e for e in self.sources if e['quality'] in ['1080p'] and e['debridonly'] is False])
source_720 = len([e for e in self.sources if e['quality'] in ['720p','HD'] and e['debridonly'] is False])
source_sd = len([e for e in self.sources if e['quality'] == 'SD' and e['debridonly'] is False])
elif quality in ['3']:
source_720 = len([e for e in self.sources if e['quality'] in ['720p','HD'] and e['debridonly'] is False])
source_sd = len([e for e in self.sources if e['quality'] == 'SD' and e['debridonly'] is False])
else:
source_sd = len([e for e in self.sources if e['quality'] == 'SD' and e['debridonly'] is False])
total = source_4k + source_1080 + source_720 + source_sd
if debrid_status:
if quality in ['0']:
for d in debrid_list:
d_source_4k = len([e for e in self.sources if e['quality'] == '4K' and d.valid_url(e['url'], e['source'])])
d_source_1080 = len([e for e in self.sources if e['quality'] in ['1440p','1080p'] and d.valid_url(e['url'], e['source'])])
d_source_720 = len([e for e in self.sources if e['quality'] in ['720p','HD'] and d.valid_url(e['url'], e['source'])])
d_source_sd = len([e for e in self.sources if e['quality'] == 'SD' and d.valid_url(e['url'], e['source'])])
elif quality in ['1']:
for d in debrid_list:
d_source_1080 = len([e for e in self.sources if e['quality'] in ['1440p','1080p'] and d.valid_url(e['url'], e['source'])])
d_source_720 = len([e for e in self.sources if e['quality'] in ['720p','HD'] and d.valid_url(e['url'], e['source'])])
d_source_sd = len([e for e in self.sources if e['quality'] == 'SD' and d.valid_url(e['url'], e['source'])])
elif quality in ['2']:
for d in debrid_list:
d_source_1080 = len([e for e in self.sources if e['quality'] in ['1080p'] and d.valid_url(e['url'], e['source'])])
d_source_720 = len([e for e in self.sources if e['quality'] in ['720p','HD'] and d.valid_url(e['url'], e['source'])])
d_source_sd = len([e for e in self.sources if e['quality'] == 'SD' and d.valid_url(e['url'], e['source'])])
elif quality in ['3']:
for d in debrid_list:
d_source_720 = len([e for e in self.sources if e['quality'] in ['720p','HD'] and d.valid_url(e['url'], e['source'])])
d_source_sd = len([e for e in self.sources if e['quality'] == 'SD' and d.valid_url(e['url'], e['source'])])
else:
for d in debrid_list:
d_source_sd = len([e for e in self.sources if e['quality'] == 'SD' and d.valid_url(e['url'], e['source'])])
d_total = d_source_4k + d_source_1080 + d_source_720 + d_source_sd
if debrid_status:
d_4k_label = total_format % ('red', d_source_4k) if d_source_4k == 0 else total_format % ('lime', d_source_4k)
d_1080_label = total_format % ('red', d_source_1080) if d_source_1080 == 0 else total_format % ('lime', d_source_1080)
d_720_label = total_format % ('red', d_source_720) if d_source_720 == 0 else total_format % ('lime', d_source_720)
d_sd_label = total_format % ('red', d_source_sd) if d_source_sd == 0 else total_format % ('lime', d_source_sd)
d_total_label = total_format % ('red', d_total) if d_total == 0 else total_format % ('lime', d_total)
source_4k_label = total_format % ('red', source_4k) if source_4k == 0 else total_format % ('lime', source_4k)
source_1080_label = total_format % ('red', source_1080) if source_1080 == 0 else total_format % ('lime', source_1080)
source_720_label = total_format % ('red', source_720) if source_720 == 0 else total_format % ('lime', source_720)
source_sd_label = total_format % ('red', source_sd) if source_sd == 0 else total_format % ('lime', source_sd)
source_total_label = total_format % ('red', total) if total == 0 else total_format % ('lime', total)
if (i / 2) < timeout:
try:
mainleft = [sourcelabelDict[x.getName()] for x in threads if x.is_alive() is True and x.getName() in mainsourceDict]
info = [sourcelabelDict[x.getName()] for x in threads if x.is_alive() is True]
if i >= timeout and len(mainleft) == 0 and len(self.sources) >= 100 * len(info):
break
if debrid_status:
if quality in ['0']:
if not progressDialog == control.progressDialogBG:
line1 = ('%s:' + '|'.join(pdiag_format)) % (string6, d_4k_label, d_1080_label, d_720_label, d_sd_label, str(string4), d_total_label)
line2 = ('%s:' + '|'.join(pdiag_format)) % (string7, source_4k_label, source_1080_label, source_720_label, source_sd_label, str(string4), source_total_label)
print line1, line2
else:
control.idle()
line1 = '|'.join(pdiag_bg_format[:-1]) % (source_4k_label, d_4k_label, source_1080_label, d_1080_label, source_720_label, d_720_label, source_sd_label, d_sd_label)
elif quality in ['1']:
if not progressDialog == control.progressDialogBG:
line1 = ('%s:' + '|'.join(pdiag_format[1:])) % (string6, d_1080_label, d_720_label, d_sd_label, str(string4), d_total_label)
line2 = ('%s:' + '|'.join(pdiag_format[1:])) % (string7, source_1080_label, source_720_label, source_sd_label, str(string4), source_total_label)
else:
control.idle()
line1 = '|'.join(pdiag_bg_format[1:]) % (source_1080_label, d_1080_label, source_720_label, d_720_label, source_sd_label, d_sd_label, source_total_label, d_total_label)
elif quality in ['2']:
if not progressDialog == control.progressDialogBG:
line1 = ('%s:' + '|'.join(pdiag_format[1:])) % (string6, d_1080_label, d_720_label, d_sd_label, str(string4), d_total_label)
line2 = ('%s:' + '|'.join(pdiag_format[1:])) % (string7, source_1080_label, source_720_label, source_sd_label, str(string4), source_total_label)
else:
control.idle()
line1 = '|'.join(pdiag_bg_format[1:]) % (source_1080_label, d_1080_label, source_720_label, d_720_label, source_sd_label, d_sd_label, source_total_label, d_total_label)
elif quality in ['3']:
if not progressDialog == control.progressDialogBG:
line1 = ('%s:' + '|'.join(pdiag_format[2:])) % (string6, d_720_label, d_sd_label, str(string4), d_total_label)
line2 = ('%s:' + '|'.join(pdiag_format[2:])) % (string7, source_720_label, source_sd_label, str(string4), source_total_label)
else:
control.idle()
line1 = '|'.join(pdiag_bg_format[2:]) % (source_720_label, d_720_label, source_sd_label, d_sd_label, source_total_label, d_total_label)
else:
if not progressDialog == control.progressDialogBG:
line1 = ('%s:' + '|'.join(pdiag_format[3:])) % (string6, d_sd_label, str(string4), d_total_label)
line2 = ('%s:' + '|'.join(pdiag_format[3:])) % (string7, source_sd_label, str(string4), source_total_label)
else:
control.idle()
line1 = '|'.join(pdiag_bg_format[3:]) % (source_sd_label, d_sd_label, source_total_label, d_total_label)
else:
if quality in ['0']:
line1 = '|'.join(pdiag_format) % (source_4k_label, source_1080_label, source_720_label, source_sd_label, str(string4), source_total_label)
elif quality in ['1']:
line1 = '|'.join(pdiag_format[1:]) % (source_1080_label, source_720_label, source_sd_label, str(string4), source_total_label)
elif quality in ['2']:
line1 = '|'.join(pdiag_format[1:]) % (source_1080_label, source_720_label, source_sd_label, str(string4), source_total_label)
elif quality in ['3']:
line1 = '|'.join(pdiag_format[2:]) % (source_720_label, source_sd_label, str(string4), source_total_label)
else:
line1 = '|'.join(pdiag_format[3:]) % (source_sd_label, str(string4), source_total_label)
if debrid_status:
if len(info) > 6:
line3 = string3 % (str(len(info)))
elif len(info) > 0:
line3 = string3 % (', '.join(info))
else:
break
percent = int(100 * float(i) / (2 * timeout) + 0.5)
if not progressDialog == control.progressDialogBG:
progressDialog.update(max(1, percent), line1, line2, line3)
else:
progressDialog.update(max(1, percent), line1, line3)
else:
if len(info) > 6:
line2 = string3 % (str(len(info)))
elif len(info) > 0:
line2 = string3 % (', '.join(info))
else:
break
percent = int(100 * float(i) / (2 * timeout) + 0.5)
progressDialog.update(max(1, percent), line1, line2)
except Exception as e:
log_utils.log('Exception Raised: %s' % str(e), log_utils.LOGERROR)
else:
try:
| |
of times that this couple occurs, and the state of the
couple.
Raises
------
TypeError
If any of the required arguments is missing
"""
cursor = self.db.cursor()
if state is not None:
cursor.execute(state_query, (repo_url, state))
else:
cursor.execute(query, (repo_url,))
return cursor.fetchall()
def update_repo(self, query, url, last_scan):
""" Update the last scan timestamp of a repo.
After a scan, record the timestamp of the last scan, such that
another (future) scan will not process the same commits twice.
Parameters
----------
query: str
The query to be run, with placeholders in place of parameters
url: str
The url of the repository scanned
last_scan: int
The timestamp of the last scan
Returns
-------
bool
`True` if the update is successful, `False` otherwise
"""
return self.query_check(query, last_scan, url)
def update_discovery(self, query, discovery_id, new_state):
""" Change the state of a discovery.
Parameters
----------
query: str
The query to be run, with placeholders in place of parameters
discovery_id: int
The id of the discovery to be updated
new_state: str
The new state of this discovery
Returns
-------
bool
`True` if the update is successful, `False` otherwise
"""
if new_state not in ('new', 'false_positive', 'addressing',
'not_relevant', 'fixed'):
return False
return self.query_check(query, new_state, discovery_id)
def update_discoveries(self, query, discoveries_ids, new_state):
""" Change the state of multiple discoveries.
Parameters
----------
query: str
The query to be run, with placeholders in place of parameters
discoveries_ids: list
The ids of the discoveries to be updated
new_state: str
The new state of these discoveries
Returns
-------
bool
`True` if the update is successful, `False` otherwise
"""
if new_state not in ('new', 'false_positive', 'addressing',
'not_relevant', 'fixed'):
return False
return self.query_check(query, new_state, tuple(discoveries_ids))
def update_discovery_group(self, query, new_state, repo_url, file_name=None,
snippet=None):
""" Change the state of a group of discoveries.
A group of discoveries is identified by the url of their repository,
their filename, and their snippet.
Parameters
----------
query: str
The query to be run, with placeholders in place of parameters
new_state: str
The new state of these discoveries
repo_url: str
The url of the repository
file_name: str, optional
The name of the file
snippet: str, optional
The snippet
Returns
-------
bool
`True` if the update is successful, `False` otherwise
"""
if new_state not in ('new', 'false_positive', 'addressing',
'not_relevant', 'fixed'):
return False
if snippet is None:
return self.query_check(query, new_state, repo_url, file_name)
elif file_name is None:
return self.query_check(query, new_state, repo_url, snippet)
else:
return self.query_check(
query, new_state, repo_url, file_name, snippet)
def scan(self, repo_url, category=None, models=None, exclude=None,
force=False, debug=False, generate_snippet_extractor=False,
local_repo=False, git_token=None):
""" Launch the scan of a git repository.
Parameters
----------
repo_url: str
The url of the repo to scan
category: str, optional
If specified, scan the repo using all the rules of this category,
otherwise use all the rules in the db
models: list, optional
A list of models for the ML false positives detection
exclude: list, optional
A list of rules to exclude
force: bool, default `False`
Force a complete re-scan of the repository, in case it has already
been scanned previously
debug: bool, default `False`
Flag used to decide whether to visualize the progressbars during
the scan (e.g., during the insertion of the detections in the db)
generate_snippet_extractor: bool, default `False`
Generate the extractor model to be used in the SnippetModel. The
extractor is generated using the ExtractorGenerator. If `False`,
use the pre-trained extractor model
local_repo: bool, optional
If True, get the repository from a local directory instead of the
web
git_token: str, optional
Git personal access token to authenticate to the git server
Returns
-------
list
The id of the discoveries detected by the scanner (excluded the
ones classified as false positives).
"""
if local_repo:
repo_url = os.path.abspath(repo_url)
rules = self._get_scan_rules(category, exclude)
scanner = GitScanner(rules)
return self._scan(
repo_url=repo_url, scanner=scanner, models=models, force=force,
debug=debug, generate_snippet_extractor=generate_snippet_extractor,
local_repo=local_repo, git_token=git_token)
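# Hedged usage sketch (the client construction, category and model names below
# are assumptions, not taken from this file):
#     c = SqliteClient(path='/tmp/data.db')
#     new_ids = c.scan('https://github.com/user/repo',
#                      category='password_keyword',
#                      models=['PathModel', 'SnippetModel'],
#                      debug=True)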
def scan_path(self, scan_path, category=None, models=None, exclude=None,
force=False, debug=False, generate_snippet_extractor=False,
max_depth=-1, ignore_list=None):
""" Launch the scan of a local directory or file.
Parameters
----------
scan_path: str
The path of the directory or file to scan
category: str, optional
If specified, scan the repo using all the rules of this category,
otherwise use all the rules in the db
models: list, optional
A list of models for the ML false positives detection
exclude: list, optional
A list of rules to exclude
force: bool, default `False`
Force a complete re-scan of the repository, in case it has already
been scanned previously
debug: bool, default `False`
Flag used to decide whether to visualize the progressbars during
the scan (e.g., during the insertion of the detections in the db)
generate_snippet_extractor: bool, default `False`
Generate the extractor model to be used in the SnippetModel. The
extractor is generated using the ExtractorGenerator. If `False`,
use the pre-trained extractor model
max_depth: int, optional
The maximum depth to which traverse the subdirectories tree.
A negative value will not affect the scan.
ignore_list: list, optional
A list of paths to ignore during the scan. This can include file
names, directory names, or whole paths. Wildcards are supported as
per the fnmatch package.
Returns
-------
list
The id of the discoveries detected by the scanner (excluded the
ones classified as false positives).
"""
ignore_list = ignore_list or []  # avoid a shared mutable default argument
scan_path = os.path.abspath(scan_path)
if self.get_repo(scan_path) != {} and force is False:
raise ValueError(f"The directory \"{scan_path}\" has already been "
"scanned. Please use \"force\" to rescan it.")
rules = self._get_scan_rules(category, exclude)
scanner = FileScanner(rules)
return self._scan(
repo_url=scan_path, scanner=scanner, models=models, force=force,
debug=debug, generate_snippet_extractor=generate_snippet_extractor,
max_depth=max_depth, ignore_list=ignore_list)
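# Hedged usage sketch (paths and patterns are illustrative): skip dependency
# folders and minified files while scanning a local checkout
#     c.scan_path('/home/user/project', max_depth=3,
#                 ignore_list=['node_modules', '*.min.js', '*/build/*'])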
def scan_user(self, username, category=None, models=None, exclude=None,
debug=False, generate_snippet_extractor=False, forks=False,
git_token=None, api_endpoint='https://api.github.com'):
""" Scan all the repositories of a user.
Find all the repositories of a user, and scan
them. Please note that git limits the list of repositories to a maximum
of 100 (due to pagination).
Parameters
----------
username: str
The username as on github.com
category: str, optional
If specified, scan the repo using all the rules of this category,
otherwise use all the rules in the db
models: list, optional
A list of models for the ML false positives detection
exclude: list, optional
A list of rules to exclude
debug: bool, default `False`
Flag used to decide whether to visualize the progressbars during
the scan (e.g., during the insertion of the detections in the db)
generate_snippet_extractor: bool, default `False`
Generate the extractor model to be used in the SnippetModel. The
extractor is generated using the ExtractorGenerator. If `False`,
use the pre-trained extractor model
forks: bool, default `False`
Scan also repositories forked by this user
git_token: str, optional
Git personal access token to authenticate to the git server
api_endpoint: str, default `https://api.github.com`
API endpoint of the git server (default is github.com)
Returns
-------
dict
The id of the discoveries detected by the scanner (excluded the
ones classified as false positives), grouped by repository.
"""
logger.debug(f'Use API endpoint {api_endpoint}')
rules = self._get_scan_rules(category, exclude)
scanner = GitScanner(rules)
g = Github(base_url=api_endpoint,
login_or_token=git_token,
verify=False)
missing_ids = {}
for repo in g.get_user(username).get_repos():
if not forks and repo.fork:
# Ignore this repo since it is a fork
logger.info(f'Ignore {repo} (it is a fork)')
continue
# Get repo clone url without .git at the end
repo_url = repo.clone_url[:-4]
logger.info(f'Scanning {repo.url}')
missing_ids[repo_url] = self._scan(repo_url, scanner,
models=models,
debug=debug,
git_token=git_token)
return missing_ids
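# Hedged usage sketch (the Enterprise endpoint below is an assumption):
#     results = c.scan_user('octocat', forks=False, git_token='<token>',
#                           api_endpoint='https://github.example.com/api/v3')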
def scan_wiki(self, repo_url, category=None, models=None, exclude=None,
debug=False, git_token=None):
""" Scan the wiki of a repository.
This method simply generates the url of a wiki from the url of its repo,
and uses the same `scan` method that we use for repositories.
Parameters
----------
repo_url: str
The url of the repository
category: str, optional
If specified, scan the repo using all the rules of this category,
otherwise use all the rules in the db
models: list, optional
A list of models for the ML false positives detection
exclude: list, optional
A list of rules to exclude
debug: bool, default `False`
Flag used to decide whether to visualize the progressbars during
the scan (e.g., during the insertion of the detections in the db)
git_token: str, optional
| |
from __future__ import unicode_literals
import uritemplate
import wac
from balanced import exc, config, utils
registry = wac.ResourceRegistry(route_prefix='/')
class JSONSchemaCollection(wac.ResourceCollection):
@property
def href(self):
return self.uri
class ObjectifyMixin(wac._ObjectifyMixin):
def _objectify(self, resource_cls, **fields):
# setting values locally, not from server
if 'links' not in fields:
for key, value in fields.iteritems():
setattr(self, key, value)
else:
self._construct_from_response(**fields)
def _construct_from_response(self, **payload):
payload = self._hydrate(payload)
meta = payload.pop('meta', None)
if isinstance(self, wac.Page):
for key, value in meta.iteritems():
setattr(self, key, value)
# the remaining keys here are just hypermedia resources
for _type, resources in payload.iteritems():
# Singular resources are represented as JSON objects. However,
# they are still wrapped inside an array:
cls = Resource.registry[_type]
for resource_body in resources:
# if we couldn't determine the type of this object we use a
# generic resource object, target that instead.
if isinstance(self, (cls, Resource)):
# we are loading onto our self, self is the target
target = self
else:
target = cls()
for key, value in resource_body.iteritems():
if key in ('links',):
continue
setattr(target, key, value)
# if loading into a collection
if target != self:
# ensure that we have a collection to hold this item
if not hasattr(self, _type):
setattr(self, _type, [])
getattr(self, _type).append(target)
@classmethod
def _hydrate(cls, payload):
"""
Construct links for objects
"""
links = payload.pop('links', {})
for key, uri in links.iteritems():
variables = uritemplate.variables(uri)
# marketplaces.card_holds
collection, resource_type = key.split('.')
item_attribute = item_property = resource_type
# if parsed from uri then retrieve. e.g. customer.id
for item in payload[collection]:
# find type, fallback to Resource if we can't determine the
# type e.g. marketplace.owner_customer
collection_type = Resource.registry.get(resource_type,
Resource)
def extract_variables_from_item(item, variables):
for v in variables:
_, item_attribute = v.split('.')
# HACK: https://github.com/PoundPay/balanced/issues/184
if item_attribute == 'self':
item_attribute = 'id'
item_value = item['links'].get(
item_attribute, item.get(item_attribute)
)
if item_value:
yield v, item_value
item_variables = dict(
extract_variables_from_item(item, variables))
# expand variables if we have them, else this is a link like
# /debits
if item_variables:
parsed_link = uritemplate.expand(uri, item_variables)
else:
parsed_link = uri
# check if this is a collection or a singular item
if any(
parsed_link.endswith(value)
for value in item_variables.itervalues()
):
# singular
if not item_property.endswith('_href'):
item_property += '_href'
lazy_href = parsed_link
else:
# collection
lazy_href = JSONSchemaCollection(
collection_type, parsed_link)
item.setdefault(item_property, lazy_href)
return payload
class JSONSchemaPage(wac.Page, ObjectifyMixin):
@property
def items(self):
try:
try:
return getattr(self, self.resource_cls.type)
except AttributeError:
# horrid hack because event callbacks are misnamed.
return self.event_callbacks
except AttributeError:
# Notice:
# there is no resources key in the response from server
# if the list is empty, so when we try to get something like
# `debits`, an AttributeError will be raised. Not sure is this
# behavior a bug of server, but anyway, this is just a workaround here
# for solving the problem. The issue was posted here
# https://github.com/balanced/balanced-python/issues/93
return []
class JSONSchemaResource(wac.Resource, ObjectifyMixin):
collection_cls = JSONSchemaCollection
page_cls = JSONSchemaPage
def save(self):
cls = type(self)
attrs = self.__dict__.copy()
href = attrs.pop('href', None)
if not href:
if not cls.uri_gen or not cls.uri_gen.root_uri:
raise TypeError(
'Unable to create {0} resources directly'.format(
cls.__name__
)
)
href = cls.uri_gen.root_uri
method = cls.client.put if 'id' in attrs else cls.client.post
attrs = dict(
(k, v.href if isinstance(v, Resource) else v)
for k, v in attrs.iteritems()
if not isinstance(v, (cls.collection_cls))
)
resp = method(href, data=attrs)
instance = self.__class__(**resp.data)
self.__dict__.clear()
self.__dict__.update(instance.__dict__)
return self
def delete(self):
self.client.delete(self.href)
def __dir__(self):
return self.__dict__.keys()
def __getattr__(self, item):
if isinstance(item, basestring):
suffix = '_href'
if suffix not in item:
href = getattr(self, item + suffix, None)
if href:
setattr(self, item, Resource.get(href))
return getattr(self, item)
raise AttributeError(
"'{0}' has no attribute '{1}'".format(
self.__class__.__name__, item
)
)
class Resource(JSONSchemaResource):
client = config.client
registry = registry
uri_gen = wac.URIGen('/resources', '{resource}')
def unstore(self):
return self.delete()
@classmethod
def fetch(cls, href):
return cls.get(href)
class Marketplace(Resource):
"""
A Marketplace represents your central broker for all operations on the
Balanced API.
A Marketplace has a single `owner_customer` which represents your person or
business.
All Resources apart from APIKeys are associated with a Marketplace.
A Marketplace has an escrow account which receives all funds from Debits
that are not associated with Orders. The sum of the escrow (`in_escrow`) is
(Debits - Refunds + Reversals - Credits).
"""
type = 'marketplaces'
uri_gen = wac.URIGen('/marketplaces', '{marketplace}')
@utils.classproperty
def mine(cls):
"""
Returns an instance representing the marketplace associated with the
current API key used for this request.
"""
return cls.query.one()
my_marketplace = mine
class APIKey(Resource):
"""
Your APIKey is used to authenticate when performing operations on the
Balanced API. You must create an APIKey before you create a Marketplace.
**NOTE:** Never give out or expose your APIKey. You may POST to this
endpoint to create new APIKeys and then DELETE any old keys.
"""
type = 'api_keys'
uri_gen = wac.URIGen('/api_keys', '{api_key}')
class CardHold(Resource):
type = 'card_holds'
uri_gen = wac.URIGen('/card_holds', '{card_hold}')
def cancel(self):
self.is_void = True  # voiding the hold releases the reserved funds
return self.save()
def capture(self, **kwargs):
return Debit(
href=self.debits.href,
**kwargs
).save()
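# Hedged usage sketch (hrefs are placeholders; amounts are integer cents):
#     hold = CardHold.fetch('/card_holds/HOLD_ID')
#     debit = hold.capture(amount=1500, appears_on_statement_as='ShopCo')
#     hold.cancel()    # or void the hold instead of capturing it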
class Transaction(Resource):
"""
Any transfer, funds from or to, your Marketplace's escrow account or the
escrow account of an Order associated with your Marketplace.
E.g. a Credit, Debit, Refund, or Reversal.
If the Transaction is associated with an Order then it will be applied to
the Order's escrow account, not to the Marketplace's escrow account.
"""
type = 'transactions'
class Credit(Transaction):
"""
A Credit represents a transfer of funds from your Marketplace's
escrow account to a FundingInstrument.
Credits are created by calling the `credit` method on a FundingInstrument.
"""
type = 'credits'
uri_gen = wac.URIGen('/credits', '{credit}')
def reverse(self, **kwargs):
"""
Reverse a Credit. If no amount is specified it will reverse the entire
amount of the Credit, you may create many Reversals up to the sum of
the total amount of the original Credit.
:rtype: Reversal
"""
return Reversal(
href=self.reversals.href,
**kwargs
).save()
class Debit(Transaction):
"""
A Debit represents a transfer of funds from a FundingInstrument to your
Marketplace's escrow account.
A Debit may be created directly, or it will be created as a side-effect
of capturing a CardHold. If you create a Debit directly it will implicitly
create the associated CardHold if the FundingInstrument supports this.
"""
type = 'debits'
uri_gen = wac.URIGen('/debits', '{debit}')
def refund(self, **kwargs):
"""
Refunds this Debit. If no amount is specified it will refund the entire
amount of the Debit, you may create many Refunds up to the sum total
of the original Debit's amount.
:rtype: Refund
"""
return Refund(
href=self.refunds.href,
**kwargs
).save()
class Refund(Transaction):
"""
A Refund represents a reversal of funds from a Debit. A Debit can have
many Refunds associated with it up to the total amount of the original
Debit. Funds are returned to your Marketplace's escrow account
proportional to the amount of the Refund.
"""
type = 'refunds'
uri_gen = wac.URIGen('/refunds', '{refund}')
class Reversal(Transaction):
"""
A Reversal represents a reversal of funds from a Credit. A Credit can have
many Reversal associated with it up to the total amount of the original
Credit. Funds are returned to your Marketplace's escrow account
proportional to the amount of the Reversal.
"""
type = 'reversals'
uri_gen = wac.URIGen('/reversals', '{reversal}')
class FundingInstrument(Resource):
"""
A FundingInstrument is either (or both) a source or destination of funds.
You may perform `debit` or `credit` operations on a FundingInstrument to
transfer funds to or from your Marketplace's escrow.
"""
type = 'funding_instruments'
def associate_to_customer(self, customer):
try:
self.links
except AttributeError:
self.links = {}
self.links['customer'] = utils.extract_href_from_object(customer)
self.save()
def debit(self, amount, **kwargs):
"""
Creates a Debit of funds from this FundingInstrument to your
Marketplace's escrow account.
:param appears_on_statement_as: If None then Balanced will use the
`domain_name` property from your Marketplace.
:rtype: Debit
"""
return Debit(
href=self.debits.href,
amount=amount,
**kwargs
).save()
def credit(self, amount, **kwargs):
"""
Creates a Credit of funds from your Marketplace's escrow account to
this FundingInstrument.
:rtype: Credit
"""
return Credit(
href=self.credits.href,
amount=amount,
**kwargs
).save()
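# Hedged usage sketch (hrefs are placeholders; amounts are integer cents):
#     card = Resource.fetch('/cards/CARD_ID')        # any FundingInstrument
#     debit = card.debit(amount=5000, appears_on_statement_as='ShopCo')
#     refund = debit.refund(amount=2000)             # partial refund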
class BankAccount(FundingInstrument):
"""
A BankAccount is both a source, and a destination of, funds. You may
create Debits and Credits to and from, this funding instrument.
"""
type | |
fill. If `True`, fill all
possible fields.
Each event will have the data filled for the intersection
of its external keys and the requested fields.
Default is False
handler_registry : dict, optional
mapping asset specs (strings) to handlers (callable classes)
Yields
------
event : Event
The event, optionally with non-scalar data filled in
Raises
------
ValueError if any key in `fields` is not in at least one descriptor
per header.
"""
if handler_registry is not None:
raise NotImplementedError(
"The handler_registry must be set when "
"the Broker is initialized, usually specified "
"in a configuration file."
)
for name, doc in self.get_documents(
headers,
fields=fields,
stream_name=stream_name,
fill=fill,
handler_registry=handler_registry,
):
if name == "event":
yield doc
def get_table(
self,
headers,
stream_name="primary",
fields=None,
fill=False,
handler_registry=None,
convert_times=True,
timezone=None,
localize_times=True,
):
"""
Load the data from one or more runs as a table (``pandas.DataFrame``).
Parameters
----------
headers : Header or iterable of Headers
The headers to fetch the events for
stream_name : str, optional
Get events from only "event stream" with this name.
Default is 'primary'
fields : List[str], optional
whitelist of field names of interest; if None, all are returned
Default is None
fill : bool or Iterable[str], optional
Which fields to fill. If `True`, fill all
possible fields.
Each event will have the data filled for the intersection
of its external keys and the requested fields.
Default is False
handler_registry : dict, optional
mapping filestore specs (strings) to handlers (callable classes)
convert_times : bool, optional
Whether to convert times from float (seconds since 1970) to
numpy datetime64, using pandas. True by default.
timezone : str, optional
e.g., 'US/Eastern'; if None, use metadatastore configuration in
`self.mds.config['timezone']`
handler_registry : dict, optional
mapping asset specs (strings) to handlers (callable classes)
localize_times : bool, optional
If the times should be localized to the 'local' time zone. If
True (the default) the time stamps are converted to the localtime
zone (as configure in mds).
This is problematic for several reasons:
- apparent gaps or duplicate times around DST transitions
- incompatibility with every other time stamp (which is in UTC)
however, this makes the dataframe repr look nicer
This implies convert_times.
Defaults to True to preserve back-compatibility.
Returns
-------
table : pandas.DataFrame
"""
if handler_registry is not None:
raise NotImplementedError(
"The handler_registry must be set when "
"the Broker is initialized, usually specified "
"in a configuration file."
)
headers = _ensure_list(headers)
fields = set(fields or [])
dfs = []
for header in headers:
descriptors = [
d for d in header.descriptors if d.get("name") == stream_name
]
data_keys = descriptors[0]["data_keys"]
if not fill:
external_fields = {k for k, v in data_keys.items() if v.get("external")}
requested_external = fields.intersection(external_fields)
if requested_external:
raise ValueError(
f"The fields {requested_external} are externally stored data "
"and can only be requested with fill=True."
)
applicable_fields = (fields or set(data_keys)) - external_fields
else:
applicable_fields = fields or set(data_keys)
applicable_fields.add("time")
run = self._catalog[header.start["uid"]]
dataset = run[stream_name].read(variables=(applicable_fields or None))
dataset.load()
dict_of_arrays = {}
for var_name in dataset:
column = dataset[var_name].data
if column.ndim > 1:
column = list(column) # data must be 1-dimensional
dict_of_arrays[var_name] = column
df = pandas.DataFrame(dict_of_arrays)
# if converting to datetime64 (in utc or 'local' tz)
times = dataset["time"].data
if convert_times or localize_times:
times = pandas.to_datetime(times, unit="s")
# make sure this is a series
times = pandas.Series(times, index=df.index)
# if localizing to 'local' time
if localize_times:
times = (
times.dt.tz_localize("UTC") # first make tz aware
# .dt.tz_convert(timezone) # convert to 'local'
.dt.tz_localize(None) # make naive again
)
df["time"] = times
dfs.append(df)
if dfs:
result = pandas.concat(dfs)
else:
# edge case: no data
result = pandas.DataFrame()
result.index.name = "seq_num"
# seq_num starts at 1, not 0
result.index = 1 + result.index
return result
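# Hedged usage sketch (the catalog name and field names are assumptions):
#     db = Broker.named('example_catalog')
#     h = db[-1]                                   # most recent run
#     df = db.get_table(h, stream_name='primary',
#                       fields=['motor_position', 'detector_counts'])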
def get_images(
self,
headers,
name,
stream_name="primary",
handler_registry=None,
):
"""
This method is deprecated. Use Broker.get_documents instead.
Load image data from one or more runs into a lazy array-like object.
Parameters
----------
headers : Header or list of Headers
name : string
field name (data key) of a detector
handler_registry : dict, optional
mapping spec names (strings) to handlers (callable classes)
Examples
--------
>>> header = db[-1]
>>> images = Images(header, 'my_detector_lightfield')
>>> for image in images:
# do something
"""
# Defer this import so that pims is an optional dependency.
from ._legacy_images import Images
headers = _ensure_list(headers)
datasets = [header.xarray_dask(stream_name=stream_name) for header in headers]
if handler_registry is not None:
raise NotImplementedError(
"The handler_registry parameter is no longer supported "
"and must be None."
)
dataset = xarray.merge(datasets)
data_array = dataset[name]
return Images(data_array=data_array)
def restream(self, headers, fields=None, fill=False):
"""
Get all Documents from given run(s).
This output can be used as a drop-in replacement for the output of the
bluesky Run Engine.
Parameters
----------
headers : Header or iterable of Headers
header or headers to fetch the documents for
fields : list, optional
whitelist of field names of interest; if None, all are returned
fill : bool, optional
Whether externally-stored data should be filled in. Defaults to
False.
Yields
------
name, doc : tuple
string name of the Document type and the Document itself.
Example: ('start', {'time': ..., ...})
Examples
--------
>>> def f(name, doc):
... # do something
...
>>> h = db[-1] # most recent header
>>> for name, doc in restream(h):
... f(name, doc)
See Also
--------
:meth:`Broker.process`
"""
for payload in self.get_documents(headers, fields=fields, fill=fill):
yield payload
stream = restream # compat
def process(self, headers, func, fields=None, fill=False):
"""
Pass all the documents from one or more runs into a callback.
This output can be used as a drop-in replacement for the output of the
bluesky Run Engine.
Parameters
----------
headers : Header or iterable of Headers
header or headers to process documents from
func : callable
function with the signature `f(name, doc)`
where `name` is a string and `doc` is a dict
fields : list, optional
whitelist of field names of interest; if None, all are returned
fill : bool, optional
Whether externally-stored data should be filled in. Defaults to
False.
Examples
--------
>>> def f(name, doc):
... # do something
...
>>> h = db[-1] # most recent header
>>> process(h, f)
See Also
--------
:meth:`Broker.restream`
"""
for name, doc in self.get_documents(headers, fields=fields, fill=fill):
func(name, doc)
def export(self, headers, db, new_root=None, copy_kwargs=None):
"""
Serialize a list of runs.
If a new_root is passed files associated with the run will be moved to
this new location, and the corresponding resource document will be
updated with the new_root.
Parameters
----------
headers : databroker.header
one or more run headers that are going to be exported
db : databroker.Broker
an instance of databroker.Broker class that will be the target to
export info
new_root : str
optional. root directory of files that are going to
be exported
copy_kwargs : dict or None
passed through to the ``copy_files`` method on Registry;
None by default
Returns
-------
file_pairs : list
list of (old_file_path, new_file_path) pairs generated by
``copy_files`` method on Registry.
"""
if copy_kwargs is None:
copy_kwargs = {}
if isinstance(headers, Header):
headers = [headers]
file_pairs = []
for header in headers:
for name, doc in self._catalog[header.start["uid"]].documents(fill=False):
if name == "event_page":
for event in event_model.unpack_event_page(doc):
db.insert("event", event)
elif name == "resource" and new_root:
copy_kwargs.setdefault("run_start_uid", header.start["uid"])
file_pairs.extend(self.reg.copy_files(doc, new_root, **copy_kwargs))
new_resource = doc.to_dict()
new_resource["root"] = new_root
db.insert(name, new_resource)
else:
db.insert(name, doc)
return file_pairs
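# Hedged usage sketch (broker names and the new_root path are assumptions):
#     src = Broker.named('raw')
#     dest = Broker.named('archive')
#     copied = src.export(src[-1], dest, new_root='/mnt/archive/data')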
def export_size(self, headers):
"""
Get the size of files associated with a list of headers.
Parameters
----------
headers : :class:databroker.Header:
one or more headers that are going to be exported
Returns
-------
total_size : float
total size of all the files associated with the ``headers`` in Gb
"""
headers = _ensure_list(headers)
total_size = 0
for header in headers:
run = self._catalog[header.start["uid"]]
for name, doc in self._catalog[header.start["uid"]].documents(fill="no"):
if name == "resource":
for filepath in run.get_file_list(doc):
total_size += os.path.getsize(filepath)
return total_size * 1e-9
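    # Hedged usage sketch (not from the original source; names are illustrative):
    # estimate the on-disk footprint of the most recent run before exporting it.
    #
    #     h = db[-1]
    #     size_gb = db.export_size(h)
    #     file_pairs = db.export(h, other_db, new_root='/new/root')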
    def insert(self, name,
'''
Script with all sampling methods.
Computation of effective sample size from:
https://github.com/jwalton3141/jwalton3141.github.io
following definition from:
ref Gelman, Andrew, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>. 2013. Bayesian Data Analysis. Third Edition. London: Chapman & Hall / CRC Press.
'''
import numpy as np
import torch
from flonaco.phifour_utils import PhiFour
from flonaco.croissant_utils import Croissants
def run_langevin(model, x, n_steps, dt):
'''
    model: model with the potential we will run the langevin on
x (tensor): init points for the chains to update (batch_dim, dim)
dt -> is multiplied by N for the phiFour before being passed
'''
optimizer = torch.optim.SGD([x], lr=dt)
def add_noise(grad):
noise = np.sqrt(2 / (dt * model.beta)) * torch.randn_like(grad)
return grad + noise
x.register_hook(add_noise)
xs = []
for t in range(n_steps):
optimizer.zero_grad()
loss = model.U(x).sum()
loss.backward()
optimizer.step()
xs.append(x.clone())
return torch.stack(xs)
def run_MALA(target, x_init, n_steps, dt):
'''
    target: target with the potential we will run the langevin on
-> needs to have grad_U function implemented
x (tensor): init points for the chains to update (batch_dim, dim)
dt -> is multiplied by N for the phiFour before being passed
'''
xs = []
accs = []
for t in range(n_steps):
x = x_init.clone()
x.detach_()
x = x_init - dt * target.grad_U(x_init)
if dt > 0:
x += dt * np.sqrt(2 / (dt * target.beta)) * torch.randn_like(x_init)
ratio = - target.U(x)
ratio -= ((x_init - x + dt * target.grad_U(x)) ** 2 / (4 * dt)).sum(1)
ratio += target.U(x_init)
ratio += ((x - x_init + dt * target.grad_U(x_init)) ** 2 / (4 * dt)).sum(1)
ratio = target.beta * ratio
ratio = torch.exp(ratio)
u = torch.rand_like(ratio)
acc = u < torch.min(ratio, torch.ones_like(ratio))
x[~acc] = x_init[~acc]
accs.append(acc)
xs.append(x.clone())
x_init = x.clone().detach()
return torch.stack(xs), torch.stack(accs)
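# Hedged usage sketch (not part of the original script): a minimal toy target
# exposing the U / grad_U / beta interface assumed by run_MALA above. The class
# and parameter values below are illustrative only.
def _example_run_mala_on_gaussian(n_chains=4, n_steps=100, dt=1e-2):
    class _ToyGaussian:
        beta = 1.0
        def U(self, x):
            # standard Gaussian potential U(x) = 0.5 * ||x||^2
            return 0.5 * (x ** 2).sum(-1)
        def grad_U(self, x):
            return x
    target = _ToyGaussian()
    x_init = torch.randn(n_chains, 2)
    xs, accs = run_MALA(target, x_init, n_steps, dt)
    # xs: (n_steps, n_chains, 2) chain states, accs: per-step acceptance mask
    return xs, accs.float().mean()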
def run_em_langevin(model, x, n_steps, dt, drift=None, bc=None, beta_ratios=None):
'''
Euler-Maruyama Scheme
model (nn.Module): must implement potential energy function model.V(x)
x (tensor): init points for the chains to update (batch_dim, dim)
dt (float): integration timestep
drift (tensor): non-conservative forcing term with dimensions of x
bc (float): None or PBC (hypercube) specified as bc=L for [0,L]^d
'''
    if drift is None:
        # no non-conservative forcing by default
        drift = torch.zeros_like(x)
    xs = []
    for t in range(n_steps):
gradU = torch.autograd.grad(model.V(x), x)[0]
x = x - (gradU - drift) * dt + np.sqrt(2 / model.beta * dt) * torch.randn_like(gradU)
if bc is not None:
x = x + bc * (x < 0.) - bc * (x > bc)
xs.append(x.clone())
return torch.stack(xs)
def run_action_langevin(model, xt, n_steps, dt_path, dt, bc=None):
'''
Path action langevin for diffusions
    model (nn.Module): model with the path potential `model.U(xt)` implemented
    xt (tensor): initial trajectory
    dt_path (float): integration timestep for the action Langevin updates
    dt (float): integration timestep of the input trajectory
bc (float): None or PBC (hypercube) specified as bc=L for [0,L]^d
'''
xts = []
for t in range(n_steps):
gradPath = torch.autograd.grad(model.U(xt).sum(), xt)[0]
noise = np.sqrt(2 * dt_path / (model.beta * model.dt)) * \
torch.randn_like(gradPath)
xt = xt - gradPath * dt_path + noise
if bc is not None:
xt = xt + bc * (xt < 0.) - bc * (xt > bc)
xts.append(xt.clone())
return torch.stack(xts)
def run_action_mh_langevin(model, target, xt, n_steps, dt_path, dt, bc=None):
'''
Path action langevin for diffusions
    model: proposal model (must implement `sample` and `nll`) used for the M.H. resampling
    target: target with the path potential `target.U(xt)` and attributes `dt` and `beta`
    xt (tensor): initial trajectory
    dt_path (float): integration timestep for the action Langevin updates
    dt (float): integration timestep of the input trajectory
bc (float): None or PBC (hypercube) specified as bc=L for [0,L]^d
'''
xts = []
accs = []
betadt = target.dt * target.beta
for t in range(n_steps):
x = model.sample(xt.shape[0])
xt = xt.reshape(-1,model.dim)
ratio = -betadt * target.U(x) + betadt * model.nll(x.reshape(-1,model.dim))
ratio += betadt * target.U(xt) - betadt * model.nll(xt.reshape(-1,model.dim))
ratio = torch.exp(ratio)
u = torch.rand_like(ratio)
acc = u < torch.min(ratio, torch.ones_like(ratio))
x[~acc] = xt[~acc]
accs.append(acc)
xt.data = x.clone().detach()
gradPath = torch.autograd.grad(target.U(xt).sum(), xt)[0]
noise = np.sqrt(2 * dt_path / (betadt)) * \
torch.randn_like(gradPath)
xt = xt - gradPath * dt_path + noise
if bc is not None:
xt = xt + bc * (xt < 0.) - bc * (xt > bc)
xts.append(xt.clone())
return torch.stack(xts), torch.stack(accs)
def run_metropolis(model, target, x_init, n_steps):
xs = []
accs = []
for dt in range(n_steps):
x = model.sample(x_init.shape[0])
ratio = - target.beta * target.U(x) + model.nll(x)
ratio += target.beta * target.U(x_init) - model.nll(x_init)
ratio = torch.exp(ratio)
u = torch.rand_like(ratio)
acc = u < torch.min(ratio, torch.ones_like(ratio))
x[~acc] = x_init[~acc]
xs.append(x.clone())
accs.append(acc)
x_init = x.clone()
return torch.stack(xs), torch.stack(accs)
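# Note (added commentary): in run_metropolis the proposals x ~ model are drawn
# independently of the current state, so the Metropolis-Hastings acceptance
# probability reduces to
#   A(x_cur -> x) = min(1, exp(-beta*U(x) + nll(x) + beta*U(x_cur) - nll(x_cur)))
# where model.nll is the negative log-likelihood of the proposal distribution.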
def run_metrolangevin(model, target, x_lang, n_steps, dt, lag=1):
'''
    model: proposal model (must implement `sample` and `nll`) used for the resampling step
    target: target with the potential U the MCMC samples from
x_lang (tensor): init points for the chains to update (batch_dim, dim)
dt: time step in Langevin -> will be multiplied by N for the phiFour
lag (int): number of Langevin steps before considering resampling
'''
optimizer = torch.optim.SGD([x_lang], lr=dt)
def add_noise(grad):
return grad + np.sqrt(2 / (dt * target.beta)) * torch.randn_like(grad)
x_lang.register_hook(add_noise)
xs = []
accs = []
for t in range(n_steps):
if t % lag == 0:
with torch.no_grad():
x = model.sample(x_lang.shape[0])
ratio = - target.beta * target.U(x) + model.nll(x)
ratio += target.beta * target.U(x_lang) - model.nll(x_lang)
ratio = torch.exp(ratio)
u = torch.rand_like(ratio)
acc = u < torch.min(ratio, torch.ones_like(ratio))
x[~acc] = x_lang[~acc]
accs.append(acc)
x_lang.data = x.clone()
optimizer.zero_grad()
loss = target.U(x_lang).sum()
loss.backward()
optimizer.step()
xs.append(x_lang.clone())
return torch.stack(xs), torch.stack(accs)
def run_metromalangevin(model, target, x_lang, n_steps, dt, lag=1):
'''
    Conversely to run_metrolangevin, the Langevin steps here are corrected by M.H. (i.e. MALA).
    model: proposal model (must implement `sample` and `nll`) used for the resampling step
    target: target with the potential U and gradient grad_U the MCMC samples from
x_lang (tensor): init points for the chains to update (batch_dim, dim)
dt: time step in Langevin -> will be multiplied by N for the phiFour
lag (int): number of Langevin steps before considering resampling
'''
xs = []
accs = []
for t in range(n_steps):
if t % lag == 0:
x = model.sample(x_lang.shape[0])
ratio = - target.beta * target.U(x) + model.nll(x)
ratio += target.beta * target.U(x_lang) - model.nll(x_lang)
ratio = torch.exp(ratio)
u = torch.rand_like(ratio)
acc = u < torch.min(ratio, torch.ones_like(ratio))
x[~acc] = x_lang[~acc]
accs.append(acc)
x_lang.data = x.clone()
x = x_lang.clone()
x = x_lang - dt * target.grad_U(x_lang)
if dt > 0:
x += dt * np.sqrt(2 / (dt * target.beta)) * torch.randn_like(x_lang)
ratio = - target.U(x)
ratio -= ((x_lang - x + dt * target.grad_U(x)) ** 2 / (4 * dt)).sum(1)
ratio += target.U(x_lang)
ratio += ((x - x_lang + dt * target.grad_U(x_lang)) ** 2 / (4 * dt)).sum(1)
ratio = target.beta * ratio
ratio = torch.exp(ratio)
u = torch.rand_like(ratio)
acc = u < torch.min(ratio, torch.ones_like(ratio))
x[~acc] = x_lang[~acc]
x_lang.data = x.clone()
xs.append(x_lang.clone())
return torch.stack(xs), torch.stack(accs)
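# Note (added commentary): run_metrolangevin interleaves flow-model resampling
# with uncorrected (unadjusted) Langevin steps, whereas run_metromalangevin
# interleaves the same resampling with MALA steps, i.e. Langevin proposals that
# are themselves accepted or rejected with a Metropolis-Hastings test.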
def estimate_deltaF(model1, model2,
xs_chains=None,
method='direct',
no_ratio=False,
n_tot=int(1e4),
dt=1e-2,
state_dic=None,
coupling_per_site=False):
"""
Estimate the ratio e^{-\beta (U_1(x) - U_2(x))} under the
statistics of model2 with sampling defined by the sampling kwargs,
or using the provided xs.
If no_ratio -> estimates 1 under the statistics of model2 with sampling
defined by the sampling kwargs (helped by model1 if mhlangevin or mh).
Returns mean and variance (estimated according to sampling method)
model1,2: RealNVP or MoG or PhiFour
    xs_chains: samples to be used to compute the estimate - assumed to be from model2
method: 'direct' computes the expectations using model2.sample
method: 'mh' computes the expectations using run_metropolis
method: 'mhlangevin' computes the expectations using run_metrolangevin
    state_dic: dictionary {'center':, 'width':, 'norm': } or {'mean_threshold':, 'side': '+' or '-'}
"""
if xs_chains is None:
if method == 'direct':
xs = model2.sample(n_tot)
xs_chains = xs.unsqueeze(0) # 1 iter of n_tot chains
elif 'mh' in method:
burn_in = int(1e2)
steps_per_chain = int(1e2)
assert n_tot / steps_per_chain >= 1
x_init = model2.sample(int(n_tot / steps_per_chain))
n_steps = steps_per_chain + burn_in
x_init.detach_().requires_grad_()
if method == 'mhlangevin':
xs_all, _ = run_metrolangevin(model1, model2, x_init,
n_steps, dt)
elif method == 'mh':
xs_all, _ = run_metropolis(model1, model2, x_init,
n_steps=n_steps)
xs = xs_all[-steps_per_chain:, :, :].reshape(-1, model1.dim)
xs_chains = xs_all[-steps_per_chain:, :, :]
else:
print(RuntimeWarning('Error estimated according to method given'))
xs = xs_chains.reshape(-1, model1.dim)
n_tot = xs.shape[0]
if no_ratio:
Zs = torch.ones(n_tot, device=model2.device)
else:
Zs = Boltzmann_diff(xs, model1, model2,
coupling_per_site=coupling_per_site)
description = """a module that holds helper functions for the injection simulations and subsequent skymap analysis"""
authors = "<NAME> (<EMAIL>), <NAME> (<EMAIL>)"
#-------------------------------------------------
import numpy as np
import healpy as hp
import astropy
from astropy.time import Time as astropyTime
from astropy import coordinates as astropyCoordinates
try:
import ephem
except ImportError as e:
print( "WARNING: could not find 'ephem'. Functions involving the Moon's position may raise exceptions!" )
from lal.gpstime import tconvert
from lal.lal import GreenwichMeanSiderealTime as GMST
#-------------------------------------------------
deg2rad = np.pi/180
rad2deg = 180/np.pi
twopi = 2*np.pi
pi2 = np.pi/2
pi6 = np.pi/6
pi8 = np.pi/8
pi10 = np.pi/10
hour = 3600.
day = 86400.
t_ephem_start = astropyTime('1899-12-31 12:00:00', format='iso', scale='utc')
#-------------------------------------------------
### known observatories
class Observatory(object):
def __init__(self, name, lon, lat, color='r', marker='o', degrees=True):
"""
if degrees, we expect lon and lat to be provided in degrees and we convert them to radians internally
"""
self.name = name
self.lon = lon
self.lat = lat
if degrees:
self.lon *= deg2rad
self.lat *= deg2rad
self.color = color
self.marker = marker
### set up dictionary of known observatories
observatories = {}
observatories['Palomar'] = Observatory( 'Palomar', -116.8639, 33.3558, color='r', marker='o', degrees=True )
observatories['Cerro Tololo'] = Observatory( '<NAME>', -70.806525, -30.169661, color='m', marker='h', degrees=True )
observatories['Mauna Kea'] = Observatory( 'Mauna Kea', -156.25333, 20.70972, color='y', marker='D', degrees=True )
observatories['La Palma'] = Observatory( 'La Palma', -17.8947, 28.7636, color='b', marker='s', degrees=True )
observatories['Anglo-Australian'] = Observatory( 'Anglo-Australian', 149.0672, -31.2754, color='g', marker='^', degrees=True )
observatories['Nishi-Harima'] = Observatory( 'Nishi-Harima', 134.3356, 35.0253, color='c', marker='p', degrees=True )
#-------------------------------------------------
### utilities for drawing random times
def gps2relativeUTC( t ):
"""
    converts gps seconds into the number of seconds after the most recent 00:00:00 UTC
"""
if isinstance(t, (np.ndarray, list, tuple)):
times = []
for T in t:
ans = tconvert( T )
times.append( T - float(tconvert( ans.split(',')[0]+" 00:00:00" )) )
return np.array(times)
else:
ans = tconvert( t )
return t-float(tconvert( ans.split(',')[0]+" 00:00:00" ))
def diurnalPDF( t, amplitude=0.5, phase=pi8 ):
"""
"t" must be specified in gps seconds
we convert the time in gps seconds into the number of seconds after the most recent 00:00:00 UTC
return (1 + amplitude*sin(2*pi*t/day - phase))/day
"""
if amplitude > 1:
raise ValueError("amplitude cannot be larger than 1")
t = gps2relativeUTC(t)
return (1 + amplitude*np.sin(twopi*t/day - phase))/day
def diurnalCDF( t, amplitude=0.5, phase=pi8 ):
"""
"t" must be specified in gps seconds
we convert the time in gps seconds into the number of seconds after the most recent 00:00:00 UTC
return t/day - (amplitude/2pi)*cos(2*pi*t/day - phase)
"""
if amplitude > 1:
raise ValueError("amplitude cannot be larger than 1")
t = gps2relativeUTC(t)
return t/day - (amplitude/twopi)*(np.cos(twopi*t/day - phase) - np.cos(phase))
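# Hedged sanity-check sketch (not part of the original module): the diurnal PDF
# above should integrate to ~1 over one full day. The gps epoch below is an
# arbitrary example value, and evaluating diurnalPDF on an array requires
# lal.gpstime.tconvert to be available.
def _check_diurnal_normalization(amplitude=0.5, phase=pi8, n_grid=2001):
    t0 = 1187000000.0                      # arbitrary reference gps second
    ts = t0 + np.linspace(0, day, n_grid)  # one full day of sample times
    pdf = diurnalPDF(ts, amplitude=amplitude, phase=phase)
    return np.trapz(pdf, ts)               # expected to be close to 1.0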
def drawTimes( start, end, N, amplitude=0.5, phase=pi8, verbose=False ):
"""
draws N times from the diurnal cycle between start and end
"""
if start >= end:
raise ValueError("bad start and end times")
dur = end-start
times = []
maxPDF = (1+amplitude)/day
while len(times) < N:
t, p = np.random.rand(2)
t = start + t*dur
        if diurnalPDF( t, amplitude=amplitude, phase=phase ) > p*maxPDF: ### accept the time if the scaled PDF exceeds the random draw
            if verbose:
                print( "accepted t=%.9f"%(t) )
times.append( t )
return times
#-------------------------------------------------
### utilities for rotating maps
def rotateRAC2C( ra, gps1, gps2 ):
"""
rotates the RA according to the change in gps
    takes ra at gps1 and rotates it so that the earth-fixed coordinates are invariant but the time has changed to gps2
"""
gmst2 = GMST( gps2 )
gmst1 = GMST( gps1 )
return (ra - gmst1 + gmst2)%(twopi)
def rotateRAC2E( ra, gps ):
"""
rotates ra -> earth fixed coords
"""
gmst = GMST( gps )
return (ra - gmst)%(twopi)
def rotateRAE2C( phi, gps ):
"""
rotates earth fixed coords -> ra
"""
gmst = GMST( gps )
return (phi + gmst)%(twopi)
def rotateMap( posterior, dphi ):
"""
rotates phi -> phi+dphi
"""
npix = len(posterior)
nside = hp.npix2nside( npix )
theta, phi = hp.pix2ang( nside, np.arange(npix) )
phi += dphi
new_pix = hp.ang2pix( nside, theta, phi )
return posterior[new_pix]
def rotateMapC2C( posterior, old_gps, new_gps ):
"""
returns a rotated map that keeps the posterior in the same relative position to the detectors at the new_gps time
as it was at the old_gps time.
"""
npix = len(posterior)
nside = hp.npix2nside( npix )
theta, new_phi = hp.pix2ang( nside, np.arange( npix ) )
phi = rotateRAC2C( new_phi, new_gps, old_gps ) ### rotate the RA according to times
### need to map the RAs at the new gps time into the RAs at the old gps time
new_pix = hp.ang2pix( nside, theta, phi )
return posterior[new_pix]
def rotateMapC2E( posterior, gps ):
npix = len(posterior)
nside = hp.npix2nside( npix )
theta, phi = hp.pix2ang( nside, np.arange( npix ) )
ra = rotateRAE2C( phi, gps ) ### rotate phi to get ra -> needed to determine original indexing
new_pix = hp.ang2pix( nside, theta, ra )
return posterior[new_pix]
def rotateMapE2C( posterior, gps ):
npix = len(posterior)
nside = hp.npix2nside( npix )
theta, ra = hp.pix2ang( nside, np.arange( npix ) )
phi = rotateRAC2E( ra, gps ) ### rotate the RA to get phi -> needed to determine original indexing
new_pix = hp.ang2pix( nside, theta, phi )
return posterior[new_pix]
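# Hedged sanity-check sketch (not part of the original module): rotating a map
# from celestial to earth-fixed coordinates and back at the same gps time should
# approximately reproduce the input (up to pixelization error). The nside and
# gps values below are arbitrary examples, and GMST requires the lal bindings
# imported above.
def _check_rotation_round_trip(nside=64, gps=1187008882.4):
    posterior = np.random.rand(hp.nside2npix(nside))
    posterior /= posterior.sum()
    round_trip = rotateMapE2C(rotateMapC2E(posterior, gps), gps)
    return np.abs(round_trip - posterior).max()  # expected to be small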
#-------------------------------------------------
def solarPosition( gps, coord="C" ):
    '''
    returns (dec, ra) of the Sun in radians at the given gps time; if coord!="C"
    the right ascension is rotated into earth-fixed longitude
    '''
timeObj = astropyTime( tconvert(int(gps), form="%Y-%m-%dT%H:%M:%S")+("%.3f"%(gps%1))[1:], format='isot', scale='utc')
sun = astropyCoordinates.get_sun(timeObj)
if coord=="C":
return float(sun.dec.radian), float(sun.ra.radian)
else:
return float(sun.dec.radian), rotateRAC2E( float(sun.ra.radian), gps )
def lunarPosition( gps, coord="C" ):
    '''
    returns (dec, ra) of the Moon in radians at the given gps time; if coord!="C"
    the right ascension is rotated into earth-fixed longitude
    '''
moon = ephem.Moon()
moon.compute(tconvert(int(gps), form="%Y/%m/%d %H:%M:%S"))
if coord=="C":
return float(moon.dec), float(moon.ra)
else:
return float(moon.dec), rotateRAC2E( float(moon.ra), gps )
### utilities for generating masks for occlusion
def solarOcclusion(sunDec, sunRA, nside, dead_zone, coord="C"):
'''
    Find the accessible part of the sky in equatorial coordinates.
    "sunDec", "sunRA" and "dead_zone" must all be specified in radians.
returns a mask which corresponds to only those parts of the sky that are further than "dead_zone" from the sun at this time
adapted from <NAME> code
'''
if coord!="C":
print "we only know how to handle coord=\"C\" at the moment. Returning a mask that removes nothing..."
return np.ones((hp.nside2npix(nside),), dtype="int")
npix = hp.nside2npix( nside )
### get solar position in spherical coordinate
sunTheta = pi2 - sunDec
### get the cartesian coordinates of all the other pixels
pix = np.arange( npix )
theta, phi = hp.pix2ang( nside, pix )
### compute cos(theta) between all pixels and the sun in spherical coordinates
cosdtheta = np.cos(sunTheta)*np.cos(theta) + np.sin(sunTheta)*np.sin(theta)*np.cos(sunRA-phi)
return (cosdtheta <= np.cos(dead_zone)).astype(int)
def lunarOcclusion(lunDec, lunRA, nside, dead_zone, coord="C"):
'''
    Find the accessible part of the sky in equatorial coordinates.
    "lunDec", "lunRA" and "dead_zone" must all be specified in radians.
    returns a mask which corresponds to only those parts of the sky that are further than "dead_zone" from the moon at this time
adapted from <NAME>'s code
'''
if coord!="C":
print "we only know how to handle coord=\"C\" at the moment. Returning a mask that removes nothing..."
return np.ones((hp.nside2npix(nside),), dtype="int")
npix = hp.nside2npix( nside )
    ### get lunar position in spherical coordinate
lunTheta = pi2 - lunDec
### get the cartesian coordinates of all the other pixels
pix = np.arange( npix )
theta, phi = hp.pix2ang( nside, pix )
    ### compute cos(theta) between all pixels and the moon in spherical coordinates
cosdtheta = np.cos(lunTheta)*np.cos(theta) + np.sin(lunTheta)*np.sin(theta)*np.cos(lunRA-phi)
return (cosdtheta <= np.cos(dead_zone)).astype(int)
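# Hedged usage sketch (not part of the original module): build a mask of pixels
# further than 30 degrees from the sun at an example gps time. The gps value and
# nside below are arbitrary.
def _example_solar_mask(gps=1187008882.4, nside=64):
    sunDec, sunRA = solarPosition(gps, coord="C")
    return solarOcclusion(sunDec, sunRA, nside, dead_zone=pi6, coord="C")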
#-------------------------------------------------
### utilities for determining where an observatory can see
def observableRegion( sunDec, sunRA, nside, obsAng, obsLat, obsLon, solar_occlusion_angle=pi6, coord='C' ):
"""
computes the region that is just observable for a particular observatory
assumes the sun does not move and computes bounds based on the horizon implied by obsAng and solar_occlusion_angle
"""
### figure out where the sun is
if coord!="C":
print "we only know how to handle coord=\"C\" at the moment. Returning a mask that removes nothing..."
return np.ones((hp.nside2npix(nside),), dtype="int")
### get solar position in spherical coordinate
theta_sun = pi2 - sunDec
cosTheta_sun = np.cos(theta_sun)
sinTheta_sun = np.sin(theta_sun)
### get the cartesian coordinates of all the other pixels
npix = hp.nside2npix( nside )
### cut out regions of the sky that are not visible ever
obsTheta = pi2-obsLat
cosObsTheta = np.cos(obsTheta)
sinObsTheta = np.sin(obsTheta)
    ### add back in the
# Copyright 2003-2008 by <NAME>. All rights reserved.
# Revisions copyright 2008-2017 by <NAME>.
#
# This file is part of the Biopython distribution and governed by your
# choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
# Please see the LICENSE file that should have been included as part of this
# package.
#
# Contact: <NAME>, The James Hutton Institute,
# Invergowrie, Dundee, Scotland, DD2 5DA, UK
# <EMAIL>
################################################################################
"""CircularDrawer module for GenomeDiagram."""
# ReportLab imports
from reportlab.graphics.shapes import Drawing, String, Group, Line, Circle, Polygon
from reportlab.lib import colors
from reportlab.graphics.shapes import ArcPath
# GenomeDiagram imports
from ._AbstractDrawer import AbstractDrawer, draw_polygon, intermediate_points
from ._AbstractDrawer import _stroke_and_fill_colors
from ._FeatureSet import FeatureSet
from ._GraphSet import GraphSet
from math import pi, cos, sin
class CircularDrawer(AbstractDrawer):
"""Object for drawing circular diagrams.
Attributes:
     - tracklines Boolean for whether to draw lines delineating tracks
- pagesize Tuple describing the size of the page in pixels
- x0 Float X co-ord for leftmost point of drawable area
- xlim Float X co-ord for rightmost point of drawable area
- y0 Float Y co-ord for lowest point of drawable area
- ylim Float Y co-ord for topmost point of drawable area
- pagewidth Float pixel width of drawable area
- pageheight Float pixel height of drawable area
- xcenter Float X co-ord of center of drawable area
- ycenter Float Y co-ord of center of drawable area
- start Int, base to start drawing from
- end Int, base to stop drawing at
- length Size of sequence to be drawn
- track_size Float (0->1) the proportion of the track height to draw in
- drawing Drawing canvas
- drawn_tracks List of ints denoting which tracks are to be drawn
- current_track_level Int denoting which track is currently being drawn
- track_offsets Dictionary of number of pixels that each track top,
center and bottom is offset from the base of a fragment, keyed by track
- sweep Float (0->1) the proportion of the circle circumference to
use for the diagram
- cross_track_links List of tuples each with four entries (track A,
feature A, track B, feature B) to be linked.
"""
def __init__(
self,
parent=None,
pagesize="A3",
orientation="landscape",
x=0.05,
y=0.05,
xl=None,
xr=None,
yt=None,
yb=None,
start=None,
end=None,
tracklines=0,
track_size=0.75,
circular=1,
circle_core=0.0,
cross_track_links=None,
):
"""Create CircularDrawer object.
Arguments:
- parent Diagram object containing the data that the drawer
draws
- pagesize String describing the ISO size of the image, or a tuple
of pixels
- orientation String describing the required orientation of the
final drawing ('landscape' or 'portrait')
- x Float (0->1) describing the relative size of the X
margins to the page
- y Float (0->1) describing the relative size of the Y
margins to the page
- xl Float (0->1) describing the relative size of the left X
margin to the page (overrides x)
- xr Float (0->1) describing the relative size of the right X
margin to the page (overrides x)
- yt Float (0->1) describing the relative size of the top Y
margin to the page (overrides y)
- yb Float (0->1) describing the relative size of the lower Y
margin to the page (overrides y)
- start Int, the position to begin drawing the diagram at
- end Int, the position to stop drawing the diagram at
- tracklines Boolean flag to show (or not) lines delineating tracks
on the diagram
- track_size The proportion of the available track height that
should be taken up in drawing
         - circular Boolean flag to show whether the passed sequence is
circular or not
- circle_core The proportion of the available radius to leave
empty at the center of a circular diagram (0 to 1).
- cross_track_links List of tuples each with four entries (track A,
feature A, track B, feature B) to be linked.
"""
# Use the superclass' instantiation method
AbstractDrawer.__init__(
self,
parent,
pagesize,
orientation,
x,
y,
xl,
xr,
yt,
yb,
start,
end,
tracklines,
cross_track_links,
)
# Useful measurements on the page
self.track_size = track_size
self.circle_core = circle_core
# Determine proportion of circumference around which information will be drawn
if not circular:
self.sweep = 0.9
else:
self.sweep = 1.0
def set_track_heights(self):
"""Initialize track heights.
Since tracks may not be of identical heights, the bottom and top
radius for each track is stored in a dictionary - self.track_radii,
keyed by track number
"""
bot_track = min(min(self.drawn_tracks), 1)
top_track = max(self.drawn_tracks) # The 'highest' track to draw
trackunit_sum = 0 # Total number of 'units' taken up by all tracks
        trackunits = {}  # Start and end units for each track, keyed by track number
heightholder = 0 # placeholder variable
for track in range(bot_track, top_track + 1): # track numbers to 'draw'
try:
trackheight = self._parent[track].height # Get track height
except Exception: # TODO: ValueError? IndexError?
trackheight = 1
trackunit_sum += trackheight # increment total track unit height
trackunits[track] = (heightholder, heightholder + trackheight)
heightholder += trackheight # move to next height
max_radius = 0.5 * min(self.pagewidth, self.pageheight)
trackunit_height = max_radius * (1 - self.circle_core) / trackunit_sum
track_core = max_radius * self.circle_core
# Calculate top and bottom radii for each track
self.track_radii = {} # The inner, outer and center radii for each track
track_crop = (
trackunit_height * (1 - self.track_size) / 2.0
) # 'step back' in pixels
for track in trackunits:
top = trackunits[track][1] * trackunit_height - track_crop + track_core
btm = trackunits[track][0] * trackunit_height + track_crop + track_core
ctr = btm + (top - btm) / 2.0
self.track_radii[track] = (btm, ctr, top)
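    # Worked example (illustrative numbers, not from the original source): with
    # two tracks of height 1 each, circle_core=0.2 and a 400x400 drawable area,
    # max_radius = 200, track_core = 40 and trackunit_height = 200*0.8/2 = 80,
    # so before the track_crop trim track 1 spans radii [40, 120] and track 2
    # spans [120, 200]; track_crop = 80*(1 - track_size)/2 is then removed from
    # each end of every track.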
def draw(self):
"""Draw a circular diagram of the stored data."""
# Instantiate the drawing canvas
self.drawing = Drawing(self.pagesize[0], self.pagesize[1])
feature_elements = [] # holds feature elements
feature_labels = [] # holds feature labels
greytrack_bgs = [] # holds track background
greytrack_labels = [] # holds track foreground labels
scale_axes = [] # holds scale axes
scale_labels = [] # holds scale axis labels
# Get tracks to be drawn and set track sizes
self.drawn_tracks = self._parent.get_drawn_levels()
self.set_track_heights()
# Go through each track in the parent (if it is to be drawn) one by
# one and collate the data as drawing elements
for track_level in self._parent.get_drawn_levels():
self.current_track_level = track_level
track = self._parent[track_level]
gbgs, glabels = self.draw_greytrack(track) # Greytracks
greytrack_bgs.append(gbgs)
greytrack_labels.append(glabels)
features, flabels = self.draw_track(track) # Features and graphs
feature_elements.append(features)
feature_labels.append(flabels)
if track.scale:
axes, slabels = self.draw_scale(track) # Scale axes
scale_axes.append(axes)
scale_labels.append(slabels)
feature_cross_links = []
for cross_link_obj in self.cross_track_links:
cross_link_elements = self.draw_cross_link(cross_link_obj)
if cross_link_elements:
feature_cross_links.append(cross_link_elements)
# Groups listed in order of addition to page (from back to front)
# Draw track backgrounds
# Draw feature cross track links
# Draw features and graphs
# Draw scale axes
# Draw scale labels
# Draw feature labels
# Draw track labels
element_groups = [
greytrack_bgs,
feature_cross_links,
feature_elements,
scale_axes,
scale_labels,
feature_labels,
greytrack_labels,
]
for element_group in element_groups:
for element_list in element_group:
[self.drawing.add(element) for element in element_list]
if self.tracklines:
# Draw test tracks over top of diagram
self.draw_test_tracks()
def draw_track(self, track):
"""Return list of track elements and list of track labels."""
track_elements = [] # Holds elements for features and graphs
track_labels = [] # Holds labels for features and graphs
# Distribution dictionary for dealing with different set types
set_methods = {FeatureSet: self.draw_feature_set, GraphSet: self.draw_graph_set}
for set in track.get_sets(): # Draw the feature or graph sets
elements, labels = set_methods[set.__class__](set)
track_elements += elements
track_labels += labels
return track_elements, track_labels
def draw_feature_set(self, set):
"""Return list of feature elements and list of labels for them."""
# print('draw feature set')
feature_elements = [] # Holds diagram elements belonging to the features
label_elements = [] # Holds diagram elements belonging to feature labels
# Collect all the elements for the feature set
for feature in set.get_features():
if self.is_in_bounds(feature.start) or self.is_in_bounds(feature.end):
features, labels = self.draw_feature(feature)
feature_elements += features
                label_elements += labels
'''
*****************************************************************************************
*
* ===============================================
* Nirikshak Bot (NB) Theme (eYRC 2020-21)
* ===============================================
*
* This script is to implement Task 2B of Nirikshak Bot (NB) Theme (eYRC 2020-21).
*
* This software is made available on an "AS IS WHERE IS BASIS".
* Licensee/end user indemnifies and will keep e-Yantra indemnified from
* any and all claim(s) that emanate from the use of the Software or
* breach of the terms of this agreement.
*
* e-Yantra - An MHRD (now MOE) project under National Mission on Education using ICT (NMEICT)
*
*****************************************************************************************
'''
# Team ID: [NB_2082]
# Author List: [<NAME>]
# Filename: task_2b.py
# Functions: init_remote_api_server, exit_remote_api_server, get_vision_sensor_image,
# transform_vision_sensor_image, send_data
# [ Comma separated list of functions in this file ]
# Global variables: client_id
# [ List of global variables defined in this file ]
####################### IMPORT MODULES #######################
## You are not allowed to make any changes in this section. ##
## You have to implement this task with the available ##
## modules for this task (numpy,opencv,os,sys,platform ##
## traceback and math ) ##
##############################################################
import cv2
import numpy as np
import os, sys, platform
import traceback
import math
import time
##############################################################
# Importing the sim module for Remote API connection with CoppeliaSim
try:
import sim
except Exception:
print('\n[ERROR] It seems the sim.py OR simConst.py files are not found!')
print('\n[WARNING] Make sure to have following files in the directory:')
print('sim.py, simConst.py and appropriate library - remoteApi.dll (if on Windows), remoteApi.so (if on Linux) or remoteApi.dylib (if on Mac).\n')
sys.exit()
# Global variable "client_id" for storing ID of starting the CoppeliaSim Remote connection
# NOTE: DO NOT change the value of this "client_id" variable here
client_id = -1
##############################################################
################# ADD UTILITY FUNCTIONS HERE #################
## You can define any utility functions for your code. ##
## Please add proper comments to ensure that your code is ##
## readable and easy to understand. ##
##############################################################
##############################################################
def init_remote_api_server():
"""
Purpose:
---
This function should first close any open connections and then start
communication thread with server i.e. CoppeliaSim.
NOTE: In this Task, do not call the exit_remote_api_server function in case of failed connection to the server.
The test_task_2a executable script will handle that condition.
Input Arguments:
---
None
Returns:
---
`client_id` : [ integer ]
the client_id generated from start connection remote API, it should be stored in a global variable
Example call:
---
client_id = init_remote_api_server()
NOTE: This function will be automatically called by test_task_2a executable before starting the simulation.
"""
global client_id
############## ADD YOUR CODE HERE ##############
sim.simxFinish(-1)
client_id = sim.simxStart('127.0.0.1',19997,True,True,5000,5)
##################################################
return client_id
def get_vision_sensor_image():
"""
Purpose:
---
This function should first get the handle of the Vision Sensor object from the scene.
After that it should get the Vision Sensor's image array from the CoppeliaSim scene.
Input Arguments:
---
None
Returns:
---
`vision_sensor_image` : [ list ]
the image array returned from the get vision sensor image remote API
`image_resolution` : [ list ]
the image resolution returned from the get vision sensor image remote API
`return_code` : [ integer ]
the return code generated from the remote API
Example call:
---
vision_sensor_image, image_resolution, return_code = get_vision_sensor_image()
NOTE: This function will be automatically called by test_task_2a executable at regular intervals.
"""
global client_id
vision_sensor_image = []
image_resolution = []
return_code = 0
############## ADD YOUR CODE HERE ##############
return_code,v0=sim.simxGetObjectHandle(client_id,'Vision_sensor',sim.simx_opmode_blocking)
return_code, resolution, image = sim.simxGetVisionSensorImage(client_id, v0, 0, sim.simx_opmode_blocking)
vision_sensor_image = np.array(image)
image_resolution = np.array(resolution)
##################################################
return vision_sensor_image, image_resolution, return_code
def transform_vision_sensor_image(vision_sensor_image, image_resolution):
"""
Purpose:
---
Transforms the image data returned by simxGetVisionSensorImage into a numpy
array that is possible to process using OpenCV library.
This function should:
1. First convert the vision_sensor_image list to a NumPy array with data-type as uint8.
2. Since the image returned from Vision Sensor is in the form of a 1-D (one dimensional) array,
the new NumPy array should then be resized to a 3-D (three dimensional) NumPy array.
3. Change the color of the new image array from BGR to RGB.
4. Flip the resultant image array about the X-axis.
The resultant image NumPy array should be returned.
Input Arguments:
---
`vision_sensor_image` : [ list ]
the image array returned from the get vision sensor image remote API
`image_resolution` : [ list ]
the image resolution returned from the get vision sensor image remote API
Returns:
---
`transformed_image` : [ numpy array ]
the resultant transformed image array after performing above 4 steps
that can be processed further using OpenCV library
Example call:
---
transformed_image = transform_vision_sensor_image(vision_sensor_image, image_resolution)
NOTE: This function will be automatically called by test_task_2a executable at regular intervals.
"""
transformed_image = None
############## ADD YOUR CODE HERE ##############
vision_sensor_image = np.uint8(vision_sensor_image)
vision_sensor_image = vision_sensor_image.reshape(image_resolution[0],image_resolution[1],3)
transformed_image = cv2.cvtColor(vision_sensor_image, cv2.COLOR_BGR2RGB)
    transformed_image = cv2.flip(transformed_image, 0)
##################################################
return transformed_image
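# Hedged sanity-check sketch (not part of the original task template): applying
# the transform above to a synthetic flattened 2x2 RGB image should yield a
# (2, 2, 3) uint8 array. The values used are arbitrary.
def _example_transform_check():
    fake_image = list(range(12))      # 2 x 2 RGB image, flattened to 1-D
    fake_resolution = [2, 2]
    out = transform_vision_sensor_image(fake_image, fake_resolution)
    return out.shape, out.dtype       # expected: (2, 2, 3), uint8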
def send_data(maze_array):
"""
Purpose:
---
Sends data to CoppeliaSim via Remote API.
Input Arguments:
---
`maze_array` : [ nested list of lists ]
encoded maze in the form of a 2D array returned by detectMaze() function
Returns:
---
`return_code` : [ integer ]
the return code generated from the call script function remote API
Example call:
---
send_data(maze_array)
NOTE: You might want to study this link to understand simx.callScriptFunction() better
https://www.coppeliarobotics.com/helpFiles/en/remoteApiExtension.htm
"""
global client_id
return_code = -1
    ############## ADD YOUR CODE HERE ##############
maze_array = np.array(maze_array)
inputInts=maze_array.flatten()
inputFloats=[]
inputStrings=[]
inputBuffer=bytearray()
return_code,retInts,retFloats,retStrings,retBuffer=sim.simxCallScriptFunction(client_id,'Base',sim.sim_scripttype_customizationscript,'receiveData',inputInts,inputFloats,inputStrings,inputBuffer,sim.simx_opmode_blocking)
# if (return_code == sim.simx_return_ok):
# print(return_code,retInts,retFloats,retStrings,retBuffer)
##################################################
return return_code
def exit_remote_api_server():
"""
Purpose:
---
This function should wait for the last command sent to arrive at the Coppeliasim server
before closing the connection and then end the communication thread with server
i.e. CoppeliaSim using simxFinish Remote API.
Input Arguments:
---
None
Returns:
---
None
Example call:
---
exit_remote_api_server()
NOTE: This function will be automatically called by test_task_2a executable after ending the simulation.
"""
global client_id
############## ADD YOUR CODE HERE ##############
sim.simxGetPingTime(client_id)
sim.simxFinish(client_id)
##################################################
# NOTE: YOU ARE NOT ALLOWED TO MAKE ANY CHANGE TO THIS FUNCTION
#
# Function Name: main
# Inputs: None
# Outputs: None
# Purpose: This part of the code is only for testing your solution. The function does the following:
# - takes maze00.jpg as input
# - applies the Perspective transform and encodes the maze in the form of a 2D array by
# calling the detectMaze() function (same as Task 1B)
# - connects with the remote API server (CoppeliaSim scene)
# - starts the simulation
# - receives the output of the Vision Sensor in the CoppeliaSim scene
# - saves the output of vision sensor as an image
# - stops the simulation
# - Disconnects with the remote API server
# It then asks the whether to repeat the same above steps on all maze images present in
# 'test_cases' folder or not. Write your solution ONLY in the space provided in the
# transform_vision_sensor_image() and send_data() functions.
if __name__ == "__main__":
# Import 'task_1b.py' file as module
try:
import task_1b
except ImportError:
print('\n[ERROR] task_1b.py file is not present in the current directory.')
print('Your current directory is: ', os.getcwd())
print('Make sure task_1b.py is present in this current directory.\n')
sys.exit()
except Exception as e:
        print('Your task_1b.py threw an Exception, kindly debug your code!\n')
traceback.print_exc(file=sys.stdout)
sys.exit()
# Initiate the Remote API connection with CoppeliaSim server
print('\nConnection to CoppeliaSim Remote API Server initiated.')
print('Trying to connect to Remote API Server...')
try:
client_id = init_remote_api_server()
if (client_id != -1):
print('\nConnected successfully to Remote API Server in CoppeliaSim!')
else:
print('\n[ERROR] Failed connecting to Remote API server!')
print('[WARNING] Make sure the CoppeliaSim software is running and')
print('[WARNING] Make sure the Port number for Remote API Server is set to 19997.')
print('[ERROR] OR init_remote_api_server function is not configured correctly, check the code!')
print()
sys.exit()
except Exception:
        print('\n[ERROR] Your init_remote_api_server function threw an Exception, kindly debug your code!')
print('Stop the CoppeliaSim simulation manually if started.\n')
traceback.print_exc(file=sys.stdout)
print()
sys.exit()
# Flag to check whether maze array is generated or not, initially set to 0
maze_array_generated_flag = 0
# path directory of images in 'test_cases' folder
img_dir_path = 'test_cases/'
# path directory to 'generated_images' folder
generated_dir_path = 'generated_images/'
# path to 'maze00.jpg' image file
file_num = 0
img_file_path = img_dir_path + 'maze0' + str(file_num) + '.jpg'
if os.path.exists(img_file_path):
# print('\nFound maze0' + str(file_num) + '.jpg')
pass
else:
print('\n[ERROR] maze0' + str(file_num) + '.jpg not found. Make sure "test_cases" folder is present in current directory.')
print('Your current directory is: ', os.getcwd())
sys.exit()
print('\n============================================')
print('\nFor maze0' + str(file_num) + '.jpg')
# read the 'maze00.jpg' image file
input_img = cv2.imread(img_file_path)
if type(input_img) is np.ndarray:
try:
# get the resultant warped maze image after applying Perspective Transform
warped_img = task_1b.applyPerspectiveTransform(input_img)
if type(warped_img) is np.ndarray:
try:
# get the encoded maze in the form of a 2D array
maze_array = task_1b.detectMaze(warped_img)
if (type(maze_array) is list) and (len(maze_array) == 10):
print('\nEncoded Maze Array = %s' % (maze_array))
print('\n============================================')
# Flag for maze array generated updated to 1
maze_array_generated_flag = 1
else:
print('\n[ERROR] maze_array returned by detectMaze function in \'task_1b.py\' is not returning maze array in expected format!, check the code.')
print()
sys.exit()
except Exception:
                    print('\n[ERROR] Your detectMaze function in \'task_1b.py\' threw an Exception, kindly debug your code!')
traceback.print_exc(file=sys.stdout)
print()
sys.exit()
else:
print('\n[ERROR] applyPerspectiveTransform function in \'task_1b.py\' is not returning the warped maze image in expected format!, check the code.')
print()
sys.exit()
except Exception:
            print('\n[ERROR] Your applyPerspectiveTransform function in \'task_1b.py\' threw an Exception, kindly debug your code!')
traceback.print_exc(file=sys.stdout)
print()
sys.exit()
else:
print('\n[ERROR] maze0' + str(file_num) + '.jpg was not read correctly, something went wrong!')
print()
sys.exit()
# Check if connected to Remote API server and maze array has been generated successfully
if ((client_id != -1) and (maze_array_generated_flag == 1)):
try:
# Send maze array data to CoppeliaSim via Remote API
return_code = send_data(maze_array)
            if
be the same length as
the length of `data` and each element will be applied to each shape
otherwise the same value will be used for all shapes.
opacity : float | list
Opacity of the shapes, must be between 0 and 1.
z_index : int | list
Specifier of z order priority. Shapes with higher z order are
displayed ontop of others. If a list is supplied it must be the
same length as the length of `data` and each element will be
applied to each shape otherwise the same value will be used for all
shapes.
"""
if edge_width is None:
edge_width = self.edge_width
if edge_color is None:
edge_color = self.edge_color
if face_color is None:
face_color = self.face_color
if opacity is None:
opacity = self.opacity
if self._data_view is not None:
z_index = z_index or max(self._data_view._z_index, default=-1) + 1
else:
z_index = z_index or 0
if len(data) > 0:
if np.array(data[0]).ndim == 1:
# If a single array for a shape has been passed turn into list
data = [data]
# Turn input arguments into iterables
shape_inputs = zip(
data,
ensure_iterable(shape_type),
ensure_iterable(edge_width),
ensure_iterable(edge_color, color=True),
ensure_iterable(face_color, color=True),
ensure_iterable(opacity),
ensure_iterable(z_index),
)
for d, st, ew, ec, fc, o, z in shape_inputs:
# A False slice_key means the shape is invalid as it is not
# confined to a single plane
shape_cls = shape_classes[ShapeType(st)]
shape = shape_cls(
d,
edge_width=ew,
edge_color=ec,
face_color=fc,
opacity=o,
z_index=z,
dims_order=self.dims.order,
ndisplay=self.dims.ndisplay,
)
# Add shape
self._data_view.add(shape)
self._display_order_stored = copy(self.dims.order)
self._ndisplay_stored = copy(self.dims.ndisplay)
self._update_dims()
def _set_view_slice(self):
"""Set the view given the slicing indices."""
if not self.dims.ndisplay == self._ndisplay_stored:
self.selected_data = []
self._data_view.ndisplay = min(self.dims.ndim, self.dims.ndisplay)
self._ndisplay_stored = copy(self.dims.ndisplay)
self._clipboard = {}
if not self.dims.order == self._display_order_stored:
self.selected_data = []
self._data_view.update_dims_order(self.dims.order)
self._display_order_stored = copy(self.dims.order)
# Clear clipboard if dimensions swap
self._clipboard = {}
slice_key = np.array(self.dims.indices)[list(self.dims.not_displayed)]
if not np.all(slice_key == self._data_view.slice_key):
self.selected_data = []
self._data_view.slice_key = slice_key
self._set_highlight(force=True)
self._update_thumbnail()
self._update_coordinates()
self.events.set_data()
def interaction_box(self, index):
"""Create the interaction box around a shape or list of shapes.
        If a single index is passed then the bounding box will be inherited
        from that shape's interaction box. If a list of indices is passed it will
be computed directly.
Parameters
----------
index : int | list
Index of a single shape, or a list of shapes around which to
construct the interaction box
Returns
----------
box : np.ndarray
10x2 array of vertices of the interaction box. The first 8 points
are the corners and midpoints of the box in clockwise order
starting in the upper-left corner. The 9th point is the center of
the box, and the last point is the location of the rotation handle
that can be used to rotate the box
"""
if isinstance(index, (list, np.ndarray)):
if len(index) == 0:
box = None
elif len(index) == 1:
box = copy(self._data_view.shapes[index[0]]._box)
else:
indices = np.isin(self._data_view.displayed_index, index)
box = create_box(self._data_view.displayed_vertices[indices])
else:
box = copy(self._data_view.shapes[index]._box)
if box is not None:
rot = box[Box.TOP_CENTER]
length_box = np.linalg.norm(
box[Box.BOTTOM_LEFT] - box[Box.TOP_LEFT]
)
if length_box > 0:
r = self._rotation_handle_length * self.scale_factor
rot = (
rot
- r
* (box[Box.BOTTOM_LEFT] - box[Box.TOP_LEFT])
/ length_box
)
box = np.append(box, [rot], axis=0)
return box
def _outline_shapes(self):
"""Find outlines of any selected or hovered shapes.
Returns
----------
vertices : None | np.ndarray
Nx2 array of any vertices of outline or None
triangles : None | np.ndarray
Mx3 array of any indices of vertices for triangles of outline or
None
"""
if self._value[0] is not None or len(self.selected_data) > 0:
if len(self.selected_data) > 0:
index = copy(self.selected_data)
if self._value[0] is not None:
if self._value[0] in index:
pass
else:
index.append(self._value[0])
index.sort()
else:
index = self._value[0]
centers, offsets, triangles = self._data_view.outline(index)
vertices = centers + (
self.scale_factor * self._highlight_width * offsets
)
vertices = vertices[:, ::-1]
else:
vertices = None
triangles = None
return vertices, triangles
def _compute_vertices_and_box(self):
"""Compute location of highlight vertices and box for rendering.
Returns
----------
vertices : np.ndarray
Nx2 array of any vertices to be rendered as Markers
face_color : str
String of the face color of the Markers
edge_color : str
String of the edge color of the Markers and Line for the box
pos : np.ndarray
Nx2 array of vertices of the box that will be rendered using a
Vispy Line
width : float
Width of the box edge
"""
if len(self.selected_data) > 0:
if self._mode == Mode.SELECT:
                # If in select mode just show the interaction bounding box
# including its vertices and the rotation handle
box = self._selected_box[Box.WITH_HANDLE]
if self._value[0] is None:
face_color = 'white'
elif self._value[1] is None:
face_color = 'white'
else:
face_color = self._highlight_color
edge_color = self._highlight_color
vertices = box[:, ::-1]
# Use a subset of the vertices of the interaction_box to plot
# the line around the edge
pos = box[Box.LINE_HANDLE][:, ::-1]
width = 1.5
elif self._mode in (
[
Mode.DIRECT,
Mode.ADD_PATH,
Mode.ADD_POLYGON,
Mode.ADD_RECTANGLE,
Mode.ADD_ELLIPSE,
Mode.ADD_LINE,
Mode.VERTEX_INSERT,
Mode.VERTEX_REMOVE,
]
):
# If in one of these mode show the vertices of the shape itself
inds = np.isin(
self._data_view.displayed_index, self.selected_data
)
vertices = self._data_view.displayed_vertices[inds][:, ::-1]
# If currently adding path don't show box over last vertex
if self._mode == Mode.ADD_PATH:
vertices = vertices[:-1]
if self._value[0] is None:
face_color = 'white'
elif self._value[1] is None:
face_color = 'white'
else:
face_color = self._highlight_color
edge_color = self._highlight_color
pos = None
width = 0
else:
# Otherwise show nothing
vertices = np.empty((0, 2))
face_color = 'white'
edge_color = 'white'
pos = None
width = 0
elif self._is_selecting:
# If currently dragging a selection box just show an outline of
# that box
vertices = np.empty((0, 2))
edge_color = self._highlight_color
face_color = 'white'
box = create_box(self._drag_box)
width = 1.5
# Use a subset of the vertices of the interaction_box to plot
# the line around the edge
pos = box[Box.LINE][:, ::-1]
else:
# Otherwise show nothing
vertices = np.empty((0, 2))
face_color = 'white'
edge_color = 'white'
pos = None
width = 0
return vertices, face_color, edge_color, pos, width
def _set_highlight(self, force=False):
"""Render highlights of shapes.
Includes boundaries, vertices, interaction boxes, and the drag
selection box when appropriate.
Parameters
----------
force : bool
Bool that forces a redraw to occur when `True`
"""
# Check if any shape or vertex ids have changed since last call
if (
self.selected_data == self._selected_data_stored
and np.all(self._value == self._value_stored)
and np.all(self._drag_box == self._drag_box_stored)
) and not force:
return
self._selected_data_stored = copy(self.selected_data)
self._value_stored = copy(self._value)
self._drag_box_stored = copy(self._drag_box)
self.events.highlight()
def _finish_drawing(self):
"""Reset properties used in shape drawing."""
index = copy(self._moving_value[0])
self._is_moving = False
self.selected_data = []
self._drag_start = None
self._drag_box = None
self._is_selecting = False
self._fixed_vertex = None
self._value = (None, None)
self._moving_value = (None, None)
if self._is_creating is True and self._mode == Mode.ADD_PATH:
vertices = self._data_view.displayed_vertices[
self._data_view.displayed_index == index
]
if len(vertices) <= 2:
self._data_view.remove(index)
else:
data_full = self.expand_shape(vertices)
self._data_view.edit(index, data_full[:-1])
if self._is_creating is True and self._mode == Mode.ADD_POLYGON:
vertices = self._data_view.displayed_vertices[
self._data_view.displayed_index == index
]
if len(vertices) <= 2:
self._data_view.remove(index)
self._is_creating = False
self._update_dims()
def _update_thumbnail(self):
"""Update thumbnail with current points and colors."""
# calculate min vals for the vertices and pad with 0.5
# the offset is needed to ensure that the top left corner of the shapes
# corresponds to the top left corner of the thumbnail
offset = (
np.array([self.dims.range[d][0] for d in self.dims.displayed])
+ 0.5
)
# calculate range of values for the vertices and pad with 1
# padding ensures the entire shape can be represented in the thumbnail
# without getting clipped
shape = np.ceil(
[
self.dims.range[d][1] - self.dims.range[d][0] + 1
for d in self.dims.displayed
]
).astype(int)
zoom_factor = np.divide(self._thumbnail_shape[:2], shape[-2:]).min()
colormapped = self._data_view.to_colors(
colors_shape=self._thumbnail_shape[:2],
zoom_factor=zoom_factor,
offset=offset[-2:],
)
self.thumbnail = colormapped
def remove_selected(self):
"""Remove any selected shapes."""
to_remove = sorted(self.selected_data, reverse=True)
for index in to_remove:
self._data_view.remove(index)
self.selected_data = []
coord = [self.coordinates[i] for i in self.dims.displayed]
self._finish_drawing()
def _rotate_box(self, angle, center=[0, 0]):
"""Perfrom a rotation on the | |
@mock.patch('requests.request')
def test_get_target_info_by_initiator_negative(
self,
mock_request):
"""Test get target info by initiator."""
mock_request.side_effect = ([
FakeLoginResponse(),
FakeGetBasicInfo114Response(),
FakeLoginResponse(),
FakeNoAuthPassedResponse()] + [
FakeLoginResponse(),
FakeNoAuthPassedResponse()] * 4)
self.driver = qnap.QnapISCSIDriver(
configuration=create_configuration(
'admin',
'qnapadmin',
'http://1.2.3.4:8080',
'1.2.3.4',
'Pool1',
True))
self.driver.do_setup('context')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.api_executor.
get_target_info_by_initiator,
'fakeInitiatorIQN')
@mock.patch('requests.request')
def test_get_target_info_by_initiator_with_wrong_result(
self,
mock_request):
"""Test get target info by initiator."""
mock_request.side_effect = ([
FakeLoginResponse(),
FakeGetBasicInfo114Response(),
FakeLoginResponse(),
FakeTargetInfoByInitiatorFail()])
self.driver = qnap.QnapISCSIDriver(
configuration=create_configuration(
'admin',
'qnapadmin',
'http://1.2.3.4:8080',
'1.2.3.4',
'Pool1',
True))
self.driver.do_setup('context')
self.driver.api_executor.get_target_info_by_initiator(
'fakeInitiatorIQN')
fake_params = {}
fake_params['func'] = 'extra_get'
fake_params['initiatorIQN'] = 'fakeInitiatorIQN'
fake_params['sid'] = 'fakeSid'
fake_post_params = self.sanitize(fake_params)
get_target_info_url = (
'http://1.2.3.4:8080/cgi-bin/disk/iscsi_portal_setting.cgi?' +
fake_post_params)
expected_call_list = [
mock.call('POST', fake_login_url, data=global_sanitized_params,
headers=header, verify=False),
mock.call('GET', fake_get_basic_info_url, data=None, headers=None,
verify=False),
mock.call('POST', fake_login_url, data=global_sanitized_params,
headers=header, verify=False),
mock.call('GET', get_target_info_url, data=None, headers=None,
verify=False)]
self.assertEqual(expected_call_list, mock_request.call_args_list)
class QnapAPIExecutorTsTestCase(QnapDriverBaseTestCase):
"""Tests QnapAPIExecutorTS."""
@mock.patch('requests.request')
def test_create_lun_positive_with_thin_allocate(
self,
mock_request):
"""Test create lun."""
fake_volume = VolumeClass(
'fakeDisplayName', 'fakeId', 100, 'fakeLunName')
mock_request.side_effect = ([
FakeLoginResponse(),
FakeGetBasicInfoTsResponse(),
FakeLoginResponse(),
FakeCreateLunResponse()])
self.driver = qnap.QnapISCSIDriver(
configuration=create_configuration(
'admin',
'qnapadmin',
'http://1.2.3.4:8080',
'1.2.3.4',
'Storage Pool 1',
True))
self.driver.do_setup('context')
self.assertEqual(
'fakeLunIndex',
self.driver.api_executor.create_lun(
fake_volume, 'fakepool', 'fakeLun', True, False, True, False))
fake_params = {}
fake_params['func'] = 'add_lun'
fake_params['FileIO'] = 'no'
fake_params['LUNThinAllocate'] = '1'
fake_params['LUNName'] = 'fakeLun'
fake_params['LUNPath'] = 'fakeLun'
fake_params['poolID'] = 'fakepool'
fake_params['lv_ifssd'] = 'no'
fake_params['LUNCapacity'] = 100
fake_params['LUNSectorSize'] = '512'
fake_params['lv_threshold'] = '80'
fake_params['sid'] = 'fakeSid'
fake_post_params = self.sanitize(fake_params)
create_lun_url = (
'http://1.2.3.4:8080/cgi-bin/disk/iscsi_lun_setting.cgi?' +
fake_post_params)
expected_call_list = [
mock.call('POST', fake_login_url, data=global_sanitized_params,
headers=header, verify=False),
mock.call('GET', fake_get_basic_info_url, data=None, headers=None,
verify=False),
mock.call('POST', fake_login_url, data=global_sanitized_params,
headers=header, verify=False),
mock.call('GET', create_lun_url, data=None, headers=None,
verify=False)]
self.assertEqual(expected_call_list, mock_request.call_args_list)
@mock.patch('requests.request')
def test_create_lun_positive_without_thin_allocate(
self,
mock_request):
"""Test create lun."""
fake_volume = VolumeClass(
'fakeDisplayName', 'fakeId', 100, 'fakeLunName')
mock_request.side_effect = ([
FakeLoginResponse(),
FakeGetBasicInfoTsResponse(),
FakeLoginResponse(),
FakeCreateLunResponse()])
self.driver = qnap.QnapISCSIDriver(
configuration=create_configuration(
'admin',
'qnapadmin',
'http://1.2.3.4:8080',
'1.2.3.4',
'Storage Pool 1',
True))
self.driver.do_setup('context')
self.assertEqual(
'fakeLunIndex',
self.driver.api_executor.create_lun(
fake_volume, 'fakepool', 'fakeLun', False, False, True, False))
fake_params = {}
fake_params['func'] = 'add_lun'
fake_params['FileIO'] = 'no'
fake_params['LUNThinAllocate'] = '0'
fake_params['LUNName'] = 'fakeLun'
fake_params['LUNPath'] = 'fakeLun'
fake_params['poolID'] = 'fakepool'
fake_params['lv_ifssd'] = 'no'
fake_params['LUNCapacity'] = 100
fake_params['LUNSectorSize'] = '512'
fake_params['lv_threshold'] = '80'
fake_params['sid'] = 'fakeSid'
fake_post_params = self.sanitize(fake_params)
create_lun_url = (
'http://1.2.3.4:8080/cgi-bin/disk/iscsi_lun_setting.cgi?' +
fake_post_params)
expected_call_list = [
mock.call('POST', fake_login_url, data=global_sanitized_params,
headers=header, verify=False),
mock.call('GET', fake_get_basic_info_url, data=None, headers=None,
verify=False),
mock.call('POST', fake_login_url, data=global_sanitized_params,
headers=header, verify=False),
mock.call('GET', create_lun_url, data=None, headers=None,
verify=False)]
self.assertEqual(expected_call_list, mock_request.call_args_list)
@mock.patch('requests.request')
def test_create_lun_negative(
self,
mock_request):
"""Test create lun."""
fake_volume = VolumeClass(
'fakeDisplayName', 'fakeId', 100, 'fakeLunName')
mock_request.side_effect = ([
FakeLoginResponse(),
FakeGetBasicInfoTsResponse(),
FakeLoginResponse(),
FakeNoAuthPassedResponse()] + [
FakeLoginResponse(),
FakeNoAuthPassedResponse()] * 4)
self.driver = qnap.QnapISCSIDriver(
configuration=create_configuration(
'admin',
'qnapadmin',
'http://1.2.3.4:8080',
'1.2.3.4',
'Storage Pool 1',
True))
self.driver.do_setup('context')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.api_executor.create_lun,
fake_volume, 'fakepool', 'fakeLun', 'False',
'False', 'True', 'False')
@mock.patch('requests.request')
def test_create_lun_negative_with_wrong_result(
self,
mock_request):
"""Test create lun."""
fake_volume = VolumeClass(
'fakeDisplayName', 'fakeId', 100, 'fakeLunName')
mock_request.side_effect = ([
FakeLoginResponse(),
FakeGetBasicInfoTsResponse(),
FakeLoginResponse(),
FakeCreateLunFailResponse()])
self.driver = qnap.QnapISCSIDriver(
configuration=create_configuration(
'admin',
'qnapadmin',
'http://1.2.3.4:8080',
'1.2.3.4',
'Storage Pool 1',
True))
self.driver.do_setup('context')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.api_executor.create_lun,
fake_volume, 'fakepool', 'fakeLun', 'False',
'False', 'True', 'False')
@mock.patch('requests.request')
def test_delete_lun(
self,
mock_request):
"""Test delete lun."""
mock_request.side_effect = ([
FakeLoginResponse(),
FakeGetBasicInfoTsResponse(),
FakeLoginResponse(),
FakeCreateLunResponse()])
self.driver = qnap.QnapISCSIDriver(
configuration=create_configuration(
'admin',
'qnapadmin',
'http://1.2.3.4:8080',
'1.2.3.4',
'Storage Pool 1',
True))
self.driver.do_setup('context')
self.driver.api_executor.delete_lun('fakeLunIndex')
fake_params = {}
fake_params['func'] = 'remove_lun'
fake_params['run_background'] = '1'
fake_params['ha_sync'] = '1'
fake_params['LUNIndex'] = 'fakeLunIndex'
fake_params['sid'] = 'fakeSid'
fake_post_params = self.sanitize(fake_params)
delete_lun_url = (
'http://1.2.3.4:8080/cgi-bin/disk/iscsi_lun_setting.cgi?' +
fake_post_params)
expected_call_list = [
mock.call('POST', fake_login_url, data=global_sanitized_params,
headers=header, verify=False),
mock.call('GET', fake_get_basic_info_url, data=None, headers=None,
verify=False),
mock.call('POST', fake_login_url, data=global_sanitized_params,
headers=header, verify=False),
mock.call('GET', delete_lun_url, data=None, headers=None,
verify=False)]
self.assertEqual(expected_call_list, mock_request.call_args_list)
@mock.patch('requests.request')
def test_delete_lun_negative(
self,
mock_request):
"""Test delete lun."""
mock_request.side_effect = ([
FakeLoginResponse(),
FakeGetBasicInfoTsResponse(),
FakeLoginResponse(),
FakeNoAuthPassedResponse()] + [
FakeLoginResponse(),
FakeNoAuthPassedResponse()] * 4)
self.driver = qnap.QnapISCSIDriver(
configuration=create_configuration(
'admin',
'qnapadmin',
'http://1.2.3.4:8080',
'1.2.3.4',
'Storage Pool 1',
True))
self.driver.do_setup('context')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.api_executor.delete_lun,
'fakeLunIndex')
@mock.patch('requests.request')
def test_delete_lun_negative_with_wrong_result(
self,
mock_request):
"""Test delete lun."""
mock_request.side_effect = ([
FakeLoginResponse(),
FakeGetBasicInfoTsResponse(),
FakeLoginResponse(),
FakeCreateLunFailResponse()])
self.driver = qnap.QnapISCSIDriver(
configuration=create_configuration(
'admin',
'qnapadmin',
'http://1.2.3.4:8080',
'1.2.3.4',
'Storage Pool 1',
True))
self.driver.do_setup('context')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.api_executor.delete_lun,
'fakeLunIndex')
@mock.patch('requests.request')
def test_delete_lun_positive_with_busy_result(
self,
mock_request):
"""Test delete lun."""
mock_request.side_effect = ([
FakeLoginResponse(),
FakeGetBasicInfoTsResponse(),
FakeLoginResponse(),
FakeCreateLunBusyResponse()])
self.driver = qnap.QnapISCSIDriver(
configuration=create_configuration(
'admin',
'qnapadmin',
'http://1.2.3.4:8080',
'1.2.3.4',
'Storage Pool 1',
True))
self.driver.do_setup('context')
self.driver.api_executor.delete_lun('fakeLunIndex')
fake_params = {}
fake_params['func'] = 'remove_lun'
fake_params['run_background'] = '1'
fake_params['ha_sync'] = '1'
fake_params['LUNIndex'] = 'fakeLunIndex'
fake_params['sid'] = 'fakeSid'
fake_post_params = self.sanitize(fake_params)
delete_lun_url = (
'http://1.2.3.4:8080/cgi-bin/disk/iscsi_lun_setting.cgi?' +
fake_post_params)
expected_call_list = [
mock.call('POST', fake_login_url, data=global_sanitized_params,
headers=header, verify=False),
mock.call('GET', fake_get_basic_info_url, data=None, headers=None,
verify=False),
mock.call('POST', fake_login_url, data=global_sanitized_params,
headers=header, verify=False),
mock.call('GET', delete_lun_url, data=None, headers=None,
verify=False)]
self.assertEqual(expected_call_list, mock_request.call_args_list)
@mock.patch('requests.request')
def test_map_lun(
self,
mock_request):
"""Test map lun."""
mock_request.side_effect = ([
FakeLoginResponse(),
FakeGetBasicInfoTsResponse(),
FakeLoginResponse(),
FakeCreateLunResponse()])
self.driver = qnap.QnapISCSIDriver(
configuration=create_configuration(
'admin',
'qnapadmin',
'http://1.2.3.4:8080',
'1.2.3.4',
'Storage Pool 1',
True))
self.driver.do_setup('context')
self.driver.api_executor.map_lun(
'fakeLunIndex', 'fakeTargetIndex')
fake_params = {}
fake_params['func'] = 'add_lun'
fake_params['LUNIndex'] = 'fakeLunIndex'
fake_params['targetIndex'] = 'fakeTargetIndex'
fake_params['sid'] = 'fakeSid'
fake_post_params = self.sanitize(fake_params)
map_lun_url = (
'http://1.2.3.4:8080/cgi-bin/disk/iscsi_target_setting.cgi?' +
fake_post_params)
expected_call_list = [
mock.call('POST', fake_login_url, data=global_sanitized_params,
headers=header, verify=False),
mock.call('GET', fake_get_basic_info_url, data=None, headers=None,
verify=False),
mock.call('POST', fake_login_url, data=global_sanitized_params,
headers=header, verify=False),
mock.call('GET', map_lun_url, data=None, headers=None,
verify=False)]
self.assertEqual(expected_call_list, mock_request.call_args_list)
@mock.patch('requests.request')
def test_map_lun_negative(
self,
mock_request):
"""Test map lun."""
mock_request.side_effect = ([
FakeLoginResponse(),
FakeGetBasicInfoTsResponse(),
FakeLoginResponse(),
FakeNoAuthPassedResponse()] + [
FakeLoginResponse(),
FakeNoAuthPassedResponse()] * 4)
self.driver = qnap.QnapISCSIDriver(
configuration=create_configuration(
'admin',
'qnapadmin',
'http://1.2.3.4:8080',
'1.2.3.4',
'Storage Pool 1',
True))
self.driver.do_setup('context')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.api_executor.map_lun,
'fakeLunIndex', 'fakeTargetIndex')
@mock.patch('requests.request')
def test_map_lun_negative_with_wrong_result(
self,
mock_request):
"""Test map lun."""
mock_request.side_effect = ([
FakeLoginResponse(),
FakeGetBasicInfoTsResponse(),
FakeLoginResponse(),
FakeCreateLunFailResponse()])
self.driver = qnap.QnapISCSIDriver(
configuration=create_configuration(
'admin',
'qnapadmin',
'http://1.2.3.4:8080',
'1.2.3.4',
'Storage Pool 1',
True))
self.driver.do_setup('context')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.api_executor.map_lun,
'fakeLunIndex', 'fakeTargetIndex')
@mock.patch('requests.request')
def test_disable_lun(
self,
mock_request):
"""Test disable lun."""
mock_request.side_effect = ([
FakeLoginResponse(),
FakeGetBasicInfoTsResponse(),
FakeLoginResponse(),
FakeCreateLunResponse()])
self.driver = qnap.QnapISCSIDriver(
configuration=create_configuration(
'admin',
'qnapadmin',
'http://1.2.3.4:8080',
'1.2.3.4',
'Storage Pool 1',
True))
self.driver.do_setup('context')
self.driver.api_executor.disable_lun(
'fakeLunIndex', 'fakeTargetIndex')
fake_params = {}
fake_params['func'] = 'edit_lun'
fake_params['LUNIndex'] = 'fakeLunIndex'
fake_params['targetIndex'] = 'fakeTargetIndex'
fake_params['LUNEnable'] = 0
fake_params['sid'] = 'fakeSid'
fake_post_params = self.sanitize(fake_params)
unmap_lun_url = (
'http://1.2.3.4:8080/cgi-bin/disk/iscsi_target_setting.cgi?' +
fake_post_params)
expected_call_list = [
mock.call('POST', fake_login_url, data=global_sanitized_params,
headers=header, verify=False),
mock.call('GET', fake_get_basic_info_url, data=None, headers=None,
verify=False),
mock.call('POST', fake_login_url, data=global_sanitized_params,
headers=header, verify=False),
mock.call('GET', unmap_lun_url, data=None, headers=None,
verify=False)]
self.assertEqual(expected_call_list, mock_request.call_args_list)
@mock.patch('requests.request')
def test_disable_lun_negative(
self,
mock_request):
"""Test disable lun."""
mock_request.side_effect = ([
FakeLoginResponse(),
FakeGetBasicInfoTsResponse(),
FakeLoginResponse(),
FakeNoAuthPassedResponse()] + [
FakeLoginResponse(),
FakeNoAuthPassedResponse()] * 4)
self.driver = qnap.QnapISCSIDriver(
configuration=create_configuration(
'admin',
'qnapadmin',
'http://1.2.3.4:8080',
'1.2.3.4',
'Storage Pool 1',
True))
self.driver.do_setup('context')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.api_executor.disable_lun,
'fakeLunIndex', 'fakeTargetIndex')
@mock.patch('requests.request')
def test_disable_lun_negative_with_wrong_result(
self,
mock_request):
"""Test disable lun."""
mock_request.side_effect = ([
FakeLoginResponse(),
FakeGetBasicInfoTsResponse(),
FakeLoginResponse(),
FakeCreateLunFailResponse()])
self.driver = qnap.QnapISCSIDriver(
configuration=create_configuration(
'admin',
'qnapadmin',
'http://1.2.3.4:8080',
'1.2.3.4',
'Storage Pool 1',
True))
self.driver.do_setup('context')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.api_executor.disable_lun,
'fakeLunIndex', 'fakeTargetIndex')
@mock.patch('requests.request')
def test_unmap_lun(
self,
mock_request):
"""Test unmap lun."""
mock_request.side_effect = ([
FakeLoginResponse(),
FakeGetBasicInfoTsResponse(),
FakeLoginResponse(),
FakeCreateLunResponse()])
self.driver = qnap.QnapISCSIDriver(
configuration=create_configuration(
'admin',
'qnapadmin',
'http://1.2.3.4:8080',
'1.2.3.4',
'Storage Pool 1',
True))
self.driver.do_setup('context')
self.driver.api_executor.unmap_lun(
'fakeLunIndex', 'fakeTargetIndex')
fake_params = {}
fake_params['func'] = 'remove_lun'
fake_params['LUNIndex'] = 'fakeLunIndex'
fake_params['targetIndex'] = 'fakeTargetIndex'
fake_params['sid'] = 'fakeSid'
fake_post_params = self.sanitize(fake_params)
unmap_lun_url = (
'http://1.2.3.4:8080/cgi-bin/disk/iscsi_target_setting.cgi?' +
fake_post_params)
expected_call_list = [
mock.call('POST', fake_login_url, data=global_sanitized_params,
headers=header, verify=False),
mock.call('GET', fake_get_basic_info_url, data=None, headers=None,
verify=False),
mock.call('POST', fake_login_url, data=global_sanitized_params,
headers=header, verify=False),
mock.call('GET', unmap_lun_url, data=None, headers=None,
verify=False)]
self.assertEqual(expected_call_list, mock_request.call_args_list)
@mock.patch('requests.request')
def test_unmap_lun_negative(
self,
mock_request):
"""Test unmap lun."""
mock_request.side_effect = ([
FakeLoginResponse(),
FakeGetBasicInfoTsResponse(),
FakeLoginResponse(),
FakeNoAuthPassedResponse()] + [
FakeLoginResponse(),
FakeNoAuthPassedResponse()] * 4)
self.driver = qnap.QnapISCSIDriver(
configuration=create_configuration(
'admin',
'qnapadmin',
'http://1.2.3.4:8080',
'1.2.3.4',
'Storage Pool 1',
True))
self.driver.do_setup('context')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.api_executor.unmap_lun,
'fakeLunIndex', 'fakeTargetIndex')
@mock.patch('requests.request')
def test_unmap_lun_negative_with_wrong_result(
self,
mock_request):
"""Test unmap lun."""
mock_request.side_effect = ([
FakeLoginResponse(),
FakeGetBasicInfoTsResponse(),
FakeLoginResponse(),
FakeCreateLunFailResponse()])
self.driver = qnap.QnapISCSIDriver(
configuration=create_configuration(
'admin',
'qnapadmin',
'http://1.2.3.4:8080',
'1.2.3.4',
'Storage Pool 1',
True))
self.driver.do_setup('context')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.api_executor.unmap_lun,
'fakeLunIndex', 'fakeTargetIndex')
@mock.patch('requests.request')
def test_remove_target_init(
self,
mock_request):
"""Test remove target init."""
mock_request.side_effect = ([
FakeLoginResponse(),
FakeGetBasicInfoTsResponse(),
FakeLoginResponse(),
FakeTargetInfo()])
self.driver = qnap.QnapISCSIDriver(
configuration=create_configuration(
'admin',
'qnapadmin',
'http://1.2.3.4:8080',
'1.2.3.4',
'Storage Pool 1',
True))
self.driver.do_setup('context')
self.driver.api_executor.remove_target_init(
'fakeTargetIqn', 'fakeDefaultAcl')
fake_params = {}
fake_params['func'] = 'remove_init'
fake_params['targetIQN'] = 'fakeTargetIqn'
fake_params['initiatorIQN'] = 'fakeDefaultAcl'
fake_params['ha_sync'] = '1'
fake_params['sid'] = 'fakeSid'
fake_post_params = self.sanitize(fake_params)
remove_target_init_url = (
'http://1.2.3.4:8080/cgi-bin/disk/iscsi_target_setting.cgi?' +
fake_post_params)
expected_call_list = [
mock.call('POST', fake_login_url, data=global_sanitized_params,
headers=header, verify=False),
mock.call('GET', fake_get_basic_info_url, data=None, headers=None,
verify=False),
mock.call('POST', fake_login_url, data=global_sanitized_params,
headers=header, verify=False),
mock.call('GET', remove_target_init_url, data=None, headers=None,
verify=False)]
self.assertEqual(expected_call_list, mock_request.call_args_list)
@mock.patch('requests.request')
def test_remove_target_init_negative(
self,
mock_request):
"""Test remove target init."""
mock_request.side_effect = ([
FakeLoginResponse(),
FakeGetBasicInfoTsResponse(),
FakeLoginResponse(),
FakeNoAuthPassedResponse()] + [
FakeLoginResponse(),
FakeNoAuthPassedResponse()] * 4)
self.driver = qnap.QnapISCSIDriver(
configuration=create_configuration(
'admin',
'qnapadmin',
'http://1.2.3.4:8080',
'1.2.3.4',
'Storage Pool 1',
True))
self.driver.do_setup('context')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.api_executor.remove_target_init,
'fakeTargetIqn', 'fakeDefaultAcl')
@mock.patch('requests.request')
def test_remove_target_init_negative_with_wrong_result(
self, mock_request):
"""Test remove target init."""
mock_request.side_effect = ([
FakeLoginResponse(),
FakeGetBasicInfoTsResponse(),
FakeLoginResponse(),
FakeTargetInfoFail()])
self.driver = qnap.QnapISCSIDriver(
configuration=create_configuration(
'admin',
'qnapadmin',
'http://1.2.3.4:8080',
'1.2.3.4',
'Storage Pool 1',
True))
self.driver.do_setup('context')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.api_executor.remove_target_init,
| |
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
"""
This is an implementation of a state-emitting MarkovModel. I am using
terminology similar to Manning and Schutze.
Functions:
train_bw        Train a Markov model using the Baum-Welch algorithm.
train_visible   Train a visible Markov model using MLE.
find_states     Find a state sequence that explains some observations.
load            Load a MarkovModel.
save            Save a MarkovModel.
Classes:
MarkovModel     Holds the description of a Markov model.
"""
import numpy
try:
logaddexp = numpy.logaddexp
except AttributeError:
# Numpy versions older than 1.3 do not contain logaddexp.
# Once we require Numpy version 1.3 or later, we should revisit this
# module to see if we can simplify some of the other functions in
# this module.
import warnings
warnings.warn("For optimal speed, please update to Numpy version 1.3 or later (current version is %s)" % numpy.__version__)
def logaddexp(logx, logy):
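        # Pure-Python fallback for log-sum-exp: returns log(exp(logx) + exp(logy))
        # without overflowing for large-magnitude inputs.
        # For example, logaddexp(log(0.5), log(0.25)) equals log(0.75).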
if logy - logx > 100:
return logy
elif logx - logy > 100:
return logx
minxy = min(logx, logy)
return minxy + numpy.log(numpy.exp(logx - minxy) + numpy.exp(logy - minxy))
def itemindex(values):
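    # Map each value to the index of its first occurrence in `values`.
    # Iterating over the reversed list means earlier positions overwrite
    # later ones, so duplicated values resolve to their first index.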
d = {}
entries = enumerate(values[::-1])
n = len(values) - 1
for index, key in entries:
d[key] = n - index
return d
numpy.random.seed()
VERY_SMALL_NUMBER = 1E-300
LOG0 = numpy.log(VERY_SMALL_NUMBER)
class MarkovModel(object):
def __init__(self, states, alphabet,
p_initial=None, p_transition=None, p_emission=None):
self.states = states
self.alphabet = alphabet
self.p_initial = p_initial
self.p_transition = p_transition
self.p_emission = p_emission
def __str__(self):
from Bio._py3k import StringIO
handle = StringIO()
save(self, handle)
handle.seek(0)
return handle.read()
def _readline_and_check_start(handle, start):
line = handle.readline()
if not line.startswith(start):
raise ValueError("I expected %r but got %r" % (start, line))
return line
def load(handle):
"""load(handle) -> MarkovModel()"""
# Load the states.
line = _readline_and_check_start(handle, "STATES:")
states = line.split()[1:]
# Load the alphabet.
line = _readline_and_check_start(handle, "ALPHABET:")
alphabet = line.split()[1:]
mm = MarkovModel(states, alphabet)
N, M = len(states), len(alphabet)
# Load the initial probabilities.
mm.p_initial = numpy.zeros(N)
line = _readline_and_check_start(handle, "INITIAL:")
for i in range(len(states)):
line = _readline_and_check_start(handle, " %s:" % states[i])
mm.p_initial[i] = float(line.split()[-1])
# Load the transition.
mm.p_transition = numpy.zeros((N, N))
line = _readline_and_check_start(handle, "TRANSITION:")
for i in range(len(states)):
line = _readline_and_check_start(handle, " %s:" % states[i])
mm.p_transition[i, :] = [float(v) for v in line.split()[1:]]
# Load the emission.
mm.p_emission = numpy.zeros((N, M))
line = _readline_and_check_start(handle, "EMISSION:")
for i in range(len(states)):
line = _readline_and_check_start(handle, " %s:" % states[i])
mm.p_emission[i, :] = [float(v) for v in line.split()[1:]]
return mm
def save(mm, handle):
"""save(mm, handle)"""
# This will fail if there are spaces in the states or alphabet.
w = handle.write
w("STATES: %s\n" % ' '.join(mm.states))
w("ALPHABET: %s\n" % ' '.join(mm.alphabet))
w("INITIAL:\n")
for i in range(len(mm.p_initial)):
w(" %s: %g\n" % (mm.states[i], mm.p_initial[i]))
w("TRANSITION:\n")
for i in range(len(mm.p_transition)):
w(" %s: %s\n" % (mm.states[i], ' '.join(str(x) for x in mm.p_transition[i])))
w("EMISSION:\n")
for i in range(len(mm.p_emission)):
w(" %s: %s\n" % (mm.states[i], ' '.join(str(x) for x in mm.p_emission[i])))
# XXX allow them to specify starting points
def train_bw(states, alphabet, training_data,
pseudo_initial=None, pseudo_transition=None, pseudo_emission=None,
update_fn=None,
):
"""train_bw(states, alphabet, training_data[, pseudo_initial]
[, pseudo_transition][, pseudo_emission][, update_fn]) -> MarkovModel
Train a MarkovModel using the Baum-Welch algorithm. states is a list
of strings that describe the names of each state. alphabet is a
list of objects that indicate the allowed outputs. training_data
is a list of observations. Each observation is a list of objects
from the alphabet.
pseudo_initial, pseudo_transition, and pseudo_emission are
optional parameters that you can use to assign pseudo-counts to
different matrices. They should be matrices of the appropriate
size that contain numbers to add to each parameter matrix, before
normalization.
update_fn is an optional callback that takes parameters
(iteration, log_likelihood). It is called once per iteration.
"""
N, M = len(states), len(alphabet)
if not training_data:
raise ValueError("No training data given.")
if pseudo_initial is not None:
pseudo_initial = numpy.asarray(pseudo_initial)
if pseudo_initial.shape != (N,):
raise ValueError("pseudo_initial not shape len(states)")
if pseudo_transition is not None:
pseudo_transition = numpy.asarray(pseudo_transition)
if pseudo_transition.shape != (N, N):
raise ValueError("pseudo_transition not shape " +
"len(states) X len(states)")
if pseudo_emission is not None:
pseudo_emission = numpy.asarray(pseudo_emission)
if pseudo_emission.shape != (N, M):
raise ValueError("pseudo_emission not shape " +
"len(states) X len(alphabet)")
# Training data is given as a list of members of the alphabet.
# Replace those with indexes into the alphabet list for easier
# computation.
training_outputs = []
indexes = itemindex(alphabet)
for outputs in training_data:
training_outputs.append([indexes[x] for x in outputs])
# Do some sanity checking on the outputs.
lengths = [len(x) for x in training_outputs]
if min(lengths) == 0:
raise ValueError("I got training data with outputs of length 0")
# Do the training with baum welch.
x = _baum_welch(N, M, training_outputs,
pseudo_initial=pseudo_initial,
pseudo_transition=pseudo_transition,
pseudo_emission=pseudo_emission,
update_fn=update_fn)
p_initial, p_transition, p_emission = x
return MarkovModel(states, alphabet, p_initial, p_transition, p_emission)
MAX_ITERATIONS = 1000
def _baum_welch(N, M, training_outputs,
p_initial=None, p_transition=None, p_emission=None,
pseudo_initial=None, pseudo_transition=None,
pseudo_emission=None, update_fn=None):
# Returns (p_initial, p_transition, p_emission)
if p_initial is None:
p_initial = _random_norm(N)
else:
p_initial = _copy_and_check(p_initial, (N,))
if p_transition is None:
p_transition = _random_norm((N, N))
else:
p_transition = _copy_and_check(p_transition, (N, N))
if p_emission is None:
p_emission = _random_norm((N, M))
else:
p_emission = _copy_and_check(p_emission, (N, M))
# Do all the calculations in log space to avoid underflows.
lp_initial = numpy.log(p_initial)
lp_transition = numpy.log(p_transition)
lp_emission = numpy.log(p_emission)
if pseudo_initial is not None:
lpseudo_initial = numpy.log(pseudo_initial)
else:
lpseudo_initial = None
if pseudo_transition is not None:
lpseudo_transition = numpy.log(pseudo_transition)
else:
lpseudo_transition = None
if pseudo_emission is not None:
lpseudo_emission = numpy.log(pseudo_emission)
else:
lpseudo_emission = None
# Iterate through each sequence of output, updating the parameters
# to the HMM. Stop when the log likelihoods of the sequences
# stops varying.
prev_llik = None
for i in range(MAX_ITERATIONS):
llik = LOG0
for outputs in training_outputs:
x = _baum_welch_one(
N, M, outputs,
lp_initial, lp_transition, lp_emission,
lpseudo_initial, lpseudo_transition, lpseudo_emission,)
llik += x
if update_fn is not None:
update_fn(i, llik)
if prev_llik is not None and numpy.fabs(prev_llik - llik) < 0.1:
break
prev_llik = llik
else:
raise RuntimeError("HMM did not converge in %d iterations"
% MAX_ITERATIONS)
# Return everything back in normal space.
return [numpy.exp(x) for x in (lp_initial, lp_transition, lp_emission)]
def _baum_welch_one(N, M, outputs,
lp_initial, lp_transition, lp_emission,
lpseudo_initial, lpseudo_transition, lpseudo_emission):
# Do one iteration of Baum-Welch based on a sequence of output.
# NOTE: This will change the values of lp_initial, lp_transition,
# and lp_emission in place.
T = len(outputs)
fmat = _forward(N, T, lp_initial, lp_transition, lp_emission, outputs)
bmat = _backward(N, T, lp_transition, lp_emission, outputs)
# Calculate the probability of traversing each arc for any given
# transition.
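    # In standard HMM notation (alpha = fmat, beta = bmat, a = transition,
    # b = emission, o_t = outputs[t]) this computes, for each time step t,
    #   lp_arc[i, j, t] = alpha_t(i) + log a[i][j] + log b_i(o_t) + beta_{t+1}(j)
    # normalised so that the arc probabilities over all (i, j) sum to 1 at t.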
lp_arc = numpy.zeros((N, N, T))
for t in range(T):
k = outputs[t]
lp_traverse = numpy.zeros((N, N)) # P going over one arc.
for i in range(N):
for j in range(N):
# P(getting to this arc)
# P(making this transition)
# P(emitting this character)
# P(going to the end)
lp = (fmat[i][t] +
lp_transition[i][j] +
lp_emission[i][k] +
bmat[j][t + 1])
lp_traverse[i][j] = lp
# Normalize the probability for this time step.
lp_arc[:, :, t] = lp_traverse - _logsum(lp_traverse)
# Sum of all the transitions out of state i at time t.
lp_arcout_t = numpy.zeros((N, T))
for t in range(T):
for i in range(N):
lp_arcout_t[i][t] = _logsum(lp_arc[i, :, t])
# Sum of all the transitions out of state i.
lp_arcout = numpy.zeros(N)
for i in range(N):
lp_arcout[i] = _logsum(lp_arcout_t[i, :])
# UPDATE P_INITIAL.
lp_initial = lp_arcout_t[:, 0]
if lpseudo_initial is not None:
lp_initial = _logvecadd(lp_initial, lpseudo_initial)
lp_initial = lp_initial - _logsum(lp_initial)
# UPDATE P_TRANSITION. p_transition[i][j] is the sum of all the
# transitions from i to j, normalized by the sum of the
# transitions out of i.
for i in range(N):
for j in range(N):
lp_transition[i][j] = _logsum(lp_arc[i, j, :]) - lp_arcout[i]
if lpseudo_transition is not None:
lp_transition[i] = _logvecadd(lp_transition[i], lpseudo_transition)
lp_transition[i] = lp_transition[i] - _logsum(lp_transition[i])
# UPDATE P_EMISSION. lp_emission[i][k] is the sum of all the
# transitions out of i when k is observed, divided by the sum of
# the transitions out of i.
for i in range(N):
ksum = | |
# tests/integration/sts/topology/graph_test.py
# Copyright 2014 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from pox.openflow.libopenflow_01 import ofp_phy_port
from sts.entities.hosts import Host
from sts.entities.hosts import HostInterface
from sts.entities.sts_entities import AccessLink
from sts.entities.sts_entities import Link
from sts.entities.sts_entities import FuzzSoftwareSwitch
from sts.topology.graph import Graph
from sts.topology.graph import TopologyGraph
class GraphTest(unittest.TestCase):
"""
Testing sts.topology.base.Graph
"""
def test_init(self):
# Arrange
vertices = {1: None, 2: {'a': 'b'}, 3: None}
edges = {1: {1: {}, 2: {'a': 'b'}}, 3: {1: None}}
# Act
graph1 = Graph()
graph2 = Graph(vertices, edges)
# Assert
self.assertEquals(len(graph1.vertices), 0)
self.assertEquals(len(graph1.edges), 0)
self.assertEquals(len(graph2.vertices), len(vertices))
self.assertEquals(len(graph2.edges), 3)
self.assertEquals(graph2.vertices[1], {})
self.assertEquals(graph2.vertices[2], vertices[2])
self.assertEquals(graph2.vertices[3], {})
self.assertEquals(graph2.edges[0], (1, 1, {}))
self.assertEquals(graph2.edges[1], (1, 2, edges[1][2]))
self.assertEquals(graph2.edges[2], (3, 1, {}))
def test_add_vertex(self):
# Arrange
vertices = {1: None, 2: {'a': 'b'}, 3: None}
edges = {1: {1: {}, 2: {'a': 'b'}}, 3: {1: None}}
# Act
graph = Graph(vertices, edges)
graph.add_vertex(4, c='d')
graph.add_vertex(5)
# Assert
self.assertEquals(len(graph.vertices), len(vertices) + 2)
self.assertEquals(len(graph.edges), 3)
self.assertEquals(graph.vertices[1], {})
self.assertEquals(graph.vertices[2], vertices[2])
self.assertEquals(graph.vertices[3], {})
self.assertEquals(graph.vertices[4], {'c': 'd'})
self.assertEquals(graph.vertices[5], {})
self.assertTrue(graph.has_vertex(1))
self.assertTrue(graph.has_vertex(2))
self.assertTrue(graph.has_vertex(3))
self.assertTrue(graph.has_vertex(4))
self.assertTrue(graph.has_vertex(5))
self.assertFalse(graph.has_vertex(6))
def test_has_vertex(self):
# Arrange
vertices = {1: None, 2: {'a': 'b'}, 3: None}
edges = {1: {1: {}, 2: {'a': 'b'}}, 3: {1: None}}
# Act
graph = Graph(vertices, edges)
graph.add_vertex(4, c='d')
graph.add_vertex(5)
# Assert
self.assertTrue(graph.has_vertex(1))
self.assertTrue(graph.has_vertex(2))
self.assertTrue(graph.has_vertex(3))
self.assertTrue(graph.has_vertex(4))
self.assertTrue(graph.has_vertex(5))
self.assertFalse(graph.has_vertex(6))
def test_edges_iter(self):
# Arrange
edges = {1: {1: {}, 2: {'a': 'b'}}, 3: {1: {}}}
graph = Graph(vertices=None, edges=edges)
# Act
edges1 = list(graph.edges_iter(include_attrs=False))
edges2 = list(graph.edges_iter(include_attrs=True))
# Assert
for edge in edges1:
self.assertEquals(len(edge), 2)
self.assertIn(edge[0], edges)
self.assertIn(edge[1], edges[edge[0]])
for edge in edges2:
self.assertIn(edge[0], edges)
self.assertIn(edge[1], edges[edge[0]])
self.assertEquals(edges[edge[0]][edge[1]], edge[2])
def test_edges_iter_with_check(self):
# Arrange
edges = {1: {1: {}, 2: {'a': 'b'}}, 3: {1: {}}}
graph = Graph(vertices=None, edges=edges)
check = lambda v1, v2, attrs: attrs.get('a', None) is not None
# Act
edges1 = list(graph.edges_iter_with_check(check, include_attrs=False))
edges2 = list(graph.edges_iter_with_check(check, include_attrs=True))
# Assert
self.assertEquals(edges1, [(1, 2)])
self.assertEquals(edges2, [(1, 2, {'a': 'b'})])
def test_vertices_iter(self):
# Arrange
vertices = {1: None, 2: {'a': 'b'}, 3: None, 4: None, 5: None}
graph = Graph(vertices)
# Act
vertices1 = list(graph.vertices_iter(include_attrs=False))
vertices2 = list(graph.vertices_iter(include_attrs=True))
# Assert
for vertex in vertices1:
self.assertTrue(vertex in vertices)
for vertex, value in vertices2:
value = value if value != {} else None
self.assertEquals(vertices[vertex], value)
def test_vertices_iter_with_check(self):
# Arrange
vertices = {1: None, 2: {'a': 'b'}, 3: None, 4: None, 5: None}
graph = Graph(vertices)
check = lambda v, attrs: attrs.get('a', None) is not None
# Act
vertices1 = list(graph.vertices_iter_with_check(check, include_attrs=False))
vertices2 = list(graph.vertices_iter_with_check(check, include_attrs=True))
# Assert
self.assertEquals(vertices1, [2])
self.assertEquals(vertices2, [(2, vertices[2])])
def test_add_edge(self):
# Arrange
vertices = {1: None, 2: {'a': 'b'}, 3: None}
edges = {1: {1: {}, 2: {'a': 'b'}}, 3: {1: None}}
expected = [(1, 1, {}), (1, 1, {}), (1, 2, edges[1][2]), (3, 1, {}),
(1, 3, {}), (1, 4, {'c': 'd'})]
# Act
graph = Graph(vertices, edges)
graph.add_edge(1, 3)
graph.add_edge(1, 4, c='d')
# Assert
self.assertEquals(len(graph.vertices), len(vertices) + 1)
self.assertEquals(len(graph.edges), 3 + 2)
self.assertEquals(graph.vertices[1], {})
self.assertEquals(graph.vertices[2], vertices[2])
self.assertEquals(graph.vertices[3], {})
self.assertEquals(graph.vertices[4], {})
self.assertTrue(graph.has_edge(1, 2))
self.assertFalse(graph.has_edge(2, 4))
self.assertFalse(graph.has_edge(9, 6))
for edge in expected:
self.assertTrue(edge in graph.edges)
def test_remove_edge(self):
# Arrange
graph = Graph()
edge1 = graph.add_edge(1, 2)
edge2 = graph.add_edge(2, 3)
edge3 = graph.add_edge(2, 4)
# Act
graph.remove_edge(*edge1)
graph.remove_edge(*edge2)
# Assert
self.assertRaises(AssertionError, graph.remove_edge, 10, 20)
self.assertFalse(graph.has_edge(*edge1))
self.assertFalse(graph.has_edge(*edge2))
self.assertTrue(graph.has_edge(*edge3))
def test_remove_vertex(self):
# Arrange
graph = Graph()
v1, v2, v3, v4, v5, v6, v7 = 1, 2, 3, 4, 5, 6, 7
graph.add_vertex(v1)
graph.add_vertex(v2)
graph.add_vertex(v3)
graph.add_vertex(v4)
graph.add_vertex(v5)
graph.add_vertex(v6)
graph.add_vertex(v7)
e1 = graph.add_edge(v1, v2)
e2 = graph.add_edge(v3, v4)
e3 = graph.add_edge(v3, v5)
# Act
graph.remove_vertex(v1, remove_edges=True)
graph.remove_vertex(v6, remove_edges=False)
self.assertRaises(AssertionError, graph.remove_vertex, v3, remove_edges=False)
graph.remove_vertex(v3, remove_edges=True)
# Assert
self.assertFalse(graph.has_vertex(v1))
self.assertTrue(graph.has_vertex(v2))
self.assertFalse(graph.has_vertex(v3))
self.assertTrue(graph.has_vertex(v4))
self.assertTrue(graph.has_vertex(v5))
self.assertFalse(graph.has_vertex(v6))
self.assertTrue(graph.has_vertex(v7))
self.assertFalse(graph.has_edge(*e1))
self.assertFalse(graph.has_edge(*e2))
self.assertFalse(graph.has_edge(*e3))
def test_edges_src(self):
# Arrange
v1, v2, v3, v4 = 1, 2, 3, 4
g = Graph()
e1 = g.add_edge(v1, v2)
e2 = g.add_edge(v2, v3)
e3 = g.add_edge(v2, v4)
# Act
v1_src = g.edges_src(v1)
v2_src = g.edges_src(v2)
v3_src = g.edges_src(v3)
# Assert
self.assertItemsEqual([e1], v1_src)
self.assertItemsEqual([e2, e3], v2_src)
self.assertItemsEqual([], v3_src)
def test_edges_dst(self):
# Arrange
v1, v2, v3, v4 = 1, 2, 3, 4
g = Graph()
e1 = g.add_edge(v1, v2)
e2 = g.add_edge(v1, v3)
e3 = g.add_edge(v2, v3)
g.add_vertex(v4)
# Act
v1_dst = g.edges_dst(v1)
v2_dst = g.edges_dst(v2)
v3_dst = g.edges_dst(v3)
v4_dst = g.edges_dst(v4)
# Assert
self.assertItemsEqual([], v1_dst)
self.assertItemsEqual([e1], v2_dst)
self.assertItemsEqual([e2, e3], v3_dst)
self.assertEquals(v4_dst, [])
class TopologyGraphTest(unittest.TestCase):
def test_init(self):
# Arrange
h1_eth1 = HostInterface(hw_addr='11:22:33:44:55:66', ip_or_ips='10.0.0.1')
h2_eth1 = HostInterface(hw_addr='11:22:33:44:55:77', ip_or_ips='10.0.0.2')
h1 = Host(h1_eth1, hid=1)
h2 = Host(h2_eth1, hid=2)
hosts = [h1, h2]
interfaces = [h1_eth1, h2_eth1]
s1 = FuzzSoftwareSwitch(1, 's1', ports=3)
s2 = FuzzSoftwareSwitch(2, 's2', ports=3)
s3 = FuzzSoftwareSwitch(3, 's3', ports=3)
switches = [s1, s2, s3]
ports = s1.ports.values() + s2.ports.values() + s3.ports.values()
l1 = Link(s1, s1.ports[1], s2, s2.ports[1])
l2 = Link(s1, s1.ports[2], s2, s2.ports[2])
l3 = AccessLink(h1, h1_eth1, s1, s1.ports[3])
l4 = AccessLink(h2, h2_eth1, s1, s2.ports[3])
links = [l1, l2, l3, l4]
# Act
graph = TopologyGraph(hosts, switches, links)
# Assert
self.assertItemsEqual(hosts, graph.hosts)
self.assertItemsEqual(switches, graph.switches)
self.assertItemsEqual(links, graph.links)
self.assertItemsEqual(interfaces, graph.interfaces)
self.assertItemsEqual(ports, graph.ports)
def test_add_host(self):
# Arrange
h1_eth1 = HostInterface(hw_addr='11:22:33:44:55:66', ip_or_ips='10.0.0.1')
h2_eth1 = HostInterface(hw_addr='11:22:33:44:55:77', ip_or_ips='10.0.0.2')
h1 = Host(h1_eth1, hid=1)
h2 = Host(h2_eth1, hid=2)
h3 = Host(None, hid=3)
graph = TopologyGraph()
# Act
graph.add_host(h1)
graph.add_host(h2)
# Assert
self.assertItemsEqual([h1.name, h2.name], list(graph.hosts_iter(False)))
self.assertTrue(graph.has_host(h1.name))
self.assertTrue(graph.has_host(h2.name))
self.assertFalse(graph.has_host(h3.name))
def test_remove_host(self):
# Arrange
h1_eth1 = HostInterface(hw_addr='11:22:33:44:55:66', ip_or_ips='10.0.0.1')
h2_eth1 = HostInterface(hw_addr='11:22:33:44:55:77', ip_or_ips='10.0.0.2')
h1 = Host(h1_eth1, hid=1)
h2 = Host(h2_eth1, hid=2)
h3 = Host(None, hid=3)
graph = TopologyGraph()
graph.add_host(h1)
graph.add_host(h2)
# Act
graph.remove_host(h1.name)
graph.remove_host(h2.name)
remove_h3 = lambda: graph.remove_host(h3.name)
# Assert
self.assertRaises(AssertionError, remove_h3)
self.assertFalse(graph.hosts)
self.assertFalse(graph.has_host(h1.name))
self.assertFalse(graph.has_host(h2.name))
self.assertFalse(graph.has_host(h3.name))
def test_add_switch(self):
# Arrange
s1 = FuzzSoftwareSwitch(1, 's1', ports=1)
s2 = FuzzSoftwareSwitch(2, 's2', ports=1)
s3 = FuzzSoftwareSwitch(3, 's3', ports=1)
graph = TopologyGraph()
# Act
graph.add_switch(s1)
graph.add_switch(s2)
# Assert
self.assertItemsEqual([s1.name, s2.name], list(graph.switches_iter(False)))
self.assertTrue(graph.has_switch(s1.name))
self.assertTrue(graph.has_switch(s2.name))
self.assertFalse(graph.has_switch(s3.name))
def test_remove_switch(self):
# Arrange
s1 = FuzzSoftwareSwitch(1, 's1', ports=1)
s2 = FuzzSoftwareSwitch(2, 's2', ports=1)
s3 = FuzzSoftwareSwitch(3, 's3', ports=1)
graph = TopologyGraph()
graph.add_switch(s1)
graph.add_switch(s2)
# Act
graph.remove_switch(s1.name)
graph.remove_switch(s2)
remove_s3 = lambda: graph.remove_switch(s3.dpid)
# Assert
self.assertRaises(AssertionError, remove_s3)
self.assertFalse(graph.switches)
self.assertFalse(graph.has_host(s1.dpid))
self.assertFalse(graph.has_host(s2.dpid))
self.assertFalse(graph.has_host(s3.dpid))
def test_add_link(self):
# Arrange
s1 = FuzzSoftwareSwitch(1, 's1', ports=2)
s2 = FuzzSoftwareSwitch(2, 's2', ports=2)
l1 = Link(s1, s1.ports[1], s2, s2.ports[1])
l2 = Link(s1, ofp_phy_port(), s2, s2.ports[2])
graph = TopologyGraph()
graph.add_switch(s1)
    graph.add_switch(s2)
# Act
link = graph.add_link(l1)
fail_add = lambda: graph.add_link(l2)
# Assert
self.assertEquals(link, l1)
self.assertTrue(graph.has_link(l1))
self.assertFalse(graph.has_link(l2))
self.assertIsNotNone(graph.get_link('s1-1', 's2-1'))
self.assertIsNone(graph.get_link('s1-2', 's2-2'))
self.assertRaises(AssertionError, fail_add)
def test_remove_link(self):
# Arrange
s1 = FuzzSoftwareSwitch(1, 's1', ports=4)
s2 = FuzzSoftwareSwitch(2, 's2', ports=4)
l1 = Link(s1, s1.ports[1], s2, s2.ports[1])
l2 = Link(s1, s1.ports[2], s2, s2.ports[2])
l3 = Link(s1, s1.ports[3], s2, s2.ports[3])
l4 = Link(s1, s1.ports[4], s2, s2.ports[4])
graph = TopologyGraph()
graph.add_switch(s1)
graph.add_switch(s2)
graph.add_link(l1)
graph.add_link(l2, bidir=l2)
graph.add_link(l3)
# Act
graph.remove_link(l1)
graph.remove_link(l2)
fail_remove = lambda: graph.remove_link(l4)
# Assert
self.assertFalse(graph.has_link(l1))
self.assertFalse(graph.has_link(l2))
self.assertTrue(graph.has_link(l3))
self.assertIsNone(graph.get_link("s1-1", "s2-1"))
self.assertIsNone(graph.get_link("s1-2", "s2-2"))
self.assertIsNotNone(graph.get_link("s1-3", "s2-3"))
self.assertRaises(AssertionError, fail_remove)
def test_get_host_links(self):
# Arrange
h1_eth1 = HostInterface(hw_addr='11:22:33:44:55:66', ip_or_ips='10.0.0.1')
h1_eth2 = HostInterface(hw_addr='11:22:33:44:55:67', ip_or_ips='10.0.0.2')
h2_eth1 = HostInterface(hw_addr='11:22:33:44:55:77', ip_or_ips='10.0.0.3')
h1 = Host([h1_eth1, h1_eth2], hid=1)
h2 = Host(h2_eth1, hid=2)
s1 = FuzzSoftwareSwitch(1, 's1', ports=3)
s2 = FuzzSoftwareSwitch(2, 's2', ports=3)
l1 = AccessLink(h1, h1_eth1, s1, s1.ports[1])
l2 = AccessLink(h1, h1_eth2, s2, s2.ports[1])
l3 = AccessLink(h2, h2_eth1, s2, s2.ports[2])
l4 = Link(s1, s1.ports[3], s2, s2.ports[2])
graph = TopologyGraph()
graph.add_switch(s1)
graph.add_switch(s2)
graph.add_host(h1)
graph.add_host(h2)
graph.add_link(l1)
graph.add_link(l2)
graph.add_link(l3)
graph.add_link(l4)
# Act
h1_links = graph.get_host_links(h1)
h2_links = graph.get_host_links(h2)
# Assert
self.assertItemsEqual([l1, l2], h1_links)
self.assertItemsEqual([l3], h2_links)
def test_get_switches_links(self):
# Arrange
h1_eth1 = HostInterface(hw_addr='11:22:33:44:55:66', ip_or_ips='10.0.0.1')
h1_eth2 = HostInterface(hw_addr='11:22:33:44:55:67', ip_or_ips='10.0.0.2')
h2_eth1 = HostInterface(hw_addr='11:22:33:44:55:77', ip_or_ips='10.0.0.3')
h1 = Host([h1_eth1, h1_eth2], hid=1)
h2 = Host(h2_eth1, hid=2)
s1 = FuzzSoftwareSwitch(1, 's1', ports=3)
s2 = FuzzSoftwareSwitch(2, 's2', ports=3)
l1 = AccessLink(h1, h1_eth1, s1, s1.ports[1])
l2 = AccessLink(h1, h1_eth2, s2, s2.ports[1])
l3 = AccessLink(h2, h2_eth1, s2, s2.ports[2])
l4 | |
False
# -------------------------------------------------------------------------
def as_dict(self, options=None):
sublayers = self.sublayers
if sublayers:
sublayer = sublayers[0]
name = s3_str(current.T(sublayer.name))
name_safe = re.sub("'", "", name)
ldict = {"name": name_safe,
"id": sublayer.layer_id,
}
if sublayer._base:
ldict["base"] = True
if options:
# Used by Map._setup()
options[self.dictname] = ldict
else:
ldict = None
# Used by as_json() and hence as_javascript()
return ldict
# -----------------------------------------------------------------------------
class LayerFeature(Layer):
"""
Feature Layers from Catalogue
"""
tablename = "gis_layer_feature"
dictname = "layers_feature"
style = True
# -------------------------------------------------------------------------
class SubLayer(Layer.SubLayer):
def __init__(self, record, openlayers):
controller = record.controller
self.skip = False
if controller is not None:
if controller not in current.deployment_settings.modules:
# Module is disabled
self.skip = True
if not current.auth.permission.has_permission("read",
c=controller,
f=record.function):
# User has no permission to this resource (in ACL)
self.skip = True
else:
error = "Feature Layer Record '%s' has no controller" % \
record.name
raise Exception(error)
super(LayerFeature.SubLayer, self).__init__(record, openlayers)
def as_dict(self):
if self.skip:
# Skip layer
return None
# @ToDo: Option to force all filters via POST?
if self.aggregate:
# id is used for url_format
url = "%s.geojson?layer=%i&show_ids=true" % \
(URL(c=self.controller, f=self.function, args="report"),
self.layer_id)
# Use gis/location controller in all reports
url_format = "%s/{id}.plain" % URL(c="gis", f="location")
else:
if self.use_site:
maxdepth = 1
else:
maxdepth = 0
_url = URL(self.controller, self.function)
# id is used for url_format
url = "%s.geojson?layer=%i&mcomponents=None&maxdepth=%s&show_ids=true" % \
(_url,
self.layer_id,
maxdepth)
url_format = "%s/{id}.plain" % _url
if self.filter:
url = "%s&%s" % (url, self.filter)
if self.trackable:
url = "%s&track=1" % url
# Mandatory attributes
output = {"id": self.layer_id,
# Defaults client-side if not-provided
#"type": "feature",
"name": self.safe_name,
"url_format": url_format,
"url": url,
}
popup_format = self.popup_format
if popup_format:
# New-style
if "T(" in popup_format:
# i18n
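                    # e.g. a stored format such as 'T("Name"): {name}' (hypothetical)
                    # has each T(...) token replaced by its translation, yielding
                    # 'Name: {name}' in the active language.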
T = current.T
items = regex_translate.findall(popup_format)
for item in items:
titem = str(T(item[1:-1]))
popup_format = popup_format.replace("T(%s)" % item,
titem)
output["popup_format"] = popup_format
else:
# @ToDo: Deprecate
popup_fields = self.popup_fields
if popup_fields:
# Old-style
popup_label = self.popup_label
if popup_label:
popup_format = "{%s} (%s)" % (popup_fields[0],
current.T(popup_label))
else:
popup_format = "%s" % popup_fields[0]
for f in popup_fields[1:]:
popup_format = "%s<br/>{%s}" % (popup_format, f)
output["popup_format"] = popup_format or ""
# Attributes which are defaulted client-side if not set
self.setup_folder_visibility_and_opacity(output)
self.setup_clustering(output)
if self.aggregate:
# Enable the Cluster Strategy, so that it can be enabled/disabled
# depending on the zoom level & hence Points or Polygons
output["cluster"] = 1
if not popup_format:
# Need this to differentiate from e.g. FeatureQueries
output["no_popups"] = 1
if self.style:
output["style"] = self.style
else:
self.marker.add_attributes_to_output(output)
return output
# -----------------------------------------------------------------------------
class LayerGeoJSON(Layer):
"""
GeoJSON Layers from Catalogue
"""
tablename = "gis_layer_geojson"
dictname = "layers_geojson"
style = True
# -------------------------------------------------------------------------
class SubLayer(Layer.SubLayer):
def as_dict(self):
# Mandatory attributes
output = {"id": self.layer_id,
"type": "geojson",
"name": self.safe_name,
"url": self.url,
}
# Attributes which are defaulted client-side if not set
projection = self.projection
if projection.epsg != 4326:
output["projection"] = projection.epsg
self.setup_folder_visibility_and_opacity(output)
self.setup_clustering(output)
if self.style:
output["style"] = self.style
else:
self.marker.add_attributes_to_output(output)
popup_format = self.popup_format
if popup_format:
if "T(" in popup_format:
# i18n
T = current.T
items = regex_translate.findall(popup_format)
for item in items:
titem = str(T(item[1:-1]))
popup_format = popup_format.replace("T(%s)" % item,
titem)
output["popup_format"] = popup_format
return output
# -----------------------------------------------------------------------------
class LayerGeoRSS(Layer):
"""
GeoRSS Layers from Catalogue
"""
tablename = "gis_layer_georss"
dictname = "layers_georss"
style = True
def __init__(self, all_layers, openlayers=6):
super(LayerGeoRSS, self).__init__(all_layers, openlayers)
LayerGeoRSS.SubLayer.cachetable = current.s3db.gis_cache
# -------------------------------------------------------------------------
class SubLayer(Layer.SubLayer):
def as_dict(self):
db = current.db
request = current.request
response = current.response
cachetable = self.cachetable
url = self.url
# Check to see if we should Download layer to the cache
download = True
query = (cachetable.source == url)
existing_cached_copy = db(query).select(cachetable.modified_on,
limitby = (0, 1)).first()
            refresh = self.refresh or 900 # default to 15 minutes if no value set (legacy DB rows)
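            # e.g. with the default refresh of 900 seconds, a copy cached at
            # 12:00:00 is treated as fresh until 12:15:00 and not re-downloaded.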
if existing_cached_copy:
modified_on = existing_cached_copy.modified_on
cutoff = modified_on + datetime.timedelta(seconds=refresh)
if request.utcnow < cutoff:
download = False
if download:
# Download layer to the Cache
from gluon.tools import fetch
# @ToDo: Call directly without going via HTTP
# @ToDo: Make this async by using S3Task (also use this for the refresh time)
fields = ""
if self.data:
fields = "&data_field=%s" % self.data
if self.image:
fields = "%s&image_field=%s" % (fields, self.image)
_url = "%s%s/update.georss?fetchurl=%s%s" % (current.deployment_settings.get_base_public_url(),
URL(c="gis", f="cache_feed"),
url,
fields)
# Keep Session for local URLs
cookie = Cookie.SimpleCookie()
cookie[response.session_id_name] = response.session_id
current.session._unlock(response)
try:
# @ToDo: Need to commit to not have DB locked with SQLite?
fetch(_url, cookie=cookie)
if existing_cached_copy:
                        # Clear old cache entries which are no longer active
query = (cachetable.source == url) & \
(cachetable.modified_on < cutoff)
db(query).delete()
except Exception as exception:
current.log.error("GeoRSS %s download error" % url, exception)
# Feed down
if existing_cached_copy:
# Use cached copy
# Should we Update timestamp to prevent every
# subsequent request attempting the download?
#query = (cachetable.source == url)
#db(query).update(modified_on=request.utcnow)
pass
else:
response.warning += "%s down & no cached copy available" % url
name_safe = self.safe_name
# Pass the GeoJSON URL to the client
# Filter to the source of this feed
url = "%s.geojson?cache.source=%s" % (URL(c="gis", f="cache_feed"),
url)
# Mandatory attributes
output = {"id": self.layer_id,
"type": "georss",
"name": name_safe,
"url": url,
}
self.marker.add_attributes_to_output(output)
# Attributes which are defaulted client-side if not set
if self.refresh != 900:
output["refresh"] = self.refresh
self.setup_folder_visibility_and_opacity(output)
self.setup_clustering(output)
return output
# -----------------------------------------------------------------------------
class LayerGoogle(Layer):
"""
Google Layers/Tools from Catalogue
"""
tablename = "gis_layer_google"
dictname = "Google"
style = False
# -------------------------------------------------------------------------
def as_dict(self, options=None):
sublayers = self.sublayers
if sublayers:
T = current.T
spherical_mercator = (Projection().epsg == 900913)
settings = current.deployment_settings
apikey = settings.get_gis_api_google()
s3 = current.response.s3
debug = s3.debug
# Google scripts use document.write so cannot be loaded async via yepnope.js
s3_scripts = s3.scripts
ldict = {}
if spherical_mercator:
                # Earth was the only layer which could run in non-Spherical Mercator
# @ToDo: Warning?
for sublayer in sublayers:
# Attributes which are defaulted client-side if not set
#if sublayer.type == "earth":
# # Deprecated:
# # https://maps-apis.googleblog.com/2014/12/announcing-deprecation-of-google-earth.html
# ldict["Earth"] = str(T("Switch to 3D"))
# #{"modules":[{"name":"earth","version":"1"}]}
# script = "//www.google.com/jsapi?key=" + apikey + "&autoload=%7B%22modules%22%3A%5B%7B%22name%22%3A%22earth%22%2C%22version%22%3A%221%22%7D%5D%7D"
# if script not in s3_scripts:
# s3_scripts.append(script)
# # Dynamic Loading not supported: https://developers.google.com/loader/#Dynamic
# #s3.jquery_ready.append('''try{google.load('earth','1')catch(e){}''')
# if debug:
# self.scripts.append("gis/gxp/widgets/GoogleEarthPanel.js")
# else:
# self.scripts.append("gis/gxp/widgets/GoogleEarthPanel.min.js")
# s3.js_global.append('''S3.public_url="%s"''' % settings.get_base_public_url())
if sublayer._base:
# Set default Base layer
ldict["Base"] = sublayer.type
if sublayer.type == "satellite":
ldict["Satellite"] = {"name": sublayer.name or "Google Satellite",
"id": sublayer.layer_id}
elif sublayer.type == "maps":
ldict["Maps"] = {"name": sublayer.name or "Google Maps",
"id": sublayer.layer_id}
elif sublayer.type == "hybrid":
ldict["Hybrid"] = {"name": sublayer.name or "Google Hybrid",
"id": sublayer.layer_id}
elif sublayer.type == "streetview":
ldict["StreetviewButton"] = "Click where you want to open Streetview"
elif sublayer.type == "terrain":
ldict["Terrain"] = {"name": sublayer.name or "Google Terrain",
"id": sublayer.layer_id}
elif sublayer.type == "mapmaker":
ldict["MapMaker"] = {"name": sublayer.name or "Google MapMaker",
"id": sublayer.layer_id}
elif sublayer.type == "mapmakerhybrid":
ldict["MapMakerHybrid"] = {"name": sublayer.name or "Google MapMaker Hybrid",
"id": sublayer.layer_id}
if "MapMaker" in ldict or "MapMakerHybrid" in ldict:
# Need to use v2 API
# This should be able to be fixed in OpenLayers now since Google have fixed in v3 API:
# http://code.google.com/p/gmaps-api-issues/issues/detail?id=2349#c47
script = "//maps.google.com/maps?file=api&v=2&key=%s" % apikey
if script not in s3_scripts:
s3_scripts.append(script)
else:
# v3 API
# https://developers.google.com/maps/documentation/javascript/versions
script = "//maps.google.com/maps/api/js?v=quarterly&key=%s" % apikey
if script not in s3_scripts:
s3_scripts.append(script)
if "StreetviewButton" in ldict:
# Streetview doesn't work with v2 API
ldict["StreetviewButton"] = str(T("Click where you want to open Streetview"))
ldict["StreetviewTitle"] = str(T("Street View"))
if debug:
self.scripts.append("gis/gxp/widgets/GoogleStreetViewPanel.js")
else:
self.scripts.append("gis/gxp/widgets/GoogleStreetViewPanel.min.js")
if options:
# Used by Map._setup()
options[self.dictname] = ldict
else:
ldict = None
# Used by as_json() and hence as_javascript()
return ldict
# -----------------------------------------------------------------------------
class LayerGPX(Layer):
"""
GPX Layers from Catalogue
"""
tablename = "gis_layer_gpx"
dictname = "layers_gpx"
style = True
# -------------------------------------------------------------------------
class SubLayer(Layer.SubLayer):
def as_dict(self):
url = URL(c="default", f="download",
args=self.track)
# Mandatory attributes
output = {"id": self.layer_id,
"name": self.safe_name,
"url": url,
}
# Attributes which are defaulted client-side if not set
self.marker.add_attributes_to_output(output)
self.add_attributes_if_not_default(
output,
waypoints = (self.waypoints, (True,)),
tracks = (self.tracks, (True,)),
routes = (self.routes, (True,)),
)
self.setup_folder_visibility_and_opacity(output)
self.setup_clustering(output)
return output
# -----------------------------------------------------------------------------
class LayerJS(Layer):
"""
JS Layers from Catalogue
- these are raw Javascript layers for | |
"linkType" : int(stringArray[6])
}
else:
postFieldsDictBacktrackQuery = {
"originEntityID" : entityID1,
"query" : stringArray[4]
}
request = Request(url=queryURL, data=bytes(json.dumps(postFieldsDictBacktrackQuery), encoding='utf-8'))
backtrackQueryResponse = urlopen(request).read().decode('utf8')
try:
backtrackQueryResponseJson = json.loads(backtrackQueryResponse)
except:
backtrackQueryResponseJsonB = backtrackQueryResponse.read()
backtrackQueryResponseJson = json.loads(backtrackQueryResponseJsonB)
backTrackLocationList = backtrackQueryResponseJson["entityIDList"]
if len(backTrackLocationList) > 0:
backTrackCorrect = True
if (backTrackCorrect == True) and (addLocationCorrect == True):
testResult = True
except Exception as e:
errorMsg = ('Error! Traceback = %s' % (e) )
errata.append(errorMsg)
testcase = str(stringArray[0])
allTrueResult = str(testResult).upper()
expectedResult = stringArray[7]
results = [n, testcase, allTrueResult, expectedResult, errata]
lresultSet.append(results)
return lresultSet
def testServerAPIGetLinkCounterpartsByMetaMemeType(serverURL = None, fName = "LinkCounterpartsByMetaMemeType.atest"):
    ''' Repeat testServerAPIGetLinkCounterpartsByType(), but traversing with metameme paths instead of meme paths.
    LinkCounterpartsByMetaMemeType.atest differs from TestEntityPhase7.atest only in that cols D and E use metameme paths.
    Create entities from the memes in the first two columns.
    Add a link between the two at the location on the entity given in column 3.
    Check that each is a counterpart as seen from the other, using the addresses in columns 4 & 5 (CheckPath & Backpath)
    and the filter.
    The filter must be the same as the type of link (or None).
    The check location must be the same as the added location.
    '''
results = []
lresultSet = []
#try:
testFileName = os.path.join(testDirPath, fName)
readLoc = codecs.open(testFileName, "r", "utf-8")
allLines = readLoc.readlines()
    readLoc.close()
n = 0
for eachReadLine in allLines:
errata = []
n = n+1
stringArray = str.split(eachReadLine)
testResult = False
try:
createEntityURL0 = serverURL + "/modeling/createEntityFromMeme/%s" %stringArray[0]
createEntityURL1 = serverURL + "/modeling/createEntityFromMeme/%s" %stringArray[1]
queryURL = serverURL + "/modeling/query"
querymURL = serverURL + "/modeling/querym"
attachURL = serverURL + "/modeling/addEntityLink"
#entityID0 = Graph.api.createEntityFromMeme(stringArray[0])
#entityID1 = Graph.api.createEntityFromMeme(stringArray[1])
createResponse0 = urllib.request.urlopen(createEntityURL0)
createResponseJson0B = createResponse0.read()
entityUUID0Json = json.loads(createResponseJson0B)
entityID0 = entityUUID0Json["entityUUID"]
createResponse1 = urllib.request.urlopen(createEntityURL1)
createResponseJson1B = createResponse1.read()
entityUUID1Json = json.loads(createResponseJson1B)
entityID1 = entityUUID1Json["entityUUID"]
#Attach entityID1 at the mount point specified in stringArray[2]
if stringArray[2] != "X":
postFieldsDictAttachQuery = {
"originEntityID" : entityID0,
"query" : stringArray[2]
}
request = Request(url=queryURL, data=bytes(json.dumps(postFieldsDictAttachQuery), encoding='utf-8'))
attachPointResponse = urlopen(request).read().decode('utf8')
try:
attachPointResponseJson = json.loads(attachPointResponse)
except:
attachPointResponseJsonB = attachPointResponse.read()
attachPointResponseJson = json.loads(attachPointResponseJsonB)
mountPoints = attachPointResponseJson["entityIDList"]
#mountPoints = api.getLinkCounterpartsByType(entityID0, stringArray[2], 0)
unusedMountPointsOverview = {}
for mountPoint in mountPoints:
postFieldsDictAttach = {
"sourceEntityID" : mountPoint,
"targetEntityID" : entityID1,
"query" : stringArray[2],
"linkType" : int(stringArray[5])
}
request = Request(url=attachURL, data=bytes(json.dumps(postFieldsDictAttach), encoding='utf-8'))
unusedAttachPointResponse = urlopen(request).read().decode('utf8')
else:
raise ValueError("Testcase with invalid attachment point")
backTrackCorrect = False
linkType = None
if stringArray[6] != "X":
linkType = int(stringArray[6])
#see if we can get from entityID0 to entityID1 via stringArray[3]
addLocationCorrect = False
if linkType is not None:
postFieldsDictForwardQuery = {
"originEntityID" : entityID0,
"query" : stringArray[3],
"linkType" : int(stringArray[6])
}
else:
postFieldsDictForwardQuery = {
"originEntityID" : entityID0,
"query" : stringArray[3]
}
request = Request(url=querymURL, data=bytes(json.dumps(postFieldsDictForwardQuery), encoding='utf-8'))
forwardQueryResponse = urlopen(request).read().decode('utf8')
try:
forwardQueryResponseJson = json.loads(forwardQueryResponse)
except:
forwardQueryResponseJsonB = forwardQueryResponse.read()
forwardQueryResponseJson = json.loads(forwardQueryResponseJsonB)
addLocationList = forwardQueryResponseJson["entityIDList"]
if len(addLocationList) > 0:
addLocationCorrect = True
#see if we can get from entityID1 to entityID0 via stringArray[4]
backTrackCorrect = False
if linkType is not None:
postFieldsDictBacktrackQuery = {
"originEntityID" : entityID1,
"query" : stringArray[4],
"linkType" : int(stringArray[6])
}
else:
postFieldsDictBacktrackQuery = {
"originEntityID" : entityID1,
"query" : stringArray[4]
}
request = Request(url=querymURL, data=bytes(json.dumps(postFieldsDictBacktrackQuery), encoding='utf-8'))
backtrackQueryResponse = urlopen(request).read().decode('utf8')
try:
backtrackQueryResponseJson = json.loads(backtrackQueryResponse)
except:
backtrackQueryResponseJsonB = backtrackQueryResponse.read()
backtrackQueryResponseJson = json.loads(backtrackQueryResponseJsonB)
backTrackLocationList = backtrackQueryResponseJson["entityIDList"]
if len(backTrackLocationList) > 0:
backTrackCorrect = True
if (backTrackCorrect == True) and (addLocationCorrect == True):
testResult = True
except Exception as e:
errorMsg = ('Error! Traceback = %s' % (e) )
errata.append(errorMsg)
testcase = str(stringArray[0])
allTrueResult = str(testResult).upper()
expectedResult = stringArray[7]
results = [n, testcase, allTrueResult, expectedResult, errata]
lresultSet.append(results)
return lresultSet
def testServerAPIAEntityPropertiesAdd(serverURL = None, memePath = "Graphyne.Generic"):
"""
    Tests the /modeling/createEntityFromMeme/<memePath> and /modeling/setEntityPropertyValue REST API calls.
    1 - Create an entity of meme type memePath using /modeling/createEntityFromMeme/<memePath>
    2 - Add a string property 'Hello World' using /modeling/setEntityPropertyValue
"""
#"NumericValue.nv_intValue_3"
method = moduleName + '.' + '/modeling/setEntityPropertyValue/<entityID>/<propName>/<propValue>'
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
testResult = True
notes = ""
createEntityURL = serverURL + "/modeling/createEntityFromMeme/%s" %memePath
try:
        # create a generic entity
createResponse = urllib.request.urlopen(createEntityURL)
createResponseJson = createResponse.read()
entityUUIDJson = json.loads(createResponseJson)
entityID = entityUUIDJson["entityUUID"]
except urllib.error.URLError as e:
testResult = False
notes = e.reason
except Exception as e:
testResult = False
if testResult != False:
#Set a property
originalPropValue = "Hello World"
postFieldsDict = {"entityID" : entityID, "propName" : "Hello", "propValue" : originalPropValue}
try:
#urllib POST request
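            # Passing a data payload to Request() makes urllib issue an HTTP POST;
            # the field dict is serialised to JSON and sent as UTF-8 bytes.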
requestURL = serverURL + "/modeling/setEntityPropertyValue"
request = Request(url=requestURL, data=bytes(json.dumps(postFieldsDict), encoding='utf-8'))
responseM = urlopen(request).read().decode('utf8')
except urllib.error.URLError as e:
testResult = False
notes = e.reason
except Exception as e:
testResult = False
resultSet = []
testResult = str(testResult)
expectedResult = str(True)
results = [1, "Entity Properties", testResult, expectedResult, [notes]]
resultSet.append(results)
return resultSet
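# For reference, the JSON payloads exchanged above have roughly this shape
# (values illustrative, taken from the fields used in the code):
#
#   GET  /modeling/createEntityFromMeme/Graphyne.Generic  ->  {"entityUUID": "<uuid>"}
#   POST /modeling/setEntityPropertyValue
#        {"entityID": "<uuid>", "propName": "Hello", "propValue": "Hello World"}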
def testServerAPIAEntityPropertiesRead(serverURL = None, memePath = "Graphyne.Generic"):
"""
Tests the /modeling/createEntityFromMeme/<memePath>, /modeling/setEntityPropertyValue and /modeling/getEntityPropertyValue REST API calls
1 - Create an entity of meme type memePath using /modeling/createEntityFromMeme/<memePath>
2 - Add a string property 'Hello World'
3 - Read the property back and compare it with the original value
"""
#"NumericValue.nv_intValue_3"
method = moduleName + '.' + '/modeling/setEntityPropertyValue/<entityID>/<propName>/<propValue>'
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
testResult = True
notes = ""
createEntityURL = serverURL + "/modeling/createEntityFromMeme/%s" %memePath
try:
#create two generic entities
createResponse = urllib.request.urlopen(createEntityURL)
createResponseJson = createResponse.read()
entityUUIDJson = json.loads(createResponseJson)
entityID = entityUUIDJson["entityUUID"]
except urllib.error.URLError as e:
testResult = False
notes = e.reason
except Exception as e:
testResult = False
if testResult != False:
#Set a property
originalPropValue = "Hello World"
postFieldsDict = {"entityID" : entityID, "propName" : "Hello", "propValue" : originalPropValue}
try:
#urllib POST request
requestURL = serverURL + "/modeling/setEntityPropertyValue"
request = Request(url=requestURL, data=bytes(json.dumps(postFieldsDict), encoding='utf-8'))
responseM = urlopen(request).read().decode('utf8')
except urllib.error.URLError as e:
testResult = False
notes = e.reason
except Exception as e:
testResult = False
if testResult != False:
try:
#Now read that same property
getPropURL = serverURL + "/modeling/getEntityPropertyValue/%s/%s" %(entityID, "Hello")
readResponse = urllib.request.urlopen(getPropURL)
readResponseJsonB = readResponse.read()
readResponseJson = json.loads(readResponseJsonB)
propValue = readResponseJson["propertyValue"]
except urllib.error.URLError as e:
testResult = False
notes = e.reason
except Exception as e:
testResult = False
if testResult != False:
if propValue != originalPropValue:
testResult = False
resultSet = []
try:
testResult = propValue
except:
testResult = "No result returned"
expectedResult = originalPropValue
results = [1, "Entity Properties", testResult, expectedResult, [notes]]
resultSet.append(results)
return resultSet
def testServerAPIAEntityPropertiesPresent(serverURL = None, memePath = "Graphyne.Generic"):
"""
Tests the /modeling/createEntityFromMeme/<memePath>, /modeling/setEntityPropertyValue and /modeling/getEntityHasProperty REST API calls
1 - Create an entity of meme type memePath using /modeling/createEntityFromMeme/<memePath>
2 - Add a string property 'Hello World'
3 - Check that the entity reports the property as present
"""
#"NumericValue.nv_intValue_3"
method = moduleName + '.' + '/modeling/setEntityPropertyValue/<entityID>/<propName>/<propValue>'
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
testResult = True
notes = ""
createEntityURL = serverURL + "/modeling/createEntityFromMeme/%s" %memePath
try:
#create two generic entities
createResponse = urllib.request.urlopen(createEntityURL)
createResponseJson = createResponse.read()
entityUUIDJson = json.loads(createResponseJson)
entityID = entityUUIDJson["entityUUID"]
except urllib.error.URLError as e:
testResult = False
notes = e.reason
except Exception as e:
testResult = False
if testResult != False:
#Set a property
originalPropValue = "Hello World"
postFieldsDict = {"entityID" : entityID, "propName" : "Hello", "propValue" : originalPropValue}
try:
#urllib POST request
requestURL = serverURL + "/modeling/setEntityPropertyValue"
request = Request(url=requestURL, data=bytes(json.dumps(postFieldsDict), encoding='utf-8'))
responseM = urlopen(request).read().decode('utf8')
except urllib.error.URLError as e:
testResult = False
notes = e.reason
except Exception as e:
testResult = False
if testResult != False:
try:
#Now read that same property
getPropURL = serverURL + "/modeling/getEntityHasProperty/%s/%s" %(entityID, "Hello")
readResponse = urllib.request.urlopen(getPropURL)
readResponseJsonB = readResponse.read()
readResponseJson = json.loads(readResponseJsonB)
propValue = readResponseJson["present"]
except urllib.error.URLError as e:
testResult = False
notes = e.reason
except Exception as e:
testResult = False
if testResult != False:
#getEntityHasProperty returns a boolean flag, so compare against True rather than the original string value
if propValue != True:
testResult = False
resultSet = []
try:
testResult = propValue
except:
testResult = "No result returned"
expectedResult = str(True)
results | |
<filename>ding/framework/task.py<gh_stars>0
from asyncio import InvalidStateError
from asyncio.tasks import FIRST_EXCEPTION
import time
import asyncio
import concurrent.futures
import fnmatch
import math
from types import GeneratorType
from typing import Any, Awaitable, Callable, Generator, Iterable, List, Optional, Set, Union
from ding.framework.context import Context
from ding.framework.parallel import Parallel
from ding.framework.event_loop import EventLoop
from functools import wraps
def enable_async(func: Callable) -> Callable:
"""
Overview:
Empower the function with async ability.
Arguments:
- func (:obj:`Callable`): The original function.
Returns:
- runtime_handler (:obj:`Callable`): The wrap function.
"""
@wraps(func)
def runtime_handler(task: "Task", *args, async_mode: Optional[bool] = None, **kwargs) -> "Task":
"""
Overview:
If the task's async mode is enabled, execute the step asynchronously in the current loop executor,
otherwise execute it synchronously.
Arguments:
- task (:obj:`Task`): The task instance.
- async_mode (:obj:`Optional[bool]`): Whether using async mode.
Returns:
- result (:obj:`Union[Any, Awaitable]`): The result or future object of middleware.
"""
if async_mode is None:
async_mode = task.async_mode
if async_mode:
t = task._async_loop.run_in_executor(task._thread_pool, func, task, *args, **kwargs)
task._async_stack.append(t)
return task
else:
return func(task, *args, **kwargs)
return runtime_handler
class Task:
"""
Task will manage the execution order of the entire pipeline, register new middleware,
and generate new context objects.
"""
def __init__(
self,
async_mode: bool = False,
n_async_workers: int = 3,
middleware: Optional[List[Callable]] = None,
step_wrappers: Optional[List[Callable]] = None,
labels: Optional[Set[str]] = None,
**_
) -> None:
self._finish = False
self.middleware = middleware or []
self.step_wrappers = step_wrappers or []
self.ctx = Context()
self.parallel_ctx = Context()
self._backward_stack = []
# Bind event loop functions
self._event_loop = EventLoop("task_{}".format(id(self)))
# Async segment
self.async_mode = async_mode
self.n_async_workers = n_async_workers
self._async_stack = []
self._async_loop = None
self._thread_pool = None
self._exception = None
self.labels = labels or set()
# Parallel segment
self.router = Parallel()
if async_mode or self.router.is_active:
self._thread_pool = concurrent.futures.ThreadPoolExecutor(max_workers=n_async_workers)
self._async_loop = asyncio.new_event_loop()
if self.router.is_active:
def sync_finish(value):
self._finish = value
self.on("finish", sync_finish)
self.init_labels()
def init_labels(self):
if self.async_mode:
self.labels.add("async")
if self.router.is_active:
self.labels.add("distributed")
self.labels.add("node.{}".format(self.router.node_id))
for label in self.router.labels:
self.labels.add(label)
else:
self.labels.add("standalone")
def use(self, fn: Callable, filter_labels: Optional[Iterable[str]] = None) -> 'Task':
"""
Overview:
Register middleware to the task. The middleware will be executed in its registration order.
Arguments:
- fn (:obj:`Callable`): A middleware is a function with only one argument: ctx.
"""
if not filter_labels or self.match_labels(filter_labels):
self.middleware.append(fn)
return self
def use_step_wrapper(self, fn: Callable) -> 'Task':
"""
Overview:
Register wrappers to task. A wrapper works like a decorator, but task will apply this \
decorator on top of each middleware.
Arguments:
- fn (:obj:`Callable`): A wrapper is a decorator, so the first argument is a callable function.
"""
self.step_wrappers.append(fn)
return self
def match_labels(self, patterns: Union[Iterable[str], str]) -> bool:
"""
Overview:
A list of patterns to match labels.
Arguments:
- patterns (:obj:`Union[Iterable[str], str]`): Glob like pattern, e.g. node.1, node.*.
"""
if isinstance(patterns, str):
patterns = [patterns]
return any([fnmatch.filter(self.labels, p) for p in patterns])
def run(self, max_step: int = int(1e10)) -> None:
"""
Overview:
Execute the iterations; when max_step is reached or task.finish is true,
the loop will break.
Arguments:
- max_step (:obj:`int`): Max step of iterations.
"""
if len(self.middleware) == 0:
return
for i in range(max_step):
for fn in self.middleware:
self.forward(fn)
# Sync should be called before backward, otherwise it is possible
# that some generators have not been pushed to backward_stack.
self.sync()
self.backward()
self.sync()
if i == max_step - 1:
self.finish = True
if self.finish:
break
self.renew()
@enable_async
def forward(self, fn: Callable, ctx: Context = None, backward_stack: List[Generator] = None) -> 'Task':
"""
Overview:
This function will execute the middleware until the first yield statement,
or the end of the middleware.
Arguments:
- fn (:obj:`Callable`): A middleware function that takes ctx as its only argument.
"""
if not backward_stack:
backward_stack = self._backward_stack
if not ctx:
ctx = self.ctx
for wrapper in self.step_wrappers:
fn = wrapper(fn)
g = fn(ctx)
if isinstance(g, GeneratorType):
try:
next(g)
backward_stack.append(g)
except StopIteration:
pass
return self
@enable_async
def backward(self, backward_stack: List[Generator] = None) -> 'Task':
"""
Overview:
Execute the rest part of middleware, by the reversed order of registry.
"""
if not backward_stack:
backward_stack = self._backward_stack
while backward_stack:
# FILO
g = backward_stack.pop()
try:
next(g)
except StopIteration:
continue
return self
def sequence(self, *fns: List[Callable]) -> Callable:
"""
Overview:
Wrap functions so that they run in sequence, usually to avoid confusion
about dependencies in async mode.
Arguments:
- fns (:obj:`List[Callable]`): A sequence of middleware to chain and wrap into one middleware function.
"""
def _sequence(ctx):
backward_stack = []
for fn in fns:
self.forward(fn, ctx=ctx, backward_stack=backward_stack, async_mode=False)
yield
self.backward(backward_stack=backward_stack, async_mode=False)
name = ",".join([fn.__name__ for fn in fns])
_sequence.__name__ = "sequence<{}>".format(name)
return _sequence
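# Example (hypothetical middleware functions fn1 and fn2): wrapping them with
# sequence() keeps their forward and backward parts ordered relative to each
# other even when the task itself runs in async mode.
#
#   task.use(task.sequence(fn1, fn2))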
def renew(self) -> 'Task':
"""
Overview:
Renew the context instance; this function should be called after backward at the end of each iteration.
"""
# Renew context
old_ctx = self.ctx
new_ctx = old_ctx.renew()
new_ctx.total_step = old_ctx.total_step + 1
self.ctx = new_ctx
return self
def __enter__(self) -> "Task":
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
def stop(self) -> None:
"""
Overview:
Stop and cleanup every thing in the runtime of task.
"""
if self._thread_pool:
self._thread_pool.shutdown()
self._event_loop.stop()
if self._async_loop:
self._async_loop.close()
self.router.off(self._wrap_event_name("*"))
# The middleware and listeners may contain methods that reference the task.
# If we do not clear them after the task exits, gc may not clean up the task object.
self.middleware.clear()
self.step_wrappers.clear()
self._backward_stack.clear()
self._async_stack.clear()
def sync(self) -> 'Task':
if self._async_loop:
self._async_loop.run_until_complete(self.sync_tasks())
return self
async def sync_tasks(self) -> Awaitable[None]:
if self._async_stack:
await asyncio.wait(self._async_stack, return_when=FIRST_EXCEPTION)
while self._async_stack:
t = self._async_stack.pop(0)
try:
e = t.exception()
if e:
self._exception = e
raise e
except InvalidStateError:
# Not finished. https://docs.python.org/3/library/asyncio-task.html#asyncio.Task.exception
pass
def async_executor(self, fn: Callable, *args, **kwargs) -> None:
"""
Overview:
Execute the function in the background, then append the future instance to _async_stack.
Arguments:
- fn (:obj:`Callable`): Synchronous function.
"""
if not self._async_loop:
raise Exception("Event loop was not initialized, please call this function in async or parallel mode")
t = self._async_loop.run_in_executor(self._thread_pool, fn, *args, **kwargs)
self._async_stack.append(t)
def emit(self, event: str, *args, only_remote: bool = False, only_local: bool = False, **kwargs) -> None:
"""
Overview:
Emit an event, call listeners.
Arguments:
- event (:obj:`str`): Event name.
- only_remote (:obj:`bool`): Only broadcast the event to the connected nodes, default is False.
- only_local (:obj:`bool`): Only emit local event, default is False.
- args (:obj:`any`): Rest arguments for listeners.
"""
# Check if need to broadcast event to connected nodes, default is True
if only_local:
self._event_loop.emit(event, *args, **kwargs)
elif only_remote:
if self.router.is_active:
self.async_executor(self.router.emit, self._wrap_event_name(event), event, *args, **kwargs)
else:
if self.router.is_active:
self.async_executor(self.router.emit, self._wrap_event_name(event), event, *args, **kwargs)
self._event_loop.emit(event, *args, **kwargs)
def on(self, event: str, fn: Callable) -> None:
"""
Overview:
Subscribe to an event, execute this function every time the event is emitted.
Arguments:
- event (:obj:`str`): Event name.
- fn (:obj:`Callable`): The function.
"""
self._event_loop.on(event, fn)
if self.router.is_active:
self.router.on(self._wrap_event_name(event), self._event_loop.emit)
def once(self, event: str, fn: Callable) -> None:
"""
Overview:
Subscribe to an event, execute this function only once when the event is emitted.
Arguments:
- event (:obj:`str`): Event name.
- fn (:obj:`Callable`): The function.
"""
self._event_loop.once(event, fn)
if self.router.is_active:
self.router.on(self._wrap_event_name(event), self._event_loop.emit)
def off(self, event: str, fn: Optional[Callable] = None) -> None:
"""
Overview:
Unsubscribe an event
Arguments:
- event (:obj:`str`): Event name.
- fn (:obj:`Callable`): The function.
"""
self._event_loop.off(event, fn)
if self.router.is_active:
self.router.off(self._wrap_event_name(event))
def wait_for(self, event: str, timeout: float = math.inf, ignore_timeout_exception: bool = True) -> Any:
"""
Overview:
Wait for an event and block the thread.
Arguments:
- event (:obj:`str`): Event name.
- timeout (:obj:`float`): Timeout in seconds.
- ignore_timeout_exception (:obj:`bool`): If this is False, a TimeoutError will be raised when the timeout is reached.
"""
received = False
result = None
def _receive_event(*args, **kwargs):
nonlocal result, received
result = (args, kwargs)
received = True
self.once(event, _receive_event)
start = time.time()
while time.time() - start < timeout:
if received or self._exception:
return result
time.sleep(0.01)
if ignore_timeout_exception:
return result
else:
raise TimeoutError("Timeout when waiting for event: {}".format(event))
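# Example (hypothetical event name "ping"): a listener registered with on()
# fires every time emit() is called; wait_for() blocks the calling thread
# until the event arrives or the timeout elapses.
#
#   task.on("ping", lambda payload: print("got", payload))
#   task.emit("ping", {"step": 1})
#   # on another node or thread: result = task.wait_for("ping", timeout=5.0)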
def __copy__(self):
return Task(**self.__dict__)
@property
def finish(self):
return self._finish
@finish.setter
def finish(self, value: bool):
self._finish = value
if self.router.is_active and value is True:
self.emit("finish", value)
def _wrap_event_name(self, event: str) -> str:
"""
Overview:
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'ar'
import os
import sys
import glob
import time
import json
try:
import cPickle as pickle
except:
import pickle
import matplotlib.pyplot as plt
import keras
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten, Convolution2D, MaxPooling2D,\
Merge, ZeroPadding2D, Dropout
from keras import optimizers as opt
from keras.utils.visualize_util import plot as kplot
import skimage.io as skio
import skimage.transform as sktf
from keras.utils import np_utils
import numpy as np
import pandas as pd
##############################################
def split_list_by_blocks(lst, psiz):
tret = [lst[x:x + psiz] for x in range(0, len(lst), psiz)]
return tret
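# Example: splitting a list of five items into blocks of two gives
#   split_list_by_blocks([1, 2, 3, 4, 5], 2)  ->  [[1, 2], [3, 4], [5]]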
##############################################
def buildModel_VGG16_Orig(inpShape, numLabels):
model = Sequential()
model.add(ZeroPadding2D((1,1),input_shape=inpShape))
model.add(Convolution2D(32, 3, 3, activation='relu',input_shape=inpShape))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(64, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
#
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(64, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(64, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
#
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(128, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(128, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
#
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
#
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
#
model.add(Flatten())
model.add(Dense(4096, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(4096, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(numLabels, activation='softmax'))
return model
def buildModel_VGG16_Mod(inpShape, numLabels):
model = Sequential()
model.add(Convolution2D(32, 3, 3, activation='relu',input_shape=inpShape))
model.add(Convolution2D(64, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
#
model.add(Convolution2D(64, 3, 3, activation='relu'))
model.add(Convolution2D(64, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
#
model.add(Convolution2D(128, 3, 3, activation='relu'))
model.add(Convolution2D(128, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
#
model.add(Convolution2D(256, 3, 3, activation='relu'))
model.add(Convolution2D(256, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
#
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
#
model.add(Flatten())
model.add(Dense(1024, activation='relu'))
model.add(Dense(numLabels, activation='softmax'))
return model
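# A minimal sketch of compiling the modified VGG-like model (hypothetical input
# shape and hyper-parameters, Keras 1.x API as in the imports above):
#
#   model = buildModel_VGG16_Mod((3, 128, 128), numLabels=10)
#   model.compile(optimizer=opt.Adam(lr=1e-4),
#                 loss='categorical_crossentropy', metrics=['accuracy'])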
##############################################
class Batcher:
meanPrefix='meanval.txt'
pathCSV=None
pathMeanVal=None
lstPathImg=None
lstIdxLbl=None
uniqIdxLbl = None
lstLabels=None
uniqLabels=None
numLabels=0
numImg=0
rangeIdx=None
shapeImg=None
numIterPerEpoch=0
meanValue = 0.0
meanImage = None
imgScale = 255.
modelPrefix = None
def __init__(self, pathCSV, isTheanoShape=True):
self.isTheanoShape=isTheanoShape
if not os.path.isfile(pathCSV):
raise Exception('Cant find CSV file [%s]' % pathCSV)
self.pathCSV = pathCSV
self.pathMeanVal = '%s-%s' % (self.pathCSV, self.meanPrefix)
self.wdir=os.path.dirname(self.pathCSV)
tdata = pd.read_csv(self.pathCSV, sep=',')
tlstPathImg = tdata['path'].as_matrix()
tmplst=[os.path.join(self.wdir, ii) for ii in tlstPathImg]
self.lstPathImg = np.array(tmplst)
self.lstIdxLbl = tdata['lblidx'].as_matrix()
self.uniqIdxLbl = np.unique(self.lstIdxLbl)
self.lstLabels = tdata['label'].as_matrix()
self.uniqLabels = np.unique(self.lstLabels)
assert (len(self.uniqIdxLbl) == len(self.uniqLabels))
# build correct correspondence Idx <-> Label Names and map[LabelName]=#LabelNames
self.mapLabelsSizes={}
tmpLabels=[]
for ii in self.uniqIdxLbl.tolist():
tmp=self.lstLabels[self.lstIdxLbl==ii]
tlbl=np.unique(tmp)[0]
self.mapLabelsSizes[tlbl] = len(tmp)
tmpLabels.append(tlbl)
self.uniqLabels=np.array(tmpLabels)
#
self.numImg = len(self.lstIdxLbl)
self.numLabels = len(self.uniqIdxLbl)
self.arrIdxLbl2Cat = np_utils.to_categorical(self.lstIdxLbl, self.numLabels)
#
self.mapPath = {}
self.mapNumImg = {}
for kk in self.uniqIdxLbl.tolist():
self.mapPath[kk] = self.lstPathImg[self.lstIdxLbl==kk]
self.mapNumImg[kk] = len(self.mapPath[kk])
#
timg = skio.imread(self.lstPathImg[0])
self.isRGB = (len(timg.shape)==3)
if self.isTheanoShape:
if len(timg.shape) < 3:
self.shapeImg = tuple([1] + list(timg.shape))
else:
self.shapeImg = (timg.shape[2], timg.shape[0], timg.shape[1])
else:
if len(timg.shape) < 3:
self.shapeImg = (timg.shape[0],timg.shape[1],1)
else:
self.shapeImg = timg.shape
self.rangeIdx = range(self.numImg)
self.modelPrefix = self.genPrefix()
def toString(self):
self.checkIsInitialized()
tstr = '#Images=%d, #Labels=%d, meanValue=%0.3f' % (self.numImg, self.numLabels, self.meanValue)
tstr2=''
for kk,vv in self.mapLabelsSizes.items():
tstr2='%s\t#%s = %d\n' % (tstr2, kk, vv)
tstr = '%s\n%s' % (tstr, tstr2)
return tstr
def __str__(self):
return self.toString()
def __repr__(self):
return self.toString()
def calcNumIterPerEpoch(self, batchSize):
self.checkIsInitialized()
return int(self.numImg/batchSize)
def isInitialized(self):
return (self.numImg>0) and (self.numLabels>0) and (self.wdir is not None)
def checkIsInitialized(self):
if not self.isInitialized():
raise Exception('class Batcher() is not correctly initialized')
def precalculateMean(self, maxNumberOfImage=2000, isRecalculateMean=False):
self.checkIsInitialized()
if os.path.isfile(self.pathMeanVal) and (not isRecalculateMean):
print (':: found mean-value file, try to load from it [%s] ...' % self.pathMeanVal)
tmp=np.loadtxt(self.pathMeanVal)
self.meanValue = float(tmp[0])
else:
tmpListPath=np.random.permutation(self.lstPathImg)
tmpNumImages=len(tmpListPath) - 1
if tmpNumImages<=maxNumberOfImage:
print (':: #Images=%d less than parameter [maxNumberOfImage=%d], cut to %d' % (tmpNumImages, maxNumberOfImage, tmpNumImages))
maxNumberOfImage = tmpNumImages
tmpListPath = tmpListPath[:maxNumberOfImage]
self.meanImage = None
for ppi, pp in enumerate(tmpListPath):
tmp = skio.imread(pp).astype(np.float)/self.imgScale
if self.meanImage is None:
self.meanImage = tmp
else:
self.meanImage +=tmp
if (ppi%500)==0:
print ('\t[%d/%d] ...' % (ppi, len(tmpListPath)))
self.meanImage /=len(tmpListPath)
self.meanValue = float(np.mean(self.meanImage))
tmpStd = np.std(self.meanImage)
tmpArray = np.array([self.meanValue, tmpStd])
print (':: mean-value [%0.3f] saved to [%s]' % (self.meanValue, self.pathMeanVal))
np.savetxt(self.pathMeanVal, tmpArray)
def getBatchDataByRndIdx(self, rndIdx, isRemoveMean=True):
parBatchSize = len(rndIdx)
# rndIdx=np.random.permutation(self.rangeIdx)[:parBatchSize]
dataX=np.zeros([parBatchSize] + list(self.shapeImg))
dataY=self.arrIdxLbl2Cat[rndIdx,:]
dataL=self.lstLabels[rndIdx]
for ii,idx in enumerate(rndIdx):
tpath = self.lstPathImg[idx]
timg = skio.imread(tpath).astype(np.float)/self.imgScale
if self.isTheanoShape:
if self.isRGB:
timg = timg.transpose((2,0,1))
else:
timg = timg.reshape(self.shapeImg)
else:
if not self.isRGB:
timg = timg.reshape(self.shapeImg)
if isRemoveMean:
dataX[ii] = timg - self.meanValue
else:
dataX[ii] = timg
return (dataX, dataY, dataL)
def getBatchData(self, parBatchSize=128, isRemoveMean=True):
rndIdx=np.random.permutation(self.rangeIdx)[:parBatchSize]
return self.getBatchDataByRndIdx(rndIdx=rndIdx, isRemoveMean=isRemoveMean)
def evaluateModelOnBatch(self, model, parBatchSizeEval = 128):
splitIdx = split_list_by_blocks(self.rangeIdx, parBatchSizeEval)
numSplit = len(splitIdx)
arrAcc = []
arrLoss = []
for ii,lidx in enumerate(splitIdx):
dataX, dataY, _ = self.getBatchDataByRndIdx(lidx)
tret = model.test_on_batch(dataX, dataY)
arrAcc.append(tret[1])
arrLoss.append(tret[0])
print ('[%d/%d] loss/acc = %0.3f/%0.2f%%' % (ii, numSplit, tret[0], 100.*tret[1]))
meanAcc = float(np.mean(arrAcc))
meanLoss = float(np.mean(arrLoss))
return (meanLoss, meanAcc)
def predictOnModel(self, model, parBatchSizeEval = 128):
splitIdx = split_list_by_blocks(self.rangeIdx, parBatchSizeEval)
numSplit = len(splitIdx)
retTot=None
for ii, lidx in enumerate(splitIdx):
dataX, _, _ = self.getBatchDataByRndIdx(lidx)
tret = model.predict_on_batch(dataX)
if retTot is None:
retTot = tret
else:
retTot = np.concatenate((retTot,tret))
if (ii%5)==0:
print ('\tevaluate [%d/%d]' % (ii,numSplit))
return retTot
def buildModel(self):
self.checkIsInitialized()
return buildModel_VGG16_Mod(self.shapeImg, self.numLabels)
def genPrefix(self):
ret = "kmodel-cls-%s" % (time.strftime('%Y%m%d-%H%M%S'))
return ret
def exportModel(self, model, epochId):
foutModel = "%s-e%03d.json" % (self.modelPrefix, epochId)
foutWeights = "%s-e%03d.h5" % (self.modelPrefix, epochId)
foutModel = '%s-%s' % (self.pathCSV, foutModel)
foutWeights = '%s-%s' % (self.pathCSV, foutWeights)
with open(foutModel, 'w') as f:
modelJsonStr = json.dumps(json.loads(model.to_json()), indent=3)
f.write(modelJsonStr)
model.save_weights(foutWeights, overwrite=True)
return foutModel
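# A minimal end-to-end training sketch tying Batcher and the model together
# (hypothetical CSV path, batch size and epoch count):
#
#   batcher = Batcher('/path/to/train.csv')
#   batcher.precalculateMean()
#   model = batcher.buildModel()
#   model.compile(optimizer=opt.Adam(lr=1e-4),
#                 loss='categorical_crossentropy', metrics=['accuracy'])
#   for epoch in range(10):
#       for _ in range(batcher.calcNumIterPerEpoch(128)):
#           dataX, dataY, _ = batcher.getBatchData(128)
#           model.train_on_batch(dataX, dataY)
#       batcher.exportModel(model, epoch)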
##############################################
def readImageAndReshape(fimg, isTheanoShape=True):
timg = skio.imread(fimg).astype(np.float) / 255.
# timg -= batcher.meanValue
if isTheanoShape:
if len(timg.shape) == 3:
timg = timg.transpose((2, 0, 1))
else:
timg = timg.reshape((1, timg.shape[0], timg.shape[1]))
else:
if len(timg.shape) == 3:
timg = timg.reshape((timg.shape[0], timg.shape[1], 1))
else:
pass
# (0) Final reshape: for batch-processing
timg = timg.reshape([1] + list(timg.shape))
return timg
##############################################
def splitModel2CNNandFCNN(model, inpuImageShape, nameFlattenLayer='flatten_1'):
inpShape = inpuImageShape
#
lstLayerNames = [[ii, ll.name] for ii, ll in enumerate(model.layers)]
layerFlatten = [ii for ii in lstLayerNames if ii[1] == nameFlattenLayer][0]
idxFlatten = layerFlatten[0]
numLayers = len(lstLayerNames)
numLayersCNN = idxFlatten + 0
for ii in lstLayerNames:
print (ii)
print ('--------')
print ('Flatten layer is %s, Flatten-index is %d' % (layerFlatten, idxFlatten))
modelCNN = Sequential()
modelFCNN = Sequential()
# (1) Prepare CNN-part of Model
print ('----[ CNN-Part ]----')
for ii in range(numLayersCNN):
tmpLayer = model.layers[ii]
if ii == 0:
if isinstance(tmpLayer, keras.layers.Convolution2D):
newLayer = Convolution2D(nb_filter=tmpLayer.nb_filter,
nb_row=tmpLayer.nb_row,
nb_col=tmpLayer.nb_col,
border_mode=tmpLayer.border_mode,
input_shape=inpShape)
elif isinstance(tmpLayer, keras.layers.ZeroPadding2D):
newLayer = ZeroPadding2D(padding=tmpLayer.padding,
input_shape=inpShape)
else:
raise Exception('Unsupported input CNN-Part-of-Model layer... [%s]' % type(tmpLayer))
else:
newLayer = tmpLayer
modelCNN.add(newLayer)
print ('\t:: CNN-Part of Model: load layer #%d/%d (%s)' % (ii, numLayersCNN, tmpLayer.name))
# modelCNN.build(input_shape=inpShape)
print (':: Load CNN-Part Model weights...')
for ii in range(numLayersCNN):
modelCNN.layers[ii].set_weights(model.layers[ii].get_weights())
# (2) Prepare FCNN-part of Model
print ('----[ FCNN-Part ]----')
shapeFCNN = model.layers[numLayersCNN - 1].get_output_shape_at(0)[1:]
lstIdxFNNLayers = range(numLayersCNN, numLayers)
for i0, ii in enumerate(lstIdxFNNLayers):
tmpLayer = model.layers[ii]
if i0 == 0:
newLayer = Flatten(input_shape=shapeFCNN)
else:
newLayer = tmpLayer
modelFCNN.add(newLayer)
print ('\t:: F*CNN-Part of Model: load layer #%d/%d (%s)' % (ii, len(lstIdxFNNLayers), tmpLayer.name))
modelFCNN.build(input_shape=shapeFCNN)
print (':: Load F*CNN-Part Model weights...')
for i0, ii in enumerate(lstIdxFNNLayers):
modelFCNN.layers[i0].set_weights(model.layers[ii].get_weights())
#
print ('--------------------')
print ('::CNN:')
for ii, ll in enumerate(modelCNN.layers):
print ('\t%d : %s ---> %s' % (ii, ll.name, ll.get_output_shape_at(0)))
print ('::F*CNN:')
for ii, ll in enumerate(modelFCNN.layers):
print ('\t%d : %s ---> %s' % (ii, ll.name, ll.get_output_shape_at(0)))
return (modelCNN, modelFCNN)
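# Example call (hypothetical input image shape, channels-first as above):
#
#   modelCNN, modelFCNN = splitModel2CNNandFCNN(model, (3, 256, 256))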
##############################################
def loadModelFromJson(pathModelJson):
if not os.path.isfile(pathModelJson):
raise Exception('Cant find JSON-file [%s]' % pathModelJson)
tpathBase = os.path.splitext(pathModelJson)[0]
tpathModelWeights = '%s.h5' % tpathBase
if not os.path.isfile(tpathModelWeights):
raise Exception('Cant find h5-Weights-file [%s]' % tpathModelWeights)
with open(pathModelJson, 'r') as f:
tmpStr = f.read()
model = keras.models.model_from_json(tmpStr)
model.load_weights(tpathModelWeights)
return model
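# Example (hypothetical path): reload a model exported by Batcher.exportModel(),
# which writes a .json architecture file next to a .h5 weights file:
#
#   model = loadModelFromJson('/path/to/train.csv-kmodel-cls-20200101-120000-e000.json')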
##############################################
def buildProbMap(modelCNN, modelFCNN, pimg):
retMapCNN = modelCNN.predict_on_batch(pimg)[0]
plt.figure()
for xx in range(40):
plt.subplot(5,8,xx+1)
plt.imshow(retMapCNN[xx])
plt.axis('off')
plt.axis('off')
plt.show()
inpShapeFCNN = modelFCNN.layers[0].get_input_shape_at(0)[1:]
numLblFCNN = modelFCNN.layers[-1].get_output_shape_at(0)[1]
nch = inpShapeFCNN[0]
nrow = inpShapeFCNN[1]
ncol = inpShapeFCNN[2]
nrowCNN = retMapCNN.shape[1]
ncolCNN = retMapCNN.shape[2]
nrowCNN0 = nrowCNN - nrow + 1
ncolCNN0 = ncolCNN - ncol + 1
#
batchSizeFCNNmax = 1024
tretProb0 = None
lstIdx = [[rr, cc] for rr in range(nrowCNN0) for cc in range(ncolCNN0)]
splitLstIdx = split_list_by_blocks(lstIdx, batchSizeFCNNmax)
for i0, lstPos in enumerate(splitLstIdx):
tsizBatch = len(lstPos)
tdataX = np.zeros((tsizBatch, nch, nrow, ncol))
for j0, pp in enumerate(lstPos):
tdataX[j0] = retMapCNN[:, pp[0]:pp[0] + nrow, pp[1]:pp[1] + ncol]
tretFCNN = modelFCNN.predict_on_batch(tdataX)
if tretProb0 is None:
tretProb0 = tretFCNN
else:
tretProb0 = np.concatenate((tretProb0, tretFCNN))
tretProb0R = tretProb0.reshape((nrowCNN0, ncolCNN0, numLblFCNN))
| |
if param_labels is None:
param_labels = ['', '']
tmp = [[parameters, mean_score, scores.std()]
for parameters, mean_score, scores in grid_scores.grid_scores_]
param, means, stddev = list(zip(*tmp))
param_range_0 = grid_scores.param_grid[params[0]]
param_range_1 = grid_scores.param_grid[params[1]]
mat_size = (len(param_range_1), len(param_range_0))
fig, axs = plt.subplots(1, 2, figsize=(10, 5))
matrices = np.concatenate((np.array(means).reshape(mat_size)[None],
np.array(stddev).reshape(mat_size)[None]))
X_cmap = _grid_matrix_cmap()
x_label = param_labels[0]
y_label = param_labels[1]
plot_title = [score_label, 'Standard Deviation']
for ax, label, matrix, title in zip(axs, param_labels,
np.swapaxes(matrices, -1, -2),
plot_title):
ax.set_xticklabels(param_range_0, fontsize=12)
ax.set_yticklabels(param_range_1, fontsize=12)
ax.set_xticks(np.arange(len(param_range_0)))
ax.set_yticks(np.arange(len(param_range_1)))
ax.set_xlabel(x_label, fontsize=14)
ax.set_ylabel(y_label, fontsize=14)
ax.grid(False)
im = ax.imshow(np.swapaxes(matrix, 0, 1),
cmap=X_cmap, interpolation='none')
ax.set_title(title, fontsize=22)
divider = make_axes_locatable(ax)
cbar_ax = divider.append_axes("right", size="10%", pad=0.05)
cbar = plt.colorbar(im, cax=cbar_ax)
cbar.ax.tick_params(labelsize=12)
fig.subplots_adjust(right=1.2)
plt.show()
def draw_component_variance(variance):
"""
Visualize the percent variance as a function of components.
Args:
variance (list): variance ratio explanation from dimensional
reduction technique.
"""
plt.close('all')
n_components = len(variance)
x = np.arange(1, n_components + 1)
plt.plot(x, np.cumsum(variance * 100), 'o-', color='#1a9641', linewidth=2)
plt.xlabel('Number of Components', fontsize=15)
plt.xlim(0, n_components + 1)
plt.ylabel('Percent Variance', fontsize=15)
plt.show()
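# A typical call, assuming an already-fitted sklearn PCA instance `pca`:
#
#   draw_component_variance(pca.explained_variance_ratio_)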
def draw_components(datasets, labels, title=None, component_labels=None):
"""
Visualize low dimensional representations of microstructures.
Args:
datasets (list, 2D arrays): low dimensional data with dimensions
[n_samples, n_components]. The number of components must be 2 or
3.
labels (list, str): list of labels, one for each array in datasets
title: main title for plot
component_labels: labels for components
"""
plt.close('all')
if title is None:
title = 'Low Dimensional Representation'
n_components = np.array(datasets[0][-1].shape)
if component_labels is None:
component_labels = range(1, n_components + 1)
if len(datasets) != len(labels):
raise RuntimeError('datasets and labels must have the same length')
if n_components != len(component_labels):
raise RuntimeError('number of components and component_labels must'
' have the same length')
if n_components[-1] == 2:
_draw_components_2D(datasets, labels, title, component_labels[:2])
elif n_components[-1] == 3:
_draw_components_3D(datasets, labels, title, component_labels)
else:
raise RuntimeError("n_components must be 2 or 3.")
def _draw_components_2D(X, labels, title, component_labels):
"""
Helper function to plot 2 components.
Args:
X: Arrays with low dimensional data
labels: labels for each of the low dimensional arrays
"""
n_sets = len(X)
color_list = _get_color_list(n_sets)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlabel('Component ' + str(component_labels[0]), fontsize=15)
ax.set_ylabel('Component ' + str(component_labels[1]), fontsize=15)
X_array = np.concatenate(X)
x_min, x_max = [np.min(X_array[:, 0]), np.max(X_array[:, 0])]
y_min, y_max = [np.min(X_array[:, 1]), np.max(X_array[:, 1])]
x_epsilon = (x_max - x_min) * 0.05
y_epsilon = (y_max - y_min) * 0.05
ax.set_xlim([x_min - x_epsilon, x_max + x_epsilon])
ax.set_ylim([y_min - y_epsilon, y_max + y_epsilon])
for label, pts, color in zip(labels, X, color_list):
ax.plot(pts[:, 0], pts[:, 1], 'o', color=color, label=label)
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., fontsize=15)
plt.title(title, fontsize=20)
plt.show()
def _draw_components_3D(X, labels, title, component_labels):
"""
Helper function to plot 3 components.
Args:
X: Arrays with low dimensional data
labels: labels for each of the low dimensional arrays
"""
n_sets = len(X)
color_list = _get_color_list(n_sets)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_xlabel('Component ' + str(component_labels[0]), fontsize=10)
ax.set_ylabel('Component ' + str(component_labels[1]), fontsize=10)
ax.set_zlabel('Component ' + str(component_labels[2]), fontsize=10)
X_array = np.concatenate(X)
x_min, x_max = [np.min(X_array[:, 0]), np.max(X_array[:, 0])]
y_min, y_max = [np.min(X_array[:, 1]), np.max(X_array[:, 1])]
z_min, z_max = [np.min(X_array[:, 2]), np.max(X_array[:, 2])]
x_epsilon = (x_max - x_min) * 0.05
y_epsilon = (y_max - y_min) * 0.05
z_epsilon = (z_max - z_min) * 0.05
ax.set_xlim([x_min - x_epsilon, x_max + x_epsilon])
ax.set_ylim([y_min - y_epsilon, y_max + y_epsilon])
ax.set_zlim([z_min - z_epsilon, z_max + z_epsilon])
for label, pts, color in zip(labels, X, color_list):
ax.plot(pts[:, 0], pts[:, 1], pts[:, 2], 'o', color=color, label=label)
plt.title(title, fontsize=15)
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., fontsize=15)
plt.show()
def draw_goodness_of_fit(fit_data, pred_data, labels):
"""Goodness of fit plot for MKSHomogenizationModel.
Args:
fit_data (2D array): Low dimensional representation of the prediction
values of the data used to fit the model and the actual values.
pred_data (2D array): Low dimensional representation of the prediction
values of the data used for prediction with the model and the
actual values.
"""
plt.close('all')
y_total = np.concatenate((fit_data, pred_data), axis=-1)
y_min, y_max = np.min(y_total), np.max(y_total)
middle = (y_max + y_min) / 2.
data_range = y_max - y_min
line = np.linspace(middle - data_range * 1.03 / 2,
middle + data_range * 1.03 / 2, endpoint=False)
plt.plot(line, line, '-', linewidth=3, color='#000000')
plt.plot(fit_data[0], fit_data[1], 'o', color='#1a9850', label=labels[0])
plt.plot(pred_data[0], pred_data[1], 'o',
color='#f46d43', label=labels[1])
plt.title('Goodness of Fit', fontsize=20)
plt.xlabel('Actual', fontsize=18)
plt.ylabel('Predicted', fontsize=18)
plt.legend(loc=2, fontsize=15)
plt.show()
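# A minimal call (hypothetical 2 x n arrays of [actual, predicted] values):
#
#   draw_goodness_of_fit(fit_data, pred_data, ['calibration', 'validation'])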
def draw_correlations(X_corr, correlations=None):
"""
Visualize spatial correlations.
Args:
X_corr (ND array): correlations
correlations (list, optional): correlation labels
"""
if correlations is None:
n_cross = X_corr.shape[-1]
L = (np.sqrt(1 + 8 * n_cross) - 1).astype(int) / 2
correlations = _auto_correlations(L) + _cross_correlations(L)
_draw_stats(X_corr, correlations=correlations)
def draw_autocorrelations(X_auto, autocorrelations=None):
"""
Visualize spatial autocorrelations.
Args:
X_auto (ND array): autocorrelations
autocorrelations (list, optional): autocorrelation labels.
"""
if autocorrelations is None:
n_states = X_auto.shape[-1]
autocorrelations = _auto_correlations(n_states)
_draw_stats(X_auto, correlations=autocorrelations)
def draw_crosscorrelations(X_cross, crosscorrelations=None):
"""
Visualize spatial crosscorrelations.
Args:
X_cross (ND array): cross-correlations
crosscorrelations (list, optional): cross-correlation labels.
"""
if crosscorrelations is None:
n_cross = X_cross.shape[-1]
n_states = (np.sqrt(1 + 8 * n_cross) + 1).astype(int) / 2
crosscorrelations = _cross_correlations(n_states)
_draw_stats(X_cross, correlations=crosscorrelations)
def _draw_stats(X_, correlations=None):
"""Visualize the spatial correlations.
Args:
X_: correlations
correlations: list of tuples to select the spatial correlations
that will be displayed.
"""
plt.close('all')
X_cmap = _get_coeff_cmap()
n_plots = len(correlations)
vmin = np.min(X_)
vmax = np.max(X_)
x_loc, x_labels = _get_ticks_params(X_.shape[0])
y_loc, y_labels = _get_ticks_params(X_.shape[1])
fig, axs = plt.subplots(1, n_plots, figsize=(n_plots * 5, 5))
if n_plots == 1:
axs = list([axs])
for ax, label, img in zip(axs, correlations, np.rollaxis(X_, -1)):
ax.grid(False)
ax.set_xticks(x_loc)
ax.set_xticklabels(x_labels, fontsize=12)
ax.set_yticks(y_loc)
ax.set_yticklabels(y_labels, fontsize=12)
im = ax.imshow(np.swapaxes(img, 0, 1), cmap=X_cmap,
interpolation='none', vmin=vmin, vmax=vmax)
ax.set_title(r"Correlation $l = {0}$, $l' = {1}$".format(label[0],
label[1]),
fontsize=15)
fig.subplots_adjust(right=0.8)
divider = make_axes_locatable(ax)
cbar_ax = divider.append_axes("right", size="10%", pad=0.05)
cbar_ticks = _get_colorbar_ticks(img, 5)
cbar_ticks_diff = cbar_ticks[-1] - cbar_ticks[0]
cbar_top, cbar_grids = np.max(X_) * 0.005, 0.005
if cbar_ticks_diff <= 1e-15:
cbar_top = 0.
cbar_grids = 0.5
try:
cbar = plt.colorbar(im, cax=cbar_ax, ticks=cbar_ticks,
boundaries=np.arange(cbar_ticks[0],
cbar_ticks[-1] + cbar_top,
cbar_ticks_diff *
cbar_grids))
cbar.ax.tick_params(labelsize=12)
except:
cbar = plt.colorbar(im, cax=cbar_ax, boundaries=np.unique(X_))
cbar.ax.tick_params(labelsize=12)
fig.subplots_adjust(right=0.8)
plt.tight_layout()
plt.show()
def _get_ticks_params(l):
"""Get tick locations and labels for spatial correlation plots.
>>> l = 4
>>> result = ([0, 1, 2, 3, 4], [-2, -1, 0, 1, 2])
>>> assert result == _get_ticks_params(l)
Args:
l: shape of array along the axis
"""
segments = np.roll(np.arange(4, 7, dtype=int), 1, 0)
m = segments[np.argmin(l % segments)]
n = max((l + 1) / m, 1)
tick_loc = range(0, l + n, n)
tick_labels = range(- (l - 1) / 2, (l + 1) / 2 + n, n)
return tick_loc, tick_labels
def _get_colorbar_ticks(X_, n_ticks):
"""
Helper function to get colorbar color tick locations.
Args:
X_: spatial correlations array
(n_samples, x, y, local_state_correlation)
"""
tick_range = np.linspace(np.min(X_), np.max(X_), n_ticks)
return tick_range.astype(float)
def draw_learning_curves(estimator, X, y, ylim=None, cv=None, n_jobs=1,
scoring=None, train_sizes=np.linspace(.1, 1.0, 5)):
"""Code taken from scikit-learn examples for version 0.15.
Generate a simple plot of the test and training learning curve.
Args:
estimator (class): object type that implements the "fit" and "predict"
methods
An object of that type which is cloned for each validation.
title (str): Used for the title for the chart.
X (2D array): array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y (1D array): array-like, shape (n_samples) or (n_samples,
n_features), optional Target relative to X for classification or
regression; None for unsupervised learning.
ylim (tuple, optional): Defines minimum and maximum y values plotted.
cv (int, optional): If an integer is passed, it is the number of folds
(defaults to 3). Specific cross-validation objects can be passed,
see sklearn.cross_validation module for the list of possible
objects
n_jobs(int, optional) : Number of jobs to run in parallel (default 1).
train_sizes (float): Relative or absolute numbers of training examples
that will be used to generate the learning curve. If the dtype is
float, it is regarded as a fraction of the maximum size of the
training set (that is determined by the selected validation
method), i.e. it has to be within (0, 1]. Otherwise it is
interpreted as absolute sizes of the training sets. Note that for
classification the number of samples usually | |
"98",
"name": "MediumPurple3",
"hex": "#875fd7",
"rgb": "rgb(135,95,215)",
"hsl": "hsl(260,60%,60%)"
},
"99": {
"term": "99",
"name": "SlateBlue",
"hex": "#875fff",
"rgb": "rgb(135,95,255)",
"hsl": "hsl(255,100%,68%)"
},
"100": {
"term": "100",
"name": "Yellow7",
"hex": "#878700",
"rgb": "rgb(135,135,0)",
"hsl": "hsl(60,100%,26%)"
},
"101": {
"term": "101",
"name": "Wheat4",
"hex": "#87875f",
"rgb": "rgb(135,135,95)",
"hsl": "hsl(60,17%,45%)"
},
"102": {
"term": "102",
"name": "LightGrey14",
"hex": "#878787",
"rgb": "rgb(135,135,135)",
"hsl": "hsl(0,0%,52%)"
},
"103": {
"term": "103",
"name": "LightSlateGrey",
"hex": "#8787af",
"rgb": "rgb(135,135,175)",
"hsl": "hsl(240,20%,60%)"
},
"104": {
"term": "104",
"name": "SlateBlue4",
"hex": "#8787d7",
"rgb": "rgb(135,135,215)",
"hsl": "hsl(240,50%,68%)"
},
"105": {
"term": "105",
"name": "LightSlateBlue",
"hex": "#8787ff",
"rgb": "rgb(135,135,255)",
"hsl": "hsl(240,100%,76%)"
},
"106": {
"term": "106",
"name": "Yellow6",
"hex": "#87af00",
"rgb": "rgb(135,175,0)",
"hsl": "hsl(3,100%,34%)"
},
"107": {
"term": "107",
"name": "DarkOliveGreen3",
"hex": "#87af5f",
"rgb": "rgb(135,175,95)",
"hsl": "hsl(90,33%,52%)"
},
"108": {
"term": "108",
"name": "DarkSeaGreen7",
"hex": "#87af87",
"rgb": "rgb(135,175,135)",
"hsl": "hsl(120,20%,60%)"
},
"109": {
"term": "109",
"name": "LightSkyBlue3",
"hex": "#87afaf",
"rgb": "rgb(135,175,175)",
"hsl": "hsl(180,20%,60%)"
},
"110": {
"term": "110",
"name": "LightSkyBlue2",
"hex": "#87afd7",
"rgb": "rgb(135,175,215)",
"hsl": "hsl(210,50%,68%)"
},
"111": {
"term": "111",
"name": "SkyBlue2",
"hex": "#87afff",
"rgb": "rgb(135,175,255)",
"hsl": "hsl(220,100%,76%)"
},
"112": {
"term": "112",
"name": "Chartreuse3",
"hex": "#87d700",
"rgb": "rgb(135,215,0)",
"hsl": "hsl(2,100%,42%)"
},
"113": {
"term": "113",
"name": "DarkOliveGreen4",
"hex": "#87d75f",
"rgb": "rgb(135,215,95)",
"hsl": "hsl(100,60%,60%)"
},
"114": {
"term": "114",
"name": "PaleGreen4",
"hex": "#87d787",
"rgb": "rgb(135,215,135)",
"hsl": "hsl(120,50%,68%)"
},
"115": {
"term": "115",
"name": "DarkSeaGreen4",
"hex": "#87d7af",
"rgb": "rgb(135,215,175)",
"hsl": "hsl(150,50%,68%)"
},
"116": {
"term": "116",
"name": "DarkSlateGray3",
"hex": "#87d7d7",
"rgb": "rgb(135,215,215)",
"hsl": "hsl(180,50%,68%)"
},
"117": {
"term": "117",
"name": "SkyBlue",
"hex": "#87d7ff",
"rgb": "rgb(135,215,255)",
"hsl": "hsl(200,100%,76%)"
},
"118": {
"term": "118",
"name": "Chartreuse",
"hex": "#87ff00",
"rgb": "rgb(135,255,0)",
"hsl": "hsl(8,100%,50%)"
},
"119": {
"term": "119",
"name": "LightGreen2",
"hex": "#87ff5f",
"rgb": "rgb(135,255,95)",
"hsl": "hsl(105,100%,68%)"
},
"120": {
"term": "120",
"name": "LightGreen",
"hex": "#87ff87",
"rgb": "rgb(135,255,135)",
"hsl": "hsl(120,100%,76%)"
},
"121": {
"term": "121",
"name": "PaleGreen2",
"hex": "#87ffaf",
"rgb": "rgb(135,255,175)",
"hsl": "hsl(140,100%,76%)"
},
"122": {
"term": "122",
"name": "Aquamarine2",
"hex": "#87ffd7",
"rgb": "rgb(135,255,215)",
"hsl": "hsl(160,100%,76%)"
},
"123": {
"term": "123",
"name": "DarkSlateGray",
"hex": "#87ffff",
"rgb": "rgb(135,255,255)",
"hsl": "hsl(180,100%,76%)"
},
"124": {
"term": "124",
"name": "Red4",
"hex": "#af0000",
"rgb": "rgb(175,0,0)",
"hsl": "hsl(0,100%,34%)"
},
"125": {
"term": "125",
"name": "DeepPink6",
"hex": "#af005f",
"rgb": "rgb(175,0,95)",
"hsl": "hsl(27,100%,34%)"
},
"126": {
"term": "126",
"name": "MediumVioletRed",
"hex": "#af0087",
"rgb": "rgb(175,0,135)",
"hsl": "hsl(13,100%,34%)"
},
"127": {
"term": "127",
"name": "Magenta6",
"hex": "#af00af",
"rgb": "rgb(175,0,175)",
"hsl": "hsl(300,100%,34%)"
},
"128": {
"term": "128",
"name": "DarkViolet",
"hex": "#af00d7",
"rgb": "rgb(175,0,215)",
"hsl": "hsl(88,100%,42%)"
},
"129": {
"term": "129",
"name": "Purple2",
"hex": "#af00ff",
"rgb": "rgb(175,0,255)",
"hsl": "hsl(81,100%,50%)"
},
"130": {
"term": "130",
"name": "DarkOrange2",
"hex": "#af5f00",
"rgb": "rgb(175,95,0)",
"hsl": "hsl(2,100%,34%)"
},
"131": {
"term": "131",
"name": "IndianRed4",
"hex": "#af5f5f",
"rgb": "rgb(175,95,95)",
"hsl": "hsl(0,33%,52%)"
},
"132": {
"term": "132",
"name": "Pink5",
"hex": "#af5f87",
"rgb": "rgb(175,95,135)",
"hsl": "hsl(330,33%,52%)"
},
"133": {
"term": "133",
"name": "MediumOrchid4",
"hex": "#af5faf",
"rgb": "rgb(175,95,175)",
"hsl": "hsl(300,33%,52%)"
},
"134": {
"term": "134",
"name": "MediumOrchid3",
"hex": "#af5fd7",
"rgb": "rgb(175,95,215)",
"hsl": "hsl(280,60%,60%)"
},
"135": {
"term": "135",
"name": "MediumPurple4",
"hex": "#af5fff",
"rgb": "rgb(175,95,255)",
"hsl": "hsl(270,100%,68%)"
},
"136": {
"term": "136",
"name": "DarkGoldenrod",
"hex": "#af8700",
"rgb": "rgb(175,135,0)",
"hsl": "hsl(6,100%,34%)"
},
"137": {
"term": "137",
"name": "LightSalmon3",
"hex": "#af875f",
"rgb": "rgb(175,135,95)",
"hsl": "hsl(30,33%,52%)"
},
"138": {
"term": "138",
"name": "RosyBrown",
"hex": "#af8787",
"rgb": "rgb(175,135,135)",
"hsl": "hsl(0,20%,60%)"
},
"139": {
"term": "139",
"name": "Plum4",
"hex": "#af87af",
"rgb": "rgb(175,135,175)",
"hsl": "hsl(300,20%,60%)"
},
"140": {
"term": "140",
"name": "MediumPurple2",
"hex": "#af87d7",
"rgb": "rgb(175,135,215)",
"hsl": "hsl(270,50%,68%)"
},
"141": {
"term": "141",
"name": "MediumPurple",
"hex": "#af87ff",
"rgb": "rgb(175,135,255)",
"hsl": "hsl(260,100%,76%)"
},
"142": {
"term": "142",
"name": "Gold3",
"hex": "#afaf00",
"rgb": "rgb(175,175,0)",
"hsl": "hsl(60,100%,34%)"
},
"143": {
"term": "143",
"name": "DarkKhaki",
"hex": "#afaf5f",
"rgb": "rgb(175,175,95)",
"hsl": "hsl(60,33%,52%)"
},
"144": {
"term": "144",
"name": "NavajoWhite3",
"hex": "#afaf87",
"rgb": "rgb(175,175,135)",
"hsl": "hsl(60,20%,60%)"
},
"145": {
"term": "145",
"name": "LightGrey8",
"hex": "#afafaf",
"rgb": "rgb(175,175,175)",
"hsl": "hsl(0,0%,68%)"
},
"146": {
"term": "146",
"name": "LightSteelBlue3",
"hex": "#afafd7",
"rgb": "rgb(175,175,215)",
"hsl": "hsl(240,33%,76%)"
},
"147": {
"term": "147",
"name": "LightSteelBlue2",
"hex": "#afafff",
"rgb": "rgb(175,175,255)",
"hsl": "hsl(240,100%,84%)"
},
"148": {
"term": "148",
"name": "Yellow5",
"hex": "#afd700",
"rgb": "rgb(175,215,0)",
"hsl": "hsl(1,100%,42%)"
},
"149": {
"term": "149",
"name": "DarkOliveGreen5",
"hex": "#afd75f",
"rgb": "rgb(175,215,95)",
"hsl": "hsl(80,60%,60%)"
},
"150": {
"term": "150",
"name": "DarkSeaGreen5",
"hex": "#afd787",
"rgb": "rgb(175,215,135)",
"hsl": "hsl(90,50%,68%)"
},
"151": {
"term": "151",
"name": "DarkSeaGreen6",
"hex": "#afd7af",
"rgb": "rgb(175,215,175)",
"hsl": "hsl(120,33%,76%)"
},
"152": {
"term": "152",
"name": "LightCyan3",
"hex": "#afd7d7",
"rgb": "rgb(175,215,215)",
"hsl": "hsl(180,33%,76%)"
},
"153": {
"term": "153",
"name": "LightSkyBlue",
"hex": "#afd7ff",
"rgb": "rgb(175,215,255)",
"hsl": "hsl(210,100%,84%)"
},
"154": {
"term": "154",
"name": "GreenYellow",
"hex": "#afff00",
"rgb": "rgb(175,255,0)",
"hsl": "hsl(8,100%,50%)"
},
"155": {
"term": "155",
"name": "DarkOliveGreen2",
"hex": "#afff5f",
"rgb": "rgb(175,255,95)",
"hsl": "hsl(90,100%,68%)"
},
"156": {
"term": "156",
"name": "PaleGreen",
"hex": "#afff87",
"rgb": "rgb(175,255,135)",
"hsl": "hsl(100,100%,76%)"
},
"157": {
"term": "157",
"name": "DarkSeaGreen2",
"hex": "#afffaf",
"rgb": "rgb(175,255,175)",
"hsl": "hsl(120,100%,84%)"
},
"158": {
"term": "158",
"name": "DarkSeaGreen3",
"hex": "#afffd7",
"rgb": "rgb(175,255,215)",
"hsl": "hsl(150,100%,84%)"
},
"159": {
"term": "159",
"name": "PaleTurquoise",
"hex": "#afffff",
"rgb": "rgb(175,255,255)",
"hsl": "hsl(180,100%,84%)"
},
"160": {
"term": "160",
"name": "Red3",
"hex": "#d70000",
"rgb": "rgb(215,0,0)",
"hsl": "hsl(0,100%,42%)"
},
"161": {
"term": "161",
"name": "DeepPink4",
"hex": "#d7005f",
"rgb": "rgb(215,0,95)",
"hsl": "hsl(33,100%,42%)"
},
"162": {
"term": "162",
"name": "DeepPink5",
"hex": "#d70087",
"rgb": "rgb(215,0,135)",
"hsl": "hsl(22,100%,42%)"
},
"163": {
"term": "163",
"name": "Magenta3",
"hex": "#d700af",
"rgb": "rgb(215,0,175)",
"hsl": "hsl(11,100%,42%)"
},
"164": {
"term": "164",
"name": "Magenta5",
"hex": "#d700d7",
"rgb": "rgb(215,0,215)",
"hsl": "hsl(300,100%,42%)"
},
"165": {
"term": "165",
"name": "Magenta4",
"hex": "#d700ff",
"rgb": "rgb(215,0,255)",
"hsl": "hsl(90,100%,50%)"
},
"166": {
"term": "166",
"name": "DarkOrange3",
"hex": "#d75f00",
"rgb": "rgb(215,95,0)",
"hsl": "hsl(6,100%,42%)"
},
"167": {
"term": "167",
"name": "IndianRed3",
"hex": "#d75f5f",
"rgb": "rgb(215,95,95)",
"hsl": "hsl(0,60%,60%)"
},
"168": {
"term": "168",
"name": "Pink3",
"hex": "#d75f87",
"rgb": "rgb(215,95,135)",
"hsl": "hsl(340,60%,60%)"
},
"169": {
"term": "169",
"name": "Pink4",
"hex": "#d75faf",
"rgb": "rgb(215,95,175)",
"hsl": "hsl(320,60%,60%)"
},
"170": {
"term": "170",
"name": "Orchid3",
"hex": "#d75fd7",
"rgb": "rgb(215,95,215)",
"hsl": "hsl(300,60%,60%)"
},
"171": {
"term": "171",
"name": "MediumOrchid2",
"hex": "#d75fff",
"rgb": "rgb(215,95,255)",
"hsl": "hsl(285,100%,68%)"
},
"172": {
"term": "172",
"name": "Orange2",
"hex": "#d78700",
"rgb": "rgb(215,135,0)",
"hsl": "hsl(7,100%,42%)"
},
"173": {
"term": "173",
"name": "LightSalmon2",
"hex": "#d7875f",
"rgb": "rgb(215,135,95)",
"hsl": "hsl(20,60%,60%)"
},
"174": {
"term": "174",
"name": "LightPink3",
"hex": "#d78787",
"rgb": "rgb(215,135,135)",
"hsl": "hsl(0,50%,68%)"
},
"175": {
"term": "175",
"name": "Pink2",
"hex": "#d787af",
"rgb": "rgb(215,135,175)",
"hsl": "hsl(330,50%,68%)"
},
"176": {
"term": "176",
"name": "Plum3",
"hex": "#d787d7",
"rgb": "rgb(215,135,215)",
"hsl": "hsl(300,50%,68%)"
},
"177": {
"term": "177",
"name": "Violet",
"hex": "#d787ff",
"rgb": "rgb(215,135,255)",
"hsl": "hsl(280,100%,76%)"
},
"178": {
"term": "178",
"name": "Gold2",
"hex": "#d7af00",
"rgb": "rgb(215,175,0)",
"hsl": "hsl(8,100%,42%)"
},
"179": {
"term": "179",
"name": "LightGoldenrod3",
"hex": "#d7af5f",
"rgb": "rgb(215,175,95)",
"hsl": "hsl(40,60%,60%)"
},
"180": {
"term": "180",
"name": "Tan",
"hex": "#d7af87",
"rgb": "rgb(215,175,135)",
"hsl": "hsl(30,50%,68%)"
},
"181": {
"term": "181",
"name": "MistyRose3",
"hex": "#d7afaf",
"rgb": "rgb(215,175,175)",
"hsl": "hsl(0,33%,76%)"
},
"182": {
"term": "182",
"name": "Thistle3",
"hex": "#d7afd7",
"rgb": "rgb(215,175,215)",
"hsl": "hsl(300,33%,76%)"
},
"183": {
"term": "183",
"name": "Plum2",
"hex": "#d7afff",
"rgb": "rgb(215,175,255)",
"hsl": "hsl(270,100%,84%)"
},
"184": {
"term": "184",
"name": "Yellow3",
"hex": "#d7d700",
"rgb": "rgb(215,215,0)",
"hsl": "hsl(60,100%,42%)"
},
"185": {
"term": "185",
"name": "Khaki3",
"hex": "#d7d75f",
"rgb": "rgb(215,215,95)",
"hsl": "hsl(60,60%,60%)"
},
"186": {
"term": "186",
"name": "LightGoldenrod5",
"hex": "#d7d787",
"rgb": "rgb(215,215,135)",
"hsl": "hsl(60,50%,68%)"
},
"187": {
"term": "187",
"name": "LightYellow3",
"hex": "#d7d7af",
"rgb": "rgb(215,215,175)",
"hsl": "hsl(60,33%,76%)"
},
"188": {
"term": "188",
"name": "LightGrey4",
"hex": "#d7d7d7",
"rgb": "rgb(215,215,215)",
"hsl": "hsl(0,0%,84%)"
},
"189": {
"term": "189",
"name": "LightSteelBlue",
"hex": "#d7d7ff",
"rgb": "rgb(215,215,255)",
"hsl": "hsl(240,100%,92%)"
},
"190": {
"term": "190",
"name": "Yellow4",
"hex": "#d7ff00",
"rgb": "rgb(215,255,0)",
"hsl": "hsl(9,100%,50%)"
},
"191": {
"term": "191",
"name": "DarkOliveGreen",
"hex": "#d7ff5f",
"rgb": "rgb(215,255,95)",
"hsl": "hsl(75,100%,68%)"
},
"192": {
"term": "192",
"name": "DarkOliveGreen6",
"hex": "#d7ff87",
"rgb": "rgb(215,255,135)",
"hsl": "hsl(80,100%,76%)"
},
"193": {
"term": "193",
"name": "DarkSeaGreen",
"hex": "#d7ffaf",
"rgb": "rgb(215,255,175)",
"hsl": "hsl(90,100%,84%)"
},
"194": {
"term": "194",
"name": "Honeydew2",
"hex": "#d7ffd7",
"rgb": "rgb(215,255,215)",
"hsl": "hsl(120,100%,92%)"
},
"195": {
"term": "195",
"name": "LightCyan",
"hex": "#d7ffff",
"rgb": "rgb(215,255,255)",
"hsl": "hsl(180,100%,92%)"
},
"196": {
"term": "196",
"name": "Red2",
"hex": "#ff0000",
"rgb": | |
<reponame>Official-CalcOS/CalcOS-1.1
import math
from math import sqrt
import statistics
import time
import os
import webbrowser
print("© 2020 - 2021 CalcOS by <NAME>. All Rights Reserved.")
time.sleep(2)
clearConsole = lambda: os.system('cls' if os.name in ('nt', 'dos') else 'clear')
print("Searching for OS")
print("Loading prompts comencing...")
print('Loading "start"')
def startup():
clearConsole()
print("CalcOS by <NAME>")
print("")
return
print('Loading "opselect"')
def opselect():
print("Select operation")
print("Simple Interest(S), Comp Interest(C), General Maths(GM),")
print("")
print("Pythag(PY), Pi(Pi) & Statistics(St)")
print("")
print('Encountered a Bug!? Type "req_update" or "9" to see a list of options')
print('Enter "0" to quit')
bestchoice = ["s", "c", "gm", "py", "pi", "st", "req_update", "1", "2", "3", "4", "5", "6", "9", "0"]
mainchoice = str(input("Enter choice: S(1), C(2), GM(3), PY(4), PI(5), ST(6): ")).lower()
#FAT input test against all choices, sorry Jake you cant break this one lol.
if mainchoice in bestchoice:
clearConsole()
if mainchoice == 's' or mainchoice == '1':
simpleint()
elif mainchoice == 'c' or mainchoice == '2':
compoundint()
elif mainchoice == 'gm' or mainchoice == '3':
genmaths()
elif mainchoice == 'py' or mainchoice == '4':
pythagoras()
elif mainchoice == 'pi' or mainchoice == '5':
delPi()
elif mainchoice == 'st' or mainchoice == '6':
#Del the "nuh uh" print once implemented.
#Delete these comments and uncomment statistics.
#no_lol = input("Nuh Uh, not just yet...")
#opselect()
statistics()
elif mainchoice == 'req_update' or mainchoice == "9":
req_update()
elif mainchoice == '0':
clearConsole()
whyQuit = input("Sad to see you leave. Thanks for using CalcOS!")
exit()
else:
contin = input("Invalid input, press enter to continue: ")
clearConsole()
opselect()
print('Loading "simpleInt"')
def simpleint():
#Simple interest starts here
print("Work for i or 1 (Paid / earned interest)")
print("Work for p or 2 (Initial amount of money)")
print("Work for r or 3 (Interest rate)")
print("Work for t or 4 (Time)")
print("")
print('If you want to go back to the main menu, type "0"')
print("")
# Take the input from the user
# Check if choice is one of the four options with some anti letter code that probably breaks it lol
try:
betterchoice = ["i", "p", "r", "t", "1", "2", "3", "4", "0", "main menu"]
megachoice = str(input("Enter choice, keep in mind this is using I=prt | i or 1 | p or 2 | r or 3 | t or 4 |: ")).lower()
if megachoice in betterchoice:
if megachoice == '0' or megachoice == 'main menu':
clearConsole()
opselect()
elif megachoice == 'i' or megachoice == '1':
try:
num1 = float(input("Enter value of p: "))
if type(num1) == int or type(num1) == float:
num2 = float(input("Enter value of r: "))
if type(num2) == int or type(num2) == float:
num3 = float(input("Enter value of t: "))
if type(num3) == int or type(num3) == float:
print("$%0.2f"%(round(num1*(num2/100)*num3,2)))
print("")
contin = input("Your welcome, press enter to continue!")
clearConsole()
opselect()
else:
CrashStopperSimp()
else:
CrashStopperSimp()
else:
CrashStopperSimp()
except:
CrashHandler()
simpleint()
elif megachoice == 'p' or megachoice == '2':
try:
num1 = float(input("Enter value of i: "))
if type(num1) == int or type(num1) == float:
num2 = float(input("Enter value of r: "))
if type(num2) == int or type(num2) == float:
num3 = float(input("Enter value of t: "))
if type(num3) == int or type(num3) == float:
print("$%0.2f"%(round(num1/(num2*num3),2)))
print("")
contin = input("Your welcome, press enter to continue!")
clearConsole()
opselect()
else:
CrashStopperSimp()
else:
CrashStopperSimp()
else:
CrashStopperSimp()
except:
CrashHandler()
elif megachoice == 'r' or megachoice == '3':
try:
num1 = float(input("Enter value of i: "))
if type(num1) == int or type(num1) == float:
num2 = float(input("Enter value of p: "))
if type(num2) == int or type(num2) == float:
num3 = float(input("Enter value of t: "))
if type(num3) == int or type(num3) == float:
print("%f%%"%(round((num1/(num2*num3))*100,10)))
print("")
contin = input("Your welcome, press enter to continue!")
clearConsole()
opselect()
else:
CrashStopperSimp()
else:
CrashStopperSimp()
else:
CrashStopperSimp()
except:
CrashHandler()
elif megachoice == 't' or megachoice == '4':
try:
num1 = float(input("Enter value of i: "))
if type(num1) == int or type(num1) == float:
num2 = float(input("Enter value of p: "))
if type(num2) == int or type(num2) == float:
num3 = float(input("Enter value of r: "))
if type(num3) == int or type(num3) == float:
print("%0.2f ∴Total time"%(round((num1/(num2*num3))*100,2)))
print("")
contin = input("Your welcome, press enter to continue!")
clearConsole()
opselect()
else:
CrashStopperSimp()
else:
CrashStopperSimp()
else:
CrashStopperSimp()
except:
CrashHandler()
else:
CrashStopperSimp()
else:
CrashStopperSimp()
except:
CrashHandler()
return
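# Worked example for the i-branch above (illustrative numbers only):
# p = 1000, r = 5 (percent) and t = 3 years gives
#   I = 1000 * (5/100) * 3 = $150.00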
print('Loading "compoundInt"')
def compoundint():
#Here be compound interest!
try:
global n
compoundTime = ["yearly", "1", "daily", "2", "weekly", "3", "bi-weekly", "4", "semi-monthly", "5", "monthly", "6", "bi-monthly", "7", "quarterly", "8", "semi-annually", "9", "daily2", "10", "0", "main menu"]
print('If you want to go back to the main menu, type "0" or "main menu"\n')
print("List of supported compound times")
print("yearly(1)\ndaily(2)\nweekly(3)\nbi-weekly(3)\nsemi-monthly(5)\nmonthly(6)\nbi-monthly(7)\nquarterly(8)\nsemi-annually(9)\ndaily2(10) this one is 360 days.")
compTimeChoice = input("Enter your choice here: ").lower()
if compTimeChoice in compoundTime:
if compTimeChoice == 'main menu' or compTimeChoice =='0':
clearConsole()
opselect()
elif compTimeChoice == 'yearly' or compTimeChoice =='1':
n = 1
compoundint2()
elif compTimeChoice == 'daily' or compTimeChoice =='2':
n = 365
compoundint2()
elif compTimeChoice == 'weekly' or compTimeChoice =='3':
n = 52
compoundint2()
elif compTimeChoice == 'bi-weekly' or compTimeChoice =='4':
n = 26
compoundint2()
elif compTimeChoice == 'semi-monthly' or compTimeChoice =='5':
n = 24
compoundint2()
elif compTimeChoice == 'monthly' or compTimeChoice =='6':
n = 12
compoundint2()
elif compTimeChoice == 'bi-monthly' or compTimeChoice =='7':
n = 6
compoundint2()
elif compTimeChoice == 'quarterly' or compTimeChoice =='8':
n = 4
compoundint2()
elif compTimeChoice == 'semi-annually' or compTimeChoice =='9':
n = 2
compoundint2()
elif compTimeChoice == 'daily2' or compTimeChoice =='10':
n = 360
compoundint2()
else:
print("Something went wrong, check input and try again.")
compoundint()
else:
print("Something went wrong, check input and try again.")
compoundint()
except:
CrashHandler()
return
print('Loading "compoundint2"')
def compoundint2():
try:
awesomeChoice = ("i", "1", "p", "2", "r", "3", "0", "main menu")
print('If you want to go back to the main menu, type "0" or "main menu"')
Compchoice = input("What would you like to work for? |i or 1| |p or 2| |r or 3| :").lower()
if Compchoice in awesomeChoice:
if Compchoice == "i" or Compchoice == "1":
P = float(input("Enter principle amount(p): "))
r = float(input("Enter interest rate(r): "))
y = float(input("Enter the time period in years(t): "))
FV = P * (((1 + ((r/100.0)/n)) ** (n*y)))
print("")
print("The final amount after", y, "years is", FV)
print("")
contin = input("Your welcome, press enter to continue!")
clearConsole()
compoundint()
elif Compchoice == "p" or Compchoice == "2":
A = float(input("Enter final amount of money(a): "))
r = float(input("Enter interest rate(r): "))
y = float(input("Enter the time period in years(t): "))
FV = A / (((1 + ((r/100.0)/n)) ** (n*y)))
print("")
print("The principal amount needed over", y, "years is", FV)
print("")
contin = input("Your welcome, press enter to continue!")
clearConsole()
compoundint()
elif Compchoice == "r" or Compchoice == "3":
A = float(input("Enter final amount of money(a): "))
P = float(input("Enter Principle Amount: "))
y = float(input("Enter the time period in years(t): "))
FV = n*((A/P)**(1/(n*y))-1)
FV = FV * 100
print("")
print("The interest rate is:", FV,"%")
print("")
contin = input("Your welcome, press enter to continue!")
clearConsole()
compoundint()
elif Compchoice == "0" or Compchoice == "main menu":
clearConsole()
opselect()
else:
input("Check your input and try again.")
clearConsole()
compoundint()
except:
CrashHandler()
return
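# Worked example for the formula A = P * (1 + r/n) ** (n*t) used above
# (illustrative numbers only): $1000 at r = 6 (percent) compounded monthly
# (n = 12) for t = 2 years gives
#   1000 * (1 + 0.06/12) ** 24 ≈ $1127.16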
print('Loading "genMaths"')
def genmaths():
#General Mathmatics starts here
try:
print('Type "m" to return to the main menu')
print("(Multiply = * Divide = / Subtract = - Add = + Power of = **")
print("")
calc = input("Type calculation:\n").lower()
if calc != 'm':
print("Thinking...")
print("")
print("Answer: " + str(eval(calc)))
print("")
contin = input("Your welcome, press enter to continue!")
clearConsole()
genmaths()
else:
clearConsole()
opselect()
except:
CrashHandler()
genmaths()
return
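# Example session (illustrative): entering "2**10 + 5" prints "Answer: 1029".
# Note that eval() runs any Python expression typed in, so genmaths() is only
# suitable for trusted, interactive use.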
print('Loading "pythagoras"')
def pythagoras():
#Pythagoras starts here
print("Pythagorean theorem calculator! Calculate your right angled triangles sides!")
print("Assume the sides are a, b, c and c is the hypotenuse")
betterchoice = ["a", "b", "c"]
choice = input("Which side do you wish to calculate? Enter choice(A/B/C): ").lower()
try:
if choice in betterchoice:
if choice == 'c':
try:
side_a = float(input("Input the length of side a: "))
if type(side_a) == int or type(side_a) == float:
side_b = float(input("Input the length of side b: "))
# -*- coding: utf-8 -*-
"""Implementation of the ``wgs_sv_calling`` step
The (germline) ``wgs_sv_calling`` step takes as the input the results of the ``ngs_mapping`` step
(aligned NGS reads) and performs germline SV calling on them. The result are called SVs in VCF
format.
In contrast to WGS CNV calling, WGS SV calling is able to identify more than just copy number
variation, e.g., inversions. Large-range CNVs are often better detected by WGS CNV calling.
The WGS SV calling step is mostly followed by WGS SV filtration to reduce the FDR and remove
artifacts.
.. warning::
Note that only one NGS library is currently supported per bio entity/donor, also if you have
both Illumina and PacBio data for one patient, for example. This means that you have to
create a separate patient with different pk (and secondary id if you only use secondary id for
file names) for the PacBio data set or create the patient in another project.
==========
Step Input
==========
The (germline) WGS SV calling step uses Snakemake sub workflows for using the result of the
``ngs_mapping`` step.
===========
Step Output
===========
For all pedigrees, variant calling will be performed on the primary DNA NGS libraries of all
members, separately for each configured read mapper and variant caller. The name of the primary
DNA NGS library of the index will be used as an identification token in the output file. For each
read mapper, variant caller, and pedigree, the following files will be generated:
- ``{mapper}.{var_caller}.{lib_name}-{lib_pk}.vcf.gz``
- ``{mapper}.{var_caller}.{lib_name}-{lib_pk}.vcf.gz.tbi``
- ``{mapper}.{var_caller}.{lib_name}-{lib_pk}.vcf.gz.md5``
- ``{mapper}.{var_caller}.{lib_name}-{lib_pk}.vcf.gz.tbi.md5``
For example, it might look as follows for the example from above:
::
output/
+-- bwa.delly2.P001-N1-DNA1-WGS1-4
| `-- out
| |-- bwa.delly2.P001-N1-DNA1-WGS1-4.vcf.gz
| |-- bwa.delly2.P001-N1-DNA1-WGS1-4.vcf.gz.tbi
| |-- bwa.delly2.P001-N1-DNA1-WGS1-4.vcf.gz.md5
| `-- bwa.delly2.P001-N1-DNA1-WGS1-4.vcf.gz.tbi.md5
[...]
Generally, these files will be unfiltered, i.e., contain low-quality variants.
--------------
Delly 2 Output
--------------
The Delly 2 workflow used in this pipeline step incorporates the variants of the whole cohort for
regenotyping.
.. note::
The file will contain variant records for variants not present in the pedigree at hand. This
will change in the future and variants not present in the pedigree will be removed.
====================
Global Configuration
====================
- ``static_data_config/reference/path`` must be set appropriately
=====================
Default Configuration
=====================
The default configuration is as follows.
.. include:: DEFAULT_CONFIG_wgs_sv_calling.rst
=============================
Available Germline SV Callers
=============================
The following germline SV callers are currently available
- ``"dna"`` (Illumina)
- ``"delly2"``
- ``"manta"``
- ``"dna_long"`` (PacBio)
- ``"pb_honey_spots"``
- ``"sniffles"``
=======
Reports
=======
Currently, no reports are generated.
"""
# TODO: remove variants not in pedigree after the final merge step
# TODO: assumption: same platform type
# TODO: only one primary NGS library!
# TODO: only WGS libraries!
from collections import OrderedDict
import os
import sys
from biomedsheets.shortcuts import GermlineCaseSheet, is_not_background
from snakemake.io import expand
from snappy_pipeline.utils import dictify, listify
from snappy_pipeline.workflows.abstract import BaseStep, BaseStepPart, LinkOutStepPart
from snappy_pipeline.workflows.ngs_mapping import NgsMappingWorkflow
from snappy_wrappers.tools.genome_windows import yield_regions
__author__ = "<NAME> <<EMAIL>>"
#: Extensions of files to create as main payload
EXT_VALUES = (".vcf.gz", ".vcf.gz.tbi", ".vcf.gz.md5", ".vcf.gz.tbi.md5")
#: Names of the files to create for the extension
EXT_NAMES = ("vcf", "tbi", "vcf_md5", "tbi_md5")
#: Available (short) DNA WGS SV callers
DNA_WGS_SV_CALLERS = ("delly2", "manta", "popdel")
#: Available (long) DNA WGS SV callers
LONG_DNA_WGS_SV_CALLERS = ("pb_honey_spots", "sniffles")
#: Default configuration for the wgs_sv_calling step
DEFAULT_CONFIG = r"""
# Default configuration
step_config:
wgs_sv_calling:
tools:
dna: # short
- delly2
dna_long: # PacBio/Oxford Nanopore
- sniffles
path_ngs_mapping: ../ngs_mapping # REQUIRED
delly2:
path_exclude_tsv: null # optional
max_threads: 16
map_qual: 1
geno_qual: 5
qual_tra: 20
mad_cutoff: 9
manta:
max_threads: 16
popdel:
window_size: 10000000
max_sv_size: 20000 # == padding
ignore_chroms:
- NC_007605 # herpes virus
- hs37d5 # GRCh37 decoy
- chrEBV # Eppstein-Barr Virus
- '*_decoy' # decoy contig
- 'HLA-*' # HLA genes
- 'chrUn_*' # unplaced contigs
pb_honey_spots:
num_threads: 16
sniffles:
num_threads: 16
"""
class Delly2StepPart(BaseStepPart):
"""WGS SV identification using Delly2
Delly2 supports the calling based on whole cohorts. The rough steps are as follows:
- Perform variant calling on each sample individually ("delly2_call")
- Merge called variants to get a cohort-wide site list ("delly2_merge_calls")
- Perform genotyping of the variants in the cohort-wide site list in each sample
("delly2_genotype")
- Merge cohort-wide site list ("delly2_merge_genotypes"); using bcftools
- Reorder VCF and put pedigree in front; later on, non-pedigree variants should be removed.
"""
name = "delly2"
#: Actions in Delly 2 workflow
actions = ("call", "merge_calls", "genotype", "merge_genotypes", "reorder_vcf")
#: Directory infixes
dir_infixes = {
"call": r"{mapper,[^\.]+}.delly2.call.{library_name,[^\.]+}",
"merge_calls": r"{mapper,[^\.]+}.delly2.merge_calls",
"genotype": r"{mapper,[^\.]+}.delly2.genotype.{library_name,[^\.]+}",
"merge_genotypes": r"{mapper,[^\.]+}.delly2.merge_genotypes",
"reorder_vcf": r"{mapper,[^\.]+}.delly2.{index_ngs_library,[^\.]+}",
}
def __init__(self, parent):
super().__init__(parent)
self.base_path_out = (
"work/{{mapper}}.{var_caller}.{{index_ngs_library}}/out/"
"{{mapper}}.{var_caller}.{{index_ngs_library}}{ext}"
)
# Build shortcut from index library name to pedigree
self.index_ngs_library_to_pedigree = OrderedDict()
for sheet in self.parent.shortcut_sheets:
self.index_ngs_library_to_pedigree.update(sheet.index_ngs_library_to_pedigree)
# Build shortcut from library name to library info
self.library_name_to_library = OrderedDict()
for sheet in self.parent.shortcut_sheets:
self.library_name_to_library.update(sheet.library_name_to_library)
def get_library_extra_infos(self, wildcards):
"""Returns library extra infos for the given library name"""
return self.library_name_to_library[wildcards.library_name].ngs_library.extra_infos
def get_input_files(self, action):
"""Return appropriate input function for the given action"""
assert action in self.actions
mapping = {
"call": self._get_input_files_call,
"merge_calls": self._get_input_files_merge_calls,
"genotype": self._get_input_files_genotype,
"merge_genotypes": self._get_input_files_merge_genotypes,
"reorder_vcf": self._get_input_files_reorder_vcf,
}
return mapping[action]
@dictify
def _get_input_files_call(self, wildcards):
"""Return input files for "call" action"""
ngs_mapping = self.parent.sub_workflows["ngs_mapping"]
tpl = "output/{mapper}.{library_name}/out/{mapper}.{library_name}{ext}"
for name, ext in {"bam": ".bam", "bai": ".bam.bai"}.items():
yield name, ngs_mapping(tpl.format(ext=ext, **wildcards))
@listify
def _get_input_files_merge_calls(self, wildcards):
"""Return input files for "merge_calls" action"""
infix = self.dir_infixes["call"]
infix = infix.replace(r",[^\.]+", "")
tpl = os.path.join("work", infix, "out", infix + ".bcf")
for donor in self._donors_with_dna_ngs_library():
yield tpl.format(library_name=donor.dna_ngs_library.name, **wildcards)
@dictify
def _get_input_files_genotype(self, wildcards):
"""Return input files for "genotype" action"""
# Sites VCF file
infix = self.dir_infixes["merge_calls"]
infix = infix.replace(r",[^\.]+", "")
yield "bcf", os.path.join("work", infix, "out", infix + ".bcf").format(**wildcards)
# BAM files
ngs_mapping = self.parent.sub_workflows["ngs_mapping"]
tpl = "output/{mapper}.{library_name}/out/{mapper}.{library_name}{ext}"
for name, ext in {"bam": ".bam", "bai": ".bam.bai"}.items():
yield name, ngs_mapping(tpl.format(ext=ext, **wildcards))
@listify
def _get_input_files_merge_genotypes(self, wildcards):
"""Return input files for "merge_genotypes" action"""
for donor in self._donors_with_dna_ngs_library():
infix = self.dir_infixes["genotype"]
infix = infix.replace(r",[^\.]+", "")
tpl = os.path.join("work", infix, "out", infix + ".bcf")
yield tpl.format(library_name=donor.dna_ngs_library.name, **wildcards)
@dictify
def _get_input_files_reorder_vcf(self, wildcards):
"""Return input files for "reorder_vcf" action"""
infix = self.dir_infixes["merge_genotypes"]
infix = infix.replace(r",[^\.]+", "")
tpl = os.path.join("work", infix, "out", infix + ".bcf")
yield "bcf", tpl.format(**wildcards)
def _donors_with_dna_ngs_library(self):
"""Yield donors with DNA NGS library"""
for sheet in self.parent.shortcut_sheets:
for donor in sheet.donors:
if donor.dna_ngs_library:
yield donor
def get_ped_members(self, wildcards):
pedigree = self.index_ngs_library_to_pedigree[wildcards.index_ngs_library]
return " ".join(
donor.dna_ngs_library.name for donor in pedigree.donors if donor.dna_ngs_library
)
@dictify
def get_output_files(self, action):
"""Return output paths for the given action; include wildcards"""
assert action in self.actions
for name, ext in zip(EXT_NAMES, EXT_VALUES):
infix = self.dir_infixes[action]
infix2 = infix.replace(r",[^\.]+", "")
if action != "reorder_vcf": # generate bcf files internally
name = name.replace("vcf", "bcf")
ext = ext.replace("vcf.gz", "bcf")
name = name.replace("tbi", "csi")
ext = ext.replace(".tbi", ".csi")
yield name, "work/" + infix + "/out/" + infix2 + ext
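# For illustration, for action "call" the "bcf" entry generated above resolves,
# once the wildcards are filled in, to a path such as
#   work/bwa.delly2.call.P001-N1-DNA1-WGS1-4/out/bwa.delly2.call.P001-N1-DNA1-WGS1-4.bcf
# since all intermediate Delly 2 steps exchange BCF rather than VCF files.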
def get_log_file(self, action):
"""Return log file path for the given action; includes wildcards"""
assert action in self.actions
infix = self.dir_infixes[action]
infix = infix.replace(r",[^\.]+", "")
return "work/" + infix + "/log/snakemake.log"
def update_cluster_config(self, cluster_config):
for action in self.actions:
if action in ("merge_genotypes", "merge_calls", "reorder_vcf"): # cheap actions
cluster_config["wgs_sv_calling_delly2_{}".format(action)] = {
"mem": 7 * 1024 * 2,
"time": "96:00",
"ntasks": 2,
}
else:
cluster_config["wgs_sv_calling_delly2_{}".format(action)] = {
"mem": 20 * 1024 * 2,
"time": "168:00",
"ntasks": 2,
}
class MantaStepPart(BaseStepPart):
"""WGS SV identification using Manta
The work flow for Manta is very simple as it allows direct input of a pedigree's input.
However, this has the drawback of not supporting any background information.
"""
name = "manta"
def __init__(self, parent):
super().__init__(parent)
self.base_path_out = (
"work/{mapper}.manta.{index_ngs_library}/out/{mapper}.manta.{index_ngs_library}{ext}"
)
# Build shortcut from index library name to pedigree
self.index_ngs_library_to_pedigree = OrderedDict()
for sheet in self.parent.shortcut_sheets:
self.index_ngs_library_to_pedigree.update(sheet.index_ngs_library_to_pedigree)
def get_input_files(self, action):
"""Return appropriate input function for the given action"""
@listify
def input_function(wildcards):
"""Helper wrapper function"""
# Get shorcut to NGS mapping sub workflow
ngs_mapping = self.parent.sub_workflows["ngs_mapping"]
# Get names of primary libraries of the selected pedigree. The pedigree is selected
# by the primary DNA NGS library of the index.
pedigree = self.index_ngs_library_to_pedigree[wildcards.index_ngs_library]
for donor in pedigree.donors:
if donor.dna_ngs_library:
for ext in (".bam", ".bam.bai"):
tpl = "output/{mapper}.{library_name}/out/{mapper}.{library_name}{ext}"
yield ngs_mapping(
tpl.format(
library_name=donor.dna_ngs_library.name, ext=ext, **wildcards
)
)
assert action == "run", "Unsupported actions"
return input_function
@dictify
def get_output_files(self, action):
"""Return output paths for the given action; include wildcards"""
assert action == "run"
infix = "{mapper}.manta.{index_ngs_library}"
for name, ext in zip(EXT_NAMES, EXT_VALUES):
yield name, "work/" + infix + "/out/" + infix | |
# Instead, /etc/apt/trusted.gpg.d is used directly to drop the gpg key.
key_asc = _get_key_by_keyid(key)
# write the key in GPG format so that apt-key list shows it
key_gpg = _dearmor_gpg_key(key_asc)
_write_apt_gpg_keyfile(key_name=key, key_material=key_gpg)
def _get_keyid_by_gpg_key(key_material):
"""Get a GPG key fingerprint by GPG key material.
Gets a GPG key fingerprint (40-digit, 160-bit) by the ASCII armor-encoded
or binary GPG key material. Can be used, for example, to generate file
names for keys passed via charm options.
:param key_material: ASCII armor-encoded or binary GPG key material
:type key_material: bytes
:raises: GPGKeyError if invalid key material has been provided
:returns: A GPG key fingerprint
:rtype: str
"""
# Use the same gpg command for both Xenial and Bionic
cmd = 'gpg --with-colons --with-fingerprint'
ps = subprocess.Popen(cmd.split(),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE)
out, err = ps.communicate(input=key_material)
if six.PY3:
out = out.decode('utf-8')
err = err.decode('utf-8')
if 'gpg: no valid OpenPGP data found.' in err:
raise GPGKeyError('Invalid GPG key material provided')
# from gnupg2 docs: fpr :: Fingerprint (fingerprint is in field 10)
return re.search(r"^fpr:{9}([0-9A-F]{40}):$", out, re.MULTILINE).group(1)
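# Example of the record matched above (using the 40-digit key id shown in the
# _get_key_by_keyid docstring): "gpg --with-colons --with-fingerprint" emits a
# line such as
#   fpr:::::::::35F77D63B5CEC106C577ED856E85A86E4652B4E6:
# i.e. "fpr", nine colons (empty fields), the 40 hex digit fingerprint in
# field 10 and a trailing colon, which is exactly what the regular expression
# captures.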
def _get_key_by_keyid(keyid):
"""Get a key via HTTPS from the Ubuntu keyserver.
Different key ID formats are supported by SKS keyservers (the longer ones
are more secure, see "dead beef attack" and https://evil32.com/). Since
HTTPS is used, if SSLBump-like HTTPS proxies are in place, they will
impersonate keyserver.ubuntu.com and generate a certificate with
keyserver.ubuntu.com in the CN field or in SubjAltName fields of a
certificate. If such proxy behavior is expected it is necessary to add the
CA certificate chain containing the intermediate CA of the SSLBump proxy to
every machine that this code runs on via ca-certs cloud-init directive (via
cloudinit-userdata model-config) or via other means (such as through a
custom charm option). Also note that DNS resolution for the hostname in a
URL is done at a proxy server - not at the client side.
8-digit (32 bit) key ID
https://keyserver.ubuntu.com/pks/lookup?search=0x4652B4E6
16-digit (64 bit) key ID
https://keyserver.ubuntu.com/pks/lookup?search=0x6E85A86E4652B4E6
40-digit key ID:
https://keyserver.ubuntu.com/pks/lookup?search=0x35F77D63B5CEC106C577ED856E85A86E4652B4E6
:param keyid: An 8, 16 or 40 hex digit keyid to find a key for
:type keyid: (bytes, str)
:returns: A key material for the specified GPG key id
:rtype: (str, bytes)
:raises: subprocess.CalledProcessError
"""
# options=mr - machine-readable output (disables html wrappers)
keyserver_url = ('https://keyserver.ubuntu.com'
'/pks/lookup?op=get&options=mr&exact=on&search=0x{}')
curl_cmd = ['curl', keyserver_url.format(keyid)]
# use proxy server settings in order to retrieve the key
return subprocess.check_output(curl_cmd,
env=env_proxy_settings(['https']))
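# For example, the 16-digit key id from the docstring above produces the
# request URL
#   https://keyserver.ubuntu.com/pks/lookup?op=get&options=mr&exact=on&search=0x6E85A86E4652B4E6
# and the response body (the ASCII-armored key material) is returned verbatim.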
def _dearmor_gpg_key(key_asc):
"""Converts a GPG key in the ASCII armor format to the binary format.
:param key_asc: A GPG key in ASCII armor format.
:type key_asc: (str, bytes)
:returns: A GPG key in binary format
:rtype: (str, bytes)
:raises: GPGKeyError
"""
ps = subprocess.Popen(['gpg', '--dearmor'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE)
out, err = ps.communicate(input=key_asc)
# no need to decode output as it is binary (invalid utf-8), only error
if six.PY3:
err = err.decode('utf-8')
if 'gpg: no valid OpenPGP data found.' in err:
raise GPGKeyError('Invalid GPG key material. Check your network setup'
' (MTU, routing, DNS) and/or proxy server settings'
' as well as destination keyserver status.')
else:
return out
def _write_apt_gpg_keyfile(key_name, key_material):
"""Writes GPG key material into a file at a provided path.
:param key_name: A key name to use for a key file (could be a fingerprint)
:type key_name: str
:param key_material: A GPG key material (binary)
:type key_material: (str, bytes)
"""
with open('/etc/apt/trusted.gpg.d/{}.gpg'.format(key_name),
'wb') as keyf:
keyf.write(key_material)
def add_source(source, key=None, fail_invalid=False):
"""Add a package source to this system.
@param source: a URL or sources.list entry, as supported by
add-apt-repository(1). Examples::
ppa:charmers/example
deb https://stub:[email protected]/ubuntu trusty main
In addition:
'proposed:' may be used to enable the standard 'proposed'
pocket for the release.
'cloud:' may be used to activate official cloud archive pockets,
such as 'cloud:icehouse'
'distro' may be used as a noop
Full list of source specifications supported by the function are:
'distro': A NOP; i.e. it has no effect.
'proposed': the proposed deb spec [2] is written to
/etc/apt/sources.list/proposed
'distro-proposed': adds <version>-proposed to the debs [2]
'ppa:<ppa-name>': add-apt-repository --yes <ppa_name>
'deb <deb-spec>': add-apt-repository --yes deb <deb-spec>
'http://....': add-apt-repository --yes http://...
'cloud-archive:<spec>': add-apt-repository --yes cloud-archive:<spec>
'cloud:<release>[-staging]': specify a Cloud Archive pocket <release> with
optional staging version. If staging is used then the staging PPA [2]
with be used. If staging is NOT used then the cloud archive [3] will be
added, and the 'ubuntu-cloud-keyring' package will be added for the
current distro.
Otherwise the source is not recognised and this is logged to the juju log.
However, no error is raised, unless fail_invalid is True.
[1] deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
where {} is replaced with the derived pocket name.
[2] deb http://archive.ubuntu.com/ubuntu {}-proposed \
main universe multiverse restricted
where {} is replaced with the lsb_release codename (e.g. xenial)
[3] deb http://ubuntu-cloud.archive.canonical.com/ubuntu <pocket>
to /etc/apt/sources.list.d/cloud-archive-list
@param key: A key to be added to the system's APT keyring and used
to verify the signatures on packages. Ideally, this should be an
ASCII format GPG public key including the block headers. A GPG key
id may also be used, but be aware that only insecure protocols are
available to retrieve the actual public key from a public keyserver
placing your Juju environment at risk. ppa and cloud archive keys
are securely added automatically, so should not be provided.
@param fail_invalid: (boolean) if True, then the function raises a
SourceConfigError if there is no matching installation source.
@raises SourceConfigError() if for cloud:<pocket>, the <pocket> is not a
valid pocket in CLOUD_ARCHIVE_POCKETS
"""
_mapping = OrderedDict([
(r"^distro$", lambda: None), # This is a NOP
(r"^(?:proposed|distro-proposed)$", _add_proposed),
(r"^cloud-archive:(.*)$", _add_apt_repository),
(r"^((?:deb |http:|https:|ppa:).*)$", _add_apt_repository),
(r"^cloud:(.*)-(.*)\/staging$", _add_cloud_staging),
(r"^cloud:(.*)-(.*)$", _add_cloud_distro_check),
(r"^cloud:(.*)$", _add_cloud_pocket),
(r"^snap:.*-(.*)-(.*)$", _add_cloud_distro_check),
])
if source is None:
source = ''
for r, fn in six.iteritems(_mapping):
m = re.match(r, source)
if m:
if key:
# Import key before adding the source which depends on it,
# as refreshing packages could fail otherwise.
try:
import_key(key)
except GPGKeyError as e:
raise SourceConfigError(str(e))
# call the associated function with the captured groups
# raises SourceConfigError on error.
fn(*m.groups())
break
else:
# nothing matched. log an error and maybe sys.exit
err = "Unknown source: {!r}".format(source)
log(err)
if fail_invalid:
raise SourceConfigError(err)
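# Usage sketch (the source strings are the same examples given in the
# docstring above; the short key id is purely illustrative):
#
#   add_source('ppa:charmers/example')
#   add_source('cloud:icehouse')
#   add_source('deb https://stub:key@private-ppa.launchpad.net/ubuntu trusty main',
#              key='A1B2C3D4')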
def _add_proposed():
"""Add the PROPOSED_POCKET as /etc/apt/source.list.d/proposed.list
Uses get_distrib_codename to determine the correct stanza for
the deb line.
For Intel architectures PROPOSED_POCKET is used for the release, but for
other architectures PROPOSED_PORTS_POCKET is used for the release.
"""
release = get_distrib_codename()
arch = platform.machine()
if arch not in six.iterkeys(ARCH_TO_PROPOSED_POCKET):
raise SourceConfigError("Arch {} not supported for (distro-)proposed"
.format(arch))
with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
apt.write(ARCH_TO_PROPOSED_POCKET[arch].format(release))
def _add_apt_repository(spec):
"""Add the spec using add_apt_repository
:param spec: the parameter to pass to add_apt_repository
:type spec: str
"""
if '{series}' in spec:
series = get_distrib_codename()
spec = spec.replace('{series}', series)
_run_with_retries(['add-apt-repository', '--yes', spec],
cmd_env=env_proxy_settings(['https', 'http']))
def _add_cloud_pocket(pocket):
"""Add a cloud pocket as /etc/apt/sources.d/cloud-archive.list
Note that this overwrites the existing file if there is one.
This function also converts the simple pocket in to the actual pocket using
the CLOUD_ARCHIVE_POCKETS mapping.
:param pocket: string representing the pocket to add a deb spec for.
:raises: SourceConfigError if the cloud pocket doesn't exist or the
requested release doesn't match the current distro version.
"""
apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
fatal=True)
if pocket not in CLOUD_ARCHIVE_POCKETS:
raise SourceConfigError(
'Unsupported cloud: source option %s' %
pocket)
actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
apt.write(CLOUD_ARCHIVE.format(actual_pocket))
def _add_cloud_staging(cloud_archive_release, openstack_release):
"""Add the cloud staging repository which is in
ppa:ubuntu-cloud-archive/<openstack_release>-staging
This function checks that the cloud_archive_release matches the current
codename for the distro that charm is being installed on.
:param cloud_archive_release: string, codename for the release.
:param openstack_release: String, codename for the openstack release.
:raises: SourceConfigError if the cloud_archive_release doesn't match the
current version of the os.
"""
_verify_is_ubuntu_rel(cloud_archive_release, openstack_release)
ppa = 'ppa:ubuntu-cloud-archive/{}-staging'.format(openstack_release)
cmd = 'add-apt-repository -y {}'.format(ppa)
_run_with_retries(cmd.split(' '))
def _add_cloud_distro_check(cloud_archive_release, openstack_release):
"""Add the cloud pocket, but also check the cloud_archive_release against
the current distro, and use the openstack_release as the full lookup.
This just calls _add_cloud_pocket() with the openstack_release as pocket
to get the correct cloud-archive.list for dpkg | |
# Copyright 2020, 2021 Emory University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = '<NAME>'
import sys
from datetime import datetime
try:
import importlib.resources as pkg_resources
except ImportError:
import importlib_resources as pkg_resources
from streamside.resources import wiser, amr
import argparse
import json
import os
import re
from typing import List, Dict, Optional, Tuple, Callable, Set, Union
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QKeySequence, QTextCursor, QTextCharFormat, QFont
from PyQt5.QtWidgets import QApplication, QWidget, QLabel, QMainWindow, QAction, qApp, QFileDialog, QHBoxLayout, \
QMessageBox, QGridLayout, QTextEdit, QCompleter, QLineEdit, QDialog, QPushButton, QCheckBox, QPlainTextEdit, QShortcut, QStatusBar, QInputDialog, QVBoxLayout
from streamside.struct import Graph, OffsetMap, Offset, penman_reader
class InputDialog(QDialog):
def __init__(self, parent, title: str, width: int, completer_list: List[str], completer_max: int):
super().__init__(parent)
self.setWindowTitle(title)
self.setMinimumWidth(width)
# completer
completer = QCompleter(completer_list)
completer.setMaxVisibleItems(completer_max)
self.ledit = QLineEdit()
self.ledit.setCompleter(completer)
self.ledit.editingFinished.connect(self.edit_finished)
self.ok = False
self.btn_ok = QPushButton("OK", self)
self.btn_ok.clicked.connect(self.button_ok)
self.btn_cancel = QPushButton("Cancel", self)
self.btn_cancel.clicked.connect(self.button_cancel)
def edit_finished(self):
pass
def button_ok(self):
self.ok = self.sender() == self.btn_ok
self.close()
def button_cancel(self):
self.close()
class ConceptDialog(InputDialog):
def __init__(self, parent, title: str, concept_name: str, attribute: bool):
ctype = 'an attribute' if attribute else 'a concept'
super().__init__(parent, '{} {}'.format(title, ctype), 350, parent.concept_list, 50)
self.concept_dict = parent.concept_dict
layout = QVBoxLayout()
self.setLayout(layout)
# components
self.ledit.setText(concept_name)
self.lb_desc = QPlainTextEdit('Description')
self.lb_desc.setReadOnly(True)
wg_concept = QWidget()
l = QHBoxLayout()
l.setContentsMargins(0, 0, 0, 0)
wg_concept.setLayout(l)
l.addWidget(self.ledit)
# self.ck_attr = QCheckBox()
# l.addWidget(self.ck_attr)
# l.addWidget(QLabel('Attribute'))
# buttons
wg_button = QWidget()
l = QHBoxLayout()
l.setContentsMargins(50, 0, 50, 0)
wg_button.setLayout(l)
l.addWidget(self.btn_ok)
l.addWidget(self.btn_cancel)
# layout
layout.addWidget(QLabel('Enter the name:'))
layout.addWidget(wg_concept)
layout.addWidget(self.lb_desc)
layout.addWidget(wg_button)
# shortcut
self.sct_attribute = QShortcut(QKeySequence('Ctrl+R'), self)
self.sct_attribute.activated.connect(self.check_attribute)
def edit_finished(self):
v = self.concept_dict.get(self.ledit.text().strip(), None)
text = v['description'] if v else 'No description available'
self.lb_desc.setPlainText(text)
self.lb_desc.repaint()
def check_attribute(self):
# the attribute checkbox is currently disabled (see the commented-out widget above)
if hasattr(self, 'ck_attr'):
self.ck_attr.setChecked(not self.ck_attr.isChecked())
def exec_(self) -> Optional[str]:
super().exec_()
c = self.ledit.text().strip()
return c if c and self.ok else None
class RelationDialog(InputDialog):
def __init__(self, parent, title: str, parent_id: str, child_id: str, label: str = '', update: bool = False):
super().__init__(parent, title, 550, parent.relation_list, 50)
self.relation_dict = parent.relation_dict
layout = QVBoxLayout()
self.setLayout(layout)
graph = parent.current_graph
parent_concept = graph.get_concept(parent_id)
child_concept = graph.get_concept(child_id)
parent_desc = '({} / {})'.format(parent_id, parent_concept.name)
child_desc = '({} / {})'.format(child_id, child_concept.name)
# components
self.ledit.setText(label)
self.referent = QCheckBox()
self.inverse = QCheckBox()
self.lb_desc = QPlainTextEdit('Description')
self.lb_desc.setReadOnly(True)
# AMR only
self.concept_desc = None
if parent.mode == 'amr':
con = parent.current_graph.get_concept(parent_id)
d = parent.concept_dict.get(con.name, None)
if d and d['type'] == 'pred':
self.concept_desc = d['description']
self.lb_desc.setPlainText(self.concept_desc)
# child + referent
wg_child = QWidget()
l = QHBoxLayout()
l.setContentsMargins(0, 0, 0, 0)
wg_child.setLayout(l)
l.addWidget(QLabel('Child: {}'.format(child_desc)), 80)
if not update:
if graph.parent_relations(child_id) or graph.is_ancestor(child_id, parent_id):
self.referent.setChecked(True)
self.referent.setEnabled(False)
l.addWidget(self.referent)
l.addWidget(QLabel('Referent'))
# ledit + inverse
wg_ledit = QWidget()
l = QHBoxLayout()
l.setContentsMargins(0, 0, 0, 0)
wg_ledit.setLayout(l)
l.addWidget(self.ledit)
if not update:
l.addWidget(self.inverse)
l.addWidget(QLabel('-of'))
# buttons
wg_button = QWidget()
l = QHBoxLayout()
l.setContentsMargins(50, 0, 50, 0)
wg_button.setLayout(l)
l.addWidget(self.btn_ok)
l.addWidget(self.btn_cancel)
# layout
layout.addWidget(QLabel('Parent: {}'.format(parent_desc)))
layout.addWidget(wg_child)
layout.addWidget(wg_ledit)
layout.addWidget(self.lb_desc)
layout.addWidget(wg_button)
# shortcuts
self.sct_referent = QShortcut(QKeySequence('Ctrl+R'), self)
self.sct_referent.activated.connect(self.check_referent)
self.sct_inverse = QShortcut(QKeySequence('Ctrl+F'), self)
self.sct_inverse.activated.connect(self.check_inverse)
def edit_finished(self):
if self.concept_desc is None:
v = self.relation_dict.get(self.ledit.text().strip(), None)
text = v['description'] if v else 'No description available'
self.lb_desc.setPlainText(text)
self.lb_desc.repaint()
def check_referent(self):
self.referent.setChecked(not self.referent.isChecked())
def check_inverse(self):
self.inverse.setChecked(not self.inverse.isChecked())
def exec_(self) -> Optional[Tuple[str, bool]]:
super().exec_()
if self.ok:
label = self.ledit.text().strip()
if not label: return None
if self.inverse.isChecked(): label += '-of'
return label, self.referent.isChecked()
else:
return None
class GraphAnnotator(QMainWindow):
def __init__(self, resource_dir: str, mode: str, annotator: str = 'unknown'):
super().__init__()
# constants
font = QFont()
font.setFamily('Courier New')
self.VERSION = '0.5'
self.RE_CONCEPT_ID = re.compile(r'([ca]\d+)')
self.RE_CONCEPT_ID_PAREN = re.compile(r'(?:^| )\(([ca]\d+) /')
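# Illustrative matches for the two patterns above: RE_CONCEPT_ID accepts bare
# concept/attribute ids such as "c3" or "a12", while RE_CONCEPT_ID_PAREN
# matches the "(c3 /" prefix that opens a concept in the rendered graph text.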
self.FONT_GRAPH = font
self.COLOR_COVERED_TOKEN = '<PASSWORD>'
self.COLOR_SELECTED_PARENT = 'lightpink'
self.COLOR_SELECTED_CHILD = 'lightgreen'
self.COLOR_COVERED_TEXT_SPAN = 'khaki'
self.COLOR_SELECTED_CONCEPT = 'burlywood'
# resources
self.concept_dict: Dict[str, str] = dict()
self.concept_list: List[str] = []
self.relation_dict: Dict[str, str] = dict()
self.relation_list: List[str] = []
self.init_resources(resource_dir, mode)
# fields
self.mode = mode
self.annotator: str = annotator
self.filename: str = ''
self.tid: int = -1
self.graphs: List[Graph] = []
self.offset_maps: List[OffsetMap] = []
self.selected_parent: Optional[Tuple[str, int]] = None
self.selected_child: Optional[Tuple[str, int]] = None
self.selected_concept: Optional[Tuple[str, int]] = None
self.selected_text_spans: Set[int] = set()
# graphical user interface
layout = self._init_central_widget('StreamSide Graph Annotator: {}'.format(annotator), 800, 800)
self.lb_tid = QLabel('Index:')
self.lb_text = QLabel('Open a text or json file to start annotating')
self.te_graph = QTextEdit()
self.statusbar = self._init_statusbar()
self._init_annotation(layout)
self._init_menubar()
######################################## Properties ########################################
@property
def current_graph(self) -> Optional[Graph]:
return self.graphs[self.tid] if 0 <= self.tid < len(self.graphs) else None
@property
def current_offset_map(self) -> Optional[OffsetMap]:
return self.offset_maps[self.tid] if 0 <= self.tid < len(self.offset_maps) else None
######################################## Init ########################################
def init_resources(self, resource_dir: str, mode: str):
if resource_dir:
f_concepts = open(os.path.join(resource_dir, 'concepts.json'))
f_relations = open(os.path.join(resource_dir, 'relations.json'))
else:
m = wiser if mode == 'wiser' else amr
f_concepts = pkg_resources.open_text(m, 'concepts.json')
f_relations = pkg_resources.open_text(m, 'relations.json')
# concepts
self.concept_dict = json.load(f_concepts)
self.concept_list = sorted(self.concept_dict.keys())
self.relation_dict = json.load(f_relations)
self.relation_list = sorted(self.relation_dict.keys())
def _init_central_widget(self, title: str, width: int, height: int) -> QGridLayout:
widget = QWidget()
layout = QGridLayout()
widget.setLayout(layout)
self.setWindowTitle(title)
self.setCentralWidget(widget)
self.setMinimumSize(width, height)
return layout
def _init_annotation(self, layout: QGridLayout):
# text
self.lb_text.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.lb_text.setWordWrap(True)
w = QWidget()
tb_layout = QHBoxLayout()
w.setLayout(tb_layout)
tb_layout.setContentsMargins(0, 0, 0, 0)
tb_layout.addWidget(self.lb_tid, 2)
tb_layout.addWidget(self.lb_text, 98)
layout.addWidget(w)
# graph
self.te_graph.setReadOnly(True)
layout.addWidget(self.te_graph)
def _init_statusbar(self) -> QStatusBar:
statusbar = QStatusBar()
self.setStatusBar(statusbar)
return statusbar
def _init_menubar(self):
def action(name: str, shortcut: str, handler: Callable[[], None]):
act = QAction(name, self)
act.setShortcut(shortcut)
act.triggered.connect(handler)
return act
menubar = self.menuBar()
menubar.setNativeMenuBar(False)
# file
menu = menubar.addMenu('File')
menu.addAction(action('Open', 'Ctrl+o', self.menu_file_open))
menu.addAction(action('Save', 'Ctrl+s', self.menu_file_save))
menu.addSeparator()
menu.addAction(action('About', 'Ctrl+i', self.menu_file_about))
menu.addAction(action('Quit', 'Ctrl+q', qApp.quit))
# edit
menu = menubar.addMenu('Edit')
menu.addAction(action('Create Concept', 'c', self.menu_create_concept))
menu.addAction(action('Create Attribute', 'a', self.menu_create_attribute))
menu.addAction(action('Create Relation', 'r', self.menu_create_relation))
menu.addSeparator()
menu.addAction(action('Update', 'Ctrl+f', self.menu_update))
menu.addAction(action('Delete', 'Ctrl+d', self.menu_delete))
menu.addSeparator()
menu.addAction(action('Add Text Span', 'v', self.menu_add_text_spans_to_concept))
menu.addAction(action('Remove Text Span', 'Ctrl+v', self.menu_remove_text_spans_from_concept))
# select
menu = menubar.addMenu('&Select')
menu.addAction(action('Select Text Span', 'x', self.menu_select_text_span))
menu.addAction(action('Select Concept', 'z', self.menu_select_concept))
menu.addAction(action('Select Parent ID', 'w', self.menu_select_parent))
menu.addAction(action('Select Child ID', "e", self.menu_select_child))
menu.addSeparator()
menu.addAction(action('Deselect Text Span', 'Shift+x', self.menu_deselect_text_span))
menu.addAction(action('Deselect Concept', 'Shift+z', self.menu_deselect_concept))
menu.addAction(action('Deselect Parent ID', 'Shift+w', self.menu_deselect_parent))
menu.addAction(action('Deselect Child ID', 'Shift+e', self.menu_deselect_child))
# navigate
menu = menubar.addMenu('Navigate')
menu.addAction(action('Previous', ',', self.menu_navigate_previous))
menu.addAction(action('Next', '.', self.menu_navigate_next))
menu.addSeparator()
menu.addAction(action('Jump to First', 'Ctrl+,', self.menu_navigate_jump_first))
menu.addAction(action('Jump to Last', 'Ctrl+.', self.menu_navigate_jump_last))
menu.addAction(action('Jump to ...', 'Ctrl+/', self.menu_navigate_jump))
#################### Menubar: File ####################
def menu_file_open(self):
def open_txt(txt_file):
json_file = '{}{}.json'.format(txt_file[:-3], self.annotator)
self.filename = json_file
if os.path.exists(json_file):
msg = 'Annotation exists for the selected text file. Opening the annotation file instead.'
message_box(msg, QMessageBox.Ok)
open_json(json_file)
else:
fin = open(txt_file)
tid = os.path.basename(txt_file)[:-4]
self.graphs = [Graph(text, '{}.{}'.format(tid, i), self.annotator) for i, text in enumerate(fin)]
def open_json(json_file):
self.filename = json_file
with open(self.filename) as fin:
d = json.load(fin)
self.graphs = [Graph.factory(graph) for graph in d['graphs']]
def open_penman(penman_file):
json_file = '{}{}.json'.format(penman_file[:-6], self.annotator)
self.filename = json_file
if os.path.exists(json_file):
msg = 'Annotation exists for the selected text file. Opening the annotation file instead.'
message_box(msg, QMessageBox.Ok)
open_json(json_file)
else:
self.graphs = penman_reader(penman_file)
# get filename
filename = QFileDialog.getOpenFileName(self, 'Open File')[0]
if not filename: return
self.menu_file_save()
# check extension
if filename[-4:].lower() == '.txt':
open_txt(filename)
elif filename[-5:].lower() == '.json':
open_json(filename)
elif filename[-7:].lower() == '.penman':
open_penman(filename)
else:
self.statusbar.showMessage('Unsupported file type: {}'.format(os.path.basename(filename)))
return
# initialize
self.statusbar.showMessage('Open: {}'.format(self.filename))
self.offset_maps = [OffsetMap(graph.tokens) for i, graph in enumerate(self.graphs)]
self.setWindowTitle(os.path.basename(self.filename))
self.select_annotation(0)
def menu_file_save(self):
if not self.filename:
self.statusbar.showMessage('Output file is not specified.')
return
self.current_graph.last_saved = current_time()
with open(self.filename, 'w') as fout:
d = [' ' + graph.json_dumps() for graph in self.graphs]
fout.write('{{\n "graphs": [\n{}\n ]\n}}\n'.format(',\n'.join(d)))
self.statusbar.showMessage('Save: {}'.format(self.filename))
def menu_file_about(self):
msg = QMessageBox()
text = 'StreamSide v{} developed by <NAME>LP\nhttps://github.com/emorynlp/StreamSide\nContact: <EMAIL>'.format(self.VERSION)
msg.setText(text)
msg.setIcon(QMessageBox.Information)
msg.setStandardButtons(QMessageBox.Ok)
msg.exec_()
def closeEvent(self, event):
self.menu_file_save()
event.accept()
#################### Menubar: Edit ####################
def _menu_create_concept(self, attribute: bool):
self.menu_select_text_span()
graph = self.current_graph
tokens = graph.get_tokens(self.selected_text_spans)
text = ' '.join(tokens) if attribute else '-'.join(tokens).lower()
name = ConceptDialog(self, 'Create', text, attribute).exec_()
ctype = 'Attribute' if attribute else 'Concept'
if name:
cid = graph.add_concept(name, self.selected_text_spans, attribute)
self.selected_text_spans.clear()
self.refresh_annotation()
self.statusbar.showMessage('{} created: ({} / {}) - {}'.format(ctype, cid, name, str(tokens)))
else:
self.statusbar.showMessage('{} creation is cancelled.'.format(ctype))
def menu_create_concept(self):
| |
# FinsterTab/W2020/DataForecast.py
# import libraries to be used in this code module
import pandas as pd
from statsmodels.tsa.arima_model import ARIMA
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
from math import sqrt
from statistics import stdev
import numpy as np
import xgboost as xgb
import calendar
import datetime as dt
from datetime import timedelta, datetime
import FinsterTab.W2020.AccuracyTest
import sqlalchemy as sal
from sklearn.model_selection import train_test_split # not used at this time
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
import matplotlib.pyplot as plt
# class declaration and definition
class DataForecast:
def __init__(self, engine, table_name):
"""
Calculate historic one day returns and 10 days of future price forecast
based on various methods
Store results in dbo_AlgorithmForecast table
:param engine: provides connection to MySQL Server
:param table_name: table name where ticker symbols are stored
"""
self.engine = engine
self.table_name = table_name
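# Typical usage sketch (the connection string and table name below are
# illustrative placeholders, not the project's actual values):
#
#   engine = sal.create_engine('mysql+pymysql://user:pass@localhost/finstertab')
#   DataForecast(engine, 'dbo_instrumentmaster').calculate_forecast()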
def calculate_forecast(self):
"""
Calculate historic one day returns based on traditional forecast model
and 10 days of future price forecast
Store results in dbo_AlgorithmForecast
Improved forecast where we took out today's close price to predict today's price
10 prior business days close prices are used as inputs to predict next day's price
"""
# retrieve InstrumentMaster table from the database
query = 'SELECT * FROM {}'.format(self.table_name)
df = pd.read_sql_query(query, self.engine)
algoCode = "'PricePred'" # Master `algocode` for improved prediction from previous group, user created codes
# add code to database if it doesn't exist
code_query = 'SELECT COUNT(*) FROM dbo_algorithmmaster WHERE algorithmcode=%s' % algoCode
count = pd.read_sql_query(code_query, self.engine)
if count.iat[0, 0] == 0:
algoName = "'PricePrediction'"
insert_code_query = 'INSERT INTO dbo_algorithmmaster VALUES({},{})'.format(algoCode, algoName)
self.engine.execute(insert_code_query)
# loop through each ticker symbol
for ID in df['instrumentid']:
# remove all future prediction dates
remove_future_query = 'DELETE FROM dbo_algorithmforecast WHERE algorithmcode={} AND prederror=0 AND ' \
'instrumentid={}'.format(algoCode, ID)
self.engine.execute(remove_future_query)
# find the latest forecast date
date_query = 'SELECT forecastdate FROM dbo_algorithmforecast WHERE algorithmcode={} AND instrumentid={} ' \
'ORDER BY forecastdate DESC LIMIT 1'.format(algoCode, ID)
latest_date = pd.read_sql_query(date_query, self.engine) # most recent forecast date calculation
# if table has forecast prices already find the latest one and delete it
# need to use most recent data for today if before market close at 4pm
if not latest_date.empty:
latest_date_str = "'" + str(latest_date['forecastdate'][0]) + "'"
delete_query = 'DELETE FROM dbo_algorithmforecast WHERE algorithmcode={} AND instrumentid={} AND ' \
'forecastdate={}'.format(algoCode, ID, latest_date_str)
self.engine.execute(delete_query)
# get raw price data from database
data_query = 'SELECT A.date, A.close, B.ltrough, B.lpeak, B.lema, B.lcma, B.highfrllinelong, ' \
'B. medfrllinelong, B.lowfrllinelong FROM dbo_instrumentstatistics AS A, '\
'dbo_engineeredfeatures AS B WHERE A.instrumentid=B.instrumentid AND A.date=B.date ' \
'AND A.instrumentid=%s ORDER BY Date ASC' %ID
data = pd.read_sql_query(data_query, self.engine)
# prediction formula inputs
# IF THESE VALUES ARE CHANGED, ALL RELATED PREDICTIONS STORED IN THE DATABASE BECOME INVALID!
sMomentum = 2
lMomentum = 5
sDev = 10
ma = 10
start = max(sMomentum, lMomentum, sDev, ma)
# calculate prediction inputs
data['sMomentum'] = data['close'].diff(sMomentum)
data['lMomentum'] = data['close'].diff(lMomentum)
data['stDev'] = data['close'].rolling(sDev).std()
data['movAvg'] = data['close'].rolling(ma).mean()
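# Note on the rule applied below: when the short- and long-term momentum
# values agree in sign, the next close is projected as the previous close
# plus a 99% confidence band on the mean, close + 2.576 * stDev / sqrt(sDev)
# (2.576 being the two-sided z-value for 99% confidence); otherwise the
# 10-day moving average is used as the forecast.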
# first predictions can be made after 'start' number of days
for n in range(start, len(data)):
insert_query = 'INSERT INTO dbo_algorithmforecast VALUES ({}, {}, {}, {}, {})'
# populate entire table if empty
# or add new dates based on information in Statistics table
"""Look into this to add SMA"""
if latest_date.empty or latest_date['forecastdate'][0] <= data['date'][n]:
if data['sMomentum'][n-1] >= 0 and data['lMomentum'][n-1] >= 0:
forecastClose = data['close'][n-1] + (2.576 * data['stDev'][n-1] / sqrt(sDev))
elif data['sMomentum'][n-1] <= 0 and data['lMomentum'][n-1] <= 0:
forecastClose = data['close'][n - 1] + (2.576 * data['stDev'][n - 1] / sqrt(sDev))
else:
forecastClose = data['movAvg'][n-1]
predError = 100 * abs(forecastClose - data['close'][n])/data['close'][n]
forecastDate = "'" + str(data['date'][n]) + "'"
#insert new prediction into table
insert_query = insert_query.format(forecastDate, ID, forecastClose, algoCode, predError)
self.engine.execute(insert_query)
# model for future price movements
data['momentumA'] = data['close'].diff(10)
data['lagMomentum'] = data['momentumA'].shift(5)
fdate = "'" + str(data['date'][n]) + "'"
# number of weekdays
weekdays = 10
# 3 weeks of weekdays
days = 15
forecast = []
forecast_dates_query = 'SELECT date from dbo_datedim WHERE date > {} AND weekend=0 AND isholiday=0 ' \
'ORDER BY date ASC LIMIT {}'.format(fdate, weekdays)
future_dates = pd.read_sql_query(forecast_dates_query, self.engine)
insert_query = 'INSERT INTO dbo_algorithmforecast VALUES ({}, {}, {}, {}, {})'
# Forecast close price tomorrow
if data['sMomentum'][n] >= 0 and data['lMomentum'][n] >= 0:
forecastClose = data['close'][n] + (2.576 * data['stDev'][n] / sqrt(sDev))
elif data['sMomentum'][n] <= 0 and data['lMomentum'][n] <= 0:
forecastClose = data['close'][n] + (2.576 * data['stDev'][n] / sqrt(sDev))
else:
forecastClose = data['movAvg'][n]
predError = 0
forecastDate = "'" + str(future_dates['date'][0]) + "'"
insert_query = insert_query.format(forecastDate, ID, forecastClose, algoCode, predError)
self.engine.execute(insert_query)
# forecast next 9 days
# for i in range # of weekdays
"""Forecast for future from here down"""
for i in range(1, len(future_dates)):
insert_query = 'INSERT INTO dbo_algorithmforecast VALUES ({}, {}, {}, {}, {})'
# if the momentum is negative
if data['momentumA'].tail(1).iloc[0] < 0.00:
# Set Fibonacci extensions accordingly
data['fibExtHighNeg'] = data['lpeak'] - (
(data['lpeak'] - data['ltrough']) * 1.236)
data['fibExtLowNeg'] = data['lpeak'] - (
(data['lpeak'] - data['ltrough']) * 1.382)
highfrllinelong = data['highfrllinelong'].tail(1).iloc[0]
# Compute average over last 3 weeks of weekdays
avg_days = np.average(data['close'].tail(days))
# Compute standard Deviation over the last 3 weeks and the average.
std_days = stdev(data['close'].tail(days), avg_days)
# Compute Standard Error and apply to variable decrease
# assign CMA and EMA values
decrease = avg_days - (1.960 * std_days) / (sqrt(days))
data['fibExtHighPos'] = 0
data['fibExtLowPos'] = 0
l_cma = data['lcma'].tail(1)
l_cma = l_cma.values[0]
l_ema = data['lema'].tail(1)
l_ema = l_ema.values[0]
# Loop through each upcoming day in the week
for x in range(weekdays-1):
# Compare to current location of cma and frl values
# if CMA and FRL are lower than forecast
# Forecast lower with a medium magnitude
if decrease > l_cma or decrease >= (highfrllinelong + (highfrllinelong * 0.01)) \
or decrease > l_ema:
decrease -= .5 * std_days
forecast.append(decrease)
# If CMA and FRL are higher than forecast
# Forecast to rise with an aggressive magnitude
elif decrease <= l_cma and decrease <= (
highfrllinelong - (highfrllinelong * 0.01)) and decrease <= l_ema:
decrease += 1.5 * std_days
forecast.append(decrease)
x = x + 1
# if the momentum is positive
elif data['momentumA'].tail(1).iloc[0] > 0.00:
# ...Set fibonacci extensions accordingly
data['fibExtHighPos'] = data['lpeak'] + (
(data['lpeak'] - data['ltrough']) * 1.236)
data['fibExtLowPos'] = data['lpeak'] + (
(data['lpeak'] - data['ltrough']) * 1.382)
highfrllinelong = data['highfrllinelong'].tail(1).iloc[0]
# Compute average over last 3 weeks of weekdays
avg_days = np.average(data['close'].tail(days))
# Compute standard Deviation over the last 3 weeks and the average.
std_days = stdev(data['close'].tail(days), avg_days)
# Compute Standard Error and apply to variable increase
increase = avg_days + (1.960 * std_days) / (sqrt(days))
data['fibExtHighNeg'] = 0
data['fibExtLowNeg'] = 0
l_cma = data['lcma'].tail(1)
l_cma = l_cma.values[0]
l_ema = data['lema'].tail(1)
l_ema = l_ema.values[0]
for x in range(weekdays-1):
# Compare to current location of cma and frl values
# if CMA and FRL are lower than forecast
# Forecast lower with a normal magnitude
if increase > l_cma and increase >= (highfrllinelong - (highfrllinelong * 0.01)) \
and increase > l_ema:
increase -= std_days
forecast.append(increase)
# if CMA and FRL are lower than forecast
# Forecast lower with an aggressive magnitude
elif increase <= l_cma or increase <= (
highfrllinelong - (highfrllinelong * 0.01)) or increase <= l_ema:
increase += 1.5 * std_days
forecast.append(increase)
forecastDateStr = "'" + str(future_dates['date'][i]) + "'"
# Send the addition of new variables to SQL
# predicted values error is 0 because the actual close prices for the future is not available
predError = 0
insert_query = insert_query.format(forecastDateStr, ID, forecast[i], algoCode, predError)
self.engine.execute(insert_query)
"""Look into why warnings due to incorrect inputs"""
def calculate_arima_forecast(self):
"""
Calculate historic next-day returns based on ARIMA forecast model
and 10 days of future price forecast
Store results in dbo_AlgorithmForecast
To predict next day's value, prior 50 business day's close prices are used
"""
# retrieve InstrumentsMaster table from database
query = 'SELECT * FROM {}'.format(self.table_name)
df = pd.read_sql_query(query, self.engine)
algoCode = "'ARIMA'"
# add code to database if | |
#!/usr/bin/env python3
"""
Copyright::
+===================================================+
| © 2021 Privex Inc. |
| https://www.privex.io |
+===================================================+
| |
| Python PIP Wrapper Library |
| License: X11/MIT |
| Repo: https://github.com/Privex/pipwrapper |
| |
| Core Developer(s): |
| |
| (+) Chris (@someguy123) [Privex] |
| |
+===================================================+
Python PIP Wrapper (PyPi Wrapper) - A simple, dependency-free library for using PIP via wrapping the CLI utility (python3.x -m pip ARGS)
Copyright (c) 2021 Privex Inc. ( https://www.privex.io )
"""
import sys
import subprocess
import logging
from os import getenv as env
from subprocess import PIPE, STDOUT
from typing import Any, Callable, List, NamedTuple, Optional, Union
VERSION = '0.5.0'
PIP_LOG_LEVEL = env('PIP_LOG_LEVEL', 'WARNING').upper()
log = logging.getLogger('pipwrapper')
log.setLevel(logging.DEBUG)
_hd = logging.StreamHandler(sys.stderr)
_hd.setLevel(logging.getLevelName(PIP_LOG_LEVEL))
log.addHandler(_hd)
__all__ = ['ProcResult', 'Pip', 'PIP_LOG_LEVEL', 'VERSION']
def err(*args, file=sys.stderr, **kwargs):
return print(*args, file=file, **kwargs)
class ProcResult(NamedTuple):
"""Result of a finished subprocess: captured ``stdout`` / ``stderr`` (``None`` when not captured) plus the return code."""
stdout: Optional[bytes] = None
stderr: Optional[bytes] = None
retcode: int = 0
def stringify(d, encoding='utf-8', fail_none=False, fail_bool=False) -> str:
"""Convert ``d`` to :class:`str`, decoding bytes with ``encoding``; ``None`` becomes ``''`` and booleans are stringified unless ``fail_none`` / ``fail_bool`` request a :class:`ValueError` instead."""
if d is None:
if fail_none: raise ValueError("Passed object to stringify is None and fail_none is True! Can't stringify None.")
return ''
if isinstance(d, bool):
if fail_bool: raise ValueError(f"Passed object to stringify is boolean ('{d}') and fail_bool is True! Can't stringify boolean.")
return str(d)
if isinstance(d, str): return d
if isinstance(d, (bytes, bytearray)): return d.decode(encoding)
return str(d)
class Pip(object):
"""
Python PIP Wrapper (PyPi Wrapper) - A simple, dependency-free library for using PIP via wrapping the CLI utility
Copyright (c) 2021 Privex Inc. ( https://www.privex.io )
License: X11/MIT Repo: https://github.com/Privex/pipwrapper
**Example usage**
Using :meth:`.auto_install` - it will check for any required packages that aren't already installed, and then
automatically install any specified packages which are missing::
>>> xrs = Pip.auto_install('privex-helpers', 'pypng', 'cairosvg', 'humanize')
[!!!] Installing missing packages: ['privex-helpers', 'pypng', 'cairosvg', 'humanize']
>>> xrs.retcode
0
>>> print(xrs.stdout.decode('utf-8'))
Collecting privex-helpers
Using cached privex_helpers-3.2.1-py3-none-any.whl (231 kB)
Processing /Users/chris/Library/Caches/pip/wheels/28/dd/ea/756ac2cb38f4e73f04a756fb3b4650e5f5dcd019a641098959/pypng-0.0.20-py3-none-any.whl
Collecting cairosvg
Using cached CairoSVG-2.5.2-py3-none-any.whl (45 kB)
Collecting humanize
Using cached humanize-3.3.0-py3-none-any.whl (70 kB)
.............
Installing collected packages: privex-helpers, pypng, cairosvg, humanize
Successfully installed cairosvg-2.5.2 humanize-3.3.0 privex-helpers-3.2.1 pypng-0.0.20
Using :meth:`.install` , you can install/upgrade one or more Python packages, and collect pip's output / return code::
>>> Pip.install('pillow')
Out[14]: ProcResult(
stdout=b"Collecting pillow\n Using cached Pillow-8.1.2-cp39-cp39-macosx_10_10_x86_64.whl (2.2 MB)\nInstalling
collected packages: pillow\nSuccessfully installed pillow-8.1.2\nWARNING: You are using pip version 20.2.3; however,
version 21.0.1 is available.\nYou should consider upgrading via the '/tmp/tmp.awnu6YYn/venv/bin/python3 -m pip
install --upgrade pip' command.\n",
stderr=None,
retcode=0
)
>>> res = Pip.install('privex-asdf-does-not-exist')
>>> print(res.retcode)
1
>>> print(res.stdout.decode('utf-8'))
ERROR: Could not find a version that satisfies the requirement privex-asdf-does-not-exist (from versions: none)
ERROR: No matching distribution found for privex-asdf-does-not-exist
WARNING: You are using pip version 20.2.3; however, version 21.0.1 is available.
You should consider upgrading via the '/tmp/tmp.awnu6YYn/venv/bin/python3 -m pip install --upgrade pip' command.
By passing ``output=True`` to most methods such as :meth:`.install`, :meth:`.uninstall`, :meth:`.auto_install` etc.,
the argument will be passed down to :meth:`.call`.
The kwarg ``output=True`` acts as a shortcut for ``stdout=None, stderr=None``, and causes both the ``stdout`` and ``stderr`` of the
command being run to bypass :class:`subprocess.Popen` 's capturing; instead, they'll both be piped into the current script's
stdout/stderr in realtime as the command executes::
>>> Pip.install('pillow', output=True)
Requirement already up-to-date: pillow in ./venv/lib/python3.9/site-packages (8.1.2)
WARNING: You are using pip version 20.2.3; however, version 21.0.1 is available.
You should consider upgrading via the '/tmp/tmp.awnu6YYn/venv/bin/python3 -m pip install --upgrade pip' command.
Out[12]: ProcResult(stdout=None, stderr=None, retcode=0)
>>> Pip.uninstall('pillow', output=True)
Found existing installation: Pillow 8.1.2
Uninstalling Pillow-8.1.2:
Successfully uninstalled Pillow-8.1.2
Out[13]: ProcResult(stdout=None, stderr=None, retcode=0)
Copyright::
+===================================================+
| © 2021 Privex Inc. |
| https://www.privex.io |
+===================================================+
| |
| Python PIP Wrapper Library |
| License: X11/MIT |
| Repo: https://github.com/Privex/pipwrapper |
| |
| Core Developer(s): |
| |
| (+) Chris (@someguy123) [Privex] |
| |
+===================================================+
Python PIP Wrapper (PyPi Wrapper) - A simple, dependency-free library for using PIP via wrapping the CLI utility
Copyright (c) 2021 Privex Inc. ( https://www.privex.io )
"""
PYEXE: str = sys.executable
PIP: List[str] = [PYEXE, '-m', 'pip']
QUIET: bool = False
VERSION: str = VERSION
def __init__(self):
pass
@classmethod
def call(cls, *args, write=None, output=False, comm_timeout=30.0, **kwargs) -> ProcResult:
"""
This is the lower-level method which the main command methods use to call ``pip`` with whichever
arguments they need. It also handles advanced settings such as ``stdin`` / ``stdout`` / ``stderr``
for controlling how/if each file descriptor is captured, and ``write``, which allows piping arbitrary bytes
into the stdin of the command being executed, among others.
Generally you should use a high-level wrapper method such as :meth:`.install` / :meth:`.uninstall` / :meth:`.auto_install`
rather than this method ( :meth:`.call` ) - unless the pip sub-command you need to call isn't yet implemented as a
command wrapper method.
Example usage::
>>> res = Pip.call('install', '-U', 'privex-helpers', 'django', 'pypng')
>>> res.retcode
0
>>> print(res.stdout.decode())
Requirement already up-to-date: privex-helpers in ./venv/lib/python3.9/site-packages (3.2.1)
Collecting django
Downloading Django-3.1.7-py3-none-any.whl (7.8 MB)
Requirement already up-to-date: pypng in ./venv/lib/python3.9/site-packages (0.0.20)
Requirement already satisfied, skipping upgrade: python-dateutil in ./venv/lib/python3.9/site-packages (from privex-helpers) (2.8.1)
Requirement already satisfied, skipping upgrade: privex-loghelper>=1.0.4 in ./venv/lib/python3.9/site-packages (from privex-helpers) (1.0.6)
Collecting asgiref<4,>=3.2.10 Using cached asgiref-3.3.1-py3-none-any.whl (19 kB)
Collecting sqlparse>=0.2.2 Using cached sqlparse-0.4.1-py3-none-any.whl (42 kB)
Requirement already satisfied, skipping upgrade: pytz in ./venv/lib/python3.9/site-packages (from django) (2021.1)
Installing collected packages: asgiref, sqlparse, django
Successfully installed asgiref-3.3.1 django-3.1.7 sqlparse-0.4.1
:param str args: Arguments to pass (in order) after ``python3.x -m pip``
:param bytes|None write: Optional :class:`.bytes` data to be fed into the command's standard input (stdin)
:param bool output: (Def: ``False``) When ``True``, forces ``stdout=None, stderr=None``, which results in the process' stdout/err
being joined into the current Python application's - i.e. the command's output/err will be printed straight
to your Python app's stdout/stderr.
:param float comm_timeout: Max amount of time (in seconds) to wait for the executed command to finish receiving input and sending us output.
If it takes longer than ``comm_timeout``, the process is killed and an exception is raised.
:param kwargs:
:return:
"""
if output:
stdout, stderr, stdin = None, None, kwargs.pop('stdin', PIPE)
else:
stdout, stderr, stdin = kwargs.pop('stdout', PIPE), kwargs.pop('stderr', STDOUT), kwargs.pop('stdin', PIPE)
with Pip._popen(*args, stderr=stderr, stdin=stdin, stdout=stdout, **kwargs) as proc:
if write is not None:
res_out, res_err = proc.communicate(write, timeout=comm_timeout)
else:
res_out, res_err = proc.communicate(timeout=comm_timeout)
res_out: bytes
res_err: bytes
res = ProcResult(stdout=res_out, stderr=res_err, retcode=proc.returncode)
return res
@classmethod
def _popen(cls, *args, stderr=PIPE, stdin=PIPE, stdout=PIPE, **kwargs) -> subprocess.Popen:
return subprocess.Popen(list(cls.PIP) + list(args), stdout=stdout, stderr=stderr, stdin=stdin, **kwargs)
@classmethod
def freeze(cls, *args, **kwargs) -> List[str]:
"""Calls ``pip freeze`` and returns the list of packages and their versions in requirements.txt format as a :class:`.list`"""
procres = cls.call(*args, 'freeze', stderr=PIPE, **kwargs)
return stringify(procres.stdout).splitlines()
@classmethod
def installed_pkgs(cls, *args, **kwargs) -> List[str]:
"""Calls :meth:`.freeze` - extracts and returns just the package names as a list, with the version specifiers stripped off"""
return [p.split('==')[0] for p in cls.freeze(*args, **kwargs)]
@classmethod
def install(cls, *pkgs, upgrade=True, **kwargs) -> ProcResult:
"""Calls ``pip install [-U] PKGS`` - by default ``upgrade`` is True, so ``-U`` is passed unless you set ``upgrade=False``"""
return cls.call('install', '-U', *pkgs, **kwargs) if upgrade else cls.call('install', *pkgs, **kwargs)
@classmethod
def uninstall(cls, *pkgs, yes=True, **kwargs) -> ProcResult:
"""Calls ``pip uninstall [-y] PKGS`` - by default ``yes`` is True, so ``-y`` is passed unless you set ``yes=False``"""
return cls.call('uninstall', '-y', *pkgs, **kwargs) if yes else cls.call('uninstall', *pkgs, **kwargs)
remove = uninstall
@staticmethod
def pkg_in(pl: str, pklist: list) -> bool:
"""
Returns ``True`` if the package ``pl`` is present in the package list ``pklist`` - cleans up the pkg name ``pl``
and converts all items in ``pklist`` to lowercase to ensure reliable matching, even if the case differs
(e.g. ``Django`` instead of ``django``).
"""
pklist = [p.lower() for p in pklist]
pl = Pip._clean_pkg(pl)
return pl in pklist or pl.replace('-', '_') in pklist or pl.replace('_', '-') in pklist
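# A minimal usage sketch (hedged - the package names below are placeholders, swap in your own).
# pkg_in normalises case and dash/underscore differences, so e.g. 'Privex_Helpers' would match an
# installed 'privex-helpers'::
#
#     >>> installed = Pip.installed_pkgs()
#     >>> missing = [p for p in ('privex-helpers', 'pypng') if not Pip.pkg_in(p, installed)]
#     >>> if missing:
#     ...     Pip.install(*missing)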
@staticmethod
def _clean_pkg(pkg: str) -> str:
"""
Cleans package name ``pkg`` by casting | |
from expression.abstract_expression import ExpressionType
from expression.comparison_expression import ComparisonExpression
from expression.constant_value_expression import ConstantValueExpression
from query_planner.logical_projection_plan import LogicalProjectionPlan
from query_planner.logical_inner_join_plan import LogicalInnerJoinPlan
from query_planner.seq_scan_plan import SeqScanPlan
from query_parser.table_ref import TableRef
from enum import Enum
from expression.tuple_value_expression import TupleValueExpression
class Rules(Enum):
"""Enum to encapsulate the list of rules we have available"""
PREDICATE_PUSHDOWN = 1
PROJECTION_PUSHDOWN_SELECT = 2
PROJECTION_PUSHDOWN_JOIN = 3
SIMPLIFY_PREDICATE = 4
JOIN_ELIMINATION = 5
TRANSITIVE_CLOSURE = 6
class RuleQueryOptimizer:
"""Class to Encapsulate the functionality of the Rule Based Query Optimizer (Query Rewriter)"""
def __init__(self):
self.rule2value = {Rules.PREDICATE_PUSHDOWN: (self.predicate_pushdown, self.do_predicate_pushdown),
Rules.SIMPLIFY_PREDICATE: (self.simply_predicate, self.do_simplify_predicate),
Rules.TRANSITIVE_CLOSURE: (self.transitive_closure, self.do_transitive_closure),
Rules.PROJECTION_PUSHDOWN_SELECT: (self.projection_pushdown_select, self.do_projection_pushdown_select),
Rules.PROJECTION_PUSHDOWN_JOIN: (self.projection_pushdown_join, self.do_projection_pushdown_join),
Rules.JOIN_ELIMINATION: (self.join_elimination, self.do_join_elimination)}
def run(self, root_node, rule_list):
""" Runs the rule based Optimizer on the list of rules user selects
Keyword Arguments:
root_node -- The root node of the logical plan tree.
rule_list -- The list containing rules user want to apply to the logical plan tree
(i.e. [Rules.PREDICATE_PUSHDOWN, Rules.SIMPLIFY_PREDICATE])
:return: The modified logical tree (pointing the root node)
"""
for rule in rule_list:
self.traverse(root_node, rule)
return root_node
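# Hedged usage sketch - `root` stands for whatever logical plan root the query planner produced;
# building that tree is outside the scope of this class:
#
#     optimizer = RuleQueryOptimizer()
#     root = optimizer.run(root, [Rules.PREDICATE_PUSHDOWN, Rules.SIMPLIFY_PREDICATE])
#
# Each rule in the list triggers one full top-down traversal of the tree (see traverse below).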
def traverse(self, curnode, rule):
""" Recursive function that traverses the tree and applies all of the rules in the rule list
Keyword Arguments:
curnode -- Current node in the logical plan tree. Type will always be one of the Abstract Plan types.
rule -- Rule applied to the current node.
:return: Void
"""
if type(curnode) == TableRef or type(curnode.children) == TableRef or len(curnode.children) == 0:
return
for child_ix, child in enumerate(curnode.children):
func, condition = self.rule2value[rule]
if condition(curnode, child):
func(curnode, child_ix)
self.traverse(child, rule)
def predicate_pushdown(self, curnode, child_ix):
""" Push down predicates so filters done as early as possible
Keyword Arguments:
curnode -- is the current node visited in the plan tree and is a type that inherits from the AbstractPlan type
child_ix -- is an integer that represents the index of the child in the curnode's child list
:return: Void
"""
# curnode is the select and child is the join
child = curnode.children[child_ix]
# setting the parent's new child to be the join node
curnode.parent.set_children([child])
# setting the select's child to be after the join
# find the join child from the same video
correct_ix = None
curnode_tabnames = set([col.split('.')[0] for col in curnode.column_ids])
vids = []
for jc_ix, jc in enumerate(child.children):
if type(jc) == TableRef:
jc_tabnames = set([jc.table_info.table_name])
vids = [jc.video]
elif type(jc) == SeqScanPlan:
jc_tabnames = set([attr.split('.')[0] for attr in jc.column_ids])
vids = jc.videos
elif type(jc) == LogicalInnerJoinPlan:
jc_tabnames = set([attr.split('.')[0] for attr in jc.join_ids])
vids = jc.videos
else:
return
# getting all of the columns that the current node uses (other columns not in the join columns)
if curnode_tabnames.issubset(jc_tabnames):
correct_ix = jc_ix
break
if correct_ix is None:
return
# Set the videos because, now that we are below the join, we do not need both videos
curnode.set_videos(vids)
curnode.set_children([child.children[correct_ix]])
child.children[correct_ix].parent = curnode
# set the join's children to be the select
child.children[correct_ix] = curnode
child.parent = curnode.parent
curnode.parent = child
def projection_pushdown_select(self, curnode, child_ix):
""" Push down projects so that we do not have unnecessary attributes
Keyword Arguments:
curnode -- is the current node visited in the plan tree and is a type that inherits from the AbstractPlan type
child_ix -- is an integer that represents the index of the child in the curnode's child list
:return: Void
"""
# curnode is the projection
# child is the select
child = curnode.children[child_ix]
# getting all of the columns that the current node uses (other columns not in the join columns)
cols_project = [col for col in curnode.column_ids]
# getting all of the columns that the select uses that are the same as its child
cols_select = [col for col in child.column_ids]
cols_project.extend(cols_select)
cols_project = list(set(cols_project))
new_proj = LogicalProjectionPlan(videos=curnode.videos, column_ids=cols_project, foreign_column_ids=[])
old_children = curnode.children[child_ix].children
curnode.children[child_ix].set_children([new_proj])
new_proj.set_children(old_children)
for cc in old_children:
cc.parent = new_proj
# we did a previous select projection pushdown of the same columns
# This means we need to push down further, and can delete the current node (the first projection)
if type(child.parent) == LogicalProjectionPlan \
and set(child.parent.column_ids) == set(new_proj.column_ids) \
and curnode.parent is not None:
cur_children = curnode.children
curnode_ix = curnode.parent.children.index(curnode)
curnode.parent.children[curnode_ix] = cur_children[0]
cur_children[0].parent = curnode.parent
def projection_pushdown_join(self, curnode, child_ix):
""" Push down projects so that we do not have unnecessary attributes
Keyword Arguments:
curnode -- The current node visited in the plan tree and is a type that inherits from the AbstractPlan type
child_ix -- An integer that represents the index of the child in the curnode's child list
:return: Void
"""
# curnode is the projection
# child is the join
child = curnode.children[child_ix]
for cc_ix, cc in enumerate(child.children):
if type(cc) == TableRef:
cc_tabnames = [cc.table_info.table_name]
elif type(cc) == LogicalInnerJoinPlan:
cc_tabnames = [col.split('.')[0] for col in cc.join_ids]
elif type(cc) == SeqScanPlan:
cc_tabnames = [col.split('.')[0] for col in cc.column_ids]
else:
break
# getting all of the columns that the join uses that are the same as its child
cols = [col for col in child.join_ids for tabname in cc_tabnames if tabname in col]
# getting all of the columns that the current node uses (other columns not in the join columns)
cols2 = [col for col in curnode.column_ids for tabname in cc_tabnames if tabname in col]
cols.extend(cols2)
# creating new Projection Node
if type(cc) == TableRef:
vids = [cc.video]
else:
vids = cc.videos
new_proj1 = LogicalProjectionPlan(videos=vids, column_ids=list(set(cols)), foreign_column_ids=[])
new_proj1.set_children([child.children[cc_ix]])
new_proj1.parent = child
child.children[cc_ix].parent = new_proj1
child.children[cc_ix] = new_proj1
# in this case, we have a join of three or more tables.
# we already created a projection node in the previous recursive call of projection_pushdown_join
# We can delete the projection in the middle between the joins
if type(curnode.parent) == LogicalInnerJoinPlan:
child.parent = curnode.parent
curnode_ix = curnode.parent.children.index(curnode)
curnode.parent.children[curnode_ix] = child
def selective_first(self):
"""reorder predicates so that DBMS applies most selective first"""
pass
def simply_predicate(self, curnode, child_ix):
""" Simplify predicate to remove unnecessary conditions (i.e. 1 = 0 or 0 = 0)
Keyword Arguments:
curnode -- The current node visited in the plan tree and is a type that inherits from the AbstractPlan type
child_ix -- An integer that represents the index of the child in the curnode's child list
:return: Void
"""
boolean = curnode.predicate.evaluate()
if not boolean:
self.delete_node(curnode)
def delete_node(self, curnode):
""" Recreates the parent and child pointers to skip the sigma
Keyword Arguments:
curnode -- The current node visited in the plan tree and is a type that inherits from the AbstractPlan type
:return: void
"""
curnode.parent.set_children(curnode.children)
for child in curnode.children:
child.parent = curnode.parent
def transitive_closure(self, curnode, child_ix):
""" Ensures precise cardinality estimation when same predicate is being applied to both tables
Keyword Arguments:
curnode -- The current node visited in the plan tree and is a type that inherits from the AbstractPlan type
child_ix -- An integer that represents the index of the child in the curnode's child list
:return: void
"""
child = curnode.children[child_ix]
# checking if the current node is a comparison expression
if type(curnode.predicate) == ComparisonExpression:
const_idx = None
col_tab_idx = None
const_val = None
if type(curnode.predicate.get_child(1)) == ConstantValueExpression:
##print ("R.H.S. Is constant val")
const_idx = 1
col_tab_idx = 0
elif type(curnode.predicate.get_child(0)) == ConstantValueExpression:
##print ("L.H.S. is constant val")
const_idx = 0
col_tab_idx = 1
# extracting the constant value from the predicate and table name and attribute
const_val = curnode.predicate.get_child(const_idx).evaluate()
selection_table = curnode.column_ids[0].split(".")[0]
selection_column = curnode.column_ids[0].split(".")[1]
# Now looking at the child
join_cols = child.join_ids
matched_join_idx = None
for join_col_idx in range(len(join_cols)):
if join_cols[join_col_idx] == curnode.column_ids[0]:
# remembering which of all the join columns matched with the parent selection column
matched_join_idx = join_col_idx
# If the columns did not match
if matched_join_idx is None:
print("Not possible")
return
# checking supported types for grand child
for gc_idx, gc in enumerate(child.children):
if type(gc) == TableRef:
jc_tabnames = set([gc.table_info.table_name])
vids = [gc.video]
elif type(gc) == SeqScanPlan:
jc_tabnames = set([attr.split('.')[0] for attr in gc.column_ids])
vids = gc.videos
# Load and dump a CAN database in SYM format.
import collections
from itertools import groupby
import re
import logging
from collections import OrderedDict as odict
from decimal import Decimal
from typing import Callable, Iterator, List, Optional as TypingOptional
import textparser
from textparser import Sequence
from textparser import choice
from textparser import ZeroOrMore
from textparser import ZeroOrMoreDict
from textparser import DelimitedList
from textparser import tokenize_init
from textparser import Token
from textparser import TokenizeError
from textparser import Optional
from textparser import Any
from ..signal import Signal
from ..signal import NamedSignalValue
from ..signal import Decimal as SignalDecimal
from ..message import Message
from ..internal_database import InternalDatabase
from .utils import num
from ...utils import SORT_SIGNALS_DEFAULT, type_sort_signals, sort_signals_by_start_bit
from ...errors import ParseError
LOGGER = logging.getLogger(__name__)
# PCAN Symbol Editor will fail to open a SYM file with signal names longer than this length
MAX_SIGNAL_NAME_LENGTH = 32
# If a message is in the SEND section of a SYM file, it is sent by the ECU
SEND_MESSAGE_SENDER = 'ECU'
# If a message is in the RECEIVE section of a SYM file, it is sent by the Peripheral devices
RECEIVE_MESSAGE_SENDER = 'Peripherals'
class Parser60(textparser.Parser):
"""Create the SYM 6.0 parser.
"""
KEYWORDS = set([
'FormatVersion',
'Title',
'UniqueVariables',
'FloatDecimalPlaces',
'BRS',
'Enum',
'Sig',
'ID',
'Len',
'Mux',
'CycleTime',
'Timeout',
'MinInterval',
'Color',
'Var',
'Type'
])
def tokenize(self, string):
names = {
'LPAREN': '(',
'RPAREN': ')',
'LBRACE': '[',
'RBRACE': ']',
'COMMA': ',',
'ASSIGN': '=',
'ENUMS': '{ENUMS}',
'SIGNALS': '{SIGNALS}',
'SEND': '{SEND}',
'RECEIVE': '{RECEIVE}',
'SENDRECEIVE': '{SENDRECEIVE}',
'U': '/u:',
'F': '/f:',
'O': '/o:',
'MIN': '/min:',
'MAX': '/max:',
'SPN': '/spn:',
'D': '/d:',
'LN': '/ln:',
'E': '/e:',
'P': '/p:',
'M': '-m',
'H': '-h',
'B': '-b',
'S': '-s',
'T': '-t',
'V': '-v',
'DP': '-p'
}
re_string = r'"(\\"|[^"])*?"'
token_specs = [
('SKIP', r'[ \r\n\t]+'),
('COMMENT', r'//.*?\n'),
('HEXNUMBER', r'-?\d+\.?[0-9A-F]*([eE][+-]?\d+)?(h)'),
('NUMBER', r'-?\d+\.?[0-9A-F]*([eE][+-]?\d+)?'),
('STRING', re_string),
('U', r'/u:({}|\S+)'.format(re_string)),
('F', r'/f:'),
('O', r'/o:'),
('MIN', r'/min:'),
('MAX', r'/max:'),
('SPN', r'/spn:'),
('D', r'/d:'),
('LN', r'/ln:'),
('E', r'/e:'),
('P', r'/p:'),
('M', r'\-m'),
('H', r'\-h'),
('B', r'\-b'),
('S', r'\-s'),
('T', r'\-t'),
('V', r'\-v'),
('DP', r'\-p'),
('LPAREN', r'\('),
('RPAREN', r'\)'),
('LBRACE', r'\['),
('RBRACE', r'\]'),
('COMMA', r','),
('ASSIGN', r'='),
('ENUMS', r'\{ENUMS\}'),
('SIGNALS', r'\{SIGNALS\}'),
('SEND', r'\{SEND\}'),
('RECEIVE', r'\{RECEIVE\}'),
('SENDRECEIVE', r'\{SENDRECEIVE\}'),
('WORD', r'[^\s=\(\]\-]+'),
('MISMATCH', r'.')
]
tokens, token_regex = tokenize_init(token_specs)
for mo in re.finditer(token_regex, string, re.DOTALL):
kind = mo.lastgroup
if kind == 'SKIP':
pass
elif kind == 'STRING':
value = mo.group(kind)[1:-1].replace('\\"', '"')
tokens.append(Token(kind, value, mo.start()))
elif kind != 'MISMATCH':
value = mo.group(kind)
if value in self.KEYWORDS:
kind = value
if kind in names:
kind = names[kind]
tokens.append(Token(kind, value, mo.start()))
else:
raise TokenizeError(string, mo.start())
return tokens
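# Hedged illustration of the tokenizer above on a single, made-up signal line ((kind, value)
# pairs only, offsets omitted). 'Sig' is promoted from WORD to a keyword token, and the unit
# including its quoted string collapses into a single '/u:' token:
#
#     'Sig=Speed unsigned 16 /u:"km/h"'
#      -> [('Sig', 'Sig'), ('=', '='), ('WORD', 'Speed'), ('WORD', 'unsigned'),
#          ('NUMBER', '16'), ('/u:', '/u:"km/h"')]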
def grammar(self):
word = choice('WORD', *list(self.KEYWORDS))
version = Sequence('FormatVersion', '=', 'NUMBER', 'COMMENT')
title = Sequence('Title' , '=', 'STRING')
unique_variables = Sequence('UniqueVariables' , '=', word)
float_decimal_places = Sequence('FloatDecimalPlaces' , '=', 'NUMBER')
bit_rate_switch = Sequence('BRS' , '=', word)
enum_value = Sequence('NUMBER', '=', 'STRING')
delim = Sequence(',', Optional('COMMENT'))
enum = Sequence('Enum', '=', word,
'(', Optional(DelimitedList(enum_value, delim=delim)), ')',
Optional('COMMENT'))
sig_unit = '/u:'
sig_factor = Sequence('/f:', 'NUMBER')
sig_offset = Sequence('/o:', 'NUMBER')
sig_min = Sequence('/min:', 'NUMBER')
sig_max = Sequence('/max:', 'NUMBER')
sig_spn = Sequence('/spn:', 'NUMBER')
sig_default = Sequence('/d:', choice('NUMBER', 'WORD'))
sig_long_name = Sequence('/ln:', 'STRING')
sig_enum = Sequence('/e:', word)
sig_places = Sequence('/p:', 'NUMBER')
signal = Sequence('Sig', '=', Any(), word,
Optional('NUMBER'),
Optional(choice('-h', '-b')),
Optional('-m'),
ZeroOrMore(choice(sig_unit,
sig_factor,
sig_offset,
sig_min,
sig_max,
sig_default,
sig_long_name,
sig_enum,
sig_places,
sig_spn)),
Optional('COMMENT'))
variable = Sequence('Var', '=', Any(), word,
'NUMBER', ',', 'NUMBER',
ZeroOrMore(choice('-v', '-m', '-s', '-h')),
ZeroOrMore(choice(sig_unit,
sig_factor,
sig_offset,
sig_min,
sig_max,
sig_default,
sig_long_name,
sig_enum,
sig_places)),
Optional('COMMENT'))
symbol = Sequence('[', Any(), ']',
ZeroOrMoreDict(choice(
Sequence('ID', '=', 'HEXNUMBER',
Optional('HEXNUMBER'),
Optional('COMMENT')),
Sequence('Len', '=', 'NUMBER'),
Sequence('Mux', '=', Any(), 'NUMBER', ',',
'NUMBER', choice('NUMBER', 'HEXNUMBER'),
ZeroOrMore(choice('-t', '-m')),
Optional('COMMENT')),
Sequence('CycleTime', '=', 'NUMBER', Optional('-p')),
Sequence('Timeout', '=', 'NUMBER'),
Sequence('MinInterval', '=', 'NUMBER'),
Sequence('Color', '=', 'HEXNUMBER'),
variable,
Sequence('Sig', '=', Any(), 'NUMBER'),
Sequence('Type', '=', Any()))))
enums = Sequence('{ENUMS}', ZeroOrMore(choice(enum, 'COMMENT')))
signals = Sequence('{SIGNALS}', ZeroOrMore(choice(signal, 'COMMENT')))
send = Sequence('{SEND}', ZeroOrMore(choice(symbol, 'COMMENT')))
receive = Sequence('{RECEIVE}', ZeroOrMore(choice(symbol, 'COMMENT')))
sendreceive = Sequence('{SENDRECEIVE}', ZeroOrMore(choice(symbol, 'COMMENT')))
section = choice(enums,
signals,
send,
receive,
sendreceive)
grammar = Sequence(Optional('COMMENT'),
version,
ZeroOrMore(choice(unique_variables,
float_decimal_places,
title,
bit_rate_switch)),
ZeroOrMore(section))
return grammar
def _get_section_tokens(tokens, name):
for section in tokens[3]:
if section[0] == name:
return [row for row in section[1] if isinstance(row, list)]
return []
def _load_comment(tokens):
return tokens[3:].rstrip('\r\n')
def _get_enum(enums, name):
try:
return enums[name]
except KeyError:
raise ParseError("Enum '{}' is not defined.".format(name))
def _load_enums(tokens):
section = _get_section_tokens(tokens, '{ENUMS}')
all_enums = {}
for _, _, name, _, values, _, _ in section:
if values:
values = values[0]
enum = odict()
for v in values:
value = num(v[0])
value_name = v[2]
enum[value] = NamedSignalValue(value, value_name)
all_enums[name] = enum
return all_enums
def _load_signal_type_and_length(type_, tokens, enums):
# Default values.
is_signed = False
is_float = False
length = 0
enum = None
minimum = None
maximum = None
decimal = SignalDecimal()
if type_ == 'signed':
is_signed = True
length = int(tokens[0])
elif type_ == 'unsigned':
length = int(tokens[0])
elif type_ == 'float':
is_float = True
length = 32
elif type_ == 'double':
is_float = True
length = 64
elif type_ == 'bit':
# As unsigned integer for now.
length = 1
minimum = 0
maximum = 1
decimal.minimum = Decimal('0')
decimal.maximum = Decimal('1')
elif type_ == 'char':
# As unsigned integer for now.
length = 8
elif type_ in ['string', 'raw']:
# As unsigned integer for now.
length = int(tokens[0])
else:
# Enum. As unsigned integer for now.
length = int(tokens[0])
enum = _get_enum(enums, type_)
return is_signed, is_float, length, enum, minimum, maximum, decimal
def _load_signal_attributes(tokens, enum, enums, minimum, maximum, decimal, spn):
# Default values.
factor = 1
offset = 0
unit = None
decimal.scale = Decimal(factor)
decimal.offset = Decimal(offset)
for item in tokens:
if isinstance(item, list):
key, value = item
if key == '/f:':
factor = num(value)
decimal.scale = Decimal(value)
elif key == '/o:':
offset = num(value)
decimal.offset = Decimal(value)
elif key == '/min:':
minimum = num(value)
decimal.minimum = Decimal(value)
elif key == '/max:':
maximum = num(value)
decimal.maximum = Decimal(value)
elif key == '/e:':
enum = _get_enum(enums, value)
elif key == '/spn:':
spn = int(value)
else:
LOGGER.debug("Ignoring unsupported message attribute '%s'.", key)
elif item.startswith('/u:"'):
unit = item[4:-1]
elif item.startswith('/u:'):
unit = item[3:]
else:
raise ParseError('Internal error {}.'.format(item))
return unit, factor, offset, enum, minimum, maximum, decimal, spn
def _load_signal(tokens, enums):
# Default values.
name = tokens[2]
byte_order = 'little_endian'
comment = None
spn = None
# Type and length.
(is_signed,
is_float,
length,
enum,
minimum,
maximum,
decimal) = _load_signal_type_and_length(tokens[3],
tokens[4],
enums)
# Byte order.
if tokens[6] == ['-m']:
byte_order = 'big_endian'
# Comment.
if tokens[8]:
comment = _load_comment(tokens[8][0])
# The rest.
unit, factor, offset, enum, minimum, maximum, decimal, spn = _load_signal_attributes(
tokens[7],
enum,
enums,
minimum,
maximum,
decimal,
spn)
return Signal(name=name,
start=offset,
length=length,
receivers=[],
byte_order=byte_order,
is_signed=is_signed,
scale=factor,
offset=offset,
minimum=minimum,
maximum=maximum,
unit=unit,
choices=enum,
comment=comment,
is_multiplexer=False,
is_float=is_float,
decimal=decimal,
spn=spn)
def _load_signals(tokens, enums):
section = _get_section_tokens(tokens, '{SIGNALS}')
signals = {}
for signal in section:
signal = _load_signal(signal, enums)
signals[signal.name] = signal
return signals
def _load_message_signal(tokens,
signals,
multiplexer_signal,
multiplexer_ids):
signal = signals[tokens[2]]
start = int(tokens[3])
start = _convert_start(start, signal.byte_order)
return Signal(name=signal.name,
start=start,
length=signal.length,
receivers=signal.receivers,
byte_order=signal.byte_order,
is_signed=signal.is_signed,
scale=signal.scale,
offset=signal.offset,
minimum=signal.minimum,
maximum=signal.maximum,
unit=signal.unit,
choices=signal.choices,
comment=signal.comment,
is_multiplexer=signal.is_multiplexer,
multiplexer_ids=multiplexer_ids,
multiplexer_signal=multiplexer_signal,
is_float=signal.is_float,
decimal=signal.decimal,
spn=signal.spn)
def _convert_start(start, byte_order):
if byte_order == 'big_endian':
start = (8 * (start // 8) + (7 - (start % 8)))
return start
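# Worked examples of the big-endian conversion above (the byte is kept, the bit index is mirrored
# within that byte): start 0 -> 7, start 7 -> 0, start 8 -> 15, start 15 -> 8.
# Little-endian start bits pass through unchanged.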
def _load_message_variable(tokens,
enums,
multiplexer_signal,
multiplexer_ids):
# Default values.
name = tokens[2]
byte_order = 'little_endian'
start = int(tokens[4])
comment = None
spn = None
# Type and length.
(is_signed,
is_float,
length,
enum,
minimum,
maximum,
decimal) = _load_signal_type_and_length(tokens[3],
[tokens[6]],
enums)
# Byte order.
if '-m' in tokens[7]:
byte_order = 'big_endian'
# Comment.
if tokens[9]:
comment = _load_comment(tokens[9][0])
# The rest.
unit, factor, offset, enum, minimum, maximum, decimal, spn = _load_signal_attributes(
tokens[8],
enum,
enums,
minimum,
maximum,
decimal,
spn)
start = _convert_start(start, byte_order)
return Signal(name=name,
start=start,
length=length,
receivers=[],
byte_order=byte_order,
is_signed=is_signed,
scale=factor,
offset=offset,
minimum=minimum,
maximum=maximum,
unit=unit,
choices=enum,
comment=comment,
is_multiplexer=False,
multiplexer_ids=multiplexer_ids,
multiplexer_signal=multiplexer_signal,
is_float=is_float,
decimal=decimal,
spn=spn)
def _load_message_signals_inner(message_tokens,
signals,
enums,
multiplexer_signal=None,
multiplexer_ids=None):
return [
_load_message_signal(signal,
signals,
multiplexer_signal,
multiplexer_ids)
for signal in message_tokens[3].get('Sig', [])
] + [
_load_message_variable(variable,
enums,
multiplexer_signal,
multiplexer_ids)
for variable in message_tokens[3].get('Var', [])
]
def _load_muxed_message_signals(message_tokens,
message_section_tokens,
signals,
enums):
def get_mutliplexer_ids(mux_tokens):
base = 10
mux_id = mux_tokens[6]
if mux_id.endswith('h'):
base = 16
mux_id = mux_id[:-1]
return [int(mux_id, base=base)]
mux_tokens = message_tokens[3]['Mux'][0]
multiplexer_signal = mux_tokens[2]
if '-m' in mux_tokens[7]:
byte_order = 'big_endian'
else:
byte_order = | |
"""
Implements a small version of the NASBench dataset for monocular depth (monodepth) estimation.
"""
import functools
# import pandas as pd
# from peewee import CharField, FloatField, ForeignKeyField, IntegerField, Model
from peewee import fn
from playhouse.sqlite_ext import SqliteExtDatabase
from playhouse.shortcuts import model_to_dict
from nasws.cnn.search_space.darts.nni.helper import *
from nni.nas.benchmarks.nds.model import NdsTrialConfig, NdsIntermediateStats, NdsTrialStats
# from nni.nas.benchmarks.nds.query import query_nds_trial_stats
def query_nds_trial_stats(model_family, proposer, generator, model_spec, cell_spec, dataset,
num_epochs=None, reduction=None):
"""
Query trial stats of NDS given conditions.
Parameters
----------
model_family : str or None
If str, can be one of the model families available in :class:`nni.nas.benchmark.nds.NdsTrialConfig`.
Otherwise a wildcard.
proposer : str or None
If str, can be one of the proposers available in :class:`nni.nas.benchmark.nds.NdsTrialConfig`. Otherwise a wildcard.
generator : str or None
If str, can be one of the generators available in :class:`nni.nas.benchmark.nds.NdsTrialConfig`. Otherwise a wildcard.
model_spec : dict or None
If specified, can be one of the model spec available in :class:`nni.nas.benchmark.nds.NdsTrialConfig`.
Otherwise a wildcard.
cell_spec : dict or None
If specified, can be one of the cell spec available in :class:`nni.nas.benchmark.nds.NdsTrialConfig`.
Otherwise a wildcard.
dataset : str or None
If str, can be one of the datasets available in :class:`nni.nas.benchmark.nds.NdsTrialConfig`. Otherwise a wildcard.
num_epochs : int or None
If int, matching results will be returned. Otherwise a wildcard.
reduction : str or None
If 'none' or None, all trial stats will be returned directly.
If 'mean', fields in trial stats will be averaged given the same trial config.
Returns
-------
generator of dict
A generator of :class:`nni.nas.benchmark.nds.NdsTrialStats` objects,
where each of them has been converted into a dict.
"""
fields = []
if reduction == 'none':
reduction = None
if reduction == 'mean':
for field_name in NdsTrialStats._meta.sorted_field_names:
if field_name not in ['id', 'config', 'seed']:
fields.append(fn.AVG(getattr(NdsTrialStats, field_name)).alias(field_name))
elif reduction is None:
fields.append(NdsTrialStats)
else:
raise ValueError('Unsupported reduction: \'%s\'' % reduction)
query = NdsTrialStats.select(*fields, NdsTrialConfig).join(NdsTrialConfig)
conditions = []
# interesting coding style
for field_name in ['model_family', 'proposer', 'generator', 'model_spec', 'cell_spec',
'dataset', 'num_epochs']:
if locals()[field_name] is not None:
conditions.append(getattr(NdsTrialConfig, field_name) == locals()[field_name])
if conditions:
query = query.where(functools.reduce(lambda a, b: a & b, conditions))
if reduction is not None:
query = query.group_by(NdsTrialStats.config)
for k in query:
yield model_to_dict(k)
def query_darts_nds_trial_stats(arch, num_epochs, dataset, reduction=None, other_conditions=None):
"""
Query trial stats of DarsNDS given conditions.
Parameters
----------
arch : dict or None
If a dict, it is in the format that is described in
:class:`nni.nas.benchmark.nasbench201.NdsTrialConfig`. Only trial stats
matched will be returned. If none, architecture will be a wildcard.
num_epochs : int or None
If int, matching results will be returned. Otherwise a wildcard.
dataset : str or None
If specified, can be one of the dataset available in :class:`nni.nas.benchmark.nasbench201.NdsTrialConfig`.
Otherwise a wildcard.
reduction : str or None
If 'none' or None, all trial stats will be returned directly.
If 'mean', fields in trial stats will be averaged given the same trial config.
Returns
-------
generator of dict
A generator of :class:`nni.nas.benchmark.nasbench201.NdsTrialStats` objects,
where each of them has been converted into a dict.
"""
fields = []
if reduction == 'none':
reduction = None
if reduction == 'mean':
for field_name in NdsTrialStats._meta.sorted_field_names:
if field_name not in ['id', 'config', 'seed']:
fields.append(fn.AVG(getattr(NdsTrialStats, field_name)).alias(field_name))
elif reduction is None:
fields.append(NdsTrialStats)
else:
raise ValueError('Unsupported reduction: \'%s\'' % reduction)
query = NdsTrialStats.select(*fields, NdsTrialConfig).join(NdsTrialConfig)
conditions = []
if arch is not None:
conditions.append(NdsTrialConfig.arch == arch)
if num_epochs is not None:
conditions.append(NdsTrialConfig.num_epochs == num_epochs)
if dataset is not None:
conditions.append(NdsTrialConfig.dataset == dataset)
if other_conditions:
conditions.extend(other_conditions)
if conditions:
query = query.where(functools.reduce(lambda a, b: a & b, conditions))
if reduction is not None:
query = query.group_by(NdsTrialStats.config)
for k in query:
yield model_to_dict(k)
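# Hedged usage sketch for the query helper above. The field values are illustrative - which
# architectures, epoch counts and datasets exist depends entirely on what was inserted into the
# attached database (the commented-out update_nb201_dataset below uses dataset='redweb'):
#
#     for stats in query_darts_nds_trial_stats(arch=None, num_epochs=None, dataset='redweb',
#                                               reduction='mean'):
#         print(stats)   # each item is an NdsTrialStats row (plus its config) converted to a dict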
# def preprocess_retrain_landmarks_to_csv(root_dirs, output_file):
# # default values
# root_dirs = root_dirs or [
# 'random-nonzero/nasbench201/mutator_nonzero-epochs300-lr0.0001',
# 'random-nonzero/nasbench201-upsample/mutator_nonzero-epochs300-lr0.0001',
# 'random-nonzero-sync/nasbench201/mutator_nonzero-sync-epochs300-lr0.0001',
# 'random-nonzero-sync/nasbench201-upsample/mutator_nonzero-sync-epochs300-lr0.0001',
# ]
# output_file = output_file or 'configs/monodepth-nasbench201-2020-08-27.csv'
# # let plot something for the meeting :D
# # plot_data = pd.DataFrame(columns=['arch', 'space', 'train_loss', 'valid_loss', 'params', 'group', 'epochs'])
# pd_data_frames = []
# for group_id, root in enumerate(root_dirs):
# space_name = root.split('/')[1]
# for arch_id in range(30):
# res = collect_experiment_result(f'experiments/landmarks/{root}/{arch_id}')
# num_epoch, train_loss, valid_loss, num_param, arch, all_train, all_valid = res
# pd_data_frames.append(
# pd.DataFrame([[arch, space_name, train_loss, valid_loss, num_param,
# 'sync' if 'sync' in root else 'normal',
# 'nonzero',
# num_epoch, all_train, all_valid],],
# columns=['arch', 'space', 'train_loss', 'valid_loss', 'params', 'mode', 'sampler',
# 'epochs', 'all_train_loss', 'all_valid_loss'])
# )
# plot_data = pd.concat(pd_data_frames, ignore_index=True)
# plot_data.to_csv(output_file)
# def update_nb201_dataset(csv_path='data/imagenet/darts_nds-2020-10-08.csv', iteration=0):
# print('Loading dataset from csv_path: ', csv_path)
# data = pd.read_csv(csv_path)
# print(f'Finished with {len(data)} data points...' )
# if len(db.get_tables()) < 3:
# db.create_tables([NdsTrialConfig, NdsTrialStats, NdsIntermediateStats])
# for ind in range(len(data)):
# d = data.iloc[ind]
# data_parsed ={
# 'arch': d['arch'],
# 'num_epochs': d['epochs'],
# 'dataset': 'redweb',
# 'num_channels': 256,
# 'num_cells': 4,
# 'iteration': iteration,
# 'space': d['space'],
# 'mode': d['mode'],
# 'sampler': d['sampler']
# }
# config = NdsTrialConfig.create(
# # arch=d['arch'], num_epochs=d['epochs'], dataset='redweb', num_channels=256, num_cells=4,
# **data_parsed
# )
# # parse the trial stat data
# data_parsed = {
# 'train_acc': None,
# 'valid_acc': None,
# 'test_acc': None,
# 'ori_test_acc': None,
# 'train_loss': d['train_loss'],
# 'valid_loss': d['valid_loss'],
# 'test_loss': None,
# 'ori_test_loss': None,
# 'parameters': d['params'],
# 'flops': None,
# 'latency': None,
# 'training_time': None,
# 'valid_evaluation_time': None,
# 'test_evaluation_time': None,
# 'ori_test_evaluation_time': None,
# }
# trial_stats = NdsTrialStats.create(config=config, seed=0, **data_parsed)
# intermediate_stats = []
# for epoch in range(d['epochs']):
# # parse intermediate stat
# data_parsed = {
# 'train_acc': None,
# 'valid_acc': None,
# 'test_acc': None,
# 'ori_test_acc': None,
# 'train_loss': d['all_train_loss'][epoch],
# 'valid_loss': d['all_valid_loss'][epoch],
# 'test_loss': None,
# 'ori_test_loss': None,
# }
# data_parsed.update(current_epoch=epoch+1, trial=trial_stats)
# intermediate_stats.append(data_parsed)
# NdsIntermediateStats.insert_many(intermediate_stats).execute(db)
# class NdsTrialConfig(Model):
# """
# Trial config for NAS-Bench-201.
# Attributes
# ----------
# arch : dict
# A dict with keys ``0_1``, ``0_2``, ``0_3``, ``1_2``, ``1_3``, ``2_3``, each of which
# is an operator chosen from :const:`nni.nas.benchmark.nasbench201.NONE`,
# :const:`nni.nas.benchmark.nasbench201.SKIP_CONNECT`,
# :const:`nni.nas.benchmark.nasbench201.CONV_1X1`,
# :const:`nni.nas.benchmark.nasbench201.CONV_3X3` and :const:`nni.nas.benchmark.nasbench201.AVG_POOL_3X3`.
# num_epochs : int
# Number of epochs planned for this trial. Should be one of 12 and 200.
# num_channels: int
# Number of channels for initial convolution. 16 by default.
# num_cells: int
# Number of cells per stage. 5 by default.
# dataset: str
# Dataset used for training and evaluation.
# redweb indicate the ReDWeb dataset used to train mono-depth task.
# """
# arch = JSONField(index=True)
# num_epochs = IntegerField(index=True)
# num_channels = IntegerField()
# num_cells = IntegerField()
# iteration = IntegerField()
# mode = CharField(max_length=20, choices=['normal', 'sync'])
# space = CharField(max_length=20, choices=['v1', 'v1+upsample'])
# dataset = CharField(max_length=20, index=True, choices=[
# 'redweb', # 25k+25k+10k
# ])
# class Meta:
# database = db
# class NdsTrialStats(Model):
# """
# Computation statistics for NAS-Bench-201. Each corresponds to one trial.
# Attributes
# ----------
# config : NdsTrialConfig
# Setup for this trial data.
# seed : int
# Random seed selected, for reproduction.
# train_acc : float
# Final accuracy on training data, ranging from 0 to 100.
# valid_acc : float
# Final accuracy on validation data, ranging from 0 to 100.
# test_acc : float
# Final accuracy on test data, ranging from 0 to 100.
# ori_test_acc : float
# Test accuracy on original validation set (10k for CIFAR and 12k for Imagenet16-120),
# ranging from 0 to 100.
# train_loss : float or None
# Final cross entropy loss on training data. Note that loss could be NaN, in which case
# this attributed will be None.
# valid_loss : float or None
# Final cross entropy loss on validation data.
# test_loss : float or None
# Final cross entropy loss on test data.
# ori_test_loss : float or None
# Final cross entropy loss on original validation set.
# parameters : float
# Number of trainable parameters in million.
# latency : float
# Latency in seconds.
# flops : float
# FLOPs in million.
# training_time : float
# Duration of training in seconds.
# valid_evaluation_time : float
# Time elapsed to evaluate on validation set.
# test_evaluation_time : float
# Time elapsed to evaluate on test set.
# ori_test_evaluation_time : float
# Time elapsed to evaluate on original test set.
# """
# config = ForeignKeyField(NdsTrialConfig, backref='trial_stats', index=True)
# seed = IntegerField()
# train_acc = FloatField(null=True)
# valid_acc = FloatField(null=True)
# test_acc = FloatField(null=True)
# ori_test_acc = FloatField(null=True) # test accuracy of the original test set
# train_loss = FloatField() # possibly nan
# valid_loss = FloatField()
# test_loss = FloatField(null=True)
# ori_test_loss = FloatField(null=True)
# parameters = FloatField() # parameters in million
# latency = FloatField(null=True) # latency in milliseconds
# flops = FloatField(null=True) # flops in million
# training_time = FloatField(null=True)
# valid_evaluation_time = FloatField(null=True)
# test_evaluation_time = FloatField(null=True)
# ori_test_evaluation_time = FloatField(null=True)
# class Meta:
# database = db
# class NdsIntermediateStats(Model):
# """
# Intermediate statistics for NAS-Bench-201.
# Attributes
# ----------
# trial : NdsTrialStats
# Corresponding trial.
# current_epoch : int
# Elapsed epochs.
# train_acc : float
# Current accuracy on training data, ranging from 0 to 100.
import argparse
import datetime
import os
import traceback
from warnings import simplefilter
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from pandas.plotting import register_matplotlib_converters
from TimeSeriesCrossValidation import splitTrain
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, SimpleRNN, Dense, Dropout
from gamestonk_terminal.helper_funcs import (
check_positive,
parse_known_args_and_warn,
valid_date,
patch_pandas_text_adjustment,
get_next_stock_market_days,
plot_autoscale,
)
from gamestonk_terminal.prediction_techniques.pred_helper import (
print_pretty_prediction,
price_prediction_backtesting_color,
print_prediction_kpis,
)
from gamestonk_terminal import feature_flags as gtff
from gamestonk_terminal.config_plot import PLOT_DPI
from gamestonk_terminal import config_neural_network_models as cfg_nn_models
register_matplotlib_converters()
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
simplefilter(action="ignore", category=FutureWarning)
# store the user's TensorFlow environment variables
ORIGINAL_TF_XLA_FLAGS = os.environ.get("TF_XLA_FLAGS")
ORIGINAL_TF_FORCE_GPU_ALLOW_GROWTH = os.environ.get("TF_FORCE_GPU_ALLOW_GROWTH")
def build_neural_network_model(Recurrent_Neural_Network, n_inputs, n_days):
model = Sequential()
for idx_layer, d_layer in enumerate(Recurrent_Neural_Network):
# Recurrent Neural Network
if str(*d_layer) == "SimpleRNN":
# Is this the input layer? If so, define input_shape
if idx_layer == 0:
model.add(SimpleRNN(**d_layer["SimpleRNN"], input_shape=(n_inputs, 1)))
# Is this the last output layer? If so, set units to prediction days
elif idx_layer == (len(Recurrent_Neural_Network) - 1):
model.add(SimpleRNN(**d_layer["SimpleRNN"], units=n_days))
else:
model.add(SimpleRNN(**d_layer["SimpleRNN"]))
# Long-Short Term-Memory
elif str(*d_layer) == "LSTM":
# Is this the input layer? If so, define input_shape
if idx_layer == 0:
model.add(LSTM(**d_layer["LSTM"], input_shape=(n_inputs, 1)))
# Is this the last output layer? If so, set units to prediction days
elif idx_layer == (len(Recurrent_Neural_Network) - 1):
model.add(LSTM(**d_layer["LSTM"], units=n_days))
else:
model.add(LSTM(**d_layer["LSTM"]))
# Dense (Simple Neuron)
elif str(*d_layer) == "Dense":
# Is this the input layer? If so, define input_shape
if idx_layer == 0:
model.add(Dense(**d_layer["Dense"], input_dim=n_inputs))
# Is this the last output layer? If so, set units to prediction days
elif idx_layer == (len(Recurrent_Neural_Network) - 1):
model.add(Dense(**d_layer["Dense"], units=n_days))
else:
model.add(Dense(**d_layer["Dense"]))
# Dropout (Regularization)
elif str(*d_layer) == "Dropout":
model.add(Dropout(**d_layer["Dropout"]))
else:
print(f"Incorrect neuron type: {str(*d_layer)}")
return model
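# A hedged sketch of the layer-config format consumed by build_neural_network_model(). The real
# configurations live in gamestonk_terminal.config_neural_network_models (cfg_nn_models) and may
# differ; this list is illustrative only:
#
#     Example_MLP = [
#         {"Dense": {"units": 50, "activation": "relu"}},
#         {"Dropout": {"rate": 0.2}},
#         {"Dense": {"activation": "linear"}},  # last layer: `units` is overridden with n_days
#     ]
#     model = build_neural_network_model(Example_MLP, n_inputs=40, n_days=5)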
def _parse_args(prog, description, l_args):
"""Create an argparser and parse l_args. Will print help if user requests it.
:return: ns_parser"""
parser = argparse.ArgumentParser(
prog=prog,
description=description,
add_help=False,
formatter_class=argparse.RawTextHelpFormatter, # enable multiline help messages
)
parser.add_argument(
"-d",
"--days",
action="store",
dest="n_days",
type=check_positive,
default=5,
help="prediction days.",
)
parser.add_argument(
"-i",
"--input",
action="store",
dest="n_inputs",
type=check_positive,
default=40,
help="number of days to use for prediction.",
)
parser.add_argument(
"--epochs",
action="store",
dest="n_epochs",
type=check_positive,
default=200,
help="number of training epochs.",
)
parser.add_argument(
"-j",
"--jumps",
action="store",
dest="n_jumps",
type=check_positive,
default=1,
help="number of jumps in training data.",
)
parser.add_argument(
"-p",
"--pp",
action="store",
dest="s_preprocessing",
default="normalization",
choices=["normalization", "standardization", "none"],
help="pre-processing data.",
)
parser.add_argument(
"-o",
"--optimizer",
action="store",
dest="s_optimizer",
default="adam",
choices=[
"adam",
"adagrad",
"adadelta",
"adamax",
"ftrl",
"nadam",
"rmsprop",
"sgd",
],
help="optimization technique (see https://www.tensorflow.org/api_docs/python/tf/keras/optimizers)",
)
parser.add_argument(
"-l",
"--loss",
action="store",
dest="s_loss",
default="mae",
choices=[
"mae",
"mape",
"mse",
"msle",
"poisson",
"logcosh",
"kld",
"hinge",
"squared_hinge",
"huber",
],
help="loss function (see https://www.tensorflow.org/api_docs/python/tf/keras/losses)",
)
parser.add_argument(
"-e",
"--end",
action="store",
type=valid_date,
dest="s_end_date",
default=None,
help="The end date (format YYYY-MM-DD) to select - Backtesting",
)
parser.add_argument(
"--batch_size",
action="store",
dest="n_batch_size",
type=check_positive,
default=None,
help="batch size for model fitting (use a power of 2)",
)
parser.add_argument(
"--xla_cpu",
action="store_true",
dest="b_xla_cpu",
default=False,
help="enable XLA for CPU (see https://www.tensorflow.org/xla)",
)
parser.add_argument(
"--xla_gpu",
action="store_true",
dest="b_xla_gpu",
default=False,
help="enable XLA for GPU (see https://www.tensorflow.org/xla)",
)
parser.add_argument(
"--force_gpu_allow_growth",
action="store",
dest="s_force_gpu_allow_growth",
default="true",
choices=["true", "false", "default"],
help="true: GPU memory will grow as needed. \n"
"false: TensorFlow will allocate 100%% of GPU memory. \n"
"default: usually the same as false, uses env/TensorFlow default",
)
ns_parser = parse_known_args_and_warn(parser, l_args)
if not ns_parser:
return None
# set xla flags if requested
xla_flags = (
set(ORIGINAL_TF_XLA_FLAGS.split(" ")) if ORIGINAL_TF_XLA_FLAGS else set()
)
if ns_parser.b_xla_cpu or ns_parser.b_xla_gpu:
xla_flags.add("--tf_xla_enable_xla_devices")
if ns_parser.b_xla_cpu:
xla_flags.add("--tf_xla_cpu_global_jit")
if ns_parser.b_xla_gpu:
xla_flags.add("--tf_xla_auto_jit=2")
os.environ["TF_XLA_FLAGS"] = " ".join(xla_flags)
# set GPU memory growth flag
if ns_parser.s_force_gpu_allow_growth == "true":
os.environ["TF_FORCE_GPU_ALLOW_GROWTH"] = "true"
elif ns_parser.s_force_gpu_allow_growth == "false":
os.environ["TF_FORCE_GPU_ALLOW_GROWTH"] = "false"
return ns_parser
def _restore_env():
"""Restore environment variables to original values"""
def restore(key, value):
if value is None:
if key in os.environ:
del os.environ[key]
else:
os.environ[key] = value
restore("TF_XLA_FLAGS", ORIGINAL_TF_XLA_FLAGS)
restore("TF_FORCE_GPU_ALLOW_GROWTH", ORIGINAL_TF_FORCE_GPU_ALLOW_GROWTH)
def _setup_backtesting(df_stock, ns_parser):
"""Set up backtesting if enabled
:return: (df_stock, df_future), where df_future is None if s_end_date is not set.
:raises Exception: if configuration is invalid"""
df_future = None
if ns_parser.s_end_date:
if ns_parser.s_end_date < df_stock.index[0]:
raise Exception(
"Backtesting not allowed, since End Date is older than Start Date of historical data"
)
if ns_parser.s_end_date < get_next_stock_market_days(
last_stock_day=df_stock.index[0],
n_next_days=ns_parser.n_inputs + ns_parser.n_days,
)[-1]:
raise Exception(
"Backtesting not allowed, since End Date is too close to Start Date to train model"
)
future_index = get_next_stock_market_days(
last_stock_day=ns_parser.s_end_date, n_next_days=ns_parser.n_days
)
if future_index[-1] > datetime.datetime.now():
raise Exception(
"Backtesting not allowed, since End Date + Prediction days is in the future"
)
df_future = df_stock[future_index[0] : future_index[-1]]
df_stock = df_stock[: ns_parser.s_end_date]
return df_stock, df_future
def _preprocess_split(df_stock, ns_parser):
"""Preprocess and split training data.
:return: (scaler, stock_train_data, stock_x, stock_y)
:raises Exception: if more training data is needed."""
# Pre-process data
if ns_parser.s_preprocessing == "standardization":
scaler = StandardScaler()
stock_train_data = scaler.fit_transform(
np.array(df_stock["5. adjusted close"].values.reshape(-1, 1))
)
elif ns_parser.s_preprocessing == "normalization":
scaler = MinMaxScaler()
stock_train_data = scaler.fit_transform(
np.array(df_stock["5. adjusted close"].values.reshape(-1, 1))
)
else: # No pre-processing
stock_train_data = np.array(df_stock["5. adjusted close"].values.reshape(-1, 1))
# Split training data for the neural network
stock_x, stock_y = splitTrain.split_train(
stock_train_data,
ns_parser.n_inputs,
ns_parser.n_days,
numJumps=ns_parser.n_jumps,
)
if not stock_x:
raise Exception("Given the model parameters more training data is needed.")
stock_x = np.array(stock_x)
stock_y = np.array(stock_y)
return scaler, stock_train_data, stock_x, stock_y
def _rescale_data(df_stock, ns_parser, scaler, yhat):
"""Re-scale the data back and return the prediction dataframe. """
if (ns_parser.s_preprocessing == "standardization") or (
ns_parser.s_preprocessing == "normalization"
):
y_pred_test_t = scaler.inverse_transform(yhat.tolist())
else:
y_pred_test_t = yhat
l_pred_days = get_next_stock_market_days(
last_stock_day=df_stock["5. adjusted close"].index[-1],
n_next_days=ns_parser.n_days,
)
df_pred = pd.Series(y_pred_test_t[0].tolist(), index=l_pred_days, name="Price")
return df_pred
def _plot_and_print_results(
df_stock, ns_parser, df_future, df_pred, model_name, s_ticker
):
"""Plot and print the results. """
# Plotting
plt.figure(figsize=plot_autoscale(), dpi=PLOT_DPI)
plt.plot(df_stock.index, df_stock["5. adjusted close"], lw=3)
# BACKTESTING
if ns_parser.s_end_date:
plt.title(
f"BACKTESTING: {model_name} on {s_ticker} - {ns_parser.n_days} days prediction"
)
else:
plt.title(f"{model_name} on {s_ticker} - {ns_parser.n_days} days prediction")
plt.xlim(df_stock.index[0], get_next_stock_market_days(df_pred.index[-1], 1)[-1])
plt.xlabel("Time")
plt.ylabel("Share Price ($)")
plt.grid(b=True, which="major", color="#666666", linestyle="-")
plt.minorticks_on()
plt.grid(b=True, which="minor", color="#999999", linestyle="-", alpha=0.2)
plt.plot(
[df_stock.index[-1], df_pred.index[0]],
[df_stock["5. adjusted close"].values[-1], df_pred.values[0]],
lw=1,
c="tab:green",
linestyle="--",
)
plt.plot(df_pred.index, df_pred, lw=2, c="tab:green")
plt.axvspan(
df_stock.index[-1], df_pred.index[-1], facecolor="tab:orange", alpha=0.2
)
_, _, ymin, ymax = plt.axis()
plt.vlines(
df_stock.index[-1],
ymin,
ymax,
colors="k",
linewidth=3,
linestyle="--",
color="k",
)
# BACKTESTING
if ns_parser.s_end_date:
plt.plot(
df_future.index,
df_future["5. adjusted close"],
lw=2,
c="tab:blue",
ls="--",
)
plt.plot(
[df_stock.index[-1], df_future.index[0]],
[
df_stock["5. adjusted close"].values[-1],
df_future["5. adjusted close"].values[0],
],
lw=1,
c="tab:blue",
linestyle="--",
)
if gtff.USE_ION:
plt.ion()
plt.show()
# BACKTESTING
if ns_parser.s_end_date:
plt.figure(figsize=plot_autoscale(), dpi=PLOT_DPI)
plt.subplot(211)
plt.plot(
df_future.index,
df_future["5. adjusted close"],
lw=2,
c="tab:blue",
ls="--",
)
plt.plot(df_pred.index, df_pred, lw=2, c="green")
plt.scatter(df_future.index, df_future["5. adjusted close"], c="tab:blue", lw=3)
plt.plot(
[df_stock.index[-1], df_future.index[0]],
[
df_stock["5. adjusted close"].values[-1],
df_future["5. adjusted close"].values[0],
],
lw=2,
c="tab:blue",
ls="--",
)
plt.scatter(df_pred.index, df_pred, c="green", lw=3)
plt.plot(
[df_stock.index[-1], df_pred.index[0]],
[df_stock["5. adjusted close"].values[-1], df_pred.values[0]],
lw=2,
c="green",
ls="--",
)
plt.title("BACKTESTING: Real data price versus Prediction")
plt.xlim(df_stock.index[-1], df_pred.index[-1] + datetime.timedelta(days=1))
plt.xticks(
[df_stock.index[-1], df_pred.index[-1] + datetime.timedelta(days=1)],
visible=True,
)
plt.ylabel("Share Price ($)")
plt.grid(b=True, which="major", color="#666666", linestyle="-")
plt.minorticks_on()
plt.grid(b=True, which="minor", color="#999999", linestyle="-", alpha=0.2)
plt.legend(["Real data", "Prediction data"])
plt.xticks([])
plt.subplot(212)
plt.axhline(y=0, color="k", linestyle="--", linewidth=2)
plt.plot(
df_future.index,
100
* (df_pred.values - df_future["5. adjusted close"].values)
/ df_future["5. adjusted close"].values,
lw=2,
c="red",
)
plt.scatter(
df_future.index,
100
* (df_pred.values - df_future["5. adjusted close"].values)
/ df_future["5. adjusted close"].values,
c="red",
lw=5,
)
plt.title("BACKTESTING: Error between Real data and Prediction [%]")
plt.plot(
[df_stock.index[-1], df_future.index[0]],
[
0,
100
* (df_pred.values[0] - df_future["5. adjusted close"].values[0])
/ df_future["5. adjusted close"].values[0],
],
lw=2,
ls="--",
c="red",
)
plt.xlim(df_stock.index[-1], df_pred.index[-1] + datetime.timedelta(days=1))
plt.xticks(
[df_stock.index[-1], df_pred.index[-1] + datetime.timedelta(days=1)],
visible=True,
)
plt.xlabel("Time")
plt.ylabel("Prediction Error (%)")
plt.grid(b=True, which="major", color="#666666", linestyle="-")
plt.minorticks_on()
plt.grid(b=True, which="minor", color="#999999", linestyle="-", alpha=0.2)
plt.legend(["Real data", "Prediction data"])
if gtff.USE_ION:
plt.ion()
plt.show()
# Refactor prediction dataframe for backtesting print
df_pred.name = "Prediction"
df_pred = df_pred.to_frame()
df_pred["Real"] = df_future["5. adjusted close"]
if gtff.USE_COLOR:
patch_pandas_text_adjustment()
print("Time Real [$] x Prediction [$]")
print(df_pred.apply(price_prediction_backtesting_color, axis=1).to_string())
else:
print(df_pred[["Real", "Prediction"]].round(2).to_string())
print("")
print_prediction_kpis(df_pred["Real"].values, df_pred["Prediction"].values)
else:
# Print prediction data
print_pretty_prediction(df_pred, df_stock["5. adjusted close"].values[-1])
print("")
def mlp(l_args, s_ticker, df_stock):
try:
ns_parser = _parse_args(
prog="mlp", description="""Multilayer Perceptron. """, l_args=l_args
)
if not ns_parser:
return
# Setup backtesting
df_stock, df_future = _setup_backtesting(df_stock, ns_parser)
# Pre-process data
scaler, stock_train_data, stock_x, stock_y = _preprocess_split(
df_stock, ns_parser
)
stock_x = np.reshape(stock_x, (stock_x.shape[0], stock_x.shape[1]))
stock_y = np.reshape(stock_y, (stock_y.shape[0], stock_y.shape[1]))
# Build Neural Network model
model = build_neural_network_model(
cfg_nn_models.MultiLayer_Perceptron, ns_parser.n_inputs, ns_parser.n_days
)
model.compile(optimizer=ns_parser.s_optimizer, loss=ns_parser.s_loss)
# Train our model
model.fit(
stock_x,
stock_y,
epochs=ns_parser.n_epochs,
batch_size=ns_parser.n_batch_size,
verbose=1,
)
print("")
print(model.summary())
print("")
# Prediction
yhat = model.predict(
stock_train_data[-ns_parser.n_inputs :].reshape(1, ns_parser.n_inputs)
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2019 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
import functools
import os
from time import time
import numpy as np
import pandas as pd
try:
import pplog
except ImportError:
import logging as pplog
from pandapower.io_utils import mkdirs_if_not_existent
from pandas import DataFrame, isnull
logger = pplog.getLogger(__name__)
__author__ = 'fschaefer'
class OutputWriter:
"""
This class supplies you with methods to store and format your output.
The general idea is to have a python-dictionary *output* which provides
a container for arbitrary information you would like to store. By default
a pandas DataFrame is initialized for the key *Parameters*.
For each value you want to store you may add a function to the *output_list*
of the OutputWriter, which performs the calculation and returns a value to store.
These values are then stored in the DataFrame mentioned above, in a column
named after the function you implemented.
A lot of functions are already implemented (for the full list, see the source code).
If there are any interesting output values missing, feel free to
add them.
INPUT:
**net** - The pandapower format network
**time_steps** (list) - time_steps to calculate as list
OPTIONAL:
**output_path** (string, None) - Path to the file or folder we want to write the output to.
Allowed file extensions: *.xls, *.xlsx
**output_file_type** (string, ".p") - output filetype to use if output_path is not a file.
Allowed file extensions: *.xls, *.xlsx, *.csv, *.p (pickle), *.json
**csv_seperator** (string, ";") - The separator used when writing to a csv file
**write_time** (int, None) - Time to save periodically to disk in minutes. Deactivated by default (=None)
Note: XLS has a maximum number of 256 columns.
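Example (a minimal usage sketch; assumes a pandapower network *net* and numpy imported as np):
>>> ow = OutputWriter(net, time_steps=range(5), output_path="./results", output_file_type=".csv")
>>> ow.log_variable("res_bus", "vm_pu")
>>> ow.log_variable("res_line", "loading_percent", eval_function=np.max, eval_name="max_loading")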
"""
def __init__(self, net, time_steps=None, output_path=None, output_file_type=".p", write_time=None,
csv_seperator=";"):
self.net = net
self.csv_seperator = csv_seperator
self.output_path = output_path
self.output_file_type = output_file_type
self.write_time = write_time
if write_time is not None:
self.write_time *= 60.0 # convert to seconds
# init the matrix and the list of output functions
self.output = dict()
# internal results stored as numpy arrays in dict. Is created from output_list
self.np_results = dict()
# output list contains functools.partial with tables, variables, index...
self.output_list = []
# real time is tracked to save results to disk regularly
self.cur_realtime = time()
# total time steps to calculate
self.time_steps = time_steps
self.init_all()
def __str__(self):
return self.__class__.__name__
def __repr__(self):
s = "%s with output to %s" % (self.__class__.__name__, self.output_path)
return s
def init_all(self):
if isinstance(self.time_steps, list) or isinstance(self.time_steps, range):
self.init_timesteps(self.time_steps)
self._init_np_results()
self._init_output()
else:
logger.debug("Time steps not set at init ")
def _init_output(self):
self.output = dict()
# init parameters
self.output["Parameters"] = DataFrame(False, index=self.time_steps,
columns=["time_step", "controller_unstable",
"powerflow_failed"])
self.output["Parameters"].loc[:, "time_step"] = self.time_steps
def _init_np_results(self):
# inits numpy array (contains results)
self.np_results = dict()
for partial_func in self.output_list:
self._init_np_array(partial_func)
# self._init_np_array(var_name, index, eval_function)
def _save_to_memmap(self, append):
raise NotImplementedError("Sorry not implemented yet")
def _save_seperate(self, append, file_extension):
for partial in self.output_list:
table = partial.args[0]
variable = partial.args[1]
if table != "Parameters":
file_path = os.path.join(self.output_path, table)
mkdirs_if_not_existent(file_path)
if append:
file_name = str(variable) + "_" + str(self.cur_realtime) + file_extension
else:
file_name = str(variable) + file_extension
file_path = os.path.join(file_path, file_name)
data = self.output[self._get_output_name(table, variable)]
# TODO: this if/else chain could be replaced by looking up the writer method by name, e.g. via a dict mapping file extensions to writer functions.
if file_extension == ".json":
data.to_json(file_path)
elif file_extension == ".p":
data.to_pickle(file_path)
elif file_extension == ".xls" or file_extension == ".xlsx":
try:
data.to_excel(file_path)
except ValueError as e:
if data.shape[1] > 255:
raise ValueError("pandas.to_excel() is not capable to handle big data" +
"with more than 255 columns. Please use other " +
"file_extensions instead, e.g. 'json'.")
else:
raise ValueError(e)
elif file_extension == ".csv":
data.to_csv(file_path, sep=self.csv_seperator)
def dump_to_file(self, append=False):
"""
Save the output matrix to a specific filetype (determined by basename)
**append** (bool, False) - Option for appending instead of overwriting the file
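Example (a sketch; assumes an OutputWriter *ow* created with a valid output_path):
>>> ow.dump_to_file() # writes one file per logged variable to output_path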
"""
file_extension = self.output_file_type
save_single = False
self._np_to_pd()
if self.output_path is not None:
try:
if save_single and (file_extension == ".xls" or file_extension == ".xlsx"):
self._save_single_xls_sheet(append)
elif file_extension in [".csv", ".xls", ".xlsx", ".json", ".p"]:
self._save_seperate(append, file_extension)
elif file_extension == ".dat":
self._save_to_memmap(append)
else:
raise UserWarning(
"Specify output file with .csv, .xls, .xlsx, .p, .json or .dat ending")
if append:
self._init_output()
except Exception:
raise
def dump(self):
append = self.time_step != self.time_steps[-1]
self.dump_to_file(append=append)
self.cur_realtime = time() # reset real time counter for next period
def save_results(self, time_step, pf_converged, ctrl_converged):
"""
Saves the results of the current time step to a matrix,
using the output functions in the self.output_list
"""
# remember the last time step
self.time_step = time_step
# add an entry to the output matrix if something failed
if not pf_converged:
self.save_nans_to_parameters()
self.output["Parameters"].loc[time_step, "powerflow_failed"] = True
elif not ctrl_converged:
self.output["Parameters"].loc[time_step, "controller_unstable"] = True
else:
self.save_to_parameters()
# if write time is exceeded or it is the last time step, data is written
if self.write_time is not None:
if time() - self.cur_realtime > self.write_time:
self.dump()
if self.time_step == self.time_steps[-1]:
self.dump()
def save_to_parameters(self):
"""
Saves the results of the current time step to Parameters table,
using the output functions in the self.output_list
"""
for of in self.output_list:
try:
of()
except Exception:
import traceback
traceback.print_exc()
logger.error("Error in output function! Stored NaN for '%s' in time-step %i"
% (of.__name__, self.time_step))
self.save_nans_to_parameters()
def save_nans_to_parameters(self):
"""
Saves NaNs to the Parameters table for the given time step.
"""
time_step_idx = self.time_step_lookup[self.time_step]
for of in self.output_list:
self.output["Parameters"].loc[time_step_idx, of.__name__] = np.NaN
def remove_output_variable(self, table, variable):
"""
Removes a single output from the output variable function stack
"""
# ToDo: Implement this function
pass
def log_variable(self, table, variable, index=None, eval_function=None, eval_name=None):
"""
Adds a variable to log during simulation.
- table: table where the variable is located, as a string (e.g. "res_bus")
- variable: variable that should be logged, as a string (e.g. "p_kw")
- index: can be either one index or a list of indices, or a numpy array of indices,
or a pandas Index, or a pandas Series (e.g. net.load.bus) for which
the variable will be logged. If no index is given, the variable
will be logged for all elements in the table
- eval_function: A function to be applied on the table / variable / index combination.
For example: np.min or np.max
- eval_name: the name for an applied function.
For example: "grid_losses"
Note: Variable will be written to an extra sheet when writing to Excel
or to an extra file when writing to csv.
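Example (a sketch; *ow* is an OutputWriter and numpy is imported as np):
>>> ow.log_variable("res_bus", "vm_pu") # log voltage magnitude for all buses
>>> ow.log_variable("res_line", "loading_percent", index=[0, 1], eval_function=np.max, eval_name="max_line_loading")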
"""
if np.any(isnull(index)):
# check how many elements there are in net
index = self.net[table.split("res_")[-1]].index
if not hasattr(index, '__iter__'):
index = [index]
if isinstance(index, (np.ndarray, pd.Index, pd.Series)):
index = index.tolist()
if eval_function is not None and eval_name is None:
eval_name = "%s.%s.%s.%s" % (table, variable, str(index), eval_function.__name__)
if eval_function is None and eval_name is not None:
logger.info("'eval_name' is to give a name in case of evaluation functions. Since " +
"no function is given for eval_name '%s', " % eval_name +
"eval_name is neglected.")
eval_name = None
# var_name = self._get_hash((table, variable, index, eval_function))
var_name = self._get_output_name(table, variable)
idx = self._get_same_log_variable_partial_func_idx(table, variable, eval_function,
eval_name)
if idx is not None:
self._append_existing_log_variable_partial_func(idx, index)
else:
self._append_output_list(table, variable, index, eval_function, eval_name, var_name)
def _get_same_log_variable_partial_func_idx(self, table, variable, eval_function, eval_name):
""" Returns the position index in self.output_list of partial_func which has the same table
and variable and no evaluation function. """
if eval_function is None and eval_name is None:
for i, partial_func in enumerate(self.output_list):
partial_args = partial_func.args
match = partial_args[0] == table
match &= partial_args[1] == variable
if match:
return i
def _append_existing_log_variable_partial_func(self, idx, index):
""" Appends the index of existing, same partial_func in output_list. """
for i in index:
if i not in self.output_list[idx].args[2]:
self.output_list[idx].args[2].append(i)
def _append_output_list(self, table, variable, index, eval_function, eval_name, var_name):
""" Appends the output_list by an additional partial_func. """
partial_func = functools.partial(self._log, table, variable, index, eval_function,
eval_name)
partial_func.__name__ = var_name
self.output_list.append(partial_func)
if self.time_steps is not None:
self._init_np_array(partial_func)
def add_output(self, output):
"""
Adds a single output to the list
"""
logger.warning(
"Your output %s was added to the output list." % output)
auth_settings = ['basicAuth']  # noqa: E501
return self.api_client.call_api(
'/platform/3/snapshot/schedules', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CreateSnapshotScheduleResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def create_snapshot_snapshot(self, snapshot_snapshot, **kwargs): # noqa: E501
"""create_snapshot_snapshot # noqa: E501
Create a new snapshot. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_snapshot_snapshot(snapshot_snapshot, async_req=True)
>>> result = thread.get()
:param async_req bool
:param SnapshotSnapshotCreateParams snapshot_snapshot: (required)
:return: SnapshotSnapshotExtended
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_snapshot_snapshot_with_http_info(snapshot_snapshot, **kwargs) # noqa: E501
else:
(data) = self.create_snapshot_snapshot_with_http_info(snapshot_snapshot, **kwargs) # noqa: E501
return data
def create_snapshot_snapshot_with_http_info(self, snapshot_snapshot, **kwargs): # noqa: E501
"""create_snapshot_snapshot # noqa: E501
Create a new snapshot. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_snapshot_snapshot_with_http_info(snapshot_snapshot, async_req=True)
>>> result = thread.get()
:param async_req bool
:param SnapshotSnapshotCreateParams snapshot_snapshot: (required)
:return: SnapshotSnapshotExtended
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['snapshot_snapshot'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_snapshot_snapshot" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'snapshot_snapshot' is set
if ('snapshot_snapshot' not in params or
params['snapshot_snapshot'] is None):
raise ValueError("Missing the required parameter `snapshot_snapshot` when calling `create_snapshot_snapshot`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'snapshot_snapshot' in params:
body_params = params['snapshot_snapshot']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/platform/1/snapshot/snapshots', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SnapshotSnapshotExtended', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_snapshot_alias(self, snapshot_alias_id, **kwargs): # noqa: E501
"""delete_snapshot_alias # noqa: E501
Delete the snapshot alias # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_snapshot_alias(snapshot_alias_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str snapshot_alias_id: Delete the snapshot alias (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_snapshot_alias_with_http_info(snapshot_alias_id, **kwargs) # noqa: E501
else:
(data) = self.delete_snapshot_alias_with_http_info(snapshot_alias_id, **kwargs) # noqa: E501
return data
def delete_snapshot_alias_with_http_info(self, snapshot_alias_id, **kwargs): # noqa: E501
"""delete_snapshot_alias # noqa: E501
Delete the snapshot alias # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_snapshot_alias_with_http_info(snapshot_alias_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str snapshot_alias_id: Delete the snapshot alias (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['snapshot_alias_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_snapshot_alias" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'snapshot_alias_id' is set
if ('snapshot_alias_id' not in params or
params['snapshot_alias_id'] is None):
raise ValueError("Missing the required parameter `snapshot_alias_id` when calling `delete_snapshot_alias`") # noqa: E501
collection_formats = {}
path_params = {}
if 'snapshot_alias_id' in params:
path_params['SnapshotAliasId'] = params['snapshot_alias_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/platform/1/snapshot/aliases/{SnapshotAliasId}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_snapshot_aliases(self, **kwargs): # noqa: E501
"""delete_snapshot_aliases # noqa: E501
Delete all or matching snapshot aliases. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_snapshot_aliases(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_snapshot_aliases_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.delete_snapshot_aliases_with_http_info(**kwargs) # noqa: E501
return data
def delete_snapshot_aliases_with_http_info(self, **kwargs): # noqa: E501
"""delete_snapshot_aliases # noqa: E501
Delete all or matching snapshot aliases. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_snapshot_aliases_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_snapshot_aliases" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/platform/1/snapshot/aliases', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_snapshot_changelist(self, snapshot_changelist_id, **kwargs): # noqa: E501
"""delete_snapshot_changelist # noqa: E501
Delete the specified changelist. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_snapshot_changelist(snapshot_changelist_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str snapshot_changelist_id: Delete the specified changelist. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_snapshot_changelist_with_http_info(snapshot_changelist_id, **kwargs) # noqa: E501
else:
(data) = self.delete_snapshot_changelist_with_http_info(snapshot_changelist_id, **kwargs) # noqa: E501
return data
def delete_snapshot_changelist_with_http_info(self, snapshot_changelist_id, **kwargs): # noqa: E501
"""delete_snapshot_changelist # noqa: E501
Delete the specified changelist. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_snapshot_changelist_with_http_info(snapshot_changelist_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str snapshot_changelist_id: Delete the specified changelist. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['snapshot_changelist_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_snapshot_changelist" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'snapshot_changelist_id' is set
if ('snapshot_changelist_id' not in params or
params['snapshot_changelist_id'] is None):
raise ValueError("Missing the required parameter `snapshot_changelist_id` when calling `delete_snapshot_changelist`") # noqa: E501
collection_formats = {}
path_params = {}
if 'snapshot_changelist_id' in params:
path_params['SnapshotChangelistId'] = params['snapshot_changelist_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/platform/1/snapshot/changelists/{SnapshotChangelistId}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_snapshot_repstate(self, snapshot_repstate_id, **kwargs): # noqa: E501
"""delete_snapshot_repstate # noqa: E501
Delete the specified repstate. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_snapshot_repstate(snapshot_repstate_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str snapshot_repstate_id: Delete the specified repstate. (required)
""" Module for finding patterns in arc line spectra
.. include common links, assuming primary doc root is up one directory
.. include:: ../include/links.rst
"""
import numpy as np
import inspect
from astropy.io import fits
from pypeit.core.wavecal import autoid
from pypeit.core.wavecal import defs
from pypeit.core import fitting
from pypeit import msgs
from pypeit import datamodel
from IPython import embed
class WaveFit(datamodel.DataContainer):
"""
DataContainer for the output from BuildWaveCalib
All of the items in the datamodel are required for instantiation,
although they can be None (but shouldn't be)
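A minimal construction sketch (dummy values; only spat_id is strictly required for I/O):
>>> wvfit = WaveFit(spat_id=175, pixel_fit=np.array([10., 200.]), wave_fit=np.array([5000., 5500.]))
>>> hdul = wvfit.to_hdu(add_primary=True)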
"""
version = '1.0.0'
datamodel = {'spat_id': dict(otype=(int,np.integer), descr='Spatial position of slit/order for this fit. Required for I/O'),
'pypeitfit': dict(otype=fitting.PypeItFit,
descr='Fit to 1D wavelength solutions'),
'pixel_fit': dict(otype=np.ndarray, atype=np.floating,
descr='Pixel values of arc lines'),
'wave_fit': dict(otype=np.ndarray, atype=np.floating,
descr='Wavelength IDs assigned'),
'xnorm': dict(otype=float, descr='Normalization for fit'),
'ion_bits': dict(otype=np.ndarray, atype=np.integer,
descr='Ion bit values for the Ion names'),
'cen_wave': dict(otype=float, descr='Central wavelength'),
'cen_disp': dict(otype=float, descr='Approximate wavelength dispersion'),
'spec': dict(otype=np.ndarray, atype=np.floating, descr='Arc spectrum'),
'wave_soln': dict(otype=np.ndarray, atype=np.floating,
descr='Evaluated wavelengths at pixel_fit'),
'sigrej': dict(otype=float, descr='Final sigma rejection applied'),
'shift': dict(otype=float, descr='Shift applied'),
'tcent': dict(otype=np.ndarray, atype=np.floating,
descr='Pixel centroids of all arc lines found'),
'rms': dict(otype=float, descr='RMS of the solution')}
bitmask = defs.LinesBitMask()
@staticmethod
def hduext_prefix_from_spatid(spat_id):
""" Naming for HDU extensions"""
return 'SPAT_ID-{}_'.format(spat_id)
def __init__(self, spat_id, pypeitfit=None, pixel_fit=None, wave_fit=None, ion_bits=None,
cen_wave=None, cen_disp=None, spec=None, wave_soln=None,
sigrej=None, shift=None, tcent=None, rms=None, xnorm=None):
# Parse
args, _, _, values = inspect.getargvalues(inspect.currentframe())
d = dict([(k,values[k]) for k in args[1:]])
# Setup the DataContainer
datamodel.DataContainer.__init__(self, d=d)
def _bundle(self, **kwargs):
"""
Over-ride DataContainer._bundle() to deal with PYPEITFIT
Args:
kwargs:
Passed to DataContainer._bundle()
Returns:
list:
"""
# Extension prefix (for being unique with slits)
hdue_pref = self.hduext_prefix_from_spatid(self.spat_id)
# Without PypeItFit
_d = super(WaveFit, self)._bundle(
ext=hdue_pref+'WAVEFIT', **kwargs)
# Deal with PypeItFit
if _d[0][hdue_pref+'WAVEFIT']['pypeitfit'] is not None:
_d.append({hdue_pref+'PYPEITFIT': _d[0][hdue_pref + 'WAVEFIT'].pop('pypeitfit')})
# Return
return _d
def to_hdu(self, hdr=None, add_primary=False, primary_hdr=None, limit_hdus=None):
""" Over-ride for force_to_bintbl
See :class:`pypeit.datamodel.DataContainer.to_hdu` for Arguments
Returns:
:obj:`list`, `astropy.io.fits.HDUList`_: A list of HDUs,
where the type depends on the value of ``add_primary``.
"""
return super(WaveFit, self).to_hdu(hdr=hdr, add_primary=add_primary, primary_hdr=primary_hdr,
limit_hdus=limit_hdus, force_to_bintbl=True)
@classmethod
def from_hdu(cls, hdu, chk_version=True):
"""
Parse the data from the provided HDU.
See :func:`pypeit.datamodel.DataContainer._parse` for the
argument descriptions.
"""
# Set hdu_prefix
if isinstance(hdu, fits.HDUList):
hdu_prefix = cls.hduext_prefix_from_spatid(hdu[1].header['SPAT_ID'])
else:
hdu_prefix = cls.hduext_prefix_from_spatid(hdu.header['SPAT_ID'])
# Run the default parser to get the data
return super(WaveFit, cls).from_hdu(hdu, hdu_prefix=hdu_prefix)
@property
def ions(self):
"""
Returns an array of ion labels
Returns:
`numpy.ndarray`_: Array of the ion label for each line as recorded in ion_bits
"""
ionlist = []
for ionbit in self.ion_bits:
ionlist += self.bitmask.flagged_bits(ionbit)
# Return
return np.asarray(ionlist)
def fit_slit(spec, patt_dict, tcent, line_lists, vel_tol = 1.0, outroot=None, slittxt="Slit", thar=False,match_toler=3.0,
func='legendre', n_first=2,sigrej_first=2.0,n_final=4,sigrej_final=3.0,verbose=False):
""" Perform a fit to the wavelength solution. Wrapper for iterative fitting code.
Parameters
----------
spec : ndarray
arc spectrum
patt_dict : dict
dictionary of patterns
tcent: ndarray
List of the detections in this slit to be fit using the patt_dict
line_lists: astropy Table
Table containing the line list
Optional Parameters
-------------------
vel_tol: float, default = 1.0
Tolerance in km/s for matching lines in the IDs to lines in the NIST database. The default is 1.0 km/s
outroot: str
Path for QA file.
slittxt : str
Label used for QA
thar: bool, default = False
True if this is a ThAr fit
match_toler: float, default = 3.0
Matching tolerance when searching for new lines. This is the difference in pixels between the wavelength assigned to
an arc line by an iteration of the wavelength solution and the wavelength in the line list.
func: str, default = 'legendre'
Name of function used for the wavelength solution
n_first: int, default = 2
Order of first guess to the wavelength solution.
sigrej_first: float, default = 2.0
Number of sigma for rejection for the first guess to the wavelength solution.
n_final: int, default = 4
Order of the final wavelength solution fit
sigrej_final: float, default = 3.0
Number of sigma for rejection for the final fit to the wavelength solution.
verbose : bool
If True, print out more information.
plot_fil:
Filename for plotting some QA?
Returns
-------
final_fit : dict
A dictionary containing all of the information about the fit
"""
# Check that patt_dict and tcent refer to each other
if patt_dict['mask'].shape != tcent.shape:
msgs.error('patt_dict and tcent do not refer to each other. Something is very wrong')
# Perform final fit to the line IDs
if thar:
NIST_lines = (line_lists['NIST'] > 0) & (np.char.find(line_lists['Source'].data, 'MURPHY') >= 0)
else:
NIST_lines = line_lists['NIST'] > 0
ifit = np.where(patt_dict['mask'])[0]
if outroot is not None:
plot_fil = outroot + slittxt + '_fit.pdf'
else:
plot_fil = None
# TODO Profx maybe you can add a comment on what this is doing. Why do we have use_unknowns=True only to purge them later??
# Purge UNKNOWNS from ifit
imsk = np.ones(len(ifit), dtype=bool)
for kk, idwv in enumerate(np.array(patt_dict['IDs'])[ifit]):
if (np.min(np.abs(line_lists['wave'][NIST_lines] - idwv)))/idwv*3.0e5 > vel_tol:
imsk[kk] = False
ifit = ifit[imsk]
# Fit
final_fit = iterative_fitting(spec, tcent, ifit, np.array(patt_dict['IDs'])[ifit], line_lists[NIST_lines],
patt_dict['bdisp'],match_toler=match_toler, func=func, n_first=n_first,
sigrej_first=sigrej_first,n_final=n_final, sigrej_final=sigrej_final,
plot_fil=plot_fil, verbose=verbose)
if plot_fil is not None and final_fit is not None:
print("Wrote: {:s}".format(plot_fil))
# Return
return final_fit
def iterative_fitting(spec, tcent, ifit, IDs, llist, disp,
match_toler = 2.0, func = 'legendre', n_first=2, sigrej_first=2.0,
n_final=4, sigrej_final=3.0, input_only=False,
weights=None, plot_fil=None, verbose=False):
""" Routine for iteratively fitting wavelength solutions.
Parameters
----------
spec : ndarray, shape = (nspec,)
arcline spectrum
tcent : ndarray
Centroids in pixels of lines identified in spec
ifit : ndarray
Indices of the lines that will be fit
IDs: ndarray
wavelength IDs of the lines that will be fit (I think?)
llist: dict
Linelist dictionary
disp: float
dispersion
Optional Parameters
-------------------
match_toler: float, default = 2.0
Matching tolerance when searching for new lines. This is the difference in pixels between the wavelength assigned to
an arc line by an iteration of the wavelength solution and the wavelength in the line list.
func: str, default = 'legendre'
Name of function used for the wavelength solution
n_first: int, default = 2
Order of first guess to the wavelength solution.
sigrej_first: float, default = 2.0
Number of sigma for rejection for the first guess to the wavelength solution.
n_final: int, default = 4
Order of the final wavelength solution fit
sigrej_final: float, default = 3.0
Number of sigma for rejection for the final fit to the wavelength solution.
input_only: bool
If True, the routine will only perform a robust polyfit to the input IDs.
If False, the routine will fit the input IDs, and then include additional
lines in the linelist that are a satisfactory fit.
weights: ndarray
Weights to be used?
verbose : bool
If True, print out more information.
plot_fil:
Filename for plotting some QA?
Returns
-------
final_fit: :class:`pypeit.core.wavecal.wv_fitting.WaveFit`
"""
#TODO JFH add error checking here to ensure that IDs and ifit have the same size!
if weights is None:
weights = np.ones(tcent.size)
nspec = spec.size
xnspecmin1 = float(nspec-1)
# Setup for fitting
sv_ifit = list(ifit) # Keep the originals
all_ids = -999.*np.ones(len(tcent))
all_idsion = np.array(['UNKNWN']*len(tcent))
all_ids[ifit] = IDs
# Fit
n_order = n_first
flg_continue = True
flg_penultimate = False
fmin, fmax = 0.0, 1.0
# Note the number of parameters is actually n_order and not n_order+1
while flg_continue:
if flg_penultimate:
flg_continue = False
# Fit with rejection
xfit, yfit, wfit = tcent[ifit], all_ids[ifit], weights[ifit]
maxiter = xfit.size - n_order - 2
#
if xfit.size == 0:
msgs.warn("All points rejected !!")
return None
# Fit
pypeitFit = fitting.robust_fit(xfit/xnspecmin1, yfit, n_order, function=func, maxiter=maxiter,
lower=sigrej_first, upper=sigrej_first, maxrej=1, sticky=True,
minx=fmin, maxx=fmax, weights=wfit)
# Junk fit?
if pypeitFit is None:
msgs.warn("Bad fit!!")
return None
rms_ang = pypeitFit.calc_fit_rms(apply_mask=True)
rms_pix = rms_ang/disp
if verbose:
msgs.info('n_order = {:d}'.format(n_order) + ': RMS = {:g}'.format(rms_pix))
# Reject but keep originals (until final fit)
ifit = list(ifit[pypeitFit.gpm == 1]) + sv_ifit
if not input_only:
# Find new points from the linelist (should we allow removal of the originals?)
twave = pypeitFit.eval(tcent/xnspecmin1)#, func, minx=fmin, maxx=fmax)
for ss, iwave in enumerate(twave):
mn = np.min(np.abs(iwave-llist['wave']))
if mn/disp < match_toler:
# a satisfactory match: assign the closest line-list wavelength to this detection
all_ids[ss] = llist['wave'][np.argmin(np.abs(iwave - llist['wave']))]
self._save_assign_expression(valid_name, Expression(self, valid_name))
self._initialize_column(valid_name)
def _initialize_column(self, name):
self._save_assign_expression(name)
def _sparse_matrix(self, column):
column = _ensure_string_from_expression(column)
return self._sparse_matrices.get(column)
def add_columns(self, names, columns):
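"""Add multiple in-memory columns at once, backed by a scipy.sparse.csr_matrix (one column per name).
Example (a sketch; assumes csr_matrix and numpy are imported): df.add_columns(["a", "b"], csr_matrix(np.eye(2)))
"""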
from scipy.sparse import csc_matrix, csr_matrix
if isinstance(columns, csr_matrix):
if len(names) != columns.shape[1]:
raise ValueError('number of columns ({}) does not match number of column names ({})'.format(columns.shape[1], len(names)))
for i, name in enumerate(names):
valid_name = vaex.utils.find_valid_name(name, used=self.get_column_names(hidden=True))
self.columns[valid_name] = ColumnSparse(columns, i)
self.column_names.append(valid_name)
self._sparse_matrices[valid_name] = columns
self._save_assign_expression(valid_name)
else:
raise ValueError('only scipy.sparse.csr_matrix is supported')
def _save_assign_expression(self, name, expression=None):
obj = getattr(self, name, None)
# it's ok to set it if it does not exist, or we overwrite an older expression
if obj is None or isinstance(obj, Expression):
if expression is None:
expression = name
if isinstance(expression, str):
expression = vaex.utils.valid_expression(self.get_column_names(hidden=True), expression)
expression = Expression(self, expression)
setattr(self, name, expression)
@_hidden
def add_column_healpix(self, name="healpix", longitude="ra", latitude="dec", degrees=True, healpix_order=12, nest=True):
"""Add a healpix (in memory) column based on a longitude and latitude
:param name: Name of column
:param longitude: longitude expression
:param latitude: latitude expression (astronomical convention: latitude=90 is the north pole)
:param degrees: If lon/lat are in degrees (default) or radians.
:param healpix_order: healpix order, >= 0
:param nest: Nested healpix (default) or ring.
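Example (a sketch; assumes the DataFrame has 'ra' and 'dec' columns in degrees):
>>> df.add_column_healpix(longitude="ra", latitude="dec", healpix_order=10)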
"""
import healpy as hp
if degrees:
scale = "*pi/180"
else:
scale = ""
# TODO: multithread this
phi = self.evaluate("(%s)%s" % (longitude, scale))
theta = self.evaluate("pi/2-(%s)%s" % (latitude, scale))
hp_index = hp.ang2pix(hp.order2nside(healpix_order), theta, phi, nest=nest)
self.add_column("healpix", hp_index)
@_hidden
def add_virtual_columns_matrix3d(self, x, y, z, xnew, ynew, znew, matrix, matrix_name='deprecated', matrix_is_expression=False, translation=[0, 0, 0], propagate_uncertainties=False):
"""
:param str x: name of x column
:param str y:
:param str z:
:param str xnew: name of transformed x column
:param str ynew:
:param str znew:
:param list[list] matrix: 2d array or list, with [row,column] order
:param str matrix_name:
:return:
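Example (a sketch; rotates columns x/y/z with an assumed 3x3 matrix m):
>>> import numpy as np
>>> m = np.eye(3) # identity, i.e. no rotation
>>> df.add_virtual_columns_matrix3d("x", "y", "z", "x_new", "y_new", "z_new", m)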
"""
m = matrix
x, y, z = self._expr(x, y, z)
self[xnew] = m[0][0] * x + m[0][1] * y + m[0][2] * z + translation[0]
self[ynew] = m[1][0] * x + m[1][1] * y + m[1][2] * z + translation[1]
self[znew] = m[2][0] * x + m[2][1] * y + m[2][2] * z + translation[2]
if propagate_uncertainties:
self.propagate_uncertainties([self[xnew], self[ynew], self[znew]], [x, y, z])
# wrap these with an informative msg
# add_virtual_columns_eq2ecl = _requires('astro')
# add_virtual_columns_eq2gal = _requires('astro')
# add_virtual_columns_distance_from_parallax = _requires('astro')
# add_virtual_columns_cartesian_velocities_to_pmvr = _requires('astro')
# add_virtual_columns_proper_motion_eq2gal = _requires('astro')
# add_virtual_columns_lbrvr_proper_motion2vcartesian = _requires('astro')
# add_virtual_columns_equatorial_to_galactic_cartesian = _requires('astro')
# add_virtual_columns_celestial = _requires('astro')
# add_virtual_columns_proper_motion2vperpendicular = _requires('astro')
def _covariance_matrix_guess(self, columns, full=False, as_expression=False):
all_column_names = self.get_column_names()
columns = _ensure_strings_from_expressions(columns)
def _guess(x, y):
if x == y:
postfixes = ["_error", "_uncertainty", "e", "_e"]
prefixes = ["e", "e_"]
for postfix in postfixes:
if x + postfix in all_column_names:
return x + postfix
for prefix in prefixes:
if prefix + x in all_column_names:
return prefix + x
if full:
raise ValueError("No uncertainty found for %r" % x)
else:
postfixes = ["_cov", "_covariance"]
for postfix in postfixes:
if x + "_" + y + postfix in all_column_names:
return x + "_" + y + postfix
if y + "_" + x + postfix in all_column_names:
return y + "_" + x + postfix
postfixes = ["_correlation", "_corr"]
for postfix in postfixes:
if x + "_" + y + postfix in all_column_names:
return x + "_" + y + postfix + " * " + _guess(x, x) + " * " + _guess(y, y)
if y + "_" + x + postfix in all_column_names:
return y + "_" + x + postfix + " * " + _guess(y, y) + " * " + _guess(x, x)
if full:
raise ValueError("No covariance or correlation found for %r and %r" % (x, y))
return "0"
N = len(columns)
cov_matrix = [[""] * N for i in range(N)]
for i in range(N):
for j in range(N):
cov = _guess(columns[i], columns[j])
if i == j and cov:
cov += "**2" # square the diagnal
cov_matrix[i][j] = cov
if as_expression:
return [[self[k] for k in row] for row in cov_matrix]
else:
return cov_matrix
def _jacobian(self, expressions, variables):
expressions = _ensure_strings_from_expressions(expressions)
return [[self[expression].expand(stop=[var]).derivative(var) for var in variables] for expression in expressions]
def propagate_uncertainties(self, columns, depending_variables=None, cov_matrix='auto',
covariance_format="{}_{}_covariance",
uncertainty_format="{}_uncertainty"):
"""Propagates uncertainties (full covariance matrix) for a set of virtual columns.
Covariance matrix of the depending variables is guessed by finding columns prefixed by "e"
or `"e_"` or postfixed by "_error", "_uncertainty", "e" and `"_e"`.
Off diagonals (covariance or correlation) by postfixes with "_correlation" or "_corr" for
correlation or "_covariance" or "_cov" for covariances.
(Note that x_y_cov = x_e * y_e * x_y_correlation.)
Example
>>> df = vaex.from_scalars(x=1, y=2, e_x=0.1, e_y=0.2)
>>> df["u"] = df.x + df.y
>>> df["v"] = np.log10(df.x)
>>> df.propagate_uncertainties([df.u, df.v])
>>> df.u_uncertainty, df.v_uncertainty
:param columns: list of columns for which to calculate the covariance matrix.
:param depending_variables: If not given, it is found out automatically, otherwise a list of columns which have uncertainties.
:param cov_matrix: List of list with expressions giving the covariance matrix, in the same order as depending_variables. If 'full' or 'auto',
the covariance matrix for the depending_variables will be guessed, where 'full' gives an error if an entry was not found.
"""
names = _ensure_strings_from_expressions(columns)
virtual_columns = self._expr(*columns, always_list=True)
if depending_variables is None:
depending_variables = set()
for expression in virtual_columns:
depending_variables |= expression.expand().variables()
depending_variables = list(sorted(list(depending_variables)))
fs = [self[self.virtual_columns[name]] for name in names]
jacobian = self._jacobian(fs, depending_variables)
m = len(fs)
n = len(depending_variables)
# n x n matrix
cov_matrix = self._covariance_matrix_guess(depending_variables, full=cov_matrix == "full", as_expression=True)
# empty m x m matrix
cov_matrix_out = [[self['0'] for __ in range(m)] for __ in range(m)]
for i in range(m):
for j in range(m):
for k in range(n):
for l in range(n):
if jacobian[i][k].expression == '0' or jacobian[j][l].expression == '0' or cov_matrix[k][l].expression == '0':
pass
else:
cov_matrix_out[i][j] = cov_matrix_out[i][j] + jacobian[i][k] * cov_matrix[k][l] * jacobian[j][l]
for i in range(m):
for j in range(i + 1):
sigma = cov_matrix_out[i][j]
sigma = self._expr(vaex.expresso.simplify(_ensure_string_from_expression(sigma)))
if i != j:
self.add_virtual_column(covariance_format.format(names[i], names[j]), sigma)
else:
self.add_virtual_column(uncertainty_format.format(names[i]), np.sqrt(sigma))
@_hidden
def add_virtual_columns_cartesian_to_polar(self, x="x", y="y", radius_out="r_polar", azimuth_out="phi_polar",
propagate_uncertainties=False,
radians=False):
kwargs = dict(**locals())
del kwargs['self']
return self.geo.cartesian_to_polar(inplace=True, **kwargs)
@_hidden
def add_virtual_columns_cartesian_velocities_to_spherical(self, x="x", y="y", z="z", vx="vx", vy="vy", vz="vz", vr="vr", vlong="vlong", vlat="vlat", distance=None):
kwargs = dict(**locals())
del kwargs['self']
return self.geo.velocity_cartesian2spherical(inplace=True, **kwargs)
def _expr(self, *expressions, **kwargs):
always_list = kwargs.pop('always_list', False)
return self[str(expressions[0])] if len(expressions) == 1 and not always_list else [self[str(k)] for k in expressions]
def _selection_expression(self, expression):
return vaex.expression.Expression(self, str(expression), _selection=True)
@_hidden
def add_virtual_columns_cartesian_velocities_to_polar(self, x="x", y="y", vx="vx", radius_polar=None, vy="vy", vr_out="vr_polar", vazimuth_out="vphi_polar",
propagate_uncertainties=False,):
kwargs = dict(**locals())
del kwargs['self']
return self.geo.velocity_cartesian2polar(inplace=True, **kwargs)
@_hidden
def add_virtual_columns_polar_velocities_to_cartesian(self, x='x', y='y', azimuth=None, vr='vr_polar', vazimuth='vphi_polar', vx_out='vx', vy_out='vy', propagate_uncertainties=False):
kwargs = dict(**locals())
del kwargs['self']
return self.geo.velocity_polar2cartesian(inplace=True, **kwargs)
@_hidden
def add_virtual_columns_rotation(self, x, y, xnew, ynew, angle_degrees, propagate_uncertainties=False):
kwargs = dict(**locals())
del kwargs['self']
return self.geo.rotation_2d(inplace=True, **kwargs)
@docsubst
@_hidden
def add_virtual_columns_spherical_to_cartesian(self, alpha, delta, distance, xname="x", yname="y", zname="z",
propagate_uncertainties=False,
center=[0, 0, 0], radians=False):
kwargs = dict(**locals())
del kwargs['self']
return self.geo.spherical2cartesian(inplace=True, **kwargs)
@_hidden
def add_virtual_columns_cartesian_to_spherical(self, x="x", y="y", z="z", alpha="l", delta="b", distance="distance", radians=False, center=None, center_name="solar_position"):
kwargs = dict(**locals())
del kwargs['self']
return self.geo.cartesian2spherical(inplace=True, **kwargs)
@_hidden
def add_virtual_columns_aitoff(self, alpha, delta, x, y, radians=True):
kwargs = dict(**locals())
del kwargs['self']
return self.geo.project_aitoff(inplace=True, **kwargs)
@_hidden
def add_virtual_columns_projection_gnomic(self, alpha, delta, alpha0=0, delta0=0, x="x", y="y", radians=False, postfix=""):
kwargs = dict(**locals())
del kwargs['self']
return self.geo.project_gnomic(inplace=True, **kwargs)
def add_function(self, name, f, unique=False):
name = vaex.utils.find_valid_name(name, used=[] if not unique else self.functions.keys())
function = vaex.expression.Function(self, name, f)
self.functions[name] = function
return function
def add_virtual_column(self, name, expression, unique=False):
"""Add a virtual column to the DataFrame.
Example:
>>> df.add_virtual_column("r", "sqrt(x**2 + y**2 + z**2)")
>>> df.select("r < 10")
:param str name: name of the virtual column
:param expression: expression for the column
:param bool unique: if the name is already used, make it unique by adding a postfix, e.g. _1 or _2
"""
if isinstance(expression, Expression):
if expression.df is not self:
expression = expression.copy(self)
column_position = len(self.column_names)
# if the current name is an existing column name....
if name in self.get_column_names(hidden=True):
column_position = self.column_names.index(name)
renamed = vaex.utils.find_valid_name('__' +name, used=self.get_column_names(hidden=True))
| |
This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
"""
return pulumi.get(self, "topology_key")
@property
@pulumi.getter(name="labelSelector")
def label_selector(self) -> Optional['outputs.DatadogAgentSpecClusterChecksRunnerAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelector']:
"""
A label query over a set of resources, in this case pods.
"""
return pulumi.get(self, "label_selector")
@property
@pulumi.getter
def namespaces(self) -> Optional[Sequence[str]]:
"""
namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
"""
return pulumi.get(self, "namespaces")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class DatadogAgentSpecClusterChecksRunnerAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelector(dict):
"""
A label query over a set of resources, in this case pods.
"""
def __init__(__self__, *,
match_expressions: Optional[Sequence['outputs.DatadogAgentSpecClusterChecksRunnerAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressions']] = None,
match_labels: Optional[Mapping[str, str]] = None):
"""
A label query over a set of resources, in this case pods.
:param Sequence['DatadogAgentSpecClusterChecksRunnerAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressionsArgs'] match_expressions: matchExpressions is a list of label selector requirements. The requirements are ANDed.
:param Mapping[str, str] match_labels: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
"""
if match_expressions is not None:
pulumi.set(__self__, "match_expressions", match_expressions)
if match_labels is not None:
pulumi.set(__self__, "match_labels", match_labels)
@property
@pulumi.getter(name="matchExpressions")
def match_expressions(self) -> Optional[Sequence['outputs.DatadogAgentSpecClusterChecksRunnerAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressions']]:
"""
matchExpressions is a list of label selector requirements. The requirements are ANDed.
"""
return pulumi.get(self, "match_expressions")
@property
@pulumi.getter(name="matchLabels")
def match_labels(self) -> Optional[Mapping[str, str]]:
"""
matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
"""
return pulumi.get(self, "match_labels")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class DatadogAgentSpecClusterChecksRunnerAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressions(dict):
"""
A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
"""
def __init__(__self__, *,
key: str,
operator: str,
values: Optional[Sequence[str]] = None):
"""
A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
:param str key: key is the label key that the selector applies to.
:param str operator: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
:param Sequence[str] values: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "operator", operator)
if values is not None:
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def key(self) -> str:
"""
key is the label key that the selector applies to.
"""
return pulumi.get(self, "key")
@property
@pulumi.getter
def operator(self) -> str:
"""
operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
"""
return pulumi.get(self, "operator")
@property
@pulumi.getter
def values(self) -> Optional[Sequence[str]]:
"""
values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
"""
return pulumi.get(self, "values")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class DatadogAgentSpecClusterChecksRunnerAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecution(dict):
"""
Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> matches that of any node on which a pod of the set of pods is running
"""
def __init__(__self__, *,
topology_key: str,
label_selector: Optional['outputs.DatadogAgentSpecClusterChecksRunnerAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelector'] = None,
namespaces: Optional[Sequence[str]] = None):
"""
Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> matches that of any node on which a pod of the set of pods is running
:param str topology_key: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
:param 'DatadogAgentSpecClusterChecksRunnerAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorArgs' label_selector: A label query over a set of resources, in this case pods.
:param Sequence[str] namespaces: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
"""
pulumi.set(__self__, "topology_key", topology_key)
if label_selector is not None:
pulumi.set(__self__, "label_selector", label_selector)
if namespaces is not None:
pulumi.set(__self__, "namespaces", namespaces)
@property
@pulumi.getter(name="topologyKey")
def topology_key(self) -> str:
"""
This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
"""
return pulumi.get(self, "topology_key")
@property
@pulumi.getter(name="labelSelector")
def label_selector(self) -> Optional['outputs.DatadogAgentSpecClusterChecksRunnerAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelector']:
"""
A label query over a set of resources, in this case pods.
"""
return pulumi.get(self, "label_selector")
@property
@pulumi.getter
def namespaces(self) -> Optional[Sequence[str]]:
"""
namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
"""
return pulumi.get(self, "namespaces")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class DatadogAgentSpecClusterChecksRunnerAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelector(dict):
"""
A label query over a set of resources, in this case pods.
"""
def __init__(__self__, *,
match_expressions: Optional[Sequence['outputs.DatadogAgentSpecClusterChecksRunnerAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressions']] = None,
match_labels: Optional[Mapping[str, str]] = None):
"""
A label query over a set of resources, in this case pods.
:param Sequence['DatadogAgentSpecClusterChecksRunnerAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressionsArgs'] match_expressions: matchExpressions is a list of label selector requirements. The requirements are ANDed.
:param Mapping[str, str] match_labels: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
"""
if match_expressions is not None:
pulumi.set(__self__, "match_expressions", match_expressions)
if match_labels is not None:
pulumi.set(__self__, "match_labels", match_labels)
@property
@pulumi.getter(name="matchExpressions")
def match_expressions(self) -> Optional[Sequence['outputs.DatadogAgentSpecClusterChecksRunnerAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressions']]:
"""
matchExpressions is a list of label selector requirements. The requirements are ANDed.
"""
return pulumi.get(self, "match_expressions")
@property
@pulumi.getter(name="matchLabels")
def match_labels(self) -> Optional[Mapping[str, str]]:
"""
matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
"""
return pulumi.get(self, "match_labels")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class DatadogAgentSpecClusterChecksRunnerAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressions(dict):
"""
A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
"""
def __init__(__self__, *,
key: str,
operator: str,
values: Optional[Sequence[str]] = None):
"""
A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
:param str key: key is the label key that the selector applies to.
:param str operator: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
:param Sequence[str] values: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "operator", operator)
if values is not None:
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def key(self) -> str:
"""
key is the label key that the selector applies to.
"""
return pulumi.get(self, "key")
@property
@pulumi.getter
def operator(self) | |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import math
from multiprocessing import Array, Value
from numbers import Number
import numpy as np
from scipy import linalg
from six import string_types
from sklearn.decomposition import PCA, IncrementalPCA
from sklearn.utils import (as_float_array, check_array, check_random_state,
gen_batches)
from sklearn.utils.extmath import (_incremental_mean_and_var, randomized_svd,
svd_flip)
from sklearn.utils.validation import check_is_fitted
from odin.ml.base import BaseEstimator, TransformerMixin
from odin.utils import Progbar, batching, ctext, flatten_list
from odin.utils.mpi import MPI
__all__ = [
"fast_pca",
"MiniBatchPCA",
"PPCA",
"SupervisedPPCA",
]
def fast_pca(*x,
n_components=None,
algo='pca',
y=None,
batch_size=1024,
return_model=False,
random_state=1234):
r""" A shortcut for many different PCA algorithms
Arguments:
x : {list, tuple}
list of matrices for transformation, the first matrix will
be used for training
n_components : {None, int}
number of PCA components
algo : {'pca', 'ipca', 'ppca', 'sppca', 'plda', 'rpca'}
different PCA algorithm:
'ipca' - IncrementalPCA,
'ppca' - Probabilistic PCA,
'sppca' - Supervised Probabilistic PCA,
'plda' - Probabilistic LDA,
'rpca' - randomized PCA using randomized SVD
'pca' - Normal PCA
y : {numpy.ndarray, None}
required for labels in case of `sppca`
batch_size : int (default: 1024)
batch size, only used for IncrementalPCA
return_model : bool (default: False)
if True, return the trained PCA model as the FIRST return
"""
try:
from cuml.decomposition import PCA as cuPCA
except ImportError:
cuPCA = None
batch_size = int(batch_size)
algo = str(algo).lower()
if algo not in ('pca', 'ipca', 'ppca', 'sppca', 'plda', 'rpca'):
raise ValueError("`algo` must be one of the following: 'pca', "
"'ppca', 'plda', 'sppca', or 'rpca'; but given: '%s'" %
algo)
if algo in ('sppca', 'plda') and y is None:
    raise RuntimeError("`y` must not be None when `algo` is 'sppca' or 'plda'")
x = flatten_list(x, level=None)
# ====== check input ====== #
x_train = x[0]
x_test = x[1:]
input_shape = None
if x_train.ndim > 2: # only 2D for PCA
input_shape = (-1,) + x_train.shape[1:]
new_shape = (-1, np.prod(input_shape[1:]))
x_train = np.reshape(x_train, new_shape)
x_test = [np.reshape(x, new_shape) for x in x_test]
if n_components is not None: # no need to reshape back
input_shape = None
# ====== train PCA ====== #
if algo == 'sppca':
pca = SupervisedPPCA(n_components=n_components, random_state=random_state)
pca.fit(x_train, y)
elif algo == 'plda':
from odin.ml import PLDA
pca = PLDA(n_phi=n_components, random_state=random_state)
pca.fit(x_train, y)
elif algo == 'pca':
if x_train.shape[1] > 1000 and x_train.shape[0] > 1e5 and cuPCA is not None:
pca = cuPCA(n_components=n_components, random_state=random_state)
else:
pca = PCA(n_components=n_components, random_state=random_state)
pca.fit(x_train)
elif algo == 'rpca':
    # we copy the implementation of RandomizedPCA because
    # it is significantly faster than PCA(svd_solver='randomized')
pca = RandomizedPCA(n_components=n_components,
iterated_power=2,
random_state=random_state)
pca.fit(x_train)
elif algo == 'ipca':
pca = IncrementalPCA(n_components=n_components, batch_size=batch_size)
prog = Progbar(target=x_train.shape[0],
print_report=False,
print_summary=False,
name="Fitting PCA")
for start, end in batching(batch_size=batch_size,
n=x_train.shape[0],
seed=1234):
pca.partial_fit(x_train[start:end], check_input=False)
prog.add(end - start)
elif algo == 'ppca':
pca = PPCA(n_components=n_components, random_state=random_state)
pca.fit(x_train)
# ====== transform ====== #
x_train = pca.transform(x_train)
x_test = [pca.transform(x) for x in x_test]
# reshape back to original shape if necessary
if input_shape is not None:
x_train = np.reshape(x_train, input_shape)
x_test = [np.reshape(x, input_shape) for x in x_test]
# return the results
if len(x_test) == 0:
return x_train if not return_model else (pca, x_train)
return tuple([x_train] +
x_test) if not return_model else tuple([pca, x_train] + x_test)
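# Illustrative usage sketch added for clarity; it is not part of the original
# module. The array shapes, the component count and the helper's name are
# assumptions made only for this example.
def _example_fast_pca_usage():
  x_train = np.random.randn(1000, 64)
  x_test = np.random.randn(200, 64)
  # fit on `x_train`, then project both matrices onto 8 components
  z_train, z_test = fast_pca(x_train, x_test, n_components=8, algo='pca')
  # `return_model=True` puts the fitted estimator FIRST in the returned tuple
  model, z_train = fast_pca(x_train, n_components=8, algo='pca',
                            return_model=True)
  return model, z_train, z_test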
# ===========================================================================
# PPCA
# ===========================================================================
class PPCA(BaseEstimator, TransformerMixin):
""" Probabilistic Principal Components Analysis
(C) Copyright University of Eastern Finland (UEF).
<NAME>, <EMAIL>,
<NAME>, <EMAIL>.
Parameters
----------
n_components : {int, None}
if None, keep the same dimensions as input features
bias : {vector, 'auto'} [feat_dim,]
if 'auto' take mean of training data
n_iter : {integer, 'auto'}
if 'auto', keep iterating until no more improvement (i.e. reduction in `sigma` value)
compared to the `improve_threshold`
improve_threshold : scalar
Only used in case `n_iter='auto'`
solver : {'traditional', 'simple'}
verbose: {0, 1}
showing logging information during fitting
random_state : {None, integer, numpy.random.RandomState}
Attributes
----------
V_ : [feat_dim, n_components]
total variability matrix
bias_ : [feat_dim]
bias vector
sigma_ : scalar
variance of error term
References
----------
[1] <NAME> and <NAME>, "Supervector Compression
Strategies to Speed up i-vector System Development",
submitted to Speaker Odyssey 2018.
"""
def __init__(self,
n_components=None,
bias='auto',
n_iter='auto',
improve_threshold=1e-3,
solver='traditional',
verbose=0,
random_state=None):
super(PPCA, self).__init__()
if isinstance(n_components, Number):
assert n_components > 0, \
"`n_components` must be greater than 0, but given: %d" % n_components
n_components = int(n_components)
elif n_components is not None:
raise ValueError("`n_components` can be None or integer")
self.n_components_ = n_components
# ====== checking bias ====== #
if isinstance(bias, string_types):
bias = bias.strip().lower()
assert bias == 'auto', 'Invalid value for `bias`: %s' % bias
elif not isinstance(bias, (np.ndarray, Number)):
raise ValueError("`bias` can be 'auto', numpy.ndarray or a number")
self.bias_ = bias
# ====== checking solver ====== #
if solver not in ('traditional', 'simple'):
raise ValueError("`solver` must be: 'traditional', or 'simple'")
self.solver_ = solver
# ====== checking n_iter ====== #
if isinstance(n_iter, string_types):
n_iter = n_iter.lower()
assert n_iter == 'auto', 'Invalid `n_iter` value: %s' % n_iter
elif isinstance(n_iter, Number):
      assert n_iter > 0, "`n_iter` must be greater than 0, but given: %d" % n_iter
self.n_iter_ = n_iter
# ====== checking random_state ====== #
if random_state is None:
rand = np.random.RandomState(seed=None)
elif isinstance(random_state, Number):
      rand = np.random.RandomState(seed=random_state)
elif isinstance(random_state, np.random.RandomState):
rand = random_state
else:
raise ValueError("No suppport for `random_state` value: %s" %
str(random_state))
self.random_state_ = rand
# ====== other dimension ====== #
self.improve_threshold_ = float(improve_threshold)
self.feat_dim_ = None
self.verbose_ = int(verbose)
def fit(self, X, y=None):
# ====== initialize ====== #
num_samples, feat_dim = X.shape
n_components = feat_dim if self.n_components_ is None else self.n_components_
    if isinstance(self.bias_, string_types):  # only 'auto' passes __init__ validation
bias = np.mean(X, 0)
elif isinstance(self.bias_, Number):
bias = np.full(shape=(feat_dim,), fill_value=self.bias_)
else:
bias = self.bias_
assert bias.shape == (feat_dim,), \
"Invialid `bias` given shape: %s, require shape: %s" % (str(bias.shape), str((feat_dim,)))
# ====== initialize parameters ====== #
V = self.random_state_.rand(feat_dim, n_components)
last_sigma = None
sigma = 1
centeredM = X - bias[np.newaxis, :]
varianceM = np.sum(centeredM**2) / (num_samples * feat_dim)
# ====== training ====== #
if self.verbose_:
print(
'[PPCA]n_components: %d n_sample: %d feat_dim: %d n_iter: %d threshold: %f solver: %s'
% (n_components, num_samples, feat_dim, -1 if self.n_iter_ == 'auto'
else self.n_iter_, self.improve_threshold_, self.solver_))
curr_n_iter = 0
while True:
B = (V * 1 / sigma).T # [feat_dim, n_components]
Sigma = np.linalg.inv(np.eye(n_components) +
np.dot(B, V)) # [n_components, n_components]
my = np.dot(np.dot(Sigma, B), centeredM.T) # [n_components, num_samples]
if self.solver_ == 'traditional':
sumEmm = num_samples * Sigma + np.dot(my, my.T)
elif self.solver_ == 'simple':
sumEmm = np.dot(my, my.T)
sumEmmInv = np.linalg.inv(sumEmm) # [n_components, n_components]
# updating V and sigma for next iteration
V = np.dot(np.dot(centeredM.T, my.T),
sumEmmInv) # [feat_dim, n_components]
last_sigma = sigma
sigma = varianceM - np.sum(
sumEmm * np.dot(V.T, V)) / (feat_dim * num_samples)
improvement = last_sigma - sigma
# log
if self.verbose_ > 0:
print("Iteration: %d sigma: %.3f improvement: %.3f" %
(curr_n_iter, sigma, improvement))
# check iteration escape
curr_n_iter += 1
if isinstance(self.n_iter_, Number):
if curr_n_iter >= self.n_iter_:
break
elif curr_n_iter > 1 and improvement < self.improve_threshold_:
break
# ====== save the model ====== #
# record new dimensions
self.feat_dim_ = feat_dim
self.n_components_ = n_components
# trained vectors and matrices
self.V_ = V
self.bias_ = bias
self.sigma_ = sigma
# pre-calculate matrix for transform
B = (V * 1 / sigma).T
Sigma = np.linalg.inv(np.eye(n_components) + np.dot(B, V))
self.extractorMatrix_ = np.dot(Sigma, B) # [n_components, feat_dim]
def transform(self, X):
"""
Parameters
----------
X : matrix [num_samples, feat_dim]
"""
assert hasattr(self, 'extractorMatrix_'), "The model hasn't `fit` on data"
assert X.shape[1] == self.feat_dim_, \
"Expect input matrix with shape: [?, %d], but give: %s" % (self.feat_dim_, str(X.shape))
ivec = np.dot(self.extractorMatrix_, (X - self.bias_[np.newaxis, :]).T)
return ivec.T
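# Illustrative sketch, not part of the original module: fit PPCA on a random
# matrix and project it. The shapes, iteration count and seed are assumptions.
def _example_ppca_usage():
  X = np.random.randn(500, 40)
  ppca = PPCA(n_components=10, n_iter=25, verbose=0, random_state=1234)
  ppca.fit(X)
  return ppca.transform(X)  # shape: (500, 10)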
class SupervisedPPCA(PPCA):
""" Supervised Probabilistic Principal Components Analysis
(C) Copyright University of Eastern Finland (UEF).
<NAME>, <EMAIL>,
<NAME>, <EMAIL>.
Parameters
----------
n_components : {int, None}
if None, keep the same dimensions as input features
bias : {vector, 'auto'} [feat_dim,]
if 'auto' take mean of training data
beta : scalar (default: 1)
a weight parameter (use beta = 1 as default)
n_iter : {integer, 'auto'}
if 'auto', keep iterating until no more improvement (i.e. reduction in `sigma` value)
compared to the `improve_threshold`
improve_threshold : scalar
    Only used in case `n_iter='auto'`
"""
<abs> ::= D_i^t | NonD
NonD ::= S_o^ [<NonD>*]
| Top
| Bot
"""
from __future__ import annotations
from typing import (
Union,
Callable,
Optional,
Iterable,
Sequence,
TypeVar,
Type,
NamedTuple,
TYPE_CHECKING,
cast,
)
from collections import defaultdict, OrderedDict
from contextlib import contextmanager
from functools import total_ordering
import types
import dataclasses
import pyrsistent
import builtins
from .intrinsics import *
NoneType = type(None)
# awesome pycharm :(
FunctionType: Type[types.FunctionType] = cast(
Type[types.FunctionType], types.FunctionType
)
# set to False when the generated code does not target Python
PYTHON = True
__all__ = [
"Values",
"AbsVal",
"D",
"S",
"Top",
"Bot",
"Judge",
"JITSpecInfo",
"CallSpec",
"CallRecord",
"PreSpecMaps",
"SpecMaps",
"RecTraces",
"In_Def",
"In_Move",
"In_Goto",
"In_SetLineno",
"In_Bind",
"In_Stmt",
"In_Blocks",
"In_Cond",
"In_Return",
"Out_Def",
"Out_Label",
"Out_Call",
"Out_Return",
"Out_Goto",
"Out_TypeCase",
"Out_If",
"Out_Assign",
"Out_SetLineno",
"Out_DecRef",
"Out_Instr",
"print_out",
"print_in",
"from_runtime",
"AWARED_IMMUTABLES",
"ShapeSystem",
"Shape",
"FunctionType", # TODO: put it elsewhere
]
class Out_Callable:
def __call__(self, *args: AbsVal):
# noinspection PyTypeChecker
return Out_Call(self, args)
class AbsVal:
if TYPE_CHECKING:
@property
def type(self) -> NonD:
raise NotImplementedError
def is_literal(self) -> bool:
raise NotImplementedError
def is_s(self):
return False
@total_ordering
class D(Out_Callable, AbsVal):
"""
dynamic abstract value
"""
i: int
type: NonD
def __init__(self, i: int, type: NonD):
self.i = i
self.type = type
def __repr__(self):
if self.type is Top:
return f"D{self.i}"
return f"D{self.i} : {self.type}"
def is_literal(self):
return False
def __hash__(self):
return 114514 ^ hash(self.i) ^ hash(self.type)
def __eq__(self, other):
return (
isinstance(other, D)
and self.i == other.i
and self.type == other.type
)
def __lt__(self, other):
# noinspection PyTypeHints
if not isinstance(other, AbsVal):
return False
if other is Top or other is Bot:
return True
if isinstance(other, S):
return True
if self.i == other.i:
return self.type < other.type
return self.i < other.i
@total_ordering
class S(Out_Callable, AbsVal):
"""
type abstract value
"""
base: object
params: Optional[tuple[NonD, ...]]
def __init__(
self, base: object, params: Optional[tuple[NonD, ...]] = None
):
self.base = base
self.params = params
def __hash__(self):
return 1919810 ^ hash(self.base) ^ hash(self.params)
def __eq__(self, other):
return (
isinstance(other, S)
and self.base == other.base
and self.params == other.params
)
def __lt__(self, other):
# noinspection PyTypeHints
if not isinstance(other, AbsVal):
return False
if other is Top or other is Bot:
return True
if isinstance(other, D):
return False
if self.base == other.base:
return self.params < other.params
return hash(self.base) < hash(other.base)
def is_s(self):
return True
def oop(self):
return (shape := self.shape) and shape.oop
@property
def type(self):
base = self.base
t = type(base)
if abs_t := _literal_type_maps.get(t):
return abs_t
elif t is tuple:
return tuple_type(base)
a_t = from_runtime(t)
assert not isinstance(a_t, D)
return a_t
@property
def shape(self) -> Optional[Shape]:
if type(self.base) in _literal_type_maps:
return
return ShapeSystem.get(self.base)
def is_literal(self):
return type(self.base) in _literal_type_maps
def __repr__(self):
if isinstance(self.base, type):
n = self.base.__name__
elif isinstance(self.base, FunctionType):
n = self.base.__name__
elif isinstance(self.base, types.BuiltinFunctionType):
n = f"{self.base.__module__}.{self.base.__name__}"
else:
n = repr(self.base)
if self.params is None:
return n
return f"{n}<{', '.join(map(repr, self.params))}>"
class Values:
A_Int = S(int)
A_Float = S(float)
A_Str = S(str)
A_NoneType = S(NoneType)
A_FuncType = S(FunctionType)
A_MethType = S(types.MethodType)
A_Complex = S(complex)
A_Bool = S(bool)
A_Intrinsic = S(Intrinsic)
A_Type = S(type)
A_NotImplemented = S(NotImplemented)
A_NotImplementedType = S(type(NotImplemented))
A_Int = Values.A_Int
A_Float = Values.A_Float
A_Str = Values.A_Str
A_NoneType = Values.A_NoneType
A_FuncType = Values.A_FuncType
A_Bool = Values.A_Bool
A_Intrinsic = Values.A_Intrinsic
A_Complex = Values.A_Complex
_literal_type_maps = {
int: A_Int,
float: A_Float,
complex: A_Complex,
str: A_Str,
bool: A_Bool,
NoneType: A_NoneType,
tuple: None,
}
_T = TypeVar("_T")
class _Top(Out_Callable, AbsVal):
def is_literal(self) -> bool:
return False
@property
def type(self):
raise TypeError
def __repr__(self):
return "Top"
class _Bot(Out_Callable, AbsVal):
def is_literal(self) -> bool:
return False
@property
def type(self):
raise TypeError
def __repr__(self):
return "Bot"
Top = _Top()
Bot = _Bot()
NonD = Union[S, _Top, _Bot]
if TYPE_CHECKING:
AbsVal = Union[D, NonD]
undef = object()
@dataclasses.dataclass
class Shape:
name: object
oop: bool
fields: dict[str, Union[S, types.FunctionType]]
    # some types have a unique instance,
    # e.g. None.__class__ has only None
instance: Union[
None, S, Callable[[tuple[NonD, ...]], Optional[S]]
] = dataclasses.field(default=None)
self_bound: bool = dataclasses.field(default=False)
# None: means No Shape
ShapeSystem: dict[object, Optional[Shape]] = {}
AWARED_IMMUTABLES = {*_literal_type_maps, type, Intrinsic, FunctionType}
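# Illustrative sketch (an assumption, not taken from the original source): how
# a type could be registered in `ShapeSystem`. The field values are invented
# for the example only.
def _example_register_shape():
    ShapeSystem[int] = Shape(
        name=int,
        oop=False,       # not dispatched through an OOP method table here
        fields={},       # no specialised attributes recorded in this sketch
        instance=None,   # `int` has no unique singleton instance
    )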
def from_runtime(o: object, rt_map: list[object] = None):
if hash(o):
return S(o)
t = type(o)
if t is tuple:
type_abs = tuple_type(o)
else:
type_abs = from_runtime(t)
rt_map = rt_map or []
i = len(rt_map)
abs_val = D(i, type_abs)
rt_map.append(abs_val)
return abs_val
def tuple_type(xs):
return S(tuple, tuple(from_runtime(x) for x in xs))
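# Illustrative sketch (not part of the original source): what the constructors
# above produce for a few simple runtime values; the reprs follow directly from
# the definitions of `S`, `D` and `from_runtime`.
def _example_abstract_values():
    a = from_runtime(3)   # hashable with a non-zero hash  ->  S(3)
    b = S(int)            # repr(b) == "int"
    c = D(0, S(int))      # repr(c) == "D0 : int"
    return a, b, c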
@dataclasses.dataclass(frozen=True)
class In_Move:
target: D
source: AbsVal
def __repr__(self):
return f"{self.target} = {self.source!r}"
@dataclasses.dataclass(frozen=True)
class In_Bind:
target: D
sub: AbsVal
attr: AbsVal
args: tuple[AbsVal, ...]
def __repr__(self):
args = [repr(x) for x in self.args]
return f"{self.target} = {self.sub!r}.{self.attr}({','.join(args)})"
@dataclasses.dataclass(frozen=True)
class In_Goto:
label: str
def __repr__(self):
return f"goto {self.label}"
@dataclasses.dataclass(frozen=True)
class In_SetLineno:
line: int
filename: str
def __repr__(self):
return f"# line {self.line} at {self.filename}"
@dataclasses.dataclass(frozen=True)
class In_Cond:
test: AbsVal
then: str
otherwise: str
def __repr__(self):
return (
f"if {self.test!r} then {self.then} else {self.otherwise}"
)
@dataclasses.dataclass(frozen=True)
class In_Return:
value: AbsVal
def __repr__(self):
return f"return {self.value!r}"
In_Stmt = Union[
In_Cond, In_SetLineno, In_Goto, In_Move, In_Return, In_Bind
]
In_Blocks = "dict[str, list[In_Stmt]]"
def print_in(b: In_Blocks, print=print):
for label, xs in sorted(b.items(), key=lambda x: x[0] != "entry"):
print(label, ":")
for each in xs:
print(each)
@dataclasses.dataclass
class In_Def:
narg: int
blocks: In_Blocks
_func: FunctionType
static_glob: set[str]
UserCodeDyn = {} # type: dict[FunctionType, In_Def]
def show(self):
args = [f"D[{i}]" for i in range(self.narg)]
print(f'def {self.name}({",".join(args)})', "{")
print_in(self.blocks, print=lambda *x: print("", *x))
print("}")
@property
def func(self) -> FunctionType:
"""this is for hacking PyCharm's type checker"""
# noinspection PyTypeChecker
return self._func
@property
def name(self) -> str:
return self.func.__name__
@property
def glob(self) -> dict:
# noinspection PyUnresolvedReferences
return self.func.__globals__
@dataclasses.dataclass(unsafe_hash=True)
class Out_Call(Out_Callable):
func: AbsVal
args: tuple[AbsVal, ...]
def __repr__(self):
return f"{self.func!r}{self.args!r}"
@dataclasses.dataclass(frozen=True)
class Out_Assign:
target: D
expr: Out_Call
decrefs: tuple[int, ...]
def show(self, prefix, print):
decrefs = ",".join(f"D{i}" for i in self.decrefs)
print(f"{prefix}{self.target} = {self.expr!r}")
print(f"{prefix}when err: decref [{decrefs}] ")
@dataclasses.dataclass(frozen=True)
class Out_If:
test: AbsVal
t: str
f: str
def show(self, prefix, print):
print(f"{prefix}if {self.test!r}")
print(f"{prefix}then goto {self.t}")
print(f"{prefix}else goto {self.f}")
@dataclasses.dataclass(frozen=True)
class Out_TypeCase:
obj: AbsVal
cases: pyrsistent.PMap[AbsVal, tuple[Out_Instr, ...]]
def show(self, prefix, print):
print(f"{prefix}case typeof {self.obj!r}")
for t, xs in self.cases.items():
print(f"{prefix} {t!r} ->")
print_out(xs, prefix + " ", print)
@dataclasses.dataclass(frozen=True)
class Out_Label:
label: str
def show(self, prefix, print):
print(f"label {self.label}:")
@dataclasses.dataclass(frozen=True)
class Out_Goto:
label: str
def show(self, prefix, print):
print(f"{prefix}goto {self.label}")
@dataclasses.dataclass(frozen=True)
class Out_Return:
value: AbsVal
decrefs: tuple[int, ...]
def show(self, prefix, print):
decrefs = ",".join(f"D{i}" for i in self.decrefs)
print(f"{prefix}return {self.value!r}")
print(f"{prefix}and decref [{decrefs}]")
@dataclasses.dataclass(frozen=True)
class Out_DecRef:
i: int
def show(self, prefix, print):
print(f"{prefix}decref D{self.i}")
@dataclasses.dataclass(frozen=True)
class Out_SetLineno:
line: int
filename: str
def show(self, prefix, print):
print(f"{prefix}# line {self.line} at {self.filename}")
Out_Instr = Union[
Out_DecRef,
Out_Label,
Out_TypeCase,
Out_If,
Out_Assign,
Out_Return,
Out_Goto,
]
CallRecord = "tuple[FunctionType, tuple[AbsVal, ...]]"
def print_out(xs: Iterable[Out_Instr], prefix, print):
for each in xs:
each.show(prefix, print)
@dataclasses.dataclass
class Out_Def:
spec: JITSpecInfo
params: tuple[AbsVal, ...]
instrs: tuple[Out_Instr, ...]
start: str
func: FunctionType
GenerateCache = OrderedDict() # type: dict[Intrinsic, Out_Def]
@property
def name(self) -> str:
return self.func.__name__
def show(self, print=print):
ret_types = self.spec.possibly_return_types
name = self.spec.abs_jit_func
instance = self.spec.instance
print(
"|".join(map(repr, ret_types)),
f"{name!r}(",
", ".join(map(repr, self.params)),
")",
f"-> {instance} {{" if instance else "{",
)
# print(f" START from {self.start}")
for i in self.instrs:
i.show(" ", print)
print("}")
@dataclasses.dataclass
class JITSpecInfo:
instance: Optional[AbsVal] # maybe return a constant instance
abs_jit_func: AbsVal
possibly_return_types: tuple[AbsVal, ...]
class CallSpec:
instance: Optional[AbsVal] # maybe return a constant instance
e_call: Union[Out_Call, AbsVal]
possibly_return_types: tuple[AbsVal, ...]
def __init__(
self,
instance: Optional[AbsVal],
e_call: Union[Out_Call, AbsVal],
        possibly_return_types: Iterable[AbsVal],
):
self.instance = instance
self.e_call = e_call
if not isinstance(possibly_return_types, tuple):
possibly_return_types = tuple(possibly_return_types)
self.possibly_return_types = possibly_return_types
def __eq__(self, other):
return (
isinstance(other, CallSpec)
and self.instance == other.instance
and self.e_call == other.e_call
and self.possibly_return_types
== other.possibly_return_types
)
def astuple(self):
return self.instance, self.e_call, self.possibly_return_types
# user function calls recorded here cannot be reanalyzed next time
RecTraces: set[tuple[str, tuple[AbsVal, ...]]] = set()
# specialisations map:
# maps a call record to the compiled function name and the partially
# inferred return types (the full return types are not inferred yet)
PreSpecMaps: dict[CallRecord, tuple[str, set[AbsVal]]] = {}
# cache return types and function address
SpecMaps: dict[CallRecord, JITSpecInfo] = {}
def mk_prespec_name(
key: CallRecord, partial_returns: set[AbsVal], name=""
):
v = PreSpecMaps.get(key)
if v is None:
i = len(PreSpecMaps)
n = f"J_{name.replace('_', '__')}_{i}"
PreSpecMaps[key] = n, partial_returns
return n
return v[0]
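# Illustrative sketch (hypothetical function and arguments): the first call for
# a (function, argument-types) record allocates a fresh pre-specialisation
# name; repeated calls return the cached one.
def _example_prespec_name():
    def f(x):
        return x
    key = (f, (A_Int,))
    first = mk_prespec_name(key, set(), name="f")   # e.g. "J_f_0"
    again = mk_prespec_name(key, set(), name="f")   # same cached name
    assert first == again
    return first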
class MemSlot(NamedTuple):
# reference count
rc: int
# is locally allocated
ila: bool
def __repr__(self):
x = f"[{self.rc}]"
if self.ila:
x = f"!{x}"
return x
@dataclasses.dataclass(frozen=True, eq=True, order=True)
class Local:
mem: pyrsistent.PVector[MemSlot]
store: pyrsistent.PMap[int, AbsVal]
def up_mem(self, mem):
return Local(mem, self.store)
def up_store(self, store):
return Local(self.mem, store)
def alloc(local: Local):
i = -1
for i, each in enumerate(local.mem):
if each.rc == 0:
return i
    i += 1
    return i
import phanterpwa.frontend.helpers as helpers
import phanterpwa.frontend.components.widgets as widgets
import phanterpwa.frontend.forms as forms
import phanterpwa.frontend.components.modal as modal
import phanterpwa.frontend.preloaders as preloaders
from org.transcrypt.stubs.browser import __pragma__
__pragma__('alias', "jQuery", "$")
__pragma__('skip')
jQuery = sessionStorage = JSON = M = js_undefined = Date = window =\
this = __new__ = FormData = console = localStorage = QRCode = 0
__pragma__('noskip')
CONCATENATE = helpers.CONCATENATE
OPTION = helpers.XmlConstructor.tagger("option")
SELECT = helpers.XmlConstructor.tagger("select")
DIV = helpers.XmlConstructor.tagger("div")
I = helpers.XmlConstructor.tagger("i")
A = helpers.XmlConstructor.tagger("a")
SPAN = helpers.XmlConstructor.tagger("span")
FORM = helpers.XmlConstructor.tagger("form")
UL = helpers.XmlConstructor.tagger("ul")
H3 = helpers.XmlConstructor.tagger("h3")
P = helpers.XmlConstructor.tagger("p")
LI = helpers.XmlConstructor.tagger("li")
STRONG = helpers.XmlConstructor.tagger("strong")
IMG = helpers.XmlConstructor.tagger("img", True)
INPUT = helpers.XmlConstructor.tagger("input", True)
I18N = helpers.I18N
XTABLE = widgets.Table
XML = helpers.XML
XTRD = widgets.TableData
XTRH = widgets.TableHead
XFOOTER = widgets.TableFooterPagination
LABEL = helpers.XmlConstructor.tagger("label")
XSECTION = helpers.XSECTION
TD = helpers.XmlConstructor.tagger("td")
TR = helpers.XmlConstructor.tagger("tr")
BR = helpers.XmlConstructor.tagger("br", True)
__pragma__('kwargs')
class Visualizar(helpers.XmlConstructor):
def __init__(
self,
id_matricula,
id_escola,
ano_letivo,
prof_pai,
prof_mae,
id_aluno,
nome_do_aluno,
sexo,
data_de_nascimento,
nome_do_pai,
nome_da_mae,
naturalidade,
endereco,
serie,
nome_do_responsavel,
data_mat,
novato,
admitido,
unidade_admitido,
ano_anterior,
serie_ant,
resultado_final,
resultado_anterior,
numero_aluno,
turma,
):
logo = "{0}/api/escolas/{1}/image".format(
window.PhanterPWA.ApiServer.remote_address,
id_escola
)
if data_de_nascimento is not None:
ano, mes, dia = data_de_nascimento.split("-")
data_de_nascimento = "{0}/{1}/{2}".format(dia, mes, ano)
else:
data_de_nascimento = STRONG("Não definido!", _style="color: red;")
if nome_do_responsavel is None:
nome_do_responsavel = STRONG("Não definido!", _style="color: red;")
if data_mat is not None:
ano, mes, dia = data_mat.split("-")
data_mat = "{0}/{1}/{2}".format(dia, mes, ano)
else:
data_mat = STRONG("Não definido!", _style="color: red;")
if sexo == "1" or sexo == 1:
l_o_aluno = "O aluno"
label_nome_do_aluno = "Nome do aluno: "
l_dados_do_aluno = "Dados do Aluno"
novato = "novato"
admitido = "admitido"
elif sexo == "2" or sexo == 2:
l_o_aluno = "A aluna"
label_nome_do_aluno = "Nome da aluna: "
l_dados_do_aluno = "Dados da Aluna"
novato = "novata"
admitido = "admitida"
else:
l_o_aluno = "O(A) aluno(a)"
label_nome_do_aluno = "Nome do(a) aluno(a): "
l_dados_do_aluno = "Dados do(a) Aluno(a)"
novato = "novato(a)"
admitido = "admitido(a)"
xml_endereco = TR(
TD(
DIV(
STRONG("Endereço: ", _class="rotulo"),
SPAN(STRONG("Não definido!", _style="color: red;"), _class="dado"),
_class="label_e_campo"
),
_colspan=2,
_class="label_e_campo_wrapper"
)
)
if endereco is not None and endereco != "":
xml_endereco = TR(
TD(
DIV(
STRONG("Endereço: ", _class="rotulo"),
SPAN(endereco, _class="dado"),
_class="label_e_campo"
),
_colspan=2,
_class="label_e_campo_wrapper"
)
)
xml_naturalidade = TD(
DIV(
STRONG("Naturalidade: ", _class="rotulo"),
SPAN(STRONG("Não definido!", _style="color: red;"), _class="dado"),
_class="label_e_campo"
),
_class="label_e_campo_wrapper"
)
xml_turma_atual = TR(
TD(
DIV(
STRONG(l_o_aluno, " não está em uma turma ainda!", _style="color: red;"),
_class="label_e_campo"
),
_colspan=3,
_class="label_e_campo_wrapper"
)
)
if turma is not None:
xml_resultado_final = ""
xml_numero_aluno = ""
colspan_turma = 3
if resultado_final is not None:
colspan_turma -= 1
xml_resultado_final = TD(
DIV(
STRONG("Resultado Final: "), resultado_final,
_class="label_e_campo"
),
_colspan=1,
_class="label_e_campo_wrapper"
)
if str(numero_aluno).isdigit():
colspan_turma -= 1
xml_numero_aluno = TD(
DIV(
STRONG("Número: "), numero_aluno,
_class="label_e_campo"
),
_colspan=1,
_class="label_e_campo_wrapper"
)
xml_turma_atual = TR(
TD(
DIV(
STRONG("Turma : "), turma,
_class="label_e_campo"
),
_colspan=colspan_turma,
_class="label_e_campo_wrapper"
),
xml_numero_aluno,
xml_resultado_final
)
xml_ano_anterior = ""
if serie_ant is not None:
if ano_anterior is None:
ano_anterior = STRONG("Não definido!", _style="color: red;")
if resultado_anterior is None:
resultado_anterior = STRONG("Não definido!", _style="color: red;")
xml_ano_anterior = XSECTION(
LABEL("Sobre o resultado anterior"),
DIV(
XTABLE(
"tabela_turma_aluno_{0}".format(id_matricula),
TR(
TD(
DIV(
STRONG("Ano Letivo: ", _class="rotulo"),
SPAN(ano_anterior, _class="dado"),
_class="label_e_campo"
),
_class="label_e_campo_wrapper"
),
TD(
DIV(
STRONG("Serie anterior: ", _class="rotulo"),
SPAN(serie_ant, _class="dado"),
_class="label_e_campo"
),
_class="label_e_campo_wrapper"
),
TD(
DIV(
STRONG("Resultado Final: ", _class="rotulo"),
SPAN(resultado_anterior, _class="dado"),
_class="label_e_campo"
),
_class="label_e_campo_wrapper"
),
_class="phanterpwa-widget-table-data phanterpwa-widget"
),
),
_class="modal_dados_dos_aluno_matricula e-padding_20"
)
)
if naturalidade is not None:
xml_naturalidade = TD(
DIV(
STRONG("Naturalidade: ", _class="rotulo"),
SPAN(naturalidade, _class="dado"),
_class="label_e_campo"
),
_class="label_e_campo_wrapper"
)
xml_nome_do_pai = ""
if nome_do_pai is not None and nome_do_pai != "":
xml_nome_do_pai = TR(
TD(
DIV(
STRONG("Nome do Pai: ", _class="rotulo"),
SPAN(nome_do_pai, _class="dado"),
_class="label_e_campo"
),
_class="label_e_campo_wrapper",
_colspan=2
)
)
if prof_pai is not None:
xml_nome_do_pai = TR(
TD(
DIV(
STRONG("Nome do Pai: ", _class="rotulo"),
SPAN(nome_do_pai, _class="dado"),
_class="label_e_campo"
),
_class="label_e_campo_wrapper"
),
TD(
DIV(
STRONG("Profissão: ", _class="rotulo"),
SPAN(prof_pai, _class="dado"),
_class="label_e_campo"
),
_class="label_e_campo_wrapper"
)
)
xml_nome_da_mae = TR(
TD(
DIV(
STRONG("Nome da Mãe: ", _class="rotulo"),
SPAN(STRONG("Não definido!", _style="color: red;"), _class="dado"),
_class="label_e_campo"
),
_class="label_e_campo_wrapper",
_colspan=2
),
_class="p-row"
)
if nome_da_mae is not None and nome_da_mae != "":
xml_nome_da_mae = TR(
TD(
DIV(
STRONG("Nome <NAME>: ", _class="rotulo"),
SPAN(nome_da_mae, _class="dado"),
_class="label_e_campo"
),
_class="label_e_campo_wrapper",
_colspan=2
)
)
if prof_mae is not None:
xml_nome_da_mae = TR(
TD(
DIV(
STRONG("Nome <NAME>: ", _class="rotulo"),
SPAN(nome_da_mae, _class="dado"),
_class="label_e_campo"
),
_class="label_e_campo_wrapper"
),
TD(
DIV(
STRONG("Profissão: ", _class="rotulo"),
SPAN(prof_mae, _class="dado"),
_class="label_e_campo"
),
_class="label_e_campo_wrapper"
)
)
xml_novato_admitido = ""
admi_no = [
"Unidade I Completada",
"Unidade II Completada",
"Unidade III Completada",
"Unidade IV Completada"
]
admi_com = "Início do Ano"
if admitido is True and novato is True:
xml_admi = SPAN(", porém ", STRONG("não foi possível determinar quando.", _style="red"))
if unidade_admitido == admi_com:
xml_admi = SPAN(" no ", STRONG("início do ano"), ".")
elif unidade_admitido in admi_no:
xml_admi = SPAN(" com a ", STRONG(unidade_admitido, _style="text-transform: lowercase;"), ".")
xml_novato_admitido = TR(
TD(
DIV(
l_o_aluno, " é ", STRONG(novato), " e foi ", admitido, xml_admi,
_class="label_e_campo"
),
_class="label_e_campo_wrapper",
_style="text-align: center; color: orange;",
_colspan=3
)
)
elif novato is True:
xml_novato_admitido = TR(
TD(
DIV(
l_o_aluno, " é ", STRONG(novato), ".",
_class="label_e_campo"
),
_class="label_e_campo_wrapper",
_style="text-align: center; color: orange;",
_colspan=3
)
)
elif admitido is True:
xml_admi = SPAN(", porém ", STRONG("não foi possível determinar quando.", _style="red"))
if unidade_admitido == admi_com:
xml_admi = SPAN(" no ", STRONG("início do ano"), ".")
elif unidade_admitido in admi_no:
xml_admi = SPAN(" com a ", STRONG(unidade_admitido, _style="text-transform: lowercase;"), ".")
xml_novato_admitido = TR(
TD(
DIV(
l_o_aluno, " foi ", admitido, xml_admi,
_class="label_e_campo"
),
_class="label_e_campo_wrapper",
_style="text-align: center; color: orange;",
_colspan=3
)
)
card = DIV(
DIV(
DIV(
DIV(
DIV(
DIV(
IMG(_src=logo),
_class="escolas-container-info-image",
_style="text-align: center;"
),
_class="p-col w1p100 w4p30"
),
DIV(
XSECTION(
LABEL(l_dados_do_aluno),
DIV(
XTABLE(
"tabela_dados_aluno_{0}".format(id_matricula),
TR(
TD(
DIV(
STRONG(label_nome_do_aluno, _class="rotulo"),
SPAN(nome_do_aluno, _class="dado"),
_class="label_e_campo"
),
_colspan="2",
_class="label_e_campo_wrapper"
),
_class="phanterpwa-widget-table-data phanterpwa-widget"
),
TR(
TD(
DIV(
STRONG("Data de nascimento: ", _class="rotulo"),
SPAN(data_de_nascimento, _class="dado"),
_class="label_e_campo"
),
_class="label_e_campo_wrapper"
),
xml_naturalidade,
_class="phanterpwa-widget-table-data phanterpwa-widget"
),
xml_nome_do_pai,
xml_nome_da_mae,
xml_endereco,
),
_class="modal_dados_dos_aluno_matricula e-padding_20"
)
),
BR(),
_class="p-col w1p100 w4p70",
),
DIV(
XSECTION(
LABEL("Sobre a Matrícula"),
DIV(
XTABLE(
"tabela_dados_matricula_{0}".format(id_matricula),
TR(
TD(
DIV(
STRONG("ID matrícula: ", _class="rotulo"),
SPAN(id_matricula, _class="dado"),
_class="label_e_campo"
),
_class="label_e_campo_wrapper"
),
TD(
DIV(
STRONG("Ano Letivo: ", _class="rotulo"),
SPAN(ano_letivo, _class="dado"),
_class="label_e_campo"
),
_class="label_e_campo_wrapper"
),
TD(
DIV(
STRONG("Data de matrícula: ", _class="rotulo"),
SPAN(data_mat, _class="dado"),
_class="label_e_campo"
),
_class="label_e_campo_wrapper"
),
_class="phanterpwa-widget-table-data phanterpwa-widget"
),
TR(
TD(
DIV(
STRONG("Série: ", _class="rotulo"),
SPAN(serie, _class="dado"),
_class="label_e_campo"
),
_class="label_e_campo_wrapper",
_colspan=3
),
_class="phanterpwa-widget-table-data phanterpwa-widget"
),
TR(
TD(
DIV(
STRONG("Nome do Responsável: ", _class="rotulo"),
SPAN(nome_do_responsavel, _class="dado"),
_class="label_e_campo"
),
_class="label_e_campo_wrapper",
_colspan=3
),
_class="phanterpwa-widget-table-data phanterpwa-widget"
),
xml_novato_admitido,
xml_turma_atual
),
_class="modal_dados_dos_aluno_matricula e-padding_20"
)
),
_class="p-col w1p100"
),
xml_ano_anterior,
_class="p-row"
),
_class="p-row e-padding_20w"
),
_class="card-de-matricula",
),
)
helpers.XmlConstructor.__init__(self, "div", False, card, _class="painel-visualizar-matricula")
class Matricula(helpers.XmlConstructor):
def __init__(self, index_instance, target, ano_letivo, id_escola, id_aluno, id_matricula=None):
self.index_instance = index_instance
self.ano_letivo = ano_letivo
self.id_escola = id_escola
self.id_aluno = id_aluno
self.id_matricula = id_matricula
self.id_ = window.PhanterPWA.get_id()
html = DIV(
DIV(
DIV(
DIV(preloaders.android, _style="width: 300px; height: 300px; overflow: hidden; margin: auto;"),
_style="text-align:center; padding: 50px 0;"
),
_class="p-col w1p100",
),
_id=self.id_,
)
html.html_to(target)
self.get_form_matricula()
def get_form_matricula(self):
window.PhanterPWA.GET(
"api",
"matricular",
self.ano_letivo,
self.id_escola,
self.id_aluno,
self.id_matricula,
onComplete=self.after_get
)
def after_get(self, data, ajax_status):
if ajax_status == "success":
json = data.responseJSON
if data.status == 202:
if json.razoes == "faltam_series":
window.PhanterPWA.open_way("series/{0}/{1}/matricula/{2}".format(self.arg1, self.arg2, self.arg0))
else:
json = data.responseJSON
self.process_data(json)
def process_data(self, json):
self.nova_matricula = True
if json.data.matricula.id is not js_undefined and json.data.matricula.id is not None:
self.nova_matricula = False
sexo = json.data.aluno.sexo
nome_aluno = json.data.aluno.nome
if self.arg1 == "aluno-conferido":
logo = "{0}/api/escolas/{1}/image".format(
window.PhanterPWA.ApiServer.remote_address,
self.arg2
)
ano_letivo = self.arg3
else:
logo = "{0}/api/escolas/{1}/image".format(
window.PhanterPWA.ApiServer.remote_address,
self.arg1
)
ano_letivo = self.arg2
P1 = "O(A) ALUNO(A) "
if sexo == "1" or sexo == 1 or sexo == "Masculino":
P1 = "O ALUNO "
elif sexo == "2" or sexo == 2 or sexo == "Feminino":
P1 = "A ALUNA "
if self.nova_matricula:
texto = CONCATENATE(
H3("Matricula de: ", STRONG(nome_aluno, _style="color: orange;")),
P("AGORA VAMOS CRIAR UMA NOVA MATRÍCULA PARA ", P1, "NO ANO LETIVO DE ", STRONG(ano_letivo),"."),
P("OBSERVE QUE ALGUNS DADOS NÃO PODEM SER ALTERADOS (COMO NOME DO ALUNO POR EXEMPLO), PARA ALTERAR ",
"VOCÊ PODE EDITAR A MATRÍCULA POSTERIORMENTE OU VOLTAR A TELA ANTERIOR."
)
)
else:
texto = CONCATENATE(
H3("Matricula de: ", STRONG(nome_aluno, _style="color: orange;")),
P(P1, "POSSUI UMA MATRÍCULA NO ANO LETIVO DE ", STRONG(ano_letivo), | |
import base64
import json
import logging
import os
import pathlib
import re
import subprocess
import tempfile
import warnings
from collections import defaultdict
from typing import Dict, List, Optional, Set, Tuple
import backoff
import google.auth
import google.auth.exceptions
import google.auth.transport.requests
import requests
import yaml
from square.dtypes import (
Filepath, K8sClientCert, K8sConfig, K8sResource, MetaManifest,
)
FNAME_TOKEN = Filepath("/var/run/secrets/kubernetes.io/serviceaccount/token")
FNAME_CERT = Filepath("/var/run/secrets/kubernetes.io/serviceaccount/ca.crt")
# Convenience: global logger instance to avoid repetitive code.
logit = logging.getLogger("square")
def load_kubeconfig(fname: Filepath,
context: Optional[str]) -> Tuple[str, dict, dict, bool]:
"""Return user name and user- and cluster information.
    Returns empty values and ``err=True`` on error.
Inputs:
fname: str
Path to kubeconfig file, eg "~/.kube/config.yaml"
context: str
Kubeconf context. Use `None` to use default context.
Returns:
name, user info, cluster info
"""
# Load `kubeconfig`.
try:
kubeconf = yaml.safe_load(open(fname))
except (IOError, PermissionError) as err:
logit.error(f"{err}")
return ("", {}, {}, True)
# Find the user and cluster information based on the specified `context`.
try:
# Use default context unless specified.
ctx = context if context else kubeconf["current-context"]
del context
try:
# Find the correct context.
ctx = [_ for _ in kubeconf["contexts"] if _["name"] == ctx]
assert len(ctx) == 1
ctx = ctx[0]["context"]
# Unpack the cluster- and user name from the current context.
clustername, username = ctx["cluster"], ctx["user"]
# Find the information for the current cluster and user.
user_info = [_ for _ in kubeconf["users"] if _["name"] == username]
cluster_info = [_ for _ in kubeconf["clusters"] if _["name"] == clustername]
assert len(user_info) == len(cluster_info) == 1
except AssertionError:
logit.error(f"Could not find information for context <{ctx}>")
return ("", {}, {}, True)
# Unpack the cluster- and user information.
cluster_name = cluster_info[0]["name"]
cluster_info_out = cluster_info[0]["cluster"]
cluster_info_out["name"] = cluster_name
user_info = user_info[0]["user"]
del cluster_info
except (KeyError, TypeError):
logit.error(f"Kubeconfig YAML file <{fname}> is invalid")
return ("", {}, {}, True)
# Success. The explicit `dicts()` are to satisfy MyPy.
logit.info(f"Loaded {ctx} from Kubeconfig file <{fname}>")
return (username, dict(user_info), dict(cluster_info_out), False)
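# Illustrative sketch, not part of the original module: load the default
# context from a local kubeconfig. The path is an assumption, and `Filepath`
# is assumed to behave like `pathlib.Path`, as its use in this module suggests.
def _example_load_kubeconfig():
    fname = Filepath("~/.kube/config").expanduser()
    username, user, cluster, err = load_kubeconfig(fname, context=None)
    if not err:
        logit.info(f"user={username} server={cluster.get('server')}")
    return err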
def load_incluster_config(
fname_token: Filepath = FNAME_TOKEN,
fname_cert: Filepath = FNAME_CERT) -> Tuple[K8sConfig, bool]:
"""Return K8s access config from Pod service account.
    Returns a default `K8sConfig` and ``err=True`` if we are not running in a Pod.
    Inputs:
        fname_token: Filepath
            Path to the service account token file.
        fname_cert: Filepath
            Path to the service account CA certificate.
Returns:
Config
"""
# Every K8s pod has this.
server_ip = os.getenv('KUBERNETES_PORT_443_TCP_ADDR', None)
fname_cert = pathlib.Path(fname_cert)
fname_token = pathlib.Path(fname_token)
# Sanity checks: URL and service account files either exist, or we are not
# actually inside a Pod.
try:
assert server_ip is not None
assert fname_cert.exists()
assert fname_token.exists()
except AssertionError:
logit.debug("Could not find incluster (service account) credentials.")
return K8sConfig(), True
# Return the compiled K8s access configuration.
logit.info("Use incluster (service account) credentials.")
return K8sConfig(
url=f'https://{server_ip}',
token=fname_token.read_text(),
ca_cert=fname_cert,
client_cert=None,
version="",
name="",
), False
def load_gke_config(
fname: Filepath,
context: Optional[str],
disable_warnings: bool = False) -> Tuple[K8sConfig, bool]:
"""Return K8s access config for GKE cluster described in `kubeconfig`.
    Returns a default `K8sConfig` and ``err=True`` if `kubeconfig` does not
    exist or could not be parsed.
    Inputs:
        fname: Filepath
            Path to the kubeconfig file.
context: str
Kubeconf context. Use `None` to use default context.
disable_warnings: bool
Whether or not do disable GCloud warnings.
Returns:
Config
"""
# Parse the kubeconfig file.
name, user, cluster, err = load_kubeconfig(fname, context)
if err:
return (K8sConfig(), True)
# Unpack the self signed certificate (Google does not register the K8s API
# server certificate with a public CA).
try:
ssl_ca_cert_data = base64.b64decode(cluster["certificate-authority-data"])
except KeyError:
logit.debug(f"Context {context} in <{fname}> is not a GKE config")
return (K8sConfig(), True)
# Save the certificate to a temporary file. This is only necessary because
# the requests library will need a path to the CA file - unfortunately, we
# cannot just pass it the content.
_, tmp = tempfile.mkstemp(text=False)
ssl_ca_cert = Filepath(tmp)
ssl_ca_cert.write_bytes(ssl_ca_cert_data)
with warnings.catch_warnings(record=disable_warnings):
try:
cred, project_id = google.auth.default(
scopes=['https://www.googleapis.com/auth/cloud-platform']
)
cred.refresh(google.auth.transport.requests.Request())
token = cred.token
except google.auth.exceptions.DefaultCredentialsError as e:
logit.error(str(e))
return (K8sConfig(), True)
# Return the config data.
logit.info("Assuming GKE cluster.")
return K8sConfig(
url=cluster["server"],
        token=token,
ca_cert=ssl_ca_cert,
client_cert=None,
version="",
name=cluster["name"],
), False
def load_eks_config(
fname: Filepath,
context: Optional[str],
disable_warnings: bool = False) -> Tuple[K8sConfig, bool]:
"""Return K8s access config for EKS cluster described in `kubeconfig`.
    Returns a default `K8sConfig` and ``err=True`` if `kubeconfig` does not
    exist or could not be parsed.
Inputs:
fname: Filepath
Kubeconfig file.
context: str
Kubeconf context. Use `None` to use default context.
disable_warnings: bool
Whether or not do disable GCloud warnings.
Returns:
Config
"""
# Parse the kubeconfig file.
name, user, cluster, err = load_kubeconfig(fname, context)
if err:
return (K8sConfig(), True)
# Get a copy of all env vars. We will pass that one along to the
# sub-process, plus the env vars specified in the kubeconfig file.
env = os.environ.copy()
# Unpack the self signed certificate (AWS does not register the K8s API
# server certificate with a public CA).
try:
ssl_ca_cert_data = base64.b64decode(cluster["certificate-authority-data"])
cmd = user["exec"]["command"]
args = user["exec"]["args"]
env_kubeconf = user["exec"].get("env", [])
except KeyError:
logit.debug(f"Context {context} in <{fname}> is not an EKS config")
return (K8sConfig(), True)
# Convert a None value (valid value in YAML) to an empty list of env vars.
env_kubeconf = env_kubeconf if env_kubeconf else []
# Save the certificate to a temporary file. This is only necessary because
# the Requests library will need a path to the CA file - unfortunately, we
# cannot just pass it the content.
_, tmp = tempfile.mkstemp(text=False)
ssl_ca_cert = Filepath(tmp)
ssl_ca_cert.write_bytes(ssl_ca_cert_data)
# Compile the name, arguments and env vars for the command specified in kubeconf.
cmd_args = [cmd] + args
env_kubeconf = {_["name"]: _["value"] for _ in env_kubeconf}
env.update(env_kubeconf)
logit.debug(f"Requesting EKS certificate: {cmd_args} with envs: {env_kubeconf}")
# Pre-format the command for the log message.
log_cmd = (
f"kubeconf={fname} kubectx={context} "
f"cmd={cmd_args} env={env_kubeconf}"
)
# Run the specified command to produce the access token. That program must
# produce a YAML document on stdout that specifies the bearer token.
try:
out = subprocess.run(cmd_args, stdout=subprocess.PIPE, env=env)
token = yaml.safe_load(out.stdout.decode("utf8"))["status"]["token"]
except FileNotFoundError:
logit.error(f"Could not find {cmd} application to get token ({log_cmd})")
return (K8sConfig(), True)
except (KeyError, yaml.YAMLError):
logit.error(f"Token manifest produce by {cmd_args} is corrupt ({log_cmd})")
return (K8sConfig(), True)
except TypeError:
logit.error(f"The YAML token produced by {cmd_args} is corrupt ({log_cmd})")
return (K8sConfig(), True)
# Return the config data.
logit.info("Assuming EKS cluster.")
return K8sConfig(
url=cluster["server"],
token=token,
ca_cert=ssl_ca_cert,
client_cert=None,
version="",
name=cluster["name"],
), False
def load_minikube_config(fname: Filepath,
context: Optional[str]) -> Tuple[K8sConfig, bool]:
"""Load minikube configuration from `fname`.
    Returns a default `K8sConfig` and ``err=True`` on error.
    Inputs:
        fname: Filepath
            Path to kubeconfig file, eg "~/.kube/config.yaml"
context: str
Kubeconf context. Use `None` to use default context.
Returns:
Config
"""
# Parse the kubeconfig file.
name, user, cluster, err = load_kubeconfig(fname, context)
if err:
return (K8sConfig(), True)
# Minikube uses client certificates to authenticate. We need to pass those
# to the HTTP client of our choice when we create the session.
try:
client_cert = K8sClientCert(
crt=user["client-certificate"],
key=user["client-key"],
)
# Return the config data.
logit.info("Assuming Minikube cluster.")
return K8sConfig(
url=cluster["server"],
token="",
ca_cert=cluster["certificate-authority"],
client_cert=client_cert,
version="",
name=cluster["name"],
), False
except KeyError:
logit.debug(f"Context {context} in <{fname}> is not a Minikube config")
return (K8sConfig(), True)
def load_kind_config(fname: Filepath, context: Optional[str]) -> Tuple[K8sConfig, bool]:
"""Load Kind configuration from `fname`.
https://github.com/bsycorp/kind
Kind is just another Minikube cluster. The only notable difference
is that it does not store its credentials as files but directly in
the Kubeconfig file. This function will copy those files into /tmp.
    Returns a default `K8sConfig` and ``err=True`` on error.
    Inputs:
        fname: Filepath
            Path to kubeconfig for the Kind cluster.
context: str
Kubeconf context. Use `None` to use default context.
Returns:
Config
"""
# Parse the kubeconfig file.
name, user, cluster, err = load_kubeconfig(fname, context)
if err:
return (K8sConfig(), True)
# Kind and Minikube use client certificates to authenticate. We need to
# pass those to the HTTP client of our choice when we create the session.
try:
client_crt = base64.b64decode(user["client-certificate-data"]).decode()
client_key = base64.b64decode(user["client-key-data"]).decode()
client_ca = base64.b64decode(cluster["certificate-authority-data"]).decode()
path = Filepath(tempfile.mkdtemp())
p_client_crt = path / "kind-client.crt"
p_client_key = path / "kind-client.key"
p_ca = path / "kind.ca"
p_client_crt.write_text(client_crt)
p_client_key.write_text(client_key)
p_ca.write_text(client_ca)
client_cert = K8sClientCert(crt=p_client_crt, key=p_client_key)
# Return the config data.
logit.debug("Assuming Minikube/Kind cluster.")
return K8sConfig(
url=cluster["server"],
token="",
ca_cert=p_ca,
client_cert=client_cert,
version="",
name=cluster["name"],
), False
except KeyError:
logit.debug(f"Context {context} in <{fname}> is not a Minikube config")
return (K8sConfig(), True)
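# The `load_auto_config` definition below is truncated in this copy. The helper
# sketched here is an assumption of how such a dispatcher could chain the
# loaders defined above; it is not the original implementation.
def _example_load_auto_config(fname, context, disable_warnings=False):
    # In-cluster credentials take precedence and need no kubeconfig file.
    cfg, err = load_incluster_config()
    if not err:
        return cfg, False
    # Token-based cloud configs share the same signature.
    for loader in (load_gke_config, load_eks_config):
        cfg, err = loader(fname, context, disable_warnings)
        if not err:
            return cfg, False
    # Certificate-based local clusters.
    for loader in (load_minikube_config, load_kind_config):
        cfg, err = loader(fname, context)
        if not err:
            return cfg, False
    logit.error(f"Could not find a usable configuration in <{fname}>")
    return K8sConfig(), True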
def load_auto_config(
fname: | |
import os
import os.path as op
import shutil
import glob
import warnings
import numpy as np
from scipy import sparse
from numpy.testing import assert_array_equal, assert_array_almost_equal
from nose.tools import assert_equal, assert_true, assert_false, assert_raises
from mne.datasets import testing
from mne import (read_label, stc_to_label, read_source_estimate,
read_source_spaces, grow_labels, read_labels_from_annot,
write_labels_to_annot, split_label, spatial_tris_connectivity,
read_surface)
from mne.label import Label, _blend_colors, label_sign_flip
from mne.utils import (_TempDir, requires_sklearn, get_subjects_dir,
run_tests_if_main, slow_test)
from mne.fixes import assert_is, assert_is_not
from mne.label import _n_colors
from mne.source_space import SourceSpaces
from mne.source_estimate import mesh_edges
from mne.externals.six import string_types
from mne.externals.six.moves import cPickle as pickle
warnings.simplefilter('always') # enable b/c these tests throw warnings
data_path = testing.data_path(download=False)
subjects_dir = op.join(data_path, 'subjects')
src_fname = op.join(subjects_dir, 'sample', 'bem', 'sample-oct-6-src.fif')
stc_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-lh.stc')
real_label_fname = op.join(data_path, 'MEG', 'sample', 'labels',
'Aud-lh.label')
real_label_rh_fname = op.join(data_path, 'MEG', 'sample', 'labels',
'Aud-rh.label')
v1_label_fname = op.join(subjects_dir, 'sample', 'label', 'lh.V1.label')
fwd_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
src_bad_fname = op.join(data_path, 'subjects', 'fsaverage', 'bem',
'fsaverage-ico-5-src.fif')
label_dir = op.join(subjects_dir, 'sample', 'label', 'aparc')
test_path = op.join(op.split(__file__)[0], '..', 'io', 'tests', 'data')
label_fname = op.join(test_path, 'test-lh.label')
label_rh_fname = op.join(test_path, 'test-rh.label')
# This code was used to generate the "fake" test labels:
# for hemi in ['lh', 'rh']:
# label = Label(np.unique((np.random.rand(100) * 10242).astype(int)),
# hemi=hemi, comment='Test ' + hemi, subject='fsaverage')
# label.save(op.join(test_path, 'test-%s.label' % hemi))
# XXX : this was added for backward compatibility, to keep the old test_label_in_src
def _stc_to_label(stc, src, smooth, subjects_dir=None):
"""Compute a label from the non-zero sources in an stc object.
Parameters
----------
stc : SourceEstimate
The source estimates.
src : SourceSpaces | str | None
The source space over which the source estimates are defined.
If it's a string it should the subject name (e.g. fsaverage).
Can be None if stc.subject is not None.
smooth : int
Number of smoothing iterations.
subjects_dir : str | None
Path to SUBJECTS_DIR if it is not set in the environment.
Returns
-------
labels : list of Labels | list of list of Labels
The generated labels. If connected is False, it returns
a list of Labels (one per hemisphere). If no Label is available
in a hemisphere, None is returned. If connected is True,
it returns for each hemisphere a list of connected labels
ordered in decreasing order depending of the maximum value in the stc.
If no Label is available in an hemisphere, an empty list is returned.
"""
src = stc.subject if src is None else src
if isinstance(src, string_types):
subject = src
else:
subject = stc.subject
if isinstance(src, string_types):
subjects_dir = get_subjects_dir(subjects_dir)
surf_path_from = op.join(subjects_dir, src, 'surf')
rr_lh, tris_lh = read_surface(op.join(surf_path_from,
'lh.white'))
rr_rh, tris_rh = read_surface(op.join(surf_path_from,
'rh.white'))
rr = [rr_lh, rr_rh]
tris = [tris_lh, tris_rh]
else:
if not isinstance(src, SourceSpaces):
raise TypeError('src must be a string or a set of source spaces')
if len(src) != 2:
raise ValueError('source space should contain the 2 hemispheres')
rr = [1e3 * src[0]['rr'], 1e3 * src[1]['rr']]
tris = [src[0]['tris'], src[1]['tris']]
labels = []
cnt = 0
for hemi_idx, (hemi, this_vertno, this_tris, this_rr) in enumerate(
zip(['lh', 'rh'], stc.vertices, tris, rr)):
this_data = stc.data[cnt:cnt + len(this_vertno)]
e = mesh_edges(this_tris)
e.data[e.data == 2] = 1
n_vertices = e.shape[0]
e = e + sparse.eye(n_vertices, n_vertices)
clusters = [this_vertno[np.any(this_data, axis=1)]]
cnt += len(this_vertno)
clusters = [c for c in clusters if len(c) > 0]
if len(clusters) == 0:
this_labels = None
else:
this_labels = []
colors = _n_colors(len(clusters))
for c, color in zip(clusters, colors):
idx_use = c
for k in range(smooth):
e_use = e[:, idx_use]
data1 = e_use * np.ones(len(idx_use))
idx_use = np.where(data1)[0]
label = Label(idx_use, this_rr[idx_use], None, hemi,
'Label from stc', subject=subject,
color=color)
this_labels.append(label)
this_labels = this_labels[0]
labels.append(this_labels)
return labels
def assert_labels_equal(l0, l1, decimal=5, comment=True, color=True):
if comment:
assert_equal(l0.comment, l1.comment)
if color:
assert_equal(l0.color, l1.color)
for attr in ['hemi', 'subject']:
attr0 = getattr(l0, attr)
attr1 = getattr(l1, attr)
msg = "label.%s: %r != %r" % (attr, attr0, attr1)
assert_equal(attr0, attr1, msg)
for attr in ['vertices', 'pos', 'values']:
a0 = getattr(l0, attr)
a1 = getattr(l1, attr)
assert_array_almost_equal(a0, a1, decimal)
def test_copy():
"""Test label copying"""
label = read_label(label_fname)
label_2 = label.copy()
label_2.pos += 1
assert_array_equal(label.pos, label_2.pos - 1)
def test_label_subject():
"""Test label subject name extraction
"""
label = read_label(label_fname)
assert_is(label.subject, None)
assert_true('unknown' in repr(label))
label = read_label(label_fname, subject='fsaverage')
assert_true(label.subject == 'fsaverage')
assert_true('fsaverage' in repr(label))
def test_label_addition():
"""Test label addition."""
pos = np.random.RandomState(0).rand(10, 3)
values = np.arange(10.) / 10
idx0 = list(range(7))
idx1 = list(range(7, 10)) # non-overlapping
idx2 = list(range(5, 10)) # overlapping
l0 = Label(idx0, pos[idx0], values[idx0], 'lh', color='red')
l1 = Label(idx1, pos[idx1], values[idx1], 'lh')
l2 = Label(idx2, pos[idx2], values[idx2], 'lh', color=(0, 1, 0, .5))
assert_equal(len(l0), len(idx0))
l_good = l0.copy()
l_good.subject = 'sample'
l_bad = l1.copy()
l_bad.subject = 'foo'
assert_raises(ValueError, l_good.__add__, l_bad)
assert_raises(TypeError, l_good.__add__, 'foo')
assert_raises(ValueError, l_good.__sub__, l_bad)
assert_raises(TypeError, l_good.__sub__, 'foo')
# adding non-overlapping labels
l01 = l0 + l1
assert_equal(len(l01), len(l0) + len(l1))
assert_array_equal(l01.values[:len(l0)], l0.values)
assert_equal(l01.color, l0.color)
# subtraction
assert_labels_equal(l01 - l0, l1, comment=False, color=False)
assert_labels_equal(l01 - l1, l0, comment=False, color=False)
    # adding overlapping labels
l = l0 + l2
i0 = np.where(l0.vertices == 6)[0][0]
i2 = np.where(l2.vertices == 6)[0][0]
i = np.where(l.vertices == 6)[0][0]
assert_equal(l.values[i], l0.values[i0] + l2.values[i2])
assert_equal(l.values[0], l0.values[0])
assert_array_equal(np.unique(l.vertices), np.unique(idx0 + idx2))
assert_equal(l.color, _blend_colors(l0.color, l2.color))
# adding lh and rh
l2.hemi = 'rh'
bhl = l0 + l2
assert_equal(bhl.hemi, 'both')
assert_equal(len(bhl), len(l0) + len(l2))
assert_equal(bhl.color, l.color)
assert_true('BiHemiLabel' in repr(bhl))
# subtraction
assert_labels_equal(bhl - l0, l2)
assert_labels_equal(bhl - l2, l0)
bhl2 = l1 + bhl
assert_labels_equal(bhl2.lh, l01)
assert_equal(bhl2.color, _blend_colors(l1.color, bhl.color))
assert_array_equal((l2 + bhl).rh.vertices, bhl.rh.vertices) # rh label
assert_array_equal((bhl + bhl).lh.vertices, bhl.lh.vertices)
assert_raises(TypeError, bhl.__add__, 5)
# subtraction
bhl_ = bhl2 - l1
assert_labels_equal(bhl_.lh, bhl.lh, comment=False, color=False)
assert_labels_equal(bhl_.rh, bhl.rh)
assert_labels_equal(bhl2 - l2, l0 + l1)
assert_labels_equal(bhl2 - l1 - l0, l2)
bhl_ = bhl2 - bhl2
assert_array_equal(bhl_.vertices, [])
@testing.requires_testing_data
def test_label_in_src():
"""Test label in src"""
src = read_source_spaces(src_fname)
label = read_label(v1_label_fname)
# construct label from source space vertices
vert_in_src = np.intersect1d(label.vertices, src[0]['vertno'], True)
where = np.in1d(label.vertices, vert_in_src)
pos_in_src = label.pos[where]
values_in_src = label.values[where]
label_src = Label(vert_in_src, pos_in_src, values_in_src,
hemi='lh').fill(src)
# check label vertices
vertices_status = np.in1d(src[0]['nearest'], label.vertices)
vertices_in = np.nonzero(vertices_status)[0]
vertices_out = np.nonzero(np.logical_not(vertices_status))[0]
assert_array_equal(label_src.vertices, vertices_in)
assert_array_equal(np.in1d(vertices_out, label_src.vertices), False)
# check values
value_idx = np.digitize(src[0]['nearest'][vertices_in], vert_in_src, True)
assert_array_equal(label_src.values, values_in_src[value_idx])
# test exception
vertices = np.append([-1], vert_in_src)
assert_raises(ValueError, Label(vertices, hemi='lh').fill, src)
@testing.requires_testing_data
def test_label_io_and_time_course_estimates():
"""Test IO for label + stc files
"""
stc = read_source_estimate(stc_fname)
label = read_label(real_label_fname)
stc_label = stc.in_label(label)
assert_true(len(stc_label.times) == stc_label.data.shape[1])
assert_true(len(stc_label.vertices[0]) == stc_label.data.shape[0])
@testing.requires_testing_data
def test_label_io():
"""Test IO of label files
"""
tempdir = _TempDir()
label = read_label(label_fname)
# label attributes
assert_equal(label.name, 'test-lh')
assert_is(label.subject, None)
assert_is(label.color, None)
# save and reload
label.save(op.join(tempdir, 'foo'))
label2 = read_label(op.join(tempdir, 'foo-lh.label'))
assert_labels_equal(label, label2)
# pickling
dest = op.join(tempdir, 'foo.pickled')
with open(dest, 'wb') as fid:
pickle.dump(label, fid, pickle.HIGHEST_PROTOCOL)
with open(dest, 'rb') as fid:
label2 = pickle.load(fid)
assert_labels_equal(label, label2)
def _assert_labels_equal(labels_a, labels_b, ignore_pos=False):
"""Make sure two sets of labels are equal"""
for label_a, label_b in zip(labels_a, labels_b):
assert_array_equal(label_a.vertices, label_b.vertices)
assert_true(label_a.name == label_b.name)
assert_true(label_a.hemi == label_b.hemi)
if not ignore_pos:
assert_array_equal(label_a.pos, label_b.pos)
@testing.requires_testing_data
def test_annot_io():
"""Test I/O from and to *.annot files"""
# copy necessary files from fsaverage to tempdir
tempdir = _TempDir()
subject = 'fsaverage'
label_src = os.path.join(subjects_dir, 'fsaverage', 'label')
surf_src = os.path.join(subjects_dir, 'fsaverage', 'surf')
label_dir = os.path.join(tempdir, subject, 'label')
surf_dir = os.path.join(tempdir, subject, 'surf')
os.makedirs(label_dir)
os.mkdir(surf_dir)
shutil.copy(os.path.join(label_src, 'lh.PALS_B12_Lobes.annot'), label_dir)
shutil.copy(os.path.join(label_src, 'rh.PALS_B12_Lobes.annot'), label_dir)
shutil.copy(os.path.join(surf_src, 'lh.white'), surf_dir)
shutil.copy(os.path.join(surf_src, 'rh.white'), surf_dir)
# read original labels
assert_raises(IOError, read_labels_from_annot, subject, 'PALS_B12_Lobesey',
subjects_dir=tempdir)
labels = read_labels_from_annot(subject, 'PALS_B12_Lobes',
subjects_dir=tempdir)
# test saving parcellation only covering one hemisphere
parc = [l for l in labels if l.name == 'LOBE.TEMPORAL-lh']
write_labels_to_annot(parc, subject, 'myparc', subjects_dir=tempdir)
parc1 = read_labels_from_annot(subject, 'myparc', subjects_dir=tempdir)
parc1 = [l for l in parc1 if not l.name.startswith('unknown')]
assert_equal(len(parc1), len(parc))
for l1, l in zip(parc1, parc):
assert_labels_equal(l1, l)
# test saving only one hemisphere
parc = [l for l in labels if l.name.startswith('LOBE')]
write_labels_to_annot(parc, subject, 'myparc2', hemi='lh',
subjects_dir=tempdir)
annot_fname = os.path.join(tempdir, subject, 'label', '%sh.myparc2.annot')
assert_true(os.path.isfile(annot_fname % 'l'))
assert_false(os.path.isfile(annot_fname % 'r'))
parc1 = read_labels_from_annot(subject, 'myparc2',
annot_fname=annot_fname % 'l',
subjects_dir=tempdir)
parc_lh = [l for l in parc if l.name.endswith('lh')]
for l1, l in zip(parc1, parc_lh):
assert_labels_equal(l1, l)
@testing.requires_testing_data
def test_read_labels_from_annot():
"""Test reading labels from FreeSurfer parcellation
"""
# test some invalid inputs
assert_raises(ValueError, read_labels_from_annot, 'sample', hemi='bla',
subjects_dir=subjects_dir)
assert_raises(ValueError, read_labels_from_annot, 'sample',
annot_fname='bla.annot', subjects_dir=subjects_dir)
# read labels using hemi specification
    labels_lh = read_labels_from_annot('sample', hemi='lh', subjects_dir=subjects_dir)
#!/usr/bin/env python2.7
import getpass
import logging
import os
import re
import sys
from abc import ABCMeta, abstractmethod, abstractproperty
from collections import defaultdict, namedtuple, OrderedDict
from textwrap import dedent
from generation_context import GenerationContext
from generation_utils import GenerationUtils
from target_template import Target
logger = logging.getLogger(__name__)
class BuildComponent(object):
"""Represents a feature of a maven project that should generate things in BUILD files.
For example, the MainClassComponent knows when a project should generate a jvm_binary, and
generates the appropriate code to go in the project's top-level BUILD file.
"""
__metaclass__ = ABCMeta
class DuplicateTargetException(Exception):
"""Thrown when a two targets with the same name are created for the same BUILD file."""
def __init__(self, pom_file, generation_context=None):
"""Creates a new BuildComponent for the given pom file.
:param squarepants.pom_file.PomFile pom_file: the extracted information from a project's
pom.xml.
:return:
"""
self.pom = pom_file
self.gen_context = generation_context or GenerationContext()
@abstractproperty
def exists(self):
"""Whether this build component exists (should be generated) for the project pom.xml."""
@abstractmethod
def generate(self):
"""Generates and returns code that should be added to the top-level BUILD file for this pom.xml.
Also creates any BUILD files for subdirectories which pertain to this component.
"""
def get_project_target_name(self, name):
"""Convenience function to return the inferred target name appropriate for this component.
For BUILD.gen files this will just be 'name', but for BUILD.aux's this will be name-aux.
"""
return self.gen_context.infer_target_name(self.pom.directory, name)
def has_project_target(self, name):
"""Whether the project-level BUILD file already has a target with the given name."""
return self.get_project_target_name(name) in self.pom.project_target_names
def format_project(self, target_type, **kwargs):
return target_type.format(symbols=self.pom.properties, file_name=self.pom.path,
**kwargs)
def create_project_target(self, target_type, name, **kwargs):
"""Formats and returns a target for the project-level BUILD file.
Registers the target's name in project_target_names.
:return: the formatted target string.
"""
name = self.get_project_target_name(name)
if name in self.pom.project_target_names:
raise BuildComponent.DuplicateTargetException("Duplicate target '{name}' in {build}."
.format(name=name,
build=self.pom.directory))
self.pom.project_target_names.add(name)
return self.format_project(target_type, name=name, **kwargs)
def get_jvm_platform_name(self):
# if self.is_test:
# return self.get_jvm_platform_test_name()
options = self.pom.java_options
if not any([options.target_level, options.source_level, options.compile_args]):
return None
args = [GenerationUtils.symbol_substitution(self.pom.properties, arg, symbols_name=self.pom.path)
for arg in options.compile_args]
return self.gen_context.jvm_platform(options.target_level,
options.source_level,
args,)
def get_jvm_platform_test_name(self):
return self.get_jvm_platform_name()
class SubdirectoryComponent(BuildComponent):
"""Represents a BuildComponent whose existence is inferred from a project's directory structure.
"""
__metaclass__ = ABCMeta
@abstractproperty
def subdirectory(self):
"""The subdirectory that indicates whether this component exists, relative to the project pom
directory.
"""
@abstractproperty
def target_type(self):
"""The target type generated, e.g. Target.java_protobuf_library."""
@abstractproperty
def target_name(self):
"""The name of the generated target, e.g. 'proto'."""
@abstractproperty
def pom_dependency_list(self):
"""Returns the list that this target injects dependencies into.
Eg, pom.lib_deps.
"""
@property
def target_spec(self):
"""The spec for the (primary) target this component generates."""
return self.gen_context.format_spec(self.directory, self.target_name)
def generate_subdirectory_code(self):
"""Generates and returns code for the subdirectory's BUILD file."""
return self.format_project(self.target_type, **self.generate_target_arguments())
def generate_target_arguments(self):
"""Generates the arguments that will be passed into the target_type.format().
Subclasses are expected to update the arguments appropriately.
"""
return {
'name': self.gen_context.infer_target_name(self.directory, self.target_name),
}
def generate_project_dependency_code(self):
"""Generates a dependencies() target to be injected into the project BUILD file."""
return self.create_project_target(Target.dependencies,
name=self.target_name,
dependencies=[self.target_spec],
)
@property
def directory(self):
"""Convenience property to get the directory path relative to the working directory."""
return os.path.join(self.pom.directory, self.subdirectory)
@property
def exists(self):
subdir = self.directory
return os.path.exists(subdir) and os.path.isdir(subdir) and os.listdir(subdir)
def inject_generated_dependencies(self):
"""Powers the mechanism by which generated targets are injected as dependencies into other
generated targets.
Updates a dependency list in the PomFile, like lib_deps or test_deps, according to
the pom_dependency_list.
"""
if self.pom_dependency_list is not None:
self.pom_dependency_list.append(self.target_spec)
def generate(self):
subdir = self.directory
if not os.path.exists(subdir):
os.makedirs(subdir)
self.gen_context.write_build_file(self.directory, self.generate_subdirectory_code())
project_code = self.generate_project_dependency_code()
self.inject_generated_dependencies()
return project_code
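    # Illustrative sketch (hypothetical subclass; names are made up): a component with
    # subdirectory 'src/main/foo' and target_name 'foo' would, via generate(), (1) write a
    # BUILD file under <project>/src/main/foo containing the formatted 'foo' target, (2)
    # append that target's spec to the relevant pom dependency list, and (3) return a
    # dependencies() target for the project-level BUILD file pointing at the new spec.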
class JarFilesMixin(object):
"""Methods for BuildComponents that also generated a jar_library() that they depend on."""
@property
def jar_deps(self):
"""Jar dependencies from pom.xml."""
return self.pom.lib_jar_deps
@property
def jar_target_contents(self):
"""Formatted jar_library() for injection into the subdirectory BUILD file."""
return self.format_jar_library(self.gen_context.infer_target_name(self.directory, 'jar_files'),
[str(s).strip() for s in self.jar_deps if s],
pom_file=self.pom)
@property
def jar_target_spec(self):
"""Spec address for the generated jar_library()."""
if not self.jar_target_contents:
return ''
return self.gen_context.format_spec(
'', self.gen_context.infer_target_name(self.directory, 'jar_files'))
def generate_subdirectory_code(self):
return super(JarFilesMixin, self).generate_subdirectory_code() + self.jar_target_contents
@classmethod
def format_jar_library(cls, target_name, jar_deps, pom_file=None):
"""Given a list of jar dependencies, format a jar_library target.
Exposed for testing.
:param target_name: the target name for the jar_library.
:param jar_deps: - <jar> dependency names to add to the jar_library.
:returns: A jar_library declaration.
:rtype: string
"""
if not jar_deps:
return ''
# There is a bug in Target.jar_library.format(). See test_target_template.py
#return Target.jar_library.format(
# name=target_name,
# jars=sorted(set(jar_deps)),
# symbols=pom_file.properties if pom_file else None,
# file_name=pom_file.path if pom_file else None,
#)
jar_library = dedent('''
jar_library(name='{name}',
jars=[{jars}
],
)
''').format(name=target_name,
jars=','.join('\n{}{}'.format(' '*4, jar) for jar in sorted(set(jar_deps))))
if pom_file:
jar_library = GenerationUtils.symbol_substitution(pom_file.properties, jar_library)
return GenerationUtils.autoindent(jar_library)
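    # Rough sketch of the output of format_jar_library (the jar strings below are illustrative
    # only; real entries come from the pom's <jar> dependencies, then pass through symbol
    # substitution and GenerationUtils.autoindent):
    #
    #   jar_library(name='jar_files',
    #     jars=[
    #         jar(org='com.example', name='foo', rev='1.2.3'),
    #         jar(org='com.example', name='bar', rev='4.5')
    #     ],
    #   )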
class MainClassComponent(BuildComponent):
"""Generates a jvm_binary if the pom.xml specifies a main class."""
@property
def exists(self):
        return bool(self.pom.mainclass)
def generate(self):
main_class = self.pom.mainclass
dependencies = [
self.gen_context.format_spec(name=self.get_project_target_name('lib')),
]
deploy_excludes = self.pom.signed_jars_formatted_excludes or None
signed_jar_target = ''
if deploy_excludes:
signed_jar_target_name = '{}-signed-jars'.format(self.pom.default_target_name)
signed_jar_target = self.create_project_target(Target.signed_jars,
name=signed_jar_target_name,
dependencies=self.pom.signed_jars_dependencies,
strip_version = str(self.pom.signed_jars_strip_version),
)
dependencies.append(
self.gen_context.format_spec(name=self.get_project_target_name(signed_jar_target_name))
)
manifest_entries = self.pom.manifest_entries or None
extra_fingerprint_files = []
app_manifest = 'app-manifest.yaml'
if os.path.exists(os.path.join(os.path.dirname(self.pom.path), app_manifest)):
extra_fingerprint_files.append(app_manifest)
fingerprint_target = self.create_project_target(
Target.fingerprint,
name='extra-files',
sources=extra_fingerprint_files,
dependencies=None,
)
return self.create_project_target(Target.jvm_binary,
name=self.pom.default_target_name,
main=main_class,
basename=self.pom.artifact_id,
dependencies=dependencies,
manifest_entries=manifest_entries,
deploy_excludes=deploy_excludes,
platform=self.get_jvm_platform_name(),
shading_rules=self.pom.shading_rules or None,
) + signed_jar_target + fingerprint_target
class MainResourcesComponent(SubdirectoryComponent):
"""Generates targets for src/main/resources."""
@property
def subdirectory(self):
return 'src/main/resources'
@property
def target_type(self):
return Target.resources
@property
def target_name(self):
return 'resources'
@property
def pom_dependency_list(self):
return self.pom.resources
def generate_target_arguments(self):
args = super(MainResourcesComponent, self).generate_target_arguments()
args.update({
'sources': "rglobs('*', exclude=[globs('BUILD*')])",
'dependencies': [],
})
return args
def generate_project_dependency_code(self):
pass
class TestResourcesComponent(MainResourcesComponent):
"""Generates targets for src/test/resources."""
@property
def subdirectory(self):
return 'src/test/resources'
@property
def pom_dependency_list(self):
return self.pom.test_resources
class MainProtobufLibraryComponent(JarFilesMixin, SubdirectoryComponent):
"""Generates targets for src/main/proto.
Some minor hacks in the 'exists' property of this target to deal with external-protos,
but not nearly as bad as before.
"""
@property
def subdirectory(self):
return 'src/main/proto'
@property
def exists(self):
if MainExternalProtosComponent(self.pom).exists:
return False
return super(MainProtobufLibraryComponent, self).exists
@property
def target_type(self):
return Target.java_protobuf_library
@property
def target_name(self):
return 'proto'
@property
def pom_dependency_list(self):
return self.pom.lib_deps
@property
def _deps(self):
"""Dependencies that get injected into the generated target's dependency list."""
return self.pom.lib_deps
@property
def _proto_sources(self):
return "rglobs('*.proto')"
def generate_target_arguments(self):
args = super(MainProtobufLibraryComponent, self).generate_target_arguments()
        # If there is no src/main/java:lib target, then we don't need to tack on a
        # uniquifying suffix; this is the only artifact that will be published for this
        # package.
artifactId_suffix = ('-proto' if MainJavaLibraryComponent(self.pom).exists else '')
dependencies = self._deps + [self.jar_target_spec, ':{}'.format(self._proto_sources_name)]
args.update({
'sources': self._proto_sources,
'imports': [],
'dependencies': format_dependency_list(dependencies),
'platform': self.get_jvm_platform_name(),
'groupId' : self.pom.deps_from_pom.group_id,
'artifactId' : self.pom.deps_from_pom.artifact_id + artifactId_suffix,
})
return args
@property
def _proto_sources_name(self):
return self.gen_context.infer_target_name(self.directory, 'proto-sources')
@property
def proto_resources_contents(self):
return self.format_project(
Target.resources,
name=self._proto_sources_name,
sources=self._proto_sources,
)
@property
def wire_proto_path_contents(self):
return self.format_project(
Target.wire_proto_path,
name=self.gen_context.infer_target_name(self.directory, 'path'),
sources=self._proto_sources,
dependencies=format_dependency_list(find_wire_proto_paths(self._deps)),
)
def generate_subdirectory_code(self):
return super(MainProtobufLibraryComponent, self).generate_subdirectory_code() \
+ self.wire_proto_path_contents + self.proto_resources_contents
class TestProtobufLibraryComponent(MainProtobufLibraryComponent):
"""Generates targets for src/test/proto."""
@property
def subdirectory(self):
return 'src/test/proto'
@property
def pom_dependency_list(self):
return self.pom.test_deps
@property
def _deps(self):
return self.pom.lib_deps + self.pom.test_deps
def generate_project_dependency_code(self):
pass
def generate_target_arguments(self):
args = super(TestProtobufLibraryComponent, self).generate_target_arguments()
args['platform'] = self.get_jvm_platform_test_name()
return args
class MainJavaLibraryComponent(JarFilesMixin, SubdirectoryComponent):
"""Generates targets for src/main/java."""
@property
def subdirectory(self):
return 'src/main/java'
@property
def target_type(self):
return Target.java_library
@property
def target_name(self):
return 'lib'
@property
def pom_dependency_list(self):
return None
@property
def artifactId_suffix(self):
return ''
@property
def _deps(self):
"""Dependencies that get injected into the generated target's dependency list."""
return self.pom.lib_deps
def generate_target_arguments(self):
args = super(MainJavaLibraryComponent, self).generate_target_arguments()
library_deps = self._deps + [self.jar_target_spec]
module_path = os.path.dirname(self.pom.path)
if self.pom.mainclass:
spec_name = self.gen_context.infer_target_name(module_path, 'extra-files')
library_deps.append(self.gen_context.format_spec(path=module_path, name=spec_name))
artifactId = self.pom.deps_from_pom.artifact_id + self.artifactId_suffix
args.update({
'sources': "rglobs('*.java')",
'dependencies': format_dependency_list(library_deps),
'resources': self.pom.resources,
'groupId': self.pom.deps_from_pom.group_id,
'artifactId': artifactId,
'platform': self.get_jvm_platform_name(),
})
return args
class TestJavaLibraryComponent(MainJavaLibraryComponent):
"""Generates junit_tests for src/test/java."""
    INTEGRATION_TEST_PATTERN = re.compile(r'.*IT\.java')
@property
def artifactId_suffix(self):
return '-test'
@property
def subdirectory(self):
return 'src/test/java'
@property
def jar_deps(self):
return self.pom.lib_jar_deps + self.pom.test_jar_deps
@property
def _deps(self):
deps = self.pom.lib_deps + self.pom.test_deps + ["'testing-support/src/main/java:lib'"]
main_lib = MainJavaLibraryComponent(self.pom)
if main_lib.exists:
deps.append(main_lib.target_spec)
return deps
def generate_target_arguments(self):
args = super(TestJavaLibraryComponent, self).generate_target_arguments()
args.update({
'sources': "rglobs('*.java')",
'resources': self.pom.test_resources,
'platform': self.get_jvm_platform_test_name(),
})
return args
def generate_subdirectory_code(self):
common_args = dict(
cwd=self.pom.directory,
extra_env_vars=self.pom.java_options.test_env_vars or None,
extra_jvm_options=self.pom.java_options.test_jvm_args or None,
platform=self.get_jvm_platform_test_name(),
dependencies=["':{}'".format(self.gen_context.infer_target_name(self.directory, 'lib'))],
)
test_target = self.format_project(Target.junit_tests,
name=self.gen_context.infer_target_name(self.directory, 'test'),
sources="rglobs('*Test.java')",
**common_args
)
test_target += self.format_project(Target.junit_tests,
name=self.gen_context.infer_target_name(self.directory, 'integration-tests'),
sources="rglobs('*IT.java')",
tags=['integration'],
**common_args
)
return test_target + super(TestJavaLibraryComponent,
self).generate_subdirectory_code()
def generate_project_dependency_code(self):
return | |
#!/usr/bin/env python
import os
import re
import sys
import logging
from collections import namedtuple, OrderedDict
try:
    from collections.abc import Iterable  # Python 3.3+ location; removed from collections in 3.10
except ImportError:  # Python 2 fallback
    from collections import Iterable
from functools import partial
if sys.version_info.major >= 3:
from inspect import signature
else:
from funcsigs import signature
logger = logging.getLogger('map_folder_structure')
# =========================================================================== #
# Define structures
# =========================================================================== #
Struc = namedtuple('Struc', ['name', 'flags', 'mapping', 'subfolders'])
Struc.__new__.__defaults__ = (None, [], {}, [])
Keyword = namedtuple('Keyword', ['name', 'value'])
Keyword.__new__.__defaults__ = (None, None)
Metadata = namedtuple('Metadata', ['name', 'value'])
Metadata.__new__.__defaults__ = (None, None)
# =========================================================================== #
# Define functions
# =========================================================================== #
def map_word_folder(list_words, list_folders):
"""
Function to map a list of words using list_folders
Parameters
----------
list_words : List[str]
List of words to map. e.g ``path.split()``
list_folders : List[Struc]
List of ``Struc`` for every folder to map
Returns
-------
list_keywords : List[Keyword]
List of keywords generated by mapping ``list_folders`` to ``list_words``
"""
# get the first word of the list
word = list_words.pop(0)
# check if the word matches one of the folders in list_folders
list_keywords = []
for i, folder in enumerate(list_folders):
# check by name
if folder.name == word:
break
# check by regex
elif re.match(folder.name, word):
break
else:
return list_keywords
# get the matching folder
folder = list_folders[i]
    # append flags (check Keyword first: a Keyword is itself a namedtuple, hence also Iterable)
    if isinstance(folder.flags, Keyword):
        list_keywords.append(folder.flags)
    elif isinstance(folder.flags, Iterable):
        list_keywords.extend(folder.flags)
# append mapping
for mapping, function in folder.mapping.items():
if re.match(mapping, word):
if isinstance(function, Iterable):
for func in function:
list_keywords = apply_mapping(word, list_words, func, list_keywords)
else:
list_keywords = apply_mapping(word, list_words, function, list_keywords)
# append subfolders
if len(list_words) > 0:
list_keywords.extend(map_word_folder(list_words, folder.subfolders))
return list_keywords
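# The helper below is a usage sketch (not part of the original module): it shows how Struc,
# Keyword and map_word_folder fit together on a hypothetical folder structure.
def _example_map_word_folder():
    """Minimal usage sketch for map_word_folder (folder names below are illustrative only)."""
    example_structure = [
        Struc(name='projects', subfolders=[
            Struc(name=r'proj_.*', flags=[Keyword('type', 'project')]),
        ]),
    ]
    # Splitting a path into folder names and mapping it against the structure yields the
    # flags attached to each matching level, here [Keyword(name='type', value='project')].
    return map_word_folder('projects/proj_alpha'.split('/'), example_structure)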
def apply_mapping(word, list_words, function, list_keywords):
"""
Function to apply a mapping function on the [word, list_words]
If the ``function`` takes ``1`` argument, ``word`` is passed to the function.
If the ``function`` takes ``2`` arguments, ``word`` and ``list_words`` is passed to the function.
Parameters
----------
word : str
Word to map.
list_words : List[str]
List of words to map.
function : function
Function to apply for mapping.
list_keywords : List[Keyword]
List of keywords to append at.
Returns
-------
list_keywords : List[Keyword]
List of keywords.
Raises
------
UserWarning
If the function takes 0 or more then 2 arguments.
"""
# get the number of parameters of the function
if type(function) is partial:
sig = signature(function.func)
num_args = len(sig.parameters)
num_args -= len(function.args) + len(function.keywords)
else:
sig = signature(function)
num_args = len(sig.parameters)
# pass parameter to the functions
if num_args == 1:
list_keywords.extend(function(word))
elif num_args == 2:
list_keywords.extend(function(word, list_words))
else:
raise UserWarning('Not implemented a function with a signature of\n{}'.format(sig))
return list_keywords
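# Usage sketch for apply_mapping (not part of the original module): the mapping functions
# below are illustrative; what matters is that dispatch is driven by their number of arguments.
def _example_apply_mapping():
    def only_word(word):
        return [Keyword('word', word)]
    def word_and_rest(word, rest):
        return [Keyword('first', word), Keyword('remaining', len(rest))]
    keywords = apply_mapping('alpha', ['beta'], only_word, [])        # 1-arg: receives word only
    return apply_mapping('alpha', ['beta'], word_and_rest, keywords)  # 2-arg: receives word + rest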
def dict_mapping(word, list_words, adict):
"""
Function to map a dictionary of keywords.
dictionary with:
key = pattern
value = Keyword or list(Keywords)
Parameters
----------
word : str
Word to map.
list_words : List[str]
List of words to map.
adict : dict
Dictionary with `key = pattern` and `value = Keyword or list(Keywords)`
Returns
-------
list_keywords : List[Keyword]
List of keywords.
Raises
------
UserWarning
if value in dict is not a ``Keyword`` or a list of ``Keyword``.
"""
list_keywords = []
for w in [word] + list_words:
for k, v in adict.items():
if re.match(k, w):
if type(v) is Keyword:
list_keywords.append(v)
                elif isinstance(v, Iterable) and all(type(i) is Keyword for i in v):
list_keywords.extend(v)
else:
                    raise UserWarning('value in dict is not a Keyword or a list of Keywords\nk: {}\nv: {}'.format(k, v))
return list_keywords
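# Usage sketch for dict_mapping (not part of the original module; the pattern and keyword are
# illustrative). _in_text() wraps the pattern so it can match anywhere inside a folder name.
def _example_dict_mapping():
    adict = _in_text({'sim': Keyword('kind', 'simulation')})
    return dict_mapping('run01', ['simulation.v3'], adict)  # -> [Keyword('kind', 'simulation')]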
def map_mapping(word, list_words, adict):
"""
Function to map a dictionary of functions.
dictionary with:
key = pattern
value = function or list(function)
Parameters
----------
word : str
Word to map.
list_words : List[str]
List of words to map.
adict : dict
Dictionary with `key = pattern` and `value = function or list(function)`
Returns
-------
list_keywords : List[Keyword]
List of keywords.
"""
list_keywords = []
for w in [word] + list_words:
for k, function in adict.items():
if re.match(k, w):
if isinstance(function, Iterable):
for func in function:
list_keywords = apply_mapping(w, [], func, list_keywords)
else:
list_keywords = apply_mapping(w, [], function, list_keywords)
return list_keywords
def dict_mapping_with_condition(word, list_words, adict):
"""
Function to map a dictionary of ``(condition, keywords)``.
Only maps the keyword if the condition is matched.
dictionary with:
key = pattern
value = ``(condition, Keyword)`` or (condition, List(Keyword))
Parameters
----------
word : str
Word to map.
list_words : List[str]
List of words to map.
adict : dict
Dictionary with `key = pattern` and `value = (condition, Keyword) or (condition, List(Keyword))`
Returns
-------
list_keywords : List[Keyword]
List of keywords.
Raises
------
UserWarning
if value in dict is not a ``Keyword`` or a list of ``Keyword``.
"""
list_keywords = []
for w in [word] + list_words:
for k, (condition, v) in adict.items():
if re.match(k, w) and any(bool(re.match(w1, condition)) for w1 in [word] + list_words):
if type(v) is Keyword:
list_keywords.append(v)
                elif isinstance(v, Iterable) and all(type(i) is Keyword for i in v):
list_keywords.extend(v)
else:
                    raise UserWarning('value in dict is not a Keyword or a list of Keywords\nk: {}\nv: {}'.format(k, v))
return list_keywords
def general_mapping(word, list_words):
"""
Wrapper function to map ``list_general_maps``.
Parameters
----------
word : str
Word to map.
list_words : List[str]
List of words to map.
Returns
-------
list_keywords : List[Keyword]
List of keywords.
Raises
------
UserWarning
if value in dict is not a ``Keyword`` or a list of ``Keyword``.
"""
return dict_mapping(word, list_words, dict_general_mapping)
def generate_keywords(path):
"""
Function to generate the keywords for a given path
by splitting into folders and mapping them with ``list_folders``
Parameters
----------
path : str
Path to be mapped
Returns
-------
list_keywords : List[Keyword]
List of keywords.
"""
list_keywords = map_word_folder(path.split(os.path.sep), structure_folders)
dummy_dict = {}
for keyword in list_keywords:
dummy_dict[keyword.name] = keyword
# print(len(list_keywords), len(dummy_dict))
list_keywords = list(dummy_dict.values())
return list_keywords
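# Note: the dummy_dict pass in generate_keywords keeps only the *last* Keyword seen for each
# name, e.g. [Keyword('kind', 'a'), Keyword('kind', 'b')] collapses to [Keyword('kind', 'b')];
# duplicates produced by overlapping folder rules are therefore dropped.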
# =========================================================================== #
# helper functions
# =========================================================================== #
def remove_version(string):
"""
Function to remove the version number.
For folders ending with ``.v[0-9]``
Parameters
----------
string : str
Folder name.
Returns
-------
folder_name : str
Folder name without version tag.
Examples
--------
Example folder
>>> remove_version('simulation.v3')
'simulation'
"""
    return re.sub(r'\.v[0-9]*$', '', string)
def _convert_number_string(number_string):
"""
Function to convert mixed number character strings to a number.
Mapping: ``{'K': 1000, 'M': 1000000}``
Parameters
----------
number_string : str
Number string to be mapped
Returns
-------
number : int or float
Converted number, tries to return a integer if possible.
Examples
--------
>>> _convert_number_string('64K')
64000 # 64 * 1000
>>> _convert_number_string('0.2M')
200000 # 0.2 * 1000000
"""
map_number = {'K': 1000, 'M': 1000000}
pure_number = re.sub('[A-Z]', '', number_string)
rv = int(pure_number) if pure_number.isdigit() else float(pure_number)
for i in number_string:
if i.isalpha():
rv *= map_number[i]
return rv
def _in_text(adict):
"""
Wrapper to add .* around matches so pattern can be found in words.
Parameters
----------
adict : dict
        Dictionary with key = `pattern`
Returns
-------
wrapped_dict : dict
        Dictionary with key = `.*pattern.*`
"""
return dict((".*{}.*".format(k), v) for k, v in adict.items())
# =========================================================================== #
# maintenance functions
# =========================================================================== #
def get_keywords_values(alist):
"""
Function to get the keywords + values.
Parameters
----------
alist : List[Struc]
``list_folders``
Returns
-------
list_keywords : List[Tuple]
list of tuples with keyword.name and all found keyword values.
Examples
--------
Show the possible keyword + values
>>> get_keywords_values(structure_folders)
"""
list_keywords = []
# get Keywords
for folder in alist:
list_keywords.extend([(k.name, [k.value]) for k in folder.flags])
if len(folder.subfolders) > 0:
list_keywords.extend(get_keywords_values(folder.subfolders))
return list_keywords
def get_possible_keywords():
"""
Function to get the list of possible keywords.
Returns
-------
list_keywords : List[str]
sorted, unique list of keyword names.
Examples
--------
Show the unique list of possible keyword.
>>> get_possible_keywords()
"""
return sorted(list(set([k for k, v in get_keywords_values(structure_folders)])))
def get_possible_combinations():
"""
Function to get the possible combinations of keywords and values.
Returns
-------
possible_combinations : OrderedDict
Ordered dictionary of the keyword.name and possible values
Examples
--------
get possible combinations of keywords and values.
>>> get_possible_combinations()
"""
# condense
adict = dict()
for k, v in get_keywords_values(structure_folders):
if k in adict:
adict[k] += v
else:
adict[k] = v
# return adict
rv = OrderedDict([(k, sorted(list(set(adict[k])))) for k in sorted(adict.keys())])
return rv
def show_possible_combinations():
"""
Show the possible combinations of keywords and values.
Returns
-------
None
Examples
--------
Show possible combinations of keywords and values.
>>> show_possible_combinations()
"""
for k, v in get_possible_combinations().items():
print("'{}'-> {}".format(k, v) if len(v) != 0 else "'{}'".format(k))
# =========================================================================== #
# Mapping
# =========================================================================== #
# | |
# file: scopy/ScoRepresent/pubchem.py
# -*- coding: utf-8 -*-
#Created on Wed Jul 17 10:15:43 2019
#
#@Author: <NAME>, <NAME>
#@Institution: CBDD Group, Xiangya School of Pharmaceutical Science, CSU, China
#@Homepage: http://www.scbdd.com
#@Mail: <EMAIL>; <EMAIL>
#@Blog: https://blog.moyule.me
import os
from rdkit import Chem
from rdkit import DataStructs
from .. import ScoConfig
# This module is derived from our previous work
# these are SMARTS patterns corresponding to the PubChem fingerprints
# https://astro.temple.edu/~tua87106/list_fingerprints.pdf
# ftp://ftp.ncbi.nlm.nih.gov/pubchem/specifications/pubchem_fingerprints.txt
class PubChem(object):
""" This module is derived from our previous work.
These are SMARTS patterns corresponding to the PubChem fingerprints:
(1) https://astro.temple.edu/~tua87106/list_fingerprints.pdf
(2) ftp://ftp.ncbi.nlm.nih.gov/pubchem/specifications/pubchem_fingerprints.txt
"""
def __init__(self):
"""Initialization
"""
with open(os.path.join(ScoConfig.PubChemDir, 'pubchem.txt')) as f_obj:
self.smartsPatts = eval(f_obj.read())
self.PubchemKeys = None
def InitKeys(self, keyList, keyDict):
""" *Internal Use Only*
generates SMARTS patterns for the keys, run once
"""
assert len(keyList) == len(keyDict.keys()), 'length mismatch'
for key in keyDict.keys():
patt, count = keyDict[key]
if patt != '?':
sma = Chem.MolFromSmarts(patt)
if not sma:
print('SMARTS parser error for key #%d: %s' % (key, patt))
else:
keyList[key - 1] = sma, count
def calcPubChemFingerPart1(self, mol, **kwargs):
"""Calculate PubChem Fingerprints (1-115; 263-881)
:param mol: molecule
:type mol: rdkit.Chem.rdchem.Mol
:return: fingerprint
:rtype: rdkit.DataStructs.cDataStructs.SparseBitVect
"""
# global PubchemKeys
if self.PubchemKeys is None:
self.PubchemKeys = [(None, 0)] * len(self.smartsPatts.keys())
self.InitKeys(self.PubchemKeys, self.smartsPatts)
ctor = kwargs.get('ctor', DataStructs.SparseBitVect)
res = ctor(len(self.PubchemKeys) + 1)
for i, (patt, count) in enumerate(self.PubchemKeys):
if patt is not None:
if count == 0:
res[i + 1] = mol.HasSubstructMatch(patt)
else:
matches = mol.GetSubstructMatches(patt)
if len(matches) > count:
res[i + 1] = 1
return res
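    # Usage sketch (hypothetical SMILES; assumes RDKit and ScoConfig.PubChemDir are available):
    #   pc = PubChem()
    #   mol = Chem.MolFromSmiles('c1ccccc1O')   # phenol
    #   fp = pc.calcPubChemFingerPart1(mol)     # SparseBitVect; set bits = matched SMARTS keys
    #   on_bits = list(fp.GetOnBits())          # indices of the bits that are set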
def func_1(self,mol,bits):
""" *Internal Use Only*
Calculate PubChem Fingerprints (116-263)
"""
ringSize=[]
temp={3:0, 4:0, 5:0, 6:0, 7:0, 8:0, 9:0, 10:0}
AllRingsAtom = mol.GetRingInfo().AtomRings()
for ring in AllRingsAtom:
ringSize.append(len(ring))
for k,v in temp.items():
if len(ring) == k:
temp[k]+=1
if temp[3]>=2:
bits[0]=1;bits[7]=1
elif temp[3]==1:
bits[0]=1
else:
pass
if temp[4]>=2:
bits[14]=1;bits[21]=1
elif temp[4]==1:
bits[14]=1
else:
pass
if temp[5]>=5:
bits[28]=1;bits[35]=1;bits[42]=1;bits[49]=1;bits[56]=1
elif temp[5]==4:
bits[28]=1;bits[35]=1;bits[42]=1;bits[49]=1
elif temp[5]==3:
bits[28]=1;bits[35]=1;bits[42]=1
elif temp[5]==2:
bits[28]=1;bits[35]=1
elif temp[5]==1:
bits[28]=1
else:
pass
if temp[6]>=5:
bits[63]=1;bits[70]=1;bits[77]=1;bits[84]=1;bits[91]=1
elif temp[6]==4:
bits[63]=1;bits[70]=1;bits[77]=1;bits[84]=1
elif temp[6]==3:
bits[63]=1;bits[70]=1;bits[77]=1
elif temp[6]==2:
bits[63]=1;bits[70]=1
elif temp[6]==1:
bits[63]=1
else:
pass
if temp[7]>=2:
bits[98]=1;bits[105]=1
elif temp[7]==1:
bits[98]=1
else:
pass
if temp[8]>=2:
bits[112]=1;bits[119]=1
elif temp[8]==1:
bits[112]=1
else:
pass
if temp[9]>=1:
bits[126]=1;
else:
pass
if temp[10]>=1:
bits[133]=1;
else:
pass
return ringSize,bits
def func_2(self,mol,bits):
""" *Internal Use Only*
saturated or aromatic carbon-only ring
"""
AllRingsBond = mol.GetRingInfo().BondRings()
ringSize=[]
temp={3:0, 4:0, 5:0, 6:0, 7:0, 8:0, 9:0, 10:0}
for ring in AllRingsBond:
######### saturated
nonsingle = False
for bondIdx in ring:
if mol.GetBondWithIdx(bondIdx).GetBondType().name!='SINGLE':
nonsingle = True
break
if nonsingle == False:
ringSize.append(len(ring))
for k,v in temp.items():
if len(ring) == k:
temp[k]+=1
######## aromatic carbon-only
aromatic = True
AllCarb = True
for bondIdx in ring:
if mol.GetBondWithIdx(bondIdx).GetBondType().name!='AROMATIC':
aromatic = False
break
for bondIdx in ring:
BeginAtom = mol.GetBondWithIdx(bondIdx).GetBeginAtom()
EndAtom = mol.GetBondWithIdx(bondIdx).GetEndAtom()
if BeginAtom.GetAtomicNum() != 6 or EndAtom.GetAtomicNum() != 6:
AllCarb = False
break
if aromatic == True and AllCarb == True:
ringSize.append(len(ring))
for k,v in temp.items():
if len(ring) == k:
temp[k]+=1
if temp[3]>=2:
bits[1]=1;bits[8]=1
elif temp[3]==1:
bits[1]=1
else:
pass
if temp[4]>=2:
bits[15]=1;bits[22]=1
elif temp[4]==1:
bits[15]=1
else:
pass
if temp[5]>=5:
bits[29]=1;bits[36]=1;bits[43]=1;bits[50]=1;bits[57]=1
elif temp[5]==4:
bits[29]=1;bits[36]=1;bits[43]=1;bits[50]=1
elif temp[5]==3:
bits[29]=1;bits[36]=1;bits[43]=1
elif temp[5]==2:
bits[29]=1;bits[36]=1
elif temp[5]==1:
bits[29]=1
else:
pass
if temp[6]>=5:
bits[64]=1;bits[71]=1;bits[78]=1;bits[85]=1;bits[92]=1
elif temp[6]==4:
bits[64]=1;bits[71]=1;bits[78]=1;bits[85]=1
elif temp[6]==3:
bits[64]=1;bits[71]=1;bits[78]=1
elif temp[6]==2:
bits[64]=1;bits[71]=1
elif temp[6]==1:
bits[64]=1
else:
pass
if temp[7]>=2:
bits[99]=1;bits[106]=1
elif temp[7]==1:
bits[99]=1
else:
pass
if temp[8]>=2:
bits[113]=1;bits[120]=1
elif temp[8]==1:
bits[113]=1
else:
pass
if temp[9]>=1:
bits[127]=1;
else:
pass
if temp[10]>=1:
bits[134]=1;
else:
pass
return ringSize, bits
def func_3(self,mol,bits):
""" *Internal Use Only*
saturated or aromatic nitrogen-containing
"""
AllRingsBond = mol.GetRingInfo().BondRings()
ringSize=[]
temp={3:0, 4:0, 5:0, 6:0, 7:0, 8:0, 9:0, 10:0}
for ring in AllRingsBond:
######### saturated
nonsingle = False
for bondIdx in ring:
if mol.GetBondWithIdx(bondIdx).GetBondType().name!='SINGLE':
nonsingle = True
break
if nonsingle == False:
ringSize.append(len(ring))
for k,v in temp.items():
if len(ring) == k:
temp[k]+=1
######## aromatic nitrogen-containing
aromatic = True
ContainNitro = False
for bondIdx in ring:
if mol.GetBondWithIdx(bondIdx).GetBondType().name!='AROMATIC':
aromatic = False
break
for bondIdx in ring:
BeginAtom = mol.GetBondWithIdx(bondIdx).GetBeginAtom()
EndAtom = mol.GetBondWithIdx(bondIdx).GetEndAtom()
if BeginAtom.GetAtomicNum() == 7 or EndAtom.GetAtomicNum() == 7:
ContainNitro = True
break
if aromatic == True and ContainNitro == True:
ringSize.append(len(ring))
for k,v in temp.items():
if len(ring) == k:
temp[k]+=1
if temp[3]>=2:
bits[2]=1;bits[9]=1
elif temp[3]==1:
bits[2]=1
else:
pass
if temp[4]>=2:
bits[16]=1;bits[23]=1
elif temp[4]==1:
bits[16]=1
else:
pass
if temp[5]>=5:
bits[30]=1;bits[37]=1;bits[44]=1;bits[51]=1;bits[58]=1
elif temp[5]==4:
bits[30]=1;bits[37]=1;bits[44]=1;bits[51]=1
elif temp[5]==3:
bits[30]=1;bits[37]=1;bits[44]=1
elif temp[5]==2:
bits[30]=1;bits[37]=1
elif temp[5]==1:
bits[30]=1
else:
pass
if temp[6]>=5:
bits[65]=1;bits[72]=1;bits[79]=1;bits[86]=1;bits[93]=1
elif temp[6]==4:
bits[65]=1;bits[72]=1;bits[79]=1;bits[86]=1
elif temp[6]==3:
bits[65]=1;bits[72]=1;bits[79]=1
elif temp[6]==2:
bits[65]=1;bits[72]=1
elif temp[6]==1:
bits[65]=1
else:
pass
if temp[7]>=2:
bits[100]=1;bits[107]=1
elif temp[7]==1:
bits[100]=1
else:
pass
if temp[8]>=2:
bits[114]=1;bits[121]=1
elif temp[8]==1:
bits[114]=1
else:
pass
if temp[9]>=1:
bits[128]=1;
else:
pass
if temp[10]>=1:
bits[135]=1;
else:
pass
return ringSize, bits
def func_4(self,mol,bits):
""" *Internal Use Only*
saturated or aromatic heteroatom-containing
"""
AllRingsBond = mol.GetRingInfo().BondRings()
ringSize=[]
temp={3:0, 4:0, 5:0, 6:0, 7:0, 8:0, 9:0, 10:0}
for ring in AllRingsBond:
######### saturated
nonsingle = False
for bondIdx in ring:
if mol.GetBondWithIdx(bondIdx).GetBondType().name!='SINGLE':
nonsingle = True
break
if nonsingle == False:
ringSize.append(len(ring))
for k,v in temp.items():
if len(ring) == k:
temp[k]+=1
######## aromatic heteroatom-containing
aromatic = True
heteroatom = False
for bondIdx in ring:
if mol.GetBondWithIdx(bondIdx).GetBondType().name!='AROMATIC':
aromatic = False
break
for bondIdx in ring:
BeginAtom = mol.GetBondWithIdx(bondIdx).GetBeginAtom()
EndAtom = mol.GetBondWithIdx(bondIdx).GetEndAtom()
if BeginAtom.GetAtomicNum() not in [1,6] or EndAtom.GetAtomicNum() not in [1,6]:
heteroatom = True
break
if aromatic == True and heteroatom == True:
ringSize.append(len(ring))
for k,v in temp.items():
if len(ring) == k:
temp[k]+=1
if temp[3]>=2:
bits[3]=1;bits[10]=1
elif temp[3]==1:
bits[3]=1
else:
pass
if temp[4]>=2:
bits[17]=1;bits[24]=1
elif temp[4]==1:
bits[17]=1
else:
pass
if temp[5]>=5:
bits[31]=1;bits[38]=1;bits[45]=1;bits[52]=1;bits[59]=1
elif temp[5]==4:
bits[31]=1;bits[38]=1;bits[45]=1;bits[52]=1
elif temp[5]==3:
bits[31]=1;bits[38]=1;bits[45]=1
elif temp[5]==2:
bits[31]=1;bits[38]=1
elif temp[5]==1:
bits[31]=1
else:
pass
if temp[6]>=5:
bits[66]=1;bits[73]=1;bits[80]=1;bits[87]=1;bits[94]=1
elif temp[6]==4:
bits[66]=1;bits[73]=1;bits[80]=1;bits[87]=1
elif temp[6]==3:
bits[66]=1;bits[73]=1;bits[80]=1
elif temp[6]==2:
bits[66]=1;bits[73]=1
elif temp[6]==1:
bits[66]=1
else:
pass
if temp[7]>=2:
bits[101]=1;bits[108]=1
elif temp[7]==1:
bits[101]=1
else:
pass
if temp[8]>=2:
bits[115]=1;bits[122]=1
elif temp[8]==1:
bits[115]=1
else:
pass
if temp[9]>=1:
bits[129]=1;
else:
pass
if temp[10]>=1:
bits[136]=1;
else:
pass
return ringSize,bits
def func_5(self,mol,bits):
""" *Internal Use Only*
unsaturated non-aromatic carbon-only
"""
ringSize=[]
AllRingsBond = mol.GetRingInfo().BondRings()
temp={3:0, 4:0, 5:0, 6:0, 7:0, 8:0, 9:0, 10:0}
for ring in AllRingsBond:
unsaturated = False
nonaromatic = True
Allcarb = True
######### unsaturated
for bondIdx in ring:
if mol.GetBondWithIdx(bondIdx).GetBondType().name!='SINGLE':
unsaturated = True
break
######## non-aromatic
for bondIdx in ring:
if mol.GetBondWithIdx(bondIdx).GetBondType().name=='AROMATIC':
nonaromatic = False
break
######## allcarb
for bondIdx in ring:
BeginAtom = mol.GetBondWithIdx(bondIdx).GetBeginAtom()
EndAtom = mol.GetBondWithIdx(bondIdx).GetEndAtom()
if BeginAtom.GetAtomicNum() != 6 or EndAtom.GetAtomicNum() != 6:
Allcarb = False
break
if unsaturated == True and nonaromatic == True and Allcarb == True:
ringSize.append(len(ring))
for k,v in temp.items():
if len(ring) == k:
temp[k]+=1
if temp[3]>=2:
bits[4]=1;bits[11]=1
elif temp[3]==1:
bits[4]=1
else:
pass
if temp[4]>=2:
bits[18]=1;bits[25]=1
elif temp[4]==1:
bits[18]=1
else:
pass
if temp[5]>=5:
bits[32]=1;bits[39]=1;bits[46]=1;bits[53]=1;bits[60]=1
elif temp[5]==4:
bits[32]=1;bits[39]=1;bits[46]=1;bits[53]=1
elif temp[5]==3:
bits[32]=1;bits[39]=1;bits[46]=1
elif temp[5]==2:
bits[32]=1;bits[39]=1
elif temp[5]==1:
bits[32]=1
else:
pass
if temp[6]>=5:
bits[67]=1;bits[74]=1;bits[81]=1;bits[88]=1;bits[95]=1
elif temp[6]==4:
bits[67]=1;bits[74]=1;bits[81]=1;bits[88]=1
elif temp[6]==3:
bits[67]=1;bits[74]=1;bits[81]=1
elif temp[6]==2:
bits[67]=1;bits[74]=1
elif temp[6]==1:
bits[67]=1
else:
pass
if temp[7]>=2:
bits[102]=1;bits[109]=1
elif temp[7]==1:
bits[102]=1
else:
pass
if temp[8]>=2:
bits[116]=1;bits[123]=1
elif temp[8]==1:
bits[116]=1
else:
pass
if temp[9]>=1:
bits[130]=1;
else:
pass
if temp[10]>=1:
bits[137]=1;
else:
pass
return ringSize,bits
def func_6(self,mol,bits):
""" *Internal Use Only*
unsaturated non-aromatic nitrogen-containing
"""
ringSize=[]
AllRingsBond = mol.GetRingInfo().BondRings()
temp={3:0, 4:0, 5:0, 6:0, 7:0, 8:0, 9:0, 10:0}
for ring in AllRingsBond:
unsaturated = False
nonaromatic = True
ContainNitro = False
######### unsaturated
for bondIdx in ring:
if mol.GetBondWithIdx(bondIdx).GetBondType().name!='SINGLE':
unsaturated = True
break
######## non-aromatic
for bondIdx in ring:
if mol.GetBondWithIdx(bondIdx).GetBondType().name=='AROMATIC':
nonaromatic = False
break
######## nitrogen-containing
for bondIdx in ring:
BeginAtom = mol.GetBondWithIdx(bondIdx).GetBeginAtom()
EndAtom = mol.GetBondWithIdx(bondIdx).GetEndAtom()
if BeginAtom.GetAtomicNum() == 7 or EndAtom.GetAtomicNum() == 7:
ContainNitro = True
break
if unsaturated == True and nonaromatic == True and ContainNitro== True:
ringSize.append(len(ring))
for k,v in temp.items():
if len(ring) == k:
temp[k]+=1
if temp[3]>=2:
bits[5]=1;bits[12]=1
elif temp[3]==1:
bits[5]=1
else:
pass
if temp[4]>=2:
bits[19]=1;bits[26]=1
elif temp[4]==1:
bits[19]=1
else:
pass
if temp[5]>=5:
bits[33]=1;bits[40]=1;bits[47]=1;bits[54]=1;bits[61]=1
elif temp[5]==4:
bits[33]=1;bits[40]=1;bits[47]=1;bits[54]=1
elif temp[5]==3:
| |
# -*- coding: utf-8 -*-
"""
@file
@brief A few general questions about the Python language.
"""
import os
import io
import re
def entier_grande_taille():
"""
    .. faqref::
        :tag: python
        :title: What is the largest integer?
        Version 3 of the Python language removed the ``sys.maxint`` constant
        which used to define the largest integer (see
        `What's New In Python 3.0 <https://docs.python.org/3.1/whatsnew/3.0.html#integers>`_).
        As a result the function `getrandbit <https://docs.python.org/3/library/random.html#random.getrandbits>`_
        returns an integer as large as you like.
        ::
            import random,sys
            x = random.getrandbits(2048)
            print(type(x),x)
        Which prints ::
            <class 'int'> 2882159224557107513165483098383814837021447484558010147211921304219017212673656549681269862792029...
        Computations on real numbers are always done with eight bytes of precision.
        Beyond that, the `gmpy2 <http://gmpy2.readthedocs.org/en/latest/>`_ library must be used.
        This library is also recommended for large integers
        (between 20 and 40 digits). It is faster than the implementation
        of the Python language (see `Overview of gmpy2 <https://gmpy2.readthedocs.org/en/latest/overview.html>`_).
    .. faqref::
        :tag: python
        :title: Tabs or spaces?
        It is preferable not to use tabs and to replace them with spaces.
        When switching from one editor to another, spaces stay put, whereas tabs render visually wider or narrower.
        The essential thing is not to mix the two.
        In `SciTE <http://www.scintilla.org/SciTE.html>`_, go to the menu Options / Change Indentation Settings...
        Every editor has a similar option.
"""
pass
def difference_div():
"""
    .. faqref::
        :tag: python
        :title: What is the difference between / and // - division?
        The result of a division using the ``/`` operator is always a float:
        dividing the two integers ``1/2`` gives ``0.5``.
        The result of a division using the ``//`` operator is always an integer.
        It corresponds to the quotient of the division.
        .. runpython::
            :showcode:
            div1 = 1/2
            div2 = 4/2
            div3 = 1//2
            div4 = 1.0//2.0
            print(div1, div2, div3, div4) # prints 0.5 2.0 0 0.0
        The remainder of an integer division is obtained with the ``%`` operator.
        .. runpython::
            :showcode:
            print( 5 % 2 ) # prints 1
        This is only true for Python 3.x.
        In the 2.x versions, the ``/`` and ``//`` operators behaved differently
        (see `What's New In Python 3.0 <https://docs.python.org/3/whatsnew/3.0.html#integers>`_).
"""
div1 = 1 / 2
div2 = 4 / 2
div3 = 1 // 2
div4 = 1.0 // 2.0
return div1, div2, div3, div4
def python_path():
"""
    .. faqref::
        :tag: module
        :title: How to avoid sys.path.append... while developing a module?
        While developing a module,
        you do not want to install it. You do not want it to sit in the ``site-packages`` directory
        of the Python distribution, because that introduces two versions: the one being developed and the installed one.
        I used to do the following to create a small program using my own module
        (and some traces of it remain in my code):
        ::
            import sys
            sys.path.append("c:/moncode/monmodule/src")
            import monmodule
        Whenever I pick up a program that uses this module, I would have to add
        these little lines every time, which is tedious.
        To avoid this, you can tell the Python interpreter to look
        elsewhere for modules by adding the path to the
        `environment variable <http://fr.wikipedia.org/wiki/Variable_d'environnement>`_
        `PYTHONPATH <https://docs.python.org/3/using/cmdline.html#envvar-PYTHONPATH>`_.
        On Windows:
        ::
            set PYTHONPATH=%PYTHONPATH%;c:\\moncode\\monmodule\\src
"""
return os.environ.get("PYTHON_PATH", "")
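# On Linux/macOS the equivalent of the Windows command above would be (illustrative path):
#   export PYTHONPATH="$PYTHONPATH:/home/user/moncode/monmodule/src"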
def same_variable(a, b):
"""
    This function tells whether the two objects are actually the same object (True)
    or not (False) if they are distinct (even if they hold the same information).
    @param a any object
    @param b any object
    @return ``True`` or ``False``
    .. faqref::
        :tag: python
        :title: What is an immutable type?
        :lid: faq-py-immutable
        A variable of an *immutable* type cannot be modified. This mainly concerns:
        - ``int``, ``float``, ``str``, ``tuple``
        If a variable has an *immutable* type, performing an operation on it
        implicitly creates a copy of the object.
        Dictionaries and lists are *modifiable* (or *mutable*). For a variable
        of such a type, writing ``a = b`` makes ``a`` and ``b`` refer to the same object even
        though they are two different names. It is the same memory location
        reachable through two means (two identifiers).
        For example ::
            a = (2,3)
            b = a
            a += (4,5)
            print( a == b ) # --> False
            print(a,b) # --> (2, 3, 4, 5) (2, 3)
            a = [2,3]
            b = a
            a += [4,5]
            print( a == b ) # --> True
            print(a,b) # --> [2, 3, 4, 5] [2, 3, 4, 5]
        In the first case, the type (``tuple``) is _immutable_, so the ``+=`` operator implicitly hides a copy.
        In the second case, the type (``list``) is _mutable_, so the ``+=`` operator avoids the copy
        because the variable can be modified. Even though ``b=a`` is executed before the next statement,
        it does **not** preserve the state of ``a`` as it was before the element was added.
        Another example ::
            a = [1, 2]
            b = a
            a [0] = -1
            print(a) # --> [-1, 2]
            print(b) # --> [-1, 2]
        To copy a list, the copy must be requested explicitly ::
            a = [1, 2]
            b = list(a)
            a [0] = -1
            print(a) # --> [-1, 2]
            print(b) # --> [1, 2]
        The page `Immutable Sequence Types <https://docs.python.org/3/library/stdtypes.html?highlight=immutable#immutable-sequence-types>`_
        gives more detail about which types are *mutable* and which are *immutable*. Among the standard types:
        * **immutable**
            * `bool <https://docs.python.org/3/library/functions.html#bool>`_
            * `int <https://docs.python.org/3/library/functions.html#int>`_,
              `float <https://docs.python.org/3/library/functions.html#float>`_,
              `complex <https://docs.python.org/3/library/functions.html#complex>`_
            * `str <https://docs.python.org/3/library/functions.html#func-str>`_,
              `bytes <https://docs.python.org/3/library/functions.html#bytes>`_
            * `None <https://docs.python.org/3/library/constants.html?highlight=none#None>`_
            * `tuple <https://docs.python.org/3/library/functions.html#func-tuple>`_,
              `frozenset <https://docs.python.org/3/library/functions.html#func-frozenset>`_
        * **mutable**, by default every other type, including:
            * `list <https://docs.python.org/3/library/functions.html#func-list>`_
            * `dict <https://docs.python.org/3/library/functions.html#func-dict>`_
            * `set <https://docs.python.org/3/library/functions.html#func-set>`_
            * `bytearray <https://docs.python.org/3/library/functions.html#bytearray>`_
        A class instance is mutable. It can be made
        immutable with a few tricks:
        * `__slots__ <https://docs.python.org/3/reference/datamodel.html?highlight=_slots__#object.__slots__>`_
        * `How to Create Immutable Classes in Python
          <http://www.blog.pythonlibrary.org/2014/01/17/how-to-create-immutable-classes-in-python/>`_
        * `Ways to make a class immutable in Python <http://stackoverflow.com/questions/4996815/ways-to-make-a-class-immutable-in-python>`_
        * `freeze <https://freeze.readthedocs.org/en/latest/>`_
        Finally, for objects nested inside one another, such as a list of lists or a class
        containing dictionaries and lists, we distinguish a shallow copy from a full copy (**deepcopy**).
        For a list of lists, the shallow copy only copies the outer list ::
            import copy
            l1 = [ [0,1], [2,3] ]
            l2 = copy.copy(l1)
            l1 [0][0] = '##'
            print(l1,l2) # --> [['##', 1], [2, 3]] [['##', 1], [2, 3]]
            l1 [0] = [10,10]
            print(l1,l2) # --> [[10, 10], [2, 3]] [['##', 1], [2, 3]]
        The deep copy also copies the nested objects ::
            import copy
            l1 = [ [0,1], [2,3] ]
            l2 = copy.deepcopy(l1)
            l1 [0][0] = '##'
            print(l1,l2) # --> [['##', 1], [2, 3]] [[0, 1], [2, 3]]
        Both functions work on any Python object: `copy module <https://docs.python.org/3/library/copy.html>`_.
"""
return id(a) == id(b)
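# For example, same_variable([1, 2], [1, 2]) is False (two distinct lists holding equal data),
# whereas after b = a, same_variable(a, b) is True because both names point to the same object.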
def stringio(text):
"""
returns a StringIO object on a text
@param text any text
@return StringIO object
.. faqref::
:tag: python
        :title: What is a ``StringIO`` for?
        Most of the time, when you retrieve data, it sits on your computer's hard drive
        in a text file. When you want to automate a process
        that you often repeat with this file, you write a function that takes the file name as input.
        ::
            def processus_quotidien(nom_fichier) :
                # count the lines
                nb = 0
                with open(nom_fichier,"r") as f :
                    for line in f :
                        nb += 1
                return nb
        Then one day the data is no longer in a file but on the Internet.
        The simplest thing in that case is to copy the data to the hard drive and call the same function.
        Simple. Another day, the data to download is several gigabytes. Downloading everything takes
        time, only to discover in the end that it is corrupted. Several hours lost for nothing.
        You would have liked the ``processus_quotidien`` function to start processing the data
        as soon as the download begins.
        For that purpose, the notion of a **stream** (or **flux**) was invented; it acts as an interface between the function
        that processes the data and the data source. The
# repository: Tehsurfer/mapclientplugins.meshgeneratorstep
"""
Created on Aug 29, 2017
@author: <NAME>
"""
import types
from threeWrapper import BlackfynnGet
from PySide import QtGui, QtCore
from functools import partial
from mapclientplugins.meshgeneratorstep.model.fiducialmarkermodel import FIDUCIAL_MARKER_LABELS
from mapclientplugins.meshgeneratorstep.view.ui_meshgeneratorwidget import Ui_MeshGeneratorWidget
from mapclientplugins.meshgeneratorstep.model.blackfynnECGgraphics import EcgGraphics
from mapclientplugins.meshgeneratorstep.model.blackfynnMesh import Blackfynn_2d_plate
from opencmiss.zinc.node import Node
from opencmiss.utils.maths import vectorops
import time
# imports added for pop up graph
import pyqtgraph as pg
import numpy as np
class MeshGeneratorWidget(QtGui.QWidget):
def __init__(self, model, parent=None):
super(MeshGeneratorWidget, self).__init__(parent)
self._ui = Ui_MeshGeneratorWidget()
self._ui.setupUi(self)
self._model = model
self._model.registerTimeValueUpdateCallback(self._updateTimeValue)
self._model.registerFrameIndexUpdateCallback(self._updateFrameIndex)
self._generator_model = model.getGeneratorModel()
self._plane_model = model.getPlaneModel()
self._fiducial_marker_model = model.getFiducialMarkerModel()
self._ui.sceneviewer_widget.setContext(model.getContext())
self._ui.sceneviewer_widget.setModel(self._plane_model)
self._model.registerSceneChangeCallback(self._sceneChanged)
self._doneCallback = None
self._populateFiducialMarkersComboBox()
self._marker_mode_active = False
self._have_images = False
self.x = 0
self.y = 0
# self._populateAnnotationTree()
meshTypeNames = self._generator_model.getAllMeshTypeNames()
for meshTypeName in meshTypeNames:
self._ui.meshType_comboBox.addItem(meshTypeName)
self._makeConnections()
self._ui.sceneviewer_widget.foundNode = False
self._ecg_graphics = model.getEcgGraphics()
self.blackfynn = BlackfynnGet()
self.data = {}
self.blackfynn.loaded = False
self.y_scaled = 0
self.pw = None
self.time = 0
self._ui.sceneviewer_widget.grid = []
def _graphicsInitialized(self):
"""
Callback for when SceneviewerWidget is initialised
Set custom scene from model
"""
sceneviewer = self._ui.sceneviewer_widget.getSceneviewer()
if sceneviewer is not None:
self._model.loadSettings()
self._refreshOptions()
scene = self._model.getScene()
self._ui.sceneviewer_widget.setScene(scene)
# self._ui.sceneviewer_widget.setSelectModeAll()
sceneviewer.setLookatParametersNonSkew([2.0, -2.0, 1.0], [0.0, 0.0, 0.0], [0.0, 0.0, 1.0])
sceneviewer.setTransparencyMode(sceneviewer.TRANSPARENCY_MODE_SLOW)
self._autoPerturbLines()
self._viewAll()
def _sceneChanged(self):
sceneviewer = self._ui.sceneviewer_widget.getSceneviewer()
if sceneviewer is not None:
if self._have_images:
self._plane_model.setSceneviewer(sceneviewer)
scene = self._model.getScene()
self._ui.sceneviewer_widget.setScene(scene)
self._autoPerturbLines()
def _sceneAnimate(self):
sceneviewer = self._ui.sceneviewer_widget.getSceneviewer()
if sceneviewer is not None:
self._model.loadSettings()
scene = self._model.getScene()
self._ui.sceneviewer_widget.setScene(scene)
self._autoPerturbLines()
self._viewAll()
def _autoPerturbLines(self):
"""
Enable scene viewer perturb lines iff solid surfaces are drawn with lines.
Call whenever lines, surfaces or translucency changes
"""
sceneviewer = self._ui.sceneviewer_widget.getSceneviewer()
if sceneviewer is not None:
sceneviewer.setPerturbLinesFlag(self._generator_model.needPerturbLines())
def _makeConnections(self):
self._ui.sceneviewer_widget.graphicsInitialized.connect(self._graphicsInitialized)
self._ui.done_button.clicked.connect(self._doneButtonClicked)
self._ui.viewAll_button.clicked.connect(self._viewAll)
self._ui.meshType_comboBox.currentIndexChanged.connect(self._meshTypeChanged)
self._ui.deleteElementsRanges_lineEdit.returnPressed.connect(self._deleteElementRangesLineEditChanged)
self._ui.deleteElementsRanges_lineEdit.editingFinished.connect(self._deleteElementRangesLineEditChanged)
self._ui.scale_lineEdit.returnPressed.connect(self._scaleLineEditChanged)
self._ui.scale_lineEdit.editingFinished.connect(self._scaleLineEditChanged)
self._ui.displayAxes_checkBox.clicked.connect(self._displayAxesClicked)
self._ui.displayElementNumbers_checkBox.clicked.connect(self._displayElementNumbersClicked)
self._ui.displayLines_checkBox.clicked.connect(self._displayLinesClicked)
self._ui.displayNodeDerivatives_checkBox.clicked.connect(self._displayNodeDerivativesClicked)
self._ui.displayNodeNumbers_checkBox.clicked.connect(self._displayNodeNumbersClicked)
self._ui.displaySurfaces_checkBox.clicked.connect(self._displaySurfacesClicked)
self._ui.displaySurfacesExterior_checkBox.clicked.connect(self._displaySurfacesExteriorClicked)
self._ui.displaySurfacesTranslucent_checkBox.clicked.connect(self._displaySurfacesTranslucentClicked)
self._ui.displaySurfacesWireframe_checkBox.clicked.connect(self._displaySurfacesWireframeClicked)
self._ui.displayXiAxes_checkBox.clicked.connect(self._displayXiAxesClicked)
self._ui.activeModel_comboBox.currentIndexChanged.connect(self._activeModelChanged)
self._ui.toImage_pushButton.clicked.connect(self._imageButtonClicked)
self._ui.displayImagePlane_checkBox.clicked.connect(self._displayImagePlaneClicked)
self._ui.fixImagePlane_checkBox.clicked.connect(self._fixImagePlaneClicked)
self._ui.timeValue_doubleSpinBox.valueChanged.connect(self._timeValueChanged)
self._ui.timePlayStop_pushButton.clicked.connect(self._timePlayStopClicked)
self._ui.frameIndex_spinBox.valueChanged.connect(self._frameIndexValueChanged)
self._ui.framesPerSecond_spinBox.valueChanged.connect(self._framesPerSecondValueChanged)
self._ui.timeLoop_checkBox.clicked.connect(self._timeLoopClicked)
self._ui.displayFiducialMarkers_checkBox.clicked.connect(self._displayFiducialMarkersClicked)
self._ui.fiducialMarker_comboBox.currentIndexChanged.connect(self._fiducialMarkerChanged)
self._ui.submitButton.clicked.connect(self._submitClicked)
self._ui.displayEEGAnimation_checkBox.clicked.connect(self._EEGAnimationClicked)
self._ui.pushButton.clicked.connect(self._exportWebGLJson)
# self._ui.treeWidgetAnnotation.itemSelectionChanged.connect(self._annotationSelectionChanged)
# self._ui.treeWidgetAnnotation.itemChanged.connect(self._annotationItemChanged)
        # currently not able to loop these button connections (will have to do later)
self._ui.LG3.clicked.connect(self._lg3)
self._ui.LG4.clicked.connect(self._lg4)
self._ui.LG10.clicked.connect(self._lg10)
self._ui.LG3.setText('')
self._ui.LG3.setStyleSheet("background-color: rgba(255, 255, 255, 0);")
self._ui.LG4.setText('')
self._ui.LG4.setStyleSheet("background-color: rgba(255, 255, 255, 0);")
self._ui.LG10.setText('')
self._ui.LG10.setStyleSheet("background-color: rgba(255, 255, 255, 0);")
def _fiducialMarkerChanged(self):
self._fiducial_marker_model.setActiveMarker(self._ui.fiducialMarker_comboBox.currentText())
def _displayFiducialMarkersClicked(self):
self._fiducial_marker_model.setDisplayFiducialMarkers(self._ui.displayFiducialMarkers_checkBox.isChecked())
def _populateFiducialMarkersComboBox(self):
self._ui.fiducialMarker_comboBox.addItems(FIDUCIAL_MARKER_LABELS)
def _createFMAItem(self, parent, text, fma_id):
item = QtGui.QTreeWidgetItem(parent)
item.setText(0, text)
item.setData(0, QtCore.Qt.UserRole + 1, fma_id)
item.setCheckState(0, QtCore.Qt.Unchecked)
item.setFlags(QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsTristate)
return item
def _populateAnnotationTree(self):
tree = self._ui.treeWidgetAnnotation
tree.clear()
rsh_item = self._createFMAItem(tree, 'right side of heart', 'FMA_7165')
self._createFMAItem(rsh_item, 'ventricle', 'FMA_7098')
self._createFMAItem(rsh_item, 'atrium', 'FMA_7096')
self._createFMAItem(rsh_item, 'auricle', 'FMA_7218')
lsh_item = self._createFMAItem(tree, 'left side of heart', 'FMA_7166')
self._createFMAItem(lsh_item, 'ventricle', 'FMA_7101')
self._createFMAItem(lsh_item, 'atrium', 'FMA_7097')
self._createFMAItem(lsh_item, 'auricle', 'FMA_7219')
apex_item = self._createFMAItem(tree, 'apex of heart', 'FMA_7164')
vortex_item = self._createFMAItem(tree, 'vortex of heart', 'FMA_84628')
self._ui.treeWidgetAnnotation.addTopLevelItem(rsh_item)
self._ui.treeWidgetAnnotation.addTopLevelItem(lsh_item)
self._ui.treeWidgetAnnotation.addTopLevelItem(apex_item)
self._ui.treeWidgetAnnotation.addTopLevelItem(vortex_item)
def getModel(self):
return self._model
def registerDoneExecution(self, doneCallback):
self._doneCallback = doneCallback
def _updateUi(self):
if self._have_images:
frame_count = self._plane_model.getFrameCount()
self._ui.numFramesValue_label.setText("{0}".format(frame_count))
self._ui.frameIndex_spinBox.setMaximum(frame_count)
self._ui.timeValue_doubleSpinBox.setMaximum(frame_count / self._model.getFramesPerSecond())
else:
self._generator_model.disableAlignment()
self._plane_model.disableAlignment()
self._ui.alignment_groupBox.setVisible(False)
self._ui.fiducialMarkers_groupBox.setVisible(False)
self._ui.video_groupBox.setVisible(False)
self._ui.displayImagePlane_checkBox.setVisible(False)
self._ui.displayFiducialMarkers_checkBox.setVisible(False)
def setImageInfo(self, image_info):
self._plane_model.setImageInfo(image_info)
self._have_images = image_info is not None
self._updateUi()
def _doneButtonClicked(self):
self._ui.dockWidget.setFloating(False)
self._model.done()
self._model = None
self._doneCallback()
def _imageButtonClicked(self):
sceneviewer = self._ui.sceneviewer_widget.getSceneviewer()
normal, up, offset = self._plane_model.getPlaneInfo()
_, current_lookat_pos = sceneviewer.getLookatPosition()
_, current_eye_pos = sceneviewer.getEyePosition()
view_distance = vectorops.magnitude(vectorops.sub(current_eye_pos, current_lookat_pos))
eye_pos = vectorops.add(vectorops.mult(normal, view_distance), offset)
lookat_pos = offset
sceneviewer.setLookatParametersNonSkew(eye_pos, lookat_pos, up)
def _updateTimeValue(self, value):
self._ui.timeValue_doubleSpinBox.blockSignals(True)
frame_count = self._plane_model.getFrameCount()
max_time_value = frame_count / self._ui.framesPerSecond_spinBox.value()
self.time = self._model._current_time
if value > max_time_value:
self._ui.timeValue_doubleSpinBox.setValue(max_time_value)
self._timePlayStopClicked()
else:
self._ui.timeValue_doubleSpinBox.setValue(value)
if self.pw is not None:
self.line.setValue(round(value, 3)) # adjust time marker
if self._ui.displayEEGAnimation_checkBox.isChecked() and self.data is not False:
pass
# use model to update colours
#self.updateAllNodes(value)
#self.updatePlate(value)
self._ui.timeValue_doubleSpinBox.blockSignals(False)
def updateAllNodes(self, time):
colours_at_current_time = []
for key in self.data['scaled']:
colours_at_current_time.append(self.data['scaled'][key][self.currentFrame(time)])
self._ecg_graphics.updateEEGnodeColours(colours_at_current_time)
def updatePlate(self, time):
colours_at_current_time = []
for key in self.data['scaled']:
colours_at_current_time.append(self.data['scaled'][key][self.currentFrame(time)])
self.updatePlateColoursTemporary(colours_at_current_time)
def updatePlateColoursTemporary(self, values):
reg = self._generator_model._region.findChildByName('ecg_plane')
fm = reg.getFieldmodule()
fm.beginChange()
cache = fm.createFieldcache()
colour = fm.findFieldByName('colour2')
colour = colour.castFiniteElement()
nodeset = fm.findNodesetByName('nodes')
for i in range(10000, 10064):
node = nodeset.findNodeByIdentifier(i)
cache.setNode(node)
colour.setNodeParameters(cache, -1, Node.VALUE_LABEL_VALUE, 1, values[(i % (len(values)-1))])
fm.endChange()
def scaleCacheData(self):
tempDict = {}
for i, key in enumerate(self.data['cache']):
tempDict[str(i)] = self.scaleData(key)
self.data['scaled'] = tempDict
def scaleData(self, key):
numFrames = self._plane_model.getFrameCount()
y = np.array(self.data['cache'][key])
x = np.linspace(0, 16, len(y))
xterp = np.linspace(0, 16, numFrames)
yterp = np.interp(xterp, x, y)
return yterp
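# Hedged illustration (not part of the widget): scaleData() above simply resamples a
# cached trace onto the animation's frame grid with np.interp, assuming the recording
# spans 16 seconds. With hypothetical numbers:
#   y = [0.0, 1.0, 0.0, -1.0]        # 4 cached samples over 16 s
#   x = np.linspace(0, 16, 4)        # their time stamps
#   xterp = np.linspace(0, 16, 8)    # one time stamp per frame
#   np.interp(xterp, x, y)           # -> 8 interpolated values, one per frame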
def initialiseSpectrum(self, data):
maximum = -1000000
minimum = 1000000
for key in data['cache']:
array_max = max(data['cache'][key])
array_min = min(data['cache'][key])
maximum = max(array_max, maximum)
minimum = min(array_min, minimum)
scene = self._generator_model._region.findChildByName('ecg_plane').getScene()
specMod = scene.getSpectrummodule()
spectrum = specMod.findSpectrumByName('eegColourSpectrum2')
spectrum_component = spectrum.getFirstSpectrumcomponent()
spectrum_component.setRangeMaximum(maximum)
spectrum_component.setRangeMinimum(minimum)
def _EEGAnimationClicked(self):
if self.data and self._ecg_graphics.initialised is False:
self.scaleCacheData()
self._ecg_graphics.setRegion(self._generator_model._region)
# create our ecg graphics if we have defined a box
if len(self._ui.sceneviewer_widget.grid) >= 4:
self._ecg_graphics.plane_normal = self._ui.sceneviewer_widget.plane_normal
self._ecg_graphics.createGraphics(new_points=True,
point1=self._ui.sceneviewer_widget.grid[0],
point2=self._ui.sceneviewer_widget.grid[1],
point3=self._ui.sceneviewer_widget.grid[2],
point4=self._ui.sceneviewer_widget.grid[3])
self._ui.sceneviewer_widget.grid = []
else:
if self._ecg_graphics.settingsLoaded:
self._ecg_graphics.createGraphics()
else:
self._ecg_graphics.createGraphics(new_points=True)
self._ecg_graphics.initialiseSpectrum(self.data)
self._ecg_graphics.initialised = True
else:
self._ecg_graphics.clearAll()
self._ecg_graphics.__init__()
self._ecg_graphics.initialised = False
def currentFrame(self, value):
frame_count = self._plane_model.getFrameCount()
frame_vals = np.linspace(0, 16, frame_count)
currentFrame = (np.abs(frame_vals - value)).argmin()
return currentFrame
def find_nearest(array, value):
# find_nearest(): return the value in a sorted array that is closest to the given value
idx = np.searchsorted(array, value, side="left")
if idx > 0 and (idx == len(array) or math.fabs(value - array[idx - 1]) < math.fabs(value - array[idx])):
return array[idx - 1]
else:
return array[idx]
def _updateFrameIndex(self, value):
self._ui.frameIndex_spinBox.blockSignals(True)
self._ui.frameIndex_spinBox.setValue(value)
self._ui.frameIndex_spinBox.blockSignals(False)
def _timeValueChanged(self, value):
self._model.setTimeValue(value)
def _timeDurationChanged(self, value):
self._model.setTimeDuration(value)
def _timePlayStopClicked(self):
play_text = 'Play'
stop_text = 'Stop'
current_text = self._ui.timePlayStop_pushButton.text()
if current_text == play_text:
self._ui.timePlayStop_pushButton.setText(stop_text)
self._model.play()
else:
self._ui.timePlayStop_pushButton.setText(play_text)
self._model.stop()
def _timeLoopClicked(self):
self._model.setTimeLoop(self._ui.timeLoop_checkBox.isChecked())
def _frameIndexValueChanged(self, value):
self._model.setFrameIndex(value)
def _framesPerSecondValueChanged(self, value):
self._model.setFramesPerSecond(value)
self._ui.timeValue_doubleSpinBox.setMaximum(self._plane_model.getFrameCount()/value)
def _fixImagePlaneClicked(self):
self._plane_model.setImagePlaneFixed(self._ui.fixImagePlane_checkBox.isChecked())
def _submitClicked(self):
# _submitClicked initialises all of the Blackfynn functionality and updates the login fields.
if self._ui.api_key.displayText() != 'API Key' and self._ui.api_secret.text() != '***************************':
self.pw = pg.plot(title='Blackfynn electrode graph',
labels={'left': f'EEG value of node', 'bottom': 'time in seconds'})
self._ui.Login_groupBox.setTitle(QtGui.QApplication.translate("MeshGeneratorWidget", "Login details saved, click on a node to open graphs", None,
QtGui.QApplication.UnicodeUTF8))
self.initialiseBlackfynnData()
self._ui.api_secret.setText('***************************')
self.blackfynn.loaded = True
def initialiseBlackfynnData(self):
# self.blackfynn.api_key = self._ui.api_key.text() <- commented so that we do not have to enter key each time
# self.blackfynn.api_secret = self._ui.api_secret.text()
self.blackfynn.set_api_key_login()
self.blackfynn.set_params(channels='LG4', window_from_start=16) # need to add dataset selection
self.data = self.blackfynn.get()
self.updatePlot(4)
self.scaleCacheData()
self.initialiseSpectrum(self.data)
def updatePlot(self, key):
try:
self.data['cache'][f'LG{key}']
except KeyError:
print('ERROR: selected data could not be found')
self.pw.plot(title='Error in data collection')
return
self.pw.clear()
self.pw.plot(self.data['x'],
self.data['cache'][f'LG{key}'],
pen='b',
title=f'EEG values from {key} LG{key}',
)
self.pw.setTitle(f'EEG values from {key} (LG{key})')
self.line = self.pw.addLine(x=self.time,
pen='r') # show current time
# For linking each EEG node
def _lg3(self):
self.updatePlot(3)
def _lg4(self):
self.updatePlot(4)
def _lg10(self):
self.updatePlot(10)
def EEGSelectionDisplay(self, key):
# For selecting EEG (brain) points
print(f'key {key} clicked!')
if self.data:
self.pw.clear()
self.pw.plot(self.data['x'], self.data['cache'][f'LG{key}'], pen='b',
title=f'EEG values from {key} (LG{key})',
labels={'left': f'EEG value of node LG{key}', 'bottom': 'time in seconds'})
self.line = self.pw.addLine(x=self.time, pen='r') # show current time
def _displayImagePlaneClicked(self):
self._plane_model.setImagePlaneVisible(self._ui.displayImagePlane_checkBox.isChecked())
def _activeModelChanged(self, index):
if index == 0:
self._ui.sceneviewer_widget.setModel(self._plane_model)
else:
self._ui.sceneviewer_widget.setModel(self._generator_model)
def _meshTypeChanged(self, index):
meshTypeName = self._ui.meshType_comboBox.itemText(index)
self._generator_model.setMeshTypeByName(meshTypeName)
self._refreshMeshTypeOptions()
#self._ecg_graphics.createGraphics()
def _meshTypeOptionCheckBoxClicked(self, checkBox):
self._generator_model.setMeshTypeOption(checkBox.objectName(), checkBox.isChecked())
def _meshTypeOptionLineEditChanged(self, lineEdit):
self._generator_model.setMeshTypeOption(lineEdit.objectName(), lineEdit.text())
finalValue = self._generator_model.getMeshTypeOption(lineEdit.objectName())
lineEdit.setText(str(finalValue))
def _refreshMeshTypeOptions(self):
layout = self._ui.meshTypeOptions_frame.layout()
# remove all current mesh type widgets
while layout.count():
child = layout.takeAt(0)
if child.widget():
child.widget().deleteLater()
optionNames = self._generator_model.getMeshTypeOrderedOptionNames()
for key in optionNames:
value = self._generator_model.getMeshTypeOption(key)
# print('key ', key, ' value ', value)
if type(value) is bool:
checkBox = QtGui.QCheckBox(self._ui.meshTypeOptions_frame)
checkBox.setObjectName(key)
checkBox.setText(key)
checkBox.setChecked(value)
callback = partial(self._meshTypeOptionCheckBoxClicked, checkBox)
checkBox.clicked.connect(callback)
layout.addWidget(checkBox)
else:
label = QtGui.QLabel(self._ui.meshTypeOptions_frame)
label.setObjectName(key)
label.setText(key)
layout.addWidget(label)
lineEdit = QtGui.QLineEdit(self._ui.meshTypeOptions_frame)
lineEdit.setObjectName(key)
lineEdit.setText(str(value))
callback = partial(self._meshTypeOptionLineEditChanged, lineEdit)
lineEdit.returnPressed.connect(callback)
lineEdit.editingFinished.connect(callback)
layout.addWidget(lineEdit)
def _refreshOptions(self):
self._ui.identifier_label_2.setText('Identifier: ' + self._model.getIdentifier())
self._ui.deleteElementsRanges_lineEdit.setText(self._generator_model.getDeleteElementsRangesText())
self._ui.scale_lineEdit.setText(self._generator_model.getScaleText())
self._ui.displayAxes_checkBox.setChecked(self._generator_model.isDisplayAxes())
self._ui.displayElementNumbers_checkBox.setChecked(self._generator_model.isDisplayElementNumbers())
self._ui.displayLines_checkBox.setChecked(self._generator_model.isDisplayLines())
self._ui.displayNodeDerivatives_checkBox.setChecked(self._generator_model.isDisplayNodeDerivatives())
self._ui.displayNodeNumbers_checkBox.setChecked(self._generator_model.isDisplayNodeNumbers())
self._ui.displaySurfaces_checkBox.setChecked(self._generator_model.isDisplaySurfaces())
self._ui.displaySurfacesExterior_checkBox.setChecked(self._generator_model.isDisplaySurfacesExterior())
self._ui.displaySurfacesTranslucent_checkBox.setChecked(self._generator_model.isDisplaySurfacesTranslucent())
self._ui.displaySurfacesWireframe_checkBox.setChecked(self._generator_model.isDisplaySurfacesWireframe())
self._ui.displayXiAxes_checkBox.setChecked(self._generator_model.isDisplayXiAxes())
self._ui.displayImagePlane_checkBox.setChecked(self._plane_model.isDisplayImagePlane())
self._ui.displayFiducialMarkers_checkBox.setChecked(self._fiducial_marker_model.isDisplayFiducialMarkers())
self._ui.fixImagePlane_checkBox.setChecked(self._plane_model.isImagePlaneFixed())
self._ui.framesPerSecond_spinBox.setValue(self._model.getFramesPerSecond())
self._ui.timeLoop_checkBox.setChecked(self._model.isTimeLoop())
index = self._ui.meshType_comboBox.findText(self._generator_model.getMeshTypeName())
self._ui.meshType_comboBox.blockSignals(True)
self._ui.meshType_comboBox.setCurrentIndex(index)
self._ui.meshType_comboBox.blockSignals(False)
index = self._ui.fiducialMarker_comboBox.findText(self._fiducial_marker_model.getActiveMarker())
self._ui.fiducialMarker_comboBox.blockSignals(True)
self._ui.fiducialMarker_comboBox.setCurrentIndex(0 if index == -1 else index)
self._ui.fiducialMarker_comboBox.blockSignals(False)
self._refreshMeshTypeOptions()
def _deleteElementRangesLineEditChanged(self):
self._generator_model.setDeleteElementsRangesText(self._ui.deleteElementsRanges_lineEdit.text())
self._ui.deleteElementsRanges_lineEdit.setText(self._generator_model.getDeleteElementsRangesText())
def _scaleLineEditChanged(self):
self._generator_model.setScaleText(self._ui.scale_lineEdit.text())
self._ui.scale_lineEdit.setText(self._generator_model.getScaleText())
def _displayAxesClicked(self):
self._generator_model.setDisplayAxes(self._ui.displayAxes_checkBox.isChecked())
# for testing, we delete our ecg nodes and reload the entire mesh
self._ecg_graphics.deleteAll()
self._meshTypeChanged(9)
self._meshTypeChanged(10)
# prepare data
self.scaleCacheData()
self._ecg_graphics.initialiseSpectrum(self.data)
ECGmatrix = []
for key in self.data['cache']:
ECGmatrix.append(self.data['cache'][key][0::10])
for i in range(len(ECGmatrix)):
ECGmatrix[i].append(ECGmatrix[i][-1])
ECGtimes = np.linspace(0, 1, len(ECGmatrix[:][0]))
# clear all of the current mesh data by going to a mesh with nothing in it
self._generator_model.deleteAll()
# self._meshTypeChanged(3)
# create our new mesh with the Blackfynn_2d_plate class
pm = Blackfynn_2d_plate(self._generator_model._region, self._ecg_graphics.node_coordinate_list)
pm.ECGtimes = ECGtimes.tolist()
pm.ECGcoloursMatrix = ECGmatrix
pm.generateMesh()
pm.drawMesh()
def _exportWebGLJson(self):
'''
method='sidak', alpha=DEFAULT_ALPHA):
if method not in set(list(CORRECTIONS.keys()) + list(CORRECTIONS.values())):
raise ValueError('Correction method {!r} not supported'.format(method))
self.method = CORRECTIONS[method] if method in CORRECTIONS else method
self.alpha_orig = alpha
self.alpha_corrected = eval(self.method)(alpha, p_values)
self.ntests = len(p_values)
self.accept_hypothesis = [p < self.alpha_corrected for p in p_values]
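# Hedged sketch (not from the original module): the class above looks the correction
# name up in the CORRECTIONS mapping and then calls a module-level function of that
# name with (alpha, p_values) via eval(). The standard formulas such functions would
# implement look roughly like this; the real names and signatures may differ.
def _sidak_sketch(alpha, p_values):
    # Sidak: alpha_corrected = 1 - (1 - alpha) ** (1 / m) for m tests
    m = len(p_values)
    return 1.0 - (1.0 - alpha) ** (1.0 / m)
def _bonferroni_sketch(alpha, p_values):
    # Bonferroni: alpha_corrected = alpha / m for m tests
    return alpha / len(p_values)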
class EmpiricalCdf(object):
"""
Class that calculates the empirical cumulative distribution function for a
set of samples. Performs some additional caching for performance.
"""
def __init__(self, samples):
self.samples = samples
self._cdf = ECDF(samples)
@property
def samples_cdf(self):
"""
Return the cdf evaluated at those samples used to calculate the cdf
parameters.
"""
if not hasattr(self, '_samples_cdf'):
self._samples_cdf = self.evaluate(sorted(self.samples))
return self._samples_cdf
def __call__(self, values):
return self.evaluate(values)
def evaluate(self, values=None):
"""
Evaluate the cdf for a sequence of values
"""
if values is None:
values = self.samples
return self._cdf(values)
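def _empirical_cdf_sketch():
    # Hedged usage sketch (illustrative only, not part of the original API).
    # Assumes the module-level numpy import (np) used elsewhere in this file.
    samples = np.random.normal(size=1000)
    cdf = EmpiricalCdf(samples)
    print(cdf([0.0]))           # roughly 0.5 for a standard normal sample
    print(cdf.samples_cdf[:5])  # CDF evaluated at the five smallest samples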
class Samples(DescrStatsW):
"""
Class for holding samples and calculating various statistics on those
samples.
Parameters
----------
samples: array-like
the data set of sample values
"""
def __init__(self, observations, name=None):
self.name = name
observations = self._valid_observations(observations)
super(Samples, self).__init__(np.array(observations))
def _valid_observations(self, observations):
def valid(o):
if o is None:
return False
if np.isnan(o):
return False
return True
observations = list(filter(valid, observations))
if self.name:
name_string = "{!r}".format(self.name)
else:
name_string = ''
if not observations:
raise ValueError('All {} observations are nan or None'.format(name_string))
else:
return observations
def __repr__(self):
header = "Samples(name={!r})".format(self.name if self.name else None)
return """{}
Summary:
𝛮 : {}
𝝁 : {:1.4f}
𝝈² : {:1.4f}""".format(header, self.nobs, self.mean, self.var)
def permute(self):
return np.random.choice(self.data, int(self.nobs))
def sort(self):
if not hasattr(self, '_sorted'):
self._sorted = sorted(self.data)
return self._sorted
def percentiles(self, prct=[2.5, 25, 50, 75, 97.5]):
return np.percentile(self.data, prct)
@property
def cdf(self):
if not hasattr(self, '_cdf'):
self._cdf = EmpiricalCdf(self.data)
return self._cdf
def prob_greater_than(self, values):
"""
Return the probability of being larger than values under the empirical
CDF
"""
return 1.0 - self.cdf(np.asarray(values, dtype=float))
def ci(self, alpha=.05, alternative='two-sided'):
"""
Calculate the (1-alpha)-th confidence interval around the mean.
Assumes Gaussian approximation.
Returns
-------
ci : tuple (lo, hi)
the (1-alpha) % confidence interval around the mean estimate.
"""
return self.zconfint_mean(alpha, alternative)[:2]
def std_err(self, alpha=.05, alternative='two-sided'):
"""
Returns
-------
std_err : tuple (lo, hi)
the standard error interval around the mean estimate.
"""
_alpha = alpha / 2. if alternative == 'two-sided' else alpha
z = norm.ppf(1 - _alpha)
ci = z * (self.var / self.nobs) ** .5
return self.mean - ci, self.mean + ci
def hdi(self, alpha=.05):
"""
Calculate the highest central density interval that leaves `alpha`
probability remaining.
Parameters
----------
alpha: float in (0, 1)
1 - critical mass
Returns
-------
hdi: tuple (boundary_lower, boundary_upper)
The boundary of the highest density interval for the sample distribution
"""
credible_mass = 1 - alpha
try:
_hdi = highest_density_interval(self.data, credible_mass)
return (round(_hdi[0], 4), round(_hdi[1], 4))
except Exception as e:
logger.warn(e)
return (None, None)
def hist(self, ref_val=None, *hist_args, **hist_kwargs):
"""
Render histogram of the samples. Plot a vertical reference line, if
requested.
"""
from matplotlib import pyplot as plt
pl = plt.hist(self.data.astype(float), *hist_args, **hist_kwargs)
if ref_val is not None:
plt.axvline(ref_val, c='gray', linestyle='--', linewidth=2)
return pl
def plot_probability(self, *args, **kwargs):
"""
Evaluate and display the sample probability function.
"""
self.prob.plot(*args, **kwargs)
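def _samples_sketch():
    # Hedged usage sketch: basic descriptive statistics from the Samples class above.
    # Values are illustrative; None/NaN entries are filtered out by _valid_observations().
    s = Samples([1.2, None, 2.4, float('nan'), 3.1], name='demo')
    print(s.mean, s.var)     # weighted mean / variance from DescrStatsW
    print(s.ci(alpha=0.05))  # Gaussian-approximation confidence interval
    print(s.percentiles())   # defaults to [2.5, 25, 50, 75, 97.5]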
class MeanComparison(CompareMeans):
"""
Class for comparing the means of two sample distributions, provides a number
of helpful summary statistics about the comparison.
Parameters
----------
samples_a : Samples instance
Group a samples
samples_b : Samples instance
Group b samples
alpha : float in (0, 1)
The assumed Type I error
test_statistic: str
The name of the test statistic used.
't': for t-statistic (small sample size, N <= 30)
'z': for z-statistic (large samples size, N > 30)
hypothesis : str
Defines the assumed alternative hypothesis. Can be :
'larger'
'smaller'
'unequal' (i.e. two-tailed test)
"""
def __init__(self, samples_a, samples_b,
alpha=DEFAULT_ALPHA,
test_statistic='t',
hypothesis='larger'):
super(MeanComparison, self).__init__(samples_a, samples_b)
self.alpha = alpha
self.test_statistic = test_statistic
self.hypothesis = hypothesis
self.warnings = []
@property
def pooled_variance(self):
return ((self.d2.nobs - 1) * self.d2.var + (self.d1.nobs - 1) * self.d1.var) / (self.d2.nobs + self.d1.nobs - 2)
@property
def delta(self):
return self.d1.mean - self.d2.mean
@property
def delta_relative(self):
return (self.d1.mean - self.d2.mean) / np.abs(self.d2.mean)
@property
def effect_size(self):
return self.delta / np.sqrt(self.pooled_variance)
@property
def test_direction(self):
return self.hypothesis if self.hypothesis != 'unequal' else 'two-sided'
@property
def power(self):
"""
Statistical power (i.e. 1 - 𝜷) of the comparison
"""
ratio = self.d1.nobs / self.d2.nobs
f_stat = "{}t_ind_solve_power".format(self.test_statistic)
return eval(f_stat)(
effect_size=self.effect_size,
nobs1=self.d2.nobs,
alpha=self.alpha,
ratio=ratio,
alternative=self.test_direction
)
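def _mean_comparison_sketch():
    # Hedged usage sketch: comparing two groups of continuous observations with
    # MeanComparison above. Data are illustrative, and the power calculation assumes
    # the statsmodels *t_ind_solve_power helpers imported at the top of this module.
    a = Samples(np.random.normal(10.5, 2.0, size=200), name='variation')
    b = Samples(np.random.normal(10.0, 2.0, size=200), name='control')
    comp = MeanComparison(a, b, alpha=0.05, test_statistic='z', hypothesis='larger')
    print(comp.delta, comp.delta_relative)  # absolute and relative lift
    print(comp.effect_size)                 # standardized (Cohen's-d-style) effect
    print(comp.power)                       # power of the one-sided z test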
class ProportionComparison(MeanComparison):
"""
Class for comparing the proportions of two sample distributions, provides a number
of helpful summary statistics about the comparison. In order to use the
z-distribution, we assume normality of the proportions and thus, by proxy, adequate
sample sizes (i.e. > 30).
Parameters
----------
samples_a : Samples instance
Group a samples
samples_b : Samples instance
Group b samples
alpha : float in (0, 1)
The assumed Type I error
hypothesis : str
Defines the assumed alternative hypothesis. Can be :
'larger'
'smaller'
'unequal' (i.e. two-tailed test)
"""
def __init__(self, variance_assumption='pooled', *args, **kwargs):
super(ProportionComparison, self).__init__(test_statistic='z', *args, **kwargs)
nobs = min(self.d1.nobs, self.d2.nobs)
# to use Normal approx, must have large N
if nobs < 30:
warning = 'Normality assumption violated, at least 30 observations required. Smallest sample size is {}'.format(nobs)
logger.warn(warning)
self.warnings.append(warning)
self.variance_assumption = variance_assumption
@property
def pooled_variance(self):
if self.variance_assumption == 'pooled':
p1 = self.d1.mean
p2 = self.d2.mean
var1 = p1 * (1 - p1)
var2 = p2 * (1 - p2)
return ((self.d1.nobs - 1) * var1 + (self.d2.nobs - 1) * var2) / (self.d1.nobs + self.d2.nobs - 2)
else: # global variance
p = np.mean(np.r_[self.d1.data, self.d2.data])
return p * (1 - p)
def ztest(self):
prop_var = self.pooled_variance
n_1 = self.d1.nobs
s_1 = sum(self.d1.data)
n_2 = self.d2.nobs
s_2 = sum(self.d2.data)
return proportions_ztest(
[s_1, s_2], [n_1, n_2],
alternative=self.test_direction,
prop_var=prop_var
)
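def _proportion_comparison_sketch():
    # Hedged usage sketch: conversion-style 0/1 outcomes run through
    # ProportionComparison above. Rates are illustrative; proportions_ztest is
    # assumed to be imported at the top of this module.
    a = Samples(np.random.binomial(1, 0.12, size=500), name='variation')
    b = Samples(np.random.binomial(1, 0.10, size=500), name='control')
    comp = ProportionComparison(samples_a=a, samples_b=b, alpha=0.05,
                                hypothesis='larger')
    z_stat, p_value = comp.ztest()
    print(z_stat, p_value)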
class RateComparison(MeanComparison):
"""
Class for comparing the rates of two sample distributions, provides a number
of helpful summary statistics about the comparison. Uses the exact conditional
test based on binomial distribution, as described in Gu et al (2008)
Parameters
----------
samples_a : Samples instance
Group a samples
samples_b : Samples instance
Group b samples
alpha : float in (0, 1)
The assumed Type I error
hypothesis : str
Defines the assumed alternative hypothesis. Can be :
'larger'
'smaller'
'unequal' (i.e. two-tailed test)
References
----------
Gu, Ng, <NAME> 2008: Testing the Ratio of Two Poisson Rates,
Biometrical Journal 50 (2008) 2, 2008
"""
def __init__(self, null_ratio=1., *args, **kwargs):
super(RateComparison, self).__init__(test_statistic='W', *args, **kwargs)
self.null_ratio = null_ratio
@property
def rates_ratio(self):
"""
Return the comparison ratio of the null rates ratio and the observed
rates ratio.
"""
actual_ratio = float(self.d1.sum * self.d1.nobs) / float(self.d2.sum * self.d2.nobs)
return self.null_ratio / actual_ratio
@property
def delta(self):
"""
Delta is the ratio of the variation to the control rates
"""
return self.d1.mean / self.d2.mean
@property
def delta_relative(self):
return self.delta
def rates_test(self):
"""
Run the rates comparison hypothesis test. Uses the W5 statistic defined
in Gu et al., 2008
Returns
-------
W : float
The W5 statistic from Gu et al., 2008
p_value : float
The p-value associated with W
"""
X1, X2 = self.d2.sum, self.d1.sum
t1, t2 = self.d2.nobs, self.d1.nobs
d = float(t1) / t2
W = 2 * (np.sqrt(X2 + (3. / 8)) - np.sqrt((self.null_ratio / d) * (X1 + (3. / 8)))) / np.sqrt(1 + (self.null_ratio / d))
if self.hypothesis == 'larger':
p_val = 1 - norm.cdf(W)
elif self.hypothesis == 'smaller':
p_val = norm.cdf(W)
elif self.hypothesis == 'unequal':
p_val = 2 * (1 - norm.cdf(abs(W)))  # two-sided p-value for the 'unequal' hypothesis
return W, p_val
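# Hedged worked example for the W5 statistic above (illustrative numbers): with
# control counts X1 = 40 over t1 = 100 observations, variation counts X2 = 55 over
# t2 = 100 and null_ratio = 1, d = t1 / t2 = 1 and
#   W = 2 * (sqrt(55.375) - sqrt(40.375)) / sqrt(2) ~= 1.54,
# so hypothesis='larger' gives p_val = 1 - norm.cdf(1.54) ~= 0.06.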
@property
def effect_size(self):
"""
Effect size ranges from 0-1
"""
return 1 - self.rates_ratio
@property
def power(self):
"""
Return the statistical power of the current test. Follows the calculation
for the W5 statistic in Gu et al., 2008
"""
N2, t2 = self.d1.sum, self.d1.nobs
N1, t1 = self.d2.sum, self.d2.nobs
lambda_2, lambda_1 = np.abs(self.d1.mean), np.abs(self.d2.mean)
alternative_ratio = np.abs(lambda_2 / lambda_1)
z = norm.ppf(1 - self.alpha)
d = float(t1 * N1) / (t2 * N2)
A = np.abs(2. * (1. - np.sqrt(self.null_ratio / alternative_ratio)))
B = np.sqrt(lambda_1
of the IoTHub.
:param iothub_transport_provider: Transport protocol used to connect to IoTHub
:type iothub_transport_provider: IoTHubTransportProvider(Enum)
:param iothub_name: The IoT Hub name to which the device is connecting
:type iothub_name: str
:param iothub_suffix: The suffix part of the IoTHub uri (e.g., private.azure-devices-int.net).
:type iothub_suffix: str
:raises: IoTHubClientError if failed to create the transport
"""
pass
class IoTHubClient:
"""IoTHubClient instance is used to connect a device with an Azure IoTHub.
Users of the SDK should create an instance of this class using one of the
constructors provided and call member functions to communicate with IoTHub.
Note that all parameters used to create this instance
are saved as instance attributes.
"""
def __init__(self, connection_string, protocol):
"""Creates an IoTHubClient for communication with an existing
IoTHub using the specified connection string and protocol parameter.
:param connection_string: A connection string which encapsulates "device connect" permissions on an IoTHub
:type connection_string: str
:param protocol: Transport protocol used to connect to IoTHub
:type protocol: IoTHubTransportProvider(Enum)
:return IoTHubClient instance
:rtype: IoTHubClient class
:raises: IoTHubClientError if failed to create the client
"""
pass
def __init__(self, iothub_transport, iothub_config):
"""Creates an IoTHubClient for communication with an existing
IoTHub using the specified transport and configuration parameter.
This constructor is used for the shared transport scenario.
:param iothub_transport: Transport instance to share.
:type iothub_transport: IoTHubTransport class
:param iothub_config: Configuration containing connection parameters
:type iothub_config: IoTHubConfig class
:return IoTHubClient instance
:rtype: IoTHubClient class
:raises: IoTHubClientError if failed to create the client
"""
pass
def __init__(self, iothub_uri, device_id, security_type, protocol):
"""Creates an IoTHubClient for communication with an existing
IoTHub using the specified iothub_uri, device_id, security_type
and protocol parameter.
This constructor is used in the device provisioning scenario.
:param iothub_uri: IoTHub hostname uri (received in the registration process)
:type iothub_uri: str
:param device_id: Device ID (aka device name)
:type device_id: str
:param security_type: Authentication type used in provisioning scenario
:type security_type: IoTHubSecurityType(Enum)
:param protocol: Transport protocol used to connect to IoTHub
:type protocol: IoTHubTransportProvider(Enum)
:return IoTHubClient instance
:rtype: IoTHubClient class
:raises: IoTHubClientError if failed to create the client
"""
pass
@property
def protocol(self):
"""Getter for protocol attribute
:return: Transport protocol used by this class
:rtype: IoTHubTransportProvider(Enum)
"""
pass
def send_event_async(self, message, message_callback, user_context):
"""Asynchronous call to send the message to IoTHub.
:param message: IoTHubMessage
:type message: IoTHubMessage class
:param message_callback: Callable Python function
:type message_callback: f(IoTHubMessage, IoTHubMessageResult, any)
:param user_context: User specified context that will be provided to the callback
:type user_context: any
:raises: IoTHubClientError if the operation failed
"""
pass
def set_message_callback(self, message_callback, user_context):
"""Sets up a callback function to be invoked when the device client received a message from IoTHub.
:param message_callback: Callable Python function
:type message_callback: f(IoTHubMessage, any)
:param user_context: User specified context that will be provided to the callback
:type user_context: any
:raises: IoTHubClientError if the operation failed
"""
pass
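# Hedged usage sketch (illustrative; CONNECTION_STRING is a placeholder the caller
# must supply, and the callback signatures follow the docstrings above):
#
#   def message_callback(message, user_context):
#       print("received:", message.get_string())
#
#   def send_confirmation(message, result, user_context):
#       print("send result:", result)
#
#   client = IoTHubClient(CONNECTION_STRING, IoTHubTransportProvider.MQTT)
#   client.set_message_callback(message_callback, None)
#   client.send_event_async(IoTHubMessage("hello"), send_confirmation, None)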
def set_connection_status_callback(self, connection_status_callback, user_context):
"""Sets up a callback function to be invoked representing the status of the connection to IOTHub.
:param connection_status_callback: Callable Python function
:type connection_status_callback: f(IoTHubConnectionStatus, IoTHubClientConnectionStatusReason, any)
:param user_context: User specified context that will be provided to the callback
:type user_context: any
:raises: IoTHubClientError if the operation failed
"""
pass
def set_retry_policy(self, retry_policy, retry_timeout_limit_in_seconds):
"""Sets the retry policy to use to reconnect to IoT Hub when a connection drops.
:param retry_policy: The policy to use to reconnect to IoT Hub when a connection drops
:type retry_policy: IoTHubClientRetryPolicy(Enum)
:param retry_timeout_limit_in_seconds: Maximum amount of time(seconds) to attempt reconnection
:type retry_timeout_limit_in_seconds: int
:raises: IoTHubClientError if the operation failed
"""
pass
def get_retry_policy(self):
"""Gets the retry policy has been used to reconnect to IoT Hub when a connection drops.
:return: The policy and timout limit to use to reconnect to IoT Hub when a connection drops
:rtype: GetRetryPolicyReturnValue class
:raises: IoTHubClientError if the operation failed
"""
pass
def set_device_twin_callback(self, device_twin_callback, user_context):
"""Sets up a callback function to be invoked when the device client receives a twin state update.
:param device_twin_callback: Callable Python function
:type device_twin_callback: f(IoTHubTwinUpdateState, any, any)
:param user_context: User specified context that will be provided to the callback
:type user_context: any
:raises: IoTHubClientError if the operation failed
"""
pass
def send_reported_state(self, reported_state, size, reported_state_callback, user_context):
"""Sends a report of the device's properties and their current values to IoTHub.
:param reported_state: JSon string containing the device current state
:type reported_state: str
:param size: Length of the JSon string (len(str))
:type size: int
:param reported_state_callback: Callable Python function
:type reported_state_callback: f(int, any)
:param user_context: User specified context that will be provided to the callback
:type user_context: any
:raises: IoTHubClientError if the operation failed
"""
pass
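# Hedged sketch of reporting twin properties (the property names are placeholders):
#
#   reported = '{"temperatureSensor": {"status": "online"}}'
#   def reported_state_callback(status_code, user_context):
#       print("reported state sent, status:", status_code)
#   client.send_reported_state(reported, len(reported),
#                              reported_state_callback, None)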
def set_device_method_callback(self, device_method_callback, user_context):
"""Sets up a callback function for cloud to device method call.
:param device_method_callback: Callable Python function
:type device_method_callback: f(str, str, int, any, int, any)
:param user_context: User specified context that will be provided to the callback
:type user_context: any
:raises: IoTHubClientError if the operation failed
"""
pass
def set_device_method_callback_ex(self, inbound_device_method_callback):
"""Sets up a callback function for cloud to device async method call.
:param inbound_device_method_callback: Callable Python function
:type inbound_device_method_callback: f(str, str, int, any, any)
:raises: IoTHubClientError if the operation failed
"""
pass
def device_method_response(self, method_id, response, size, status_code):
"""Sends the response for cloud to device async method call.
:param method_id: Identification of the async method called by IoTHub
:type method_id: any
:param response: Payload of the response
:type response: str
:param size: Length of the response (len(str))
:type size: int
:param status_code: Status code reported to IoTHub
:type status_code: int
:raises: IoTHubClientError if the operation failed
"""
pass
def set_option(self, option_name, option):
"""Sets the given runtime configuration option.
The options that can be set via this API are:
- name: timeout
- value: long
The maximum time in milliseconds a communication is allowed to use.
This is only supported for the HTTP
protocol as of now. When the HTTP protocol uses CURL, the meaning of
the parameter is "total request time". When the HTTP protocol uses
winhttp, the meaning is the same as the dwSendTimeout and dwReceiveTimeout parameters of the
"https://msdn.microsoft.com/en-us/library/windows/desktop/aa384116(v=vs.85).aspx"
WinHttpSetTimeouts API.
- name: CURLOPT_LOW_SPEED_LIMIT
- value: long
Only available for HTTP protocol and only when CURL is used.
It has the same meaning as CURL's option with the same name.
- name: CURLOPT_LOW_SPEED_TIME
- value: long
Only available for HTTP protocol and only when CURL is used.
It has the same meaning as CURL's option with the same name.
- name: CURLOPT_FORBID_REUSE
- value: long
Only available for HTTP protocol and only when CURL is used.
It has the same meaning as CURL's option with the same name.
- name: CURLOPT_FRESH_CONNECT
- value: long
Only available for HTTP protocol and only when CURL is used.
It has the same meaning as CURL's option with the same name.
- name: CURLOPT_VERBOSE
- value: long
Only available for HTTP protocol and only when CURL is used.
It has the same meaning as CURL's option with the same name.
- name: messageTimeout
- value: long
The maximum time in milliseconds before a message times out.
The time starts at IoTHubClient_SendEventAsync. By default, messages do not expire.
- name: c2d_keep_alive_freq_secs
- value: long
The AMQP C2D keep alive interval in seconds.
After the connection established the client requests the server to set the
keep alive interval for given time.
If it is not set then the default 240 sec applies.
If it is set to zero the server will not send keep alive messages to the client.
:param option_name: Name of the option to set
:type option_name: str
:param option: Value of the option to set
:type option: any
"""
pass
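# Hedged example of the option list above (values are illustrative):
#   client.set_option("messageTimeout", 30000)          # milliseconds
#   client.set_option("c2d_keep_alive_freq_secs", 120)  # AMQP C2D keep-alive interval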
def get_send_status(self):
"""Returns the current sending status of the IoTHub device client.
:return: IoTHubClientStatus instance
:rtype: IoTHubClientStatus(Enum)
:raises: IoTHubClientError if the operation failed
"""
pass
def get_last_message_receive_time(self):
"""Returns the timestamp of the last message was received at the client.
:return: Timestamp of the last message received
:rtype: long
= os.path.join(folder, 'SocialCareByClassStackedBarChart.pdf')
pp = PdfPages(path)
pp.savefig(fig)
pp.close()
# Chart 26: informal social care received per recipient: population and by social class
fig, ax = plt.subplots()
p1, = ax.plot(output['year'], output['informalSocialCarePerRecipient'], linewidth = 3, label = 'Population')
p2, = ax.plot(output['year'], output['informalSocialCarePerRecipient_1'], label = 'Class I')
p3, = ax.plot(output['year'], output['informalSocialCarePerRecipient_2'], label = 'Class II')
p4, = ax.plot(output['year'], output['informalSocialCarePerRecipient_3'], label = 'Class III')
p5, = ax.plot(output['year'], output['informalSocialCarePerRecipient_4'], label = 'Class IV')
p6, = ax.plot(output['year'], output['informalSocialCarePerRecipient_5'], label = 'Class V')
ax.set_xlim(left = self.p['statsCollectFrom'])
ax.set_ylabel('Hours per week')
# ax.set_xlabel('Year')
handles, labels = ax.get_legend_handles_labels()
ax.legend(loc = 'upper left')
ax.set_title('Informal Social Care Per Recipient')
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
plt.xlim(self.p['statsCollectFrom'], self.p['endYear'])
plt.xticks(range(self.p['statsCollectFrom'], self.p['endYear']+1, 10))
fig.tight_layout()
path = os.path.join(folder, 'informalSocialCarePerRecipientChart.pdf')
pp = PdfPages(path)
pp.savefig(fig)
pp.close()
# Chart 27: formal care per recipient: population and by class
fig, ax = plt.subplots()
p1, = ax.plot(output['year'], output['formalSocialCarePerRecipient'], linewidth = 3, label = 'Population')
p2, = ax.plot(output['year'], output['formalSocialCarePerRecipient_1'], label = 'Class I')
p3, = ax.plot(output['year'], output['formalSocialCarePerRecipient_2'], label = 'Class II')
p4, = ax.plot(output['year'], output['formalSocialCarePerRecipient_3'], label = 'Class III')
p5, = ax.plot(output['year'], output['formalSocialCarePerRecipient_4'], label = 'Class IV')
p6, = ax.plot(output['year'], output['formalSocialCarePerRecipient_5'], label = 'Class V')
ax.set_xlim(left = self.p['statsCollectFrom'])
ax.set_ylabel('Hours per week')
# ax.set_xlabel('Year')
handles, labels = ax.get_legend_handles_labels()
ax.legend(loc = 'upper left')
ax.set_title('Formal Social Care Per Recipient')
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
plt.xlim(self.p['statsCollectFrom'], self.p['endYear'])
plt.xticks(range(self.p['statsCollectFrom'], self.p['endYear']+1, 10))
fig.tight_layout()
path = os.path.join(folder, 'formalSocialCarePerRecipientChart.pdf')
pp = PdfPages(path)
pp.savefig(fig)
pp.close()
# Chart 28: unmet care need per recipient: population and by class
fig, ax = plt.subplots()
p1, = ax.plot(output['year'], output['unmetSocialCarePerRecipient'], linewidth = 3, label = 'Population')
p2, = ax.plot(output['year'], output['unmetSocialCarePerRecipient_1'], label = 'Class I')
p3, = ax.plot(output['year'], output['unmetSocialCarePerRecipient_2'], label = 'Class II')
p4, = ax.plot(output['year'], output['unmetSocialCarePerRecipient_3'], label = 'Class III')
p5, = ax.plot(output['year'], output['unmetSocialCarePerRecipient_4'], label = 'Class IV')
p6, = ax.plot(output['year'], output['unmetSocialCarePerRecipient_5'], label = 'Class V')
ax.set_xlim(left = self.p['statsCollectFrom'])
ax.set_ylabel('Hours per week')
# ax.set_xlabel('Year')
handles, labels = ax.get_legend_handles_labels()
ax.legend(loc = 'upper left')
ax.set_title('Unmet Social Care Need Per Recipient')
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
plt.xlim(self.p['statsCollectFrom'], self.p['endYear'])
plt.xticks(range(self.p['statsCollectFrom'], self.p['endYear']+1, 10))
fig.tight_layout()
path = os.path.join(folder, 'UnmetSocialCarePerRecipientChart.pdf')
pp = PdfPages(path)
pp.savefig(fig)
pp.close()
# Chart 29: informal and formal care and unmet care need
fig, ax = plt.subplots()
p1, = ax.plot(output['year'], output['informalSocialCarePerRecipient'], linewidth = 3, label = 'Informal Care')
p2, = ax.plot(output['year'], output['formalSocialCarePerRecipient'], linewidth = 3, label = 'Formal Care')
p3, = ax.plot(output['year'], output['unmetSocialCarePerRecipient'], linewidth = 3, label = 'Unmet Care')
ax.set_xlim(left = self.p['statsCollectFrom'])
ax.set_ylabel('Hours per week')
# ax.set_xlabel('Year')
handles, labels = ax.get_legend_handles_labels()
ax.legend(loc = 'lower left')
ax.set_title('Delivered and Unmet Care Per Recipient')
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
plt.xlim(self.p['statsCollectFrom'], self.p['endYear'])
plt.xticks(range(self.p['statsCollectFrom'], self.p['endYear']+1, 10))
fig.tight_layout()
path = os.path.join(folder, 'Delivered_UnmetSocialCarePerRecipientChart.pdf')
pp = PdfPages(path)
pp.savefig(fig)
pp.close()
# Chart 30
n_groups = int(self.p['numberClasses'])
meanInformalCareReceived_1 = np.mean(output['informalSocialCarePerRecipient_1'][-20:])
meanFormalCareReceived_1 = np.mean(output['formalSocialCarePerRecipient_1'][-20:])
meanUnmetNeed_1 = np.mean(output['unmetSocialCarePerRecipient_1'][-20:])
meanInformalCareReceived_2 = np.mean(output['informalSocialCarePerRecipient_2'][-20:])
meanFormalCareReceived_2 = np.mean(output['formalSocialCarePerRecipient_2'][-20:])
meanUnmetNeed_2 = np.mean(output['unmetSocialCarePerRecipient_2'][-20:])
meanInformalCareReceived_3 = np.mean(output['informalSocialCarePerRecipient_3'][-20:])
meanFormalCareReceived_3 = np.mean(output['formalSocialCarePerRecipient_3'][-20:])
meanUnmetNeed_3 = np.mean(output['unmetSocialCarePerRecipient_3'][-20:])
meanInformalCareReceived_4 = np.mean(output['informalSocialCarePerRecipient_4'][-20:])
meanFormalCareReceived_4 = np.mean(output['formalSocialCarePerRecipient_4'][-20:])
meanUnmetNeed_4 = np.mean(output['unmetSocialCarePerRecipient_4'][-20:])
meanInformalCareReceived_5 = np.mean(output['informalSocialCarePerRecipient_5'][-20:])
meanFormalCareReceived_5 = np.mean(output['formalSocialCarePerRecipient_5'][-20:])
meanUnmetNeed_5 = np.mean(output['unmetSocialCarePerRecipient_5'][-20:])
informalCare = (meanInformalCareReceived_1, meanInformalCareReceived_2, meanInformalCareReceived_3,
meanInformalCareReceived_4, meanInformalCareReceived_5)
formalCare = (meanFormalCareReceived_1, meanFormalCareReceived_2, meanFormalCareReceived_3,
meanFormalCareReceived_4, meanFormalCareReceived_5)
sumInformalFormalCare = [x + y for x, y in zip(informalCare, formalCare)]
unmetNeeds = (meanUnmetNeed_1, meanUnmetNeed_2, meanUnmetNeed_3, meanUnmetNeed_4, meanUnmetNeed_5)
ind = np.arange(n_groups) # the x locations for the groups
width = 0.4 # the width of the bars: can also be len(x) sequence
fig, ax = plt.subplots()
p1 = ax.bar(ind, informalCare, width, label = 'Informal Care')
p2 = ax.bar(ind, formalCare, width, bottom = informalCare, label = 'Formal Care')
p3 = ax.bar(ind, unmetNeeds, width, bottom = sumInformalFormalCare, label = 'Unmet Care Needs')
ax.set_ylabel('Hours per week')
ax.set_xticks(ind)
plt.xticks(ind, ('I', 'II', 'III', 'IV', 'V'))
handles, labels = ax.get_legend_handles_labels()
ax.legend(loc = 'lower left')
ax.set_title('Informal, Formal and Unmet Social Care Need per Recipient')
fig.tight_layout()
path = os.path.join(folder, 'SocialCarePerRecipientByClassStackedBarChart.pdf')
pp = PdfPages(path)
pp.savefig(fig)
pp.close()
# Chart 31: informal and formal child care received and unmet child care needs by social class (mean of last 20 years)
n_groups = int(self.p['numberClasses'])
meanInformalCareReceived_1 = np.mean(output['informalChildCareReceived_1'][-20:])
meanFormalCareReceived_1 = np.mean(output['formalChildCareReceived_1'][-20:])
meanUnmetNeed_1 = np.mean(output['unmetChildCareNeed_1'][-20:])
meanInformalCareReceived_2 = np.mean(output['informalChildCareReceived_2'][-20:])
meanFormalCareReceived_2 = np.mean(output['formalChildCareReceived_2'][-20:])
meanUnmetNeed_2 = np.mean(output['unmetChildCareNeed_2'][-20:])
meanInformalCareReceived_3 = np.mean(output['informalChildCareReceived_3'][-20:])
meanFormalCareReceived_3 = np.mean(output['formalChildCareReceived_3'][-20:])
meanUnmetNeed_3 = np.mean(output['unmetChildCareNeed_3'][-20:])
meanInformalCareReceived_4 = np.mean(output['informalChildCareReceived_4'][-20:])
meanFormalCareReceived_4 = np.mean(output['formalChildCareReceived_4'][-20:])
meanUnmetNeed_4 = np.mean(output['unmetChildCareNeed_4'][-20:])
meanInformalCareReceived_5 = np.mean(output['informalChildCareReceived_5'][-20:])
meanFormalCareReceived_5 = np.mean(output['formalChildCareReceived_5'][-20:])
meanUnmetNeed_5 = np.mean(output['unmetChildCareNeed_5'][-20:])
informalCare = (meanInformalCareReceived_1, meanInformalCareReceived_2, meanInformalCareReceived_3,
meanInformalCareReceived_4, meanInformalCareReceived_5)
formalCare = (meanFormalCareReceived_1, meanFormalCareReceived_2, meanFormalCareReceived_3,
meanFormalCareReceived_4, meanFormalCareReceived_5)
sumInformalFormalCare = [x + y for x, y in zip(informalCare, formalCare)]
totCare = [sum(x) for x in zip(informalCare, formalCare)]
unmetNeeds = (meanUnmetNeed_1, meanUnmetNeed_2, meanUnmetNeed_3, meanUnmetNeed_4, meanUnmetNeed_5)
ind = np.arange(n_groups) # the x locations for the groups
width = 0.4 # the width of the bars: can also be len(x) sequence
fig, ax = plt.subplots()
p1 = ax.bar(ind, informalCare, width, label = 'Informal Care')
p2 = ax.bar(ind, formalCare, width, bottom = informalCare, label = 'Formal Care')
p3 = ax.bar(ind, unmetNeeds, width, bottom = sumInformalFormalCare, label = 'Unmet Care Needs')
ax.set_ylabel('Hours per week')
ax.set_ylim([0, max(totCare)*1.1])
ax.set_xticks(ind)
plt.xticks(ind, ('I', 'II', 'III', 'IV', 'V'))
handles, labels = ax.get_legend_handles_labels()
ax.legend(loc = 'lower left')
ax.set_title('Informal, Formal and Unmet Child Care Need by Class')
fig.tight_layout()
path = os.path.join(folder, 'ChildCareByClassStackedBarChart.pdf')
pp = PdfPages(path)
pp.savefig(fig)
pp.close()
# Chart 32: informal child care received per recipient: population and by class
### Add the three charts for the child care
fig, ax = plt.subplots()
p1, = ax.plot(output['year'], output['informalChildCarePerRecipient'], linewidth = 3, label = 'Population')
p2, = ax.plot(output['year'], output['informalChildCarePerRecipient_1'], label = 'Class I')
p3, = ax.plot(output['year'], output['informalChildCarePerRecipient_2'], label = 'Class II')
p4, = ax.plot(output['year'], output['informalChildCarePerRecipient_3'], label = 'Class III')
p5, = ax.plot(output['year'], output['informalChildCarePerRecipient_4'], label = 'Class IV')
p6, = ax.plot(output['year'], output['informalChildCarePerRecipient_5'], label = 'Class V')
ax.set_xlim(left = self.p['statsCollectFrom'])
ax.set_ylabel('Hours per week')
# ax.set_xlabel('Year')
handles, labels = ax.get_legend_handles_labels()
ax.legend(loc = 'upper left')
ax.set_title('Informal Child Care Per Recipient')
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
plt.xlim(self.p['statsCollectFrom'], self.p['endYear'])
plt.xticks(range(self.p['statsCollectFrom'], self.p['endYear']+1, 10))
fig.tight_layout()
path = os.path.join(folder, 'informalChildCarePerRecipientChart.pdf')
pp = PdfPages(path)
pp.savefig(fig)
pp.close()
# Chart 33: formal care per recipient: population and by class
fig, ax = plt.subplots()
p1, = ax.plot(output['year'], output['formalChildCarePerRecipient'], linewidth = 3, label = 'Population')
p2, = ax.plot(output['year'], output['formalChildCarePerRecipient_1'], label = 'Class I')
p3, = ax.plot(output['year'], output['formalChildCarePerRecipient_2'], label = 'Class II')
p4, = ax.plot(output['year'], output['formalChildCarePerRecipient_3'], label = 'Class III')
p5, = ax.plot(output['year'], output['formalChildCarePerRecipient_4'], label = 'Class IV')
p6, = ax.plot(output['year'], output['formalChildCarePerRecipient_5'], label = 'Class V')
ax.set_xlim(left = self.p['statsCollectFrom'])
ax.set_ylabel('Hours per week')
# ax.set_xlabel('Year')
handles, labels = ax.get_legend_handles_labels()
ax.legend(loc = 'lower left')
ax.set_title('Formal Child Care Per Recipient')
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
plt.xlim(self.p['statsCollectFrom'], self.p['endYear'])
plt.xticks(range(self.p['statsCollectFrom'], self.p['endYear']+1, 10))
fig.tight_layout()
path = os.path.join(folder, 'formalChildCarePerRecipientChart.pdf')
pp = PdfPages(path)
pp.savefig(fig)
pp.close()
# Chart 34: Average Supply by Class (from 1960 to 2020)
fig, ax = plt.subplots()
p1, = ax.plot(output['year'], output['carePerRecipient'], linewidth = 3, label = 'Population')
p2, = ax.plot(output['year'], output['carePerRecipient_1'], label = 'Class I')
p3, = ax.plot(output['year'], output['carePerRecipient_2'], label = 'Class II')
p4, = ax.plot(output['year'], output['carePerRecipient_3'], label = 'Class III')
p5, = ax.plot(output['year'], output['carePerRecipient_4'], label = 'Class IV')
p6, = ax.plot(output['year'], output['carePerRecipient_5'], label = 'Class V')
maxValues = [max(output['carePerRecipient']), max(output['carePerRecipient_1']), max(output['carePerRecipient_2']), max(output['carePerRecipient_3']), max(output['carePerRecipient_4']), max(output['carePerRecipient_5'])]
maxValue = max(maxValues)
ax.set_xlim(left = self.p['statsCollectFrom'])
ax.set_ylim([0, maxValue*2.0])
ax.set_ylabel('Hours per week')
# ax.set_xlabel('Year')
handles, labels = ax.get_legend_handles_labels()
ax.legend(loc = 'upper left')
ax.set_title('Average Hours of Care By Class')
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
ax.set_ylim([0, 60])
plt.xlim(self.p['statsCollectFrom'], self.p['endYear'])
plt.xticks(range(self.p['statsCollectFrom'], self.p['endYear']+1, 10))
fig.tight_layout()
path = os.path.join(folder, 'CarePerRecipientChart.pdf')
pp = PdfPages(path)
pp.savefig(fig)
pp.close()
# Chart 35: unmet care need per recipient: population and by class
fig, ax = plt.subplots()
p1, = ax.plot(output['year'], output['unmetChildCarePerRecipient'], linewidth = 3, label = 'Population')
p2, = ax.plot(output['year'], output['unmetChildCarePerRecipient_1'], label = 'Class I')
p3, = ax.plot(output['year'], output['unmetChildCarePerRecipient_2'], label = 'Class II')
p4, = ax.plot(output['year'], output['unmetChildCarePerRecipient_3'], label = 'Class III')
p5, = ax.plot(output['year'], output['unmetChildCarePerRecipient_4'], label = 'Class IV')
p6, = ax.plot(output['year'], output['unmetChildCarePerRecipient_5'], label = 'Class V')
ax.set_xlim(left = self.p['statsCollectFrom'])
ax.set_ylabel('Hours per week')
# ax.set_xlabel('Year')
handles, labels = ax.get_legend_handles_labels()
ax.legend(loc = 'lower left')
ax.set_title('Unmet Child Care Need Per Recipient')
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
plt.xlim(self.p['statsCollectFrom'], self.p['endYear'])
plt.xticks(range(self.p['statsCollectFrom'], self.p['endYear']+1, 10))
fig.tight_layout()
path = os.path.join(folder, 'UnmetChildCarePerRecipientChart.pdf')
pp = PdfPages(path)
pp.savefig(fig)
pp.close()
# Chart 36: informal and formal care and unmet care
:rtype: :class:`bool`
"""
return (rack_position.row_index < (self.number_rows) and
rack_position.column_index < (self.number_columns))
def __len__(self):
"""
The number of positions for this rack shape.
"""
return self.number_rows * self.number_columns
def __eq__(self, other):
"""
Equality is based on the name attribute.
"""
return (isinstance(other, RackShape) and self.name == other.name)
def __str__(self):
return self.name
def __repr__(self):
str_format = '<%s name: %s, label: %s, number_rows: %s, ' \
'number_columns: %s>'
params = (self.__class__.__name__, self.name, self.label,
self.number_rows, self.number_columns)
return str_format % params
class RackShapeFactory(object):
@classmethod
def shape_from_rows_columns(cls, number_rows, number_columns):
"""
Return a rack shape (:class:`RackShape`) from
a row and column number. At this, it will first search its
internal cache for a shape matching these both criteria.
If there is no matching rack shape in the cache, the function
will create one.
There is an alias for this function called
:func:`rack_shape_from_rows_columns`
:param number_rows: The number of rows.
:type number_rows: :class:`int`
:param number_columns: The number of columns.
:type number_columns: :class:`int`
:return: the wanted rack shape
:rtype: :class:`RackShape`
"""
name = "%sx%s" % (number_rows, number_columns)
return RackShape(name, name, number_rows, number_columns)
# Define standard rack shape access function.
#: An alias for
#: :func:`RackShapeFactory.shape_from_rows_columns`
rack_shape_from_rows_columns = RackShapeFactory.shape_from_rows_columns
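# Hedged usage sketch: building the standard 96-well footprint with the factory
# alias above.
#   shape = rack_shape_from_rows_columns(8, 12)   # name and label become "8x12"
#   len(shape)                                    # 96 positions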
class RackSpecs(Entity):
"""
Abstract class for all rack specifications (rack types).
"""
#: The name of the rack specification, similar to the :attr:`label`.
name = None
#: A more human-readable label, similar to :attr:`name`.
label = None
#: The dimensions of this rack (:class:`RackShape`).
shape = None
#: Defines whether this rack type has movable subitems (*True* for
#: :class:`TubeRack` instances, *False* for
#: :class:`Plate` instances).
has_tubes = None
#: The manufacturer of this type of racks
#: (:class:`thelma.entities.organization.Organization`).
manufacturer = None
# FIXME: number_rows + number_columns are redundant # pylint:disable=W0511
#: The number of rows of these rack specs.
number_rows = None
#: The number of columns of these rack specs.
number_columns = None
def __init__(self, label, shape,
name=None, manufacturer=None, has_tubes=None, **kw):
if self.__class__ is RackSpecs:
raise NotImplementedError('Abstract class')
Entity.__init__(self, **kw)
self.label = label
self.shape = shape
if name is None:
# FIXME: ensure uniqueness ?! # pylint:disable=W0511
name = label.replace(' ', '').upper()[:32]
self.name = name
self.manufacturer = manufacturer
# FIXME: has_tubes should be readonly # pylint: disable=W0511
self.has_tubes = has_tubes
# FIXME: this is redundant - fix at DB level. # pylint: disable=W0511
self.number_rows = shape.number_rows
self.number_columns = shape.number_columns
@property
def slug(self):
#: For instances of this class, the slug is derived from the
#: :attr:`name`.
return slug_from_string(self.name)
def __str__(self):
return self.name
def __repr__(self):
str_format = '<%s id: %s, name: %s, label: %s, rack_shape: %s, ' \
'has_moveable_subitems: %s, manufacturer: %s>'
params = (self.__class__.__name__, self.id, self.name, self.label,
self.shape, self.has_tubes, self.manufacturer)
return str_format % params
def create_rack(self, label, status, comment=''):
raise NotImplementedError('abstract method')
class TubeRackSpecs(RackSpecs):
"""
This class defines tube rack specifications (tube rack types).
"""
#: List of compatible tube (container) specs for this tube rack specs.
tube_specs = None
def __init__(self, label, shape, tube_specs=None, **kw):
kw['has_tubes'] = True
RackSpecs.__init__(self, label, shape, **kw)
if tube_specs is None:
tube_specs = []
self.tube_specs = tube_specs
def create_rack(self, label, status, **kw):
return TubeRack(label, self, status, **kw)
class PlateSpecs(RackSpecs):
"""
This class defines plate specifications (plate types).
"""
#: The well (container) specs for this plate specs.
well_specs = None
def __init__(self, label, shape, well_specs, **kw):
kw['has_tubes'] = False
RackSpecs.__init__(self, label, shape, **kw)
self.well_specs = well_specs
def create_rack(self, label, status, **kw):
plate = Plate(label, self, status, **kw)
return plate
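# Hedged sketch: a PlateSpecs instance acts as a factory for Plate entities.
# well_specs and status below are placeholders supplied by the rest of the entity model.
#   specs = PlateSpecs('96-well standard', rack_shape_from_rows_columns(8, 12),
#                      well_specs)
#   plate = specs.create_rack('my plate', status)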
class RackPosition(Entity):
"""
This class defines position on a rack.
It is a **value object** and row and column indices **must** remain
immutable.
See http://devlicio.us/blogs/casey/archive/2009/02/13/ddd-entities-and-value-objects.aspx
RackPosition object can easily be obtained using the
:class:`RackPositionFactory`.
"""
#: The label of this rack position, i.e. a combination of letters
#: (row) and numbers (column).
_label = None
#: The index of the row.
_row_index = None
#: The index of the column.
_column_index = None
def __init__(self, row_index, column_index, label, **kw):
"""
This constructor should not be used. Load the rack positions from the
DB instead by means of one of fetcher methods (:func:`from_label`,
:func:`from_row_index_column_index` or :func:`from_row_column`).
"""
Entity.__init__(self, **kw)
self._label = label
self._row_index = row_index
self._column_index = column_index
@property
def slug(self):
#: The slug of a rack position is its label.
return self.label.lower()
@property
def label(self):
"""
The label of this rack position, i.e. a combination of letters (row)
and numbers (column).
"""
return str(self._label)
@property
def row_index(self):
"""
The index of the row.
"""
return self._row_index
@property
def column_index(self):
"""
The index of the column.
"""
return self._column_index
@classmethod
def from_label(cls, label):
"""
Returns a new RackPosition instance from the rack position label.
:Note: Try not to use this method if you can avoid it as it takes
a lot of time if you try to fetch a larger number
of rack positions one by one. Use the rack position cache in the
:mod:`semiconstants` module instead, if possible.
:param label: a set of characters from a-z (or A-Z) which
signifies a row followed by a number
signifying the column
:type label: :class:`string`
:return: The wanted rack position.
:rtype: :class:`RackPosition`
"""
agg = get_root_aggregate(IRackPosition)
return agg.get_by_slug(label.lower())
@classmethod
def from_indices(cls, row_index, column_index):
"""
Returns a RackPosition from the row index and column index.
:Note: Try not to use this method if you can avoid it as it takes
a lot of time if you try to fetch a larger number
of rack positions one by one. Use the rack position cache in the
:mod:`semiconstants` module instead, if possible.
:param row_index: the row of the container (this is 0 based).
:type row_index: :class:`int`
:param column_index: the column of the container (this is 0 based).
:type column_index: :class:`int`
:return: The wanted rack position.
:rtype: :class:`RackPosition`
"""
agg = get_root_aggregate(IRackPosition)
agg.filter = eq(_row_index=row_index) & eq(_column_index=column_index)
return list(agg.iterator())[0]
@classmethod
def from_row_column(cls, row, column):
"""
Returns a new RackPosition instance from the row name
and column number.
Invokes :func:`from_indices`.
:Note: Try not to use this method if you can avoid it as it takes
a lot of time if you try to fetch a larger number
of rack positions one by one. Use the rack position cache in the
:mod:`semiconstants` module instead, if possible.
:param row: a set of characters from a-z (or A-Z) which signifies a row
:type row: :class:`string`
:param column: a number signifying the column
:type column: :class:`int`
:return: The wanted rack position.
:rtype: :class:`RackPosition`
"""
row_index = number_from_label(row) - 1
column_index = column - 1
return cls.from_indices(row_index, column_index)
def __composite_values__(self):
return (self._row_index, self._column_index)
def __eq__(self, other):
"""
Equality is based on the row_index and column_index attributes.
"""
return isinstance(other, RackPosition) \
and self._row_index == other.row_index \
and self._column_index == other.column_index
def __cmp__(self, other):
if self._row_index < other.row_index:
return -1
elif self._row_index > other.row_index:
return 1
else:
return cmp(self._column_index, other.column_index)
def __hash__(self):
return hash((self._row_index, self._column_index))
def __str__(self):
return self._label
def __repr__(self):
return '<RackPosition %s>' % self._label
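# Illustrative usage sketch (requires a configured repository backend so that
# get_root_aggregate(IRackPosition) can reach the DB; the label and indices
# below are hypothetical). All three fetcher methods resolve to the same
# rack position:
#
#   pos_a = RackPosition.from_label('B3')
#   pos_b = RackPosition.from_indices(row_index=1, column_index=2)
#   pos_c = RackPosition.from_row_column(row='B', column=3)
#   assert pos_a == pos_b == pos_c
#   assert pos_a.slug == 'b3'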
class RackPositionSet(Entity):
"""
Set of :class:`RackPosition` objects.
A rack position set is uniquely identified by a hash value that is
derived from the underlying (immutable) set of rack positions.
Rack position sets are used by, for instance,
:class:`thelma.entities.tagging.TaggedRackPositionSet`.
"""
#: The rack positions (:class:`RackPosition`) as set - immutable.
_positions = None
    #: The hash value is the run-length encoded string of the rack position
    #: pattern generated by the :func:`_encode_rack_position_set` function
    #: - immutable.
_hash_value = None
def __init__(self, positions, hash_value, **kw):
"""
        This constructor should not be used. Use the factory method
        :func:`from_positions` to load a potentially existing rack position
        set from the DB instead of creating a new one.
"""
Entity.__init__(self, **kw)
self._positions = positions
self._hash_value = hash_value
@classmethod
def from_positions(cls, positions):
"""
Returns a RackPositionSet for the given positions. If there is
already a set with the same hash value in the root aggregate, this
set will be loaded and returned instead.
"""
if not isinstance(positions, set):
positions = set(positions)
hash_value = | |
ns = GetWsdlNamespace(info.version)
method = _SetWsdlMethod(ns, info.wsdlName, mm)
if method != mm:
raise RuntimeError(
"Duplicate wsdl method %s %s (new class %s vs existing %s)" % \
(ns, info.wsdlName, mm.info.type, method.info.type))
dic[mWsdl] = mm
dic[mName] = mm
name = vmodlName
result = _AddType(LazyType(name, (parent,) , dic))
return _CheckNestedClasses(result, parent)
## Create an enum type
#
# @param vmodlName the VMODL name of the type
# @param wsdlName the WSDL name of the type
# @param version the version of the type
# @param values enum values
# @return vmodl type
def CreateAndLoadEnumType(vmodlName, wsdlName, version, values):
CreateEnumType(vmodlName, wsdlName, version, values)
return LoadEnumType(vmodlName, wsdlName, version, values)
## Create an enum type
#
# @param vmodlName the VMODL name of the type
# @param wsdlName the WSDL name of the type
# @param version the version of the type
# @param values enum values
def CreateEnumType(vmodlName, wsdlName, version, values):
with _lazyLock:
dic = [vmodlName, wsdlName, version, values]
names = vmodlName.split(".")
if _allowCapitalizedNames:
vmodlName = ".".join(name[0].lower() + name[1:] for name in names)
_AddToDependencyMap(names)
typeNs = GetWsdlNamespace(version)
_enumDefMap[vmodlName] = dic
_wsdlDefMap[(typeNs, wsdlName)] = dic
_wsdlTypeMapNSs.add(typeNs)
## Load an enum type
#
# @param vmodlName the VMODL name of the type
# @param wsdlName the WSDL name of the type
# @param version the version of the type
# @param values enum values
# @return the new enum type
def LoadEnumType(vmodlName, wsdlName, version, values):
with _lazyLock:
name = vmodlName
# Enum type cannot have nested classes. So, creating normal type
# instead of LazyType
result = type(name, (Enum,),
{"_wsdlName" : wsdlName, "_version" : version})
result.values = map(result, values)
for value in result.values:
setattr(result, value, value)
return _AddType(result)
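# Illustrative sketch (hypothetical names; the version string must already be
# registered so that GetWsdlNamespace(version) can resolve its namespace):
#
#   SampleState = CreateAndLoadEnumType(
#       'vim.SampleState',          # VMODL name
#       'SampleState',              # WSDL name
#       'vim.version.version1',     # a registered version identifier
#       ['poweredOn', 'poweredOff'])
#   SampleState.poweredOn           # enum value attribute, usable as a string
#   SampleState.Array               # companion array type added by _AddType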
## Create an array type
#
# @param itemType the item type
# @return the new array type
def CreateArrayType(itemType):
return type("%s[]" % itemType.__name__, (Array,), {'Item' : itemType})
## Add a new type to the type maps, create array constructors
# Note: Must be holding the _lazyLock, or in main init path
#
# @param type the type object
# @return type
def _AddType(type):
""" Note: Must be holding the _lazyLock, or in main init path """
type.Array = CreateArrayType(type)
typeNS = GetWsdlNamespace(type._version)
newType = _SetWsdlType(typeNS, type._wsdlName, type)
if newType != type:
raise RuntimeError("Duplicate wsdl type %s (already in typemap)" % (type._wsdlName))
return type
## Check that a value matches a given type, and annotate if necessary
#
# @param info object containing of expected type
# @param val object to check
# @throw TypeError if the value does not match the type
def CheckField(info, val):
with _lazyLock:
valType = Type(val)
if val is None or (isinstance(val, list) and len(val) == 0):
# If type of the property is an Any. We should allow this to have
# unset items
if not (info.flags & F_OPTIONAL) and info.type is not object:
raise TypeError('Required field "%s" not provided (not @optional)' % info.name)
return
elif info.type is object:
try:
GetQualifiedWsdlName(valType)
return
except KeyError:
raise TypeError('Unknown type for %s' % info.type.__name__)
elif isinstance(val, info.type):
return
elif issubclass(info.type, list):
# Checking the values of VMOMI array types is surprisingly complicated....
if isinstance(val, Array):
# 1. We've got a PyVmomi Array object, which is effectively a typed list;
# verify that the type of the Array is a subclass of the expected type.
if issubclass(valType.Item, info.type.Item):
return
elif info.flags & F_LINK:
# Allow objects of expected type to be assigned to links
if issubclass(valType, GetVmodlType(info.expectedType)):
return
elif val:
# 2. We've got a non-empty Python list object, which is untyped;
# walk the list and make sure that each element is a subclass
# of the expected type.
# Masking out F_OPTIONAL part of flags since we are checking for
# each element of the list
flags = info.flags & (F_LINKABLE | F_LINK)
if flags & F_LINK:
if info.expectedType.endswith('[]'):
expectedType = info.expectedType[:-2]
else:
expectedType = info.expectedType
itemInfo = Object(type=info.type.Item, name=info.name, flags=flags,
expectedType=expectedType)
else:
itemInfo = Object(type=info.type.Item, name=info.name, flags=flags)
for it in val:
CheckField(itemInfo, it)
return
else:
# 3. We've got None or an empty Python list object;
# no checking required, since the result will be an empty array.
return
elif info.type is type and valType is type(Exception) \
or issubclass(info.type, int) and issubclass(valType, int) \
or issubclass(info.type, long) and (issubclass(valType, int) or \
issubclass(valType, long)) \
or issubclass(info.type, float) and issubclass(valType, float) \
or issubclass(info.type, string_types) and issubclass(valType, string_types):
return
elif issubclass(info.type, Link):
# Allow object of expected type to be assigned to link
if issubclass(valType, GetVmodlType(info.expectedType)):
return
raise TypeError('For "%s" expected type %s, but got %s'
% (info.name, info.type.__name__, valType.__name__))
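# Illustrative sketch (hypothetical field name; Object and F_OPTIONAL are
# defined in this module): CheckField returns None when the value is
# acceptable and raises TypeError otherwise.
#
#   info = Object(name='sampleField', type=str, flags=F_OPTIONAL)
#   CheckField(info, 'hello')   # passes: isinstance('hello', str)
#   CheckField(info, None)      # passes: the field is optional
#   CheckField(info, 42)        # raises TypeError (expected str, got int)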
## Finalize a created type
#
# @param type a created type
def FinalizeType(type):
if issubclass(type, DataObject):
for info in type._propList:
info.type = GetVmodlType(info.type)
elif issubclass(type, ManagedObject):
for info in list(type._propInfo.values()):
info.type = GetVmodlType(info.type)
for info in list(type._methodInfo.values()):
info.result = GetVmodlType(info.result)
info.methodResult = GetVmodlType(info.methodResult)
info.type = GetVmodlType(info.type)
for param in info.params:
param.type = GetVmodlType(param.type)
## Get the type of an object, for both new and old-style classes
def Type(obj):
try:
return obj.__class__
except AttributeError:
return type(obj)
## Set a WSDL type with wsdl namespace and wsdl name
# Internal to VmomiSupport
#
# Note: Must be holding the _lazyLock, or in main init path
def _SetWsdlType(ns, wsdlName, typ):
"""
Set a WSDL type with wsdl namespace and wsdl name.
Returns added type / existing type if (ns, wsdlName) already in the map
Note: Must be holding the _lazyLock, or in main init path
"""
return _wsdlTypeMap.setdefault((ns, wsdlName), typ)
## Lookup a WSDL type from wsdl namespace and wsdl name
# @param ns XML namespace
# @param name wsdl name
# @return type if found else throws KeyError
def GetWsdlType(ns, name):
if ns is None or name is None:
raise KeyError("{0} {1}".format(ns, name))
with _lazyLock:
# Check if the type is loaded in the map
typ = _wsdlTypeMap.get( (ns, name) )
if typ:
return typ
# It is an array type, get the actual type and return the array
elif name.startswith("ArrayOf"):
try:
return GetWsdlType(ns, name[7:]).Array
except KeyError:
raise KeyError("{0} {1}".format(ns, name))
else:
# Type is not loaded yet, load it
typ = _LoadVmodlType(_wsdlDefMap[(ns, name)][0])
if typ:
return typ
raise KeyError("{0} {1}".format(ns, name))
class UnknownWsdlTypeError(KeyError):
# NOTE (hartsock): KeyError is extended here since most logic will be
# looking for the KeyError type. I do want to distinguish malformed WSDL
# errors as a separate classification of error for easier bug reports.
pass
## Guess the type from wsdlname with no ns
# WARNING! This should not be used in general, as there is no guarantee that
# the guessed type is correct
# @param name wsdl name
# @return type if found in any one of the name spaces else throws KeyError
def GuessWsdlType(name):
with _lazyLock:
# Some types may exist in multiple namespaces, and returning
# the wrong one will cause a deserialization error.
# Since in python3 the order of entries in set is not deterministic,
# we will try to get the type from vim25 namespace first.
try:
return GetWsdlType(XMLNS_VMODL_BASE, name)
except KeyError:
pass
for ns in _wsdlTypeMapNSs:
try:
return GetWsdlType(ns, name)
except KeyError:
pass
raise UnknownWsdlTypeError(name)
## Return a map that contains all the wsdl types
# This function is rarely used
# By calling GetWsdlType on all wsdl names, we will
# make sure that the types are loaded before returning
# the iterator
# @return iterator to the wsdl type map
def GetWsdlTypes():
with _lazyLock:
for ns, name in _wsdlDefMap:
GetWsdlType(ns, name)
return itervalues(_wsdlTypeMap)
## Get the qualified XML schema name (ns, name) of a type
def GetQualifiedWsdlName(type):
with _lazyLock:
wsdlNSAndName = _wsdlNameMap.get(type)
if wsdlNSAndName:
return wsdlNSAndName
else:
if issubclass(type, list):
ns = GetWsdlNamespace(type.Item._version)
return (ns, "ArrayOf" + Capitalize(type.Item._wsdlName))
else:
ns = GetWsdlNamespace(type._version)
return (ns, type._wsdlName)
## Get the WSDL name of a type
def GetWsdlName(type):
return GetQualifiedWsdlName(type)[-1]
## Capitalize a string
def Capitalize(str):
if str:
return str[0].upper() + str[1:]
return str
## Uncapitalize a string
def Uncapitalize(str):
if str:
return str[0].lower() + str[1:]
return str
## To uncapitalize the entire vmodl name
# pyVmomi used to map Java package names to capitalized Python module names,
# but now maps the Java package names unchanged to Python module names.
# This function is needed to support the legacy name mapping.
def UncapitalizeVmodlName(str):
if str:
      return ".".join(name[0].lower() + name[1:] for name in str.split("."))
   return str
# coding: utf-8
#
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Domain object for states and their constituents."""
import copy
import logging
from constants import constants
from core.domain import html_cleaner
from core.domain import interaction_registry
from core.domain import param_domain
import feconf
import jinja_utils
import schema_utils
import utils
def get_full_customization_args(customization_args, ca_specs):
"""Populates the given customization_args dict with default values
if any of the expected customization_args are missing.
Args:
customization_args: dict. The customization dict. The keys are names of
customization_args and the values are dicts with a
single key, 'value', whose corresponding value is the value of
the customization arg.
ca_specs: list(dict). List of spec dictionaries. Is used to check if
some keys are missing in customization_args. Dicts have the
following structure:
- name: str. The customization variable name.
- description: str. The customization variable description.
- default_value: *. The default value of the customization
variable.
Returns:
dict. The customization_args dict where missing keys are populated with
the default values.
"""
for ca_spec in ca_specs:
if ca_spec.name not in customization_args:
customization_args[ca_spec.name] = {
'value': ca_spec.default_value
}
return customization_args
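# Illustrative sketch (hypothetical spec values). Although the docstring above
# describes ca_specs as dicts, the code reads `.name` and `.default_value` as
# attributes, so a namedtuple stands in for the real customization-arg spec
# class:
#
#   from collections import namedtuple
#   CASpec = namedtuple('CASpec', ['name', 'description', 'default_value'])
#   specs = [CASpec('placeholder', 'Text shown in the input box', '')]
#   get_full_customization_args({}, specs)
#   # -> {'placeholder': {'value': ''}}
#   get_full_customization_args({'placeholder': {'value': 'Hi'}}, specs)
#   # -> {'placeholder': {'value': 'Hi'}}   (existing values are kept)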
def validate_customization_args_and_values(
item_name, item_type, customization_args,
ca_specs_to_validate_against):
"""Validates the given `customization_args` dict against the specs set out
in 'ca_specs_to_validate_against'. 'item_name' and 'item_type' are used to
populate any error messages that arise during validation.
Note that this may modify the given customization_args dict, if it has
extra or missing keys. It also normalizes any HTML in the
customization_args dict.
Args:
item_name: str. This is always 'interaction'.
item_type: str. The item_type is the ID of the interaction.
customization_args: dict. The customization dict. The keys are names of
customization_args and the values are dicts with a
single key, 'value', whose corresponding value is the value of
the customization arg.
ca_specs_to_validate_against: list(dict). List of spec dictionaries. Is
used to check if some keys are missing in customization_args. Dicts
have the following structure:
- name: str. The customization variable name.
- description: str. The customization variable description.
- default_value: *. The default value of the customization
variable.
Raises:
ValidationError: The given 'customization_args' is not valid.
"""
ca_spec_names = [
ca_spec.name for ca_spec in ca_specs_to_validate_against]
if not isinstance(customization_args, dict):
raise utils.ValidationError(
'Expected customization args to be a dict, received %s'
% customization_args)
# Validate and clean up the customization args.
# Populate missing keys with the default values.
customization_args = get_full_customization_args(
customization_args, ca_specs_to_validate_against)
# Remove extra keys.
extra_args = []
for arg_name in customization_args.keys():
if not isinstance(arg_name, basestring):
raise utils.ValidationError(
'Invalid customization arg name: %s' % arg_name)
if arg_name not in ca_spec_names:
extra_args.append(arg_name)
logging.warning(
'%s %s does not support customization arg %s.'
% (item_name.capitalize(), item_type, arg_name))
for extra_arg in extra_args:
del customization_args[extra_arg]
# Check that each value has the correct type.
for ca_spec in ca_specs_to_validate_against:
try:
customization_args[ca_spec.name]['value'] = (
schema_utils.normalize_against_schema(
customization_args[ca_spec.name]['value'],
ca_spec.schema))
except Exception:
# TODO(sll): Raise an actual exception here if parameters are not
# involved (If they are, can we get sample values for the state
# context parameters?).
pass
class AnswerGroup(object):
"""Value object for an answer group. Answer groups represent a set of rules
    dictating whether shared feedback should be shown to the user. These
    rules are ORed together. Answer groups may also support a classifier
    that involves soft matching of answers to a set of training data and/or
    example answers dictated by the creator.
"""
def to_dict(self):
"""Returns a dict representing this AnswerGroup domain object.
Returns:
dict. A dict, mapping all fields of AnswerGroup instance.
"""
return {
'rule_specs': [rule_spec.to_dict()
for rule_spec in self.rule_specs],
'outcome': self.outcome.to_dict(),
'training_data': self.training_data,
'tagged_misconception_id': self.tagged_misconception_id
}
@classmethod
def from_dict(cls, answer_group_dict):
"""Return a AnswerGroup domain object from a dict.
Args:
answer_group_dict: dict. The dict representation of AnswerGroup
object.
Returns:
AnswerGroup. The corresponding AnswerGroup domain object.
"""
return cls(
Outcome.from_dict(answer_group_dict['outcome']),
[RuleSpec.from_dict(rs) for rs in answer_group_dict['rule_specs']],
answer_group_dict['training_data'],
answer_group_dict['tagged_misconception_id']
)
def __init__(
self, outcome, rule_specs, training_data, tagged_misconception_id):
"""Initializes a AnswerGroup domain object.
Args:
outcome: Outcome. The outcome corresponding to the answer group.
rule_specs: list(RuleSpec). List of rule specifications.
training_data: list(*). List of answers belonging to training
data of this answer group.
tagged_misconception_id: int or None. The id of the tagged
misconception for the answer group, when a state is part of a
Question object that tests a particular skill.
"""
self.rule_specs = [RuleSpec(
rule_spec.rule_type, rule_spec.inputs
) for rule_spec in rule_specs]
self.outcome = outcome
self.training_data = training_data
self.tagged_misconception_id = tagged_misconception_id
def validate(self, interaction, exp_param_specs_dict):
"""Verifies that all rule classes are valid, and that the AnswerGroup
only has one classifier rule.
Args:
interaction: InteractionInstance. The interaction object.
exp_param_specs_dict: dict. A dict of all parameters used in the
exploration. Keys are parameter names and values are ParamSpec
value objects with an object type property (obj_type).
Raises:
ValidationError: One or more attributes of the AnswerGroup are
invalid.
ValidationError: The AnswerGroup contains more than one classifier
rule.
"""
if not isinstance(self.rule_specs, list):
raise utils.ValidationError(
'Expected answer group rules to be a list, received %s'
% self.rule_specs)
if self.tagged_misconception_id is not None:
if not isinstance(self.tagged_misconception_id, int):
raise utils.ValidationError(
'Expected tagged misconception id to be an int, '
'received %s' % self.tagged_misconception_id)
if len(self.rule_specs) == 0 and len(self.training_data) == 0:
raise utils.ValidationError(
'There must be at least one rule or training data for each'
' answer group.')
for rule_spec in self.rule_specs:
if rule_spec.rule_type not in interaction.rules_dict:
raise utils.ValidationError(
'Unrecognized rule type: %s' % rule_spec.rule_type)
rule_spec.validate(
interaction.get_rule_param_list(rule_spec.rule_type),
exp_param_specs_dict)
self.outcome.validate()
class Hint(object):
"""Value object representing a hint."""
def __init__(self, hint_content):
"""Constructs a Hint domain object.
Args:
hint_content: SubtitledHtml. The hint text and ID referring to the
audio translations for this content.
"""
self.hint_content = hint_content
def to_dict(self):
"""Returns a dict representing this Hint domain object.
Returns:
dict. A dict mapping the field of Hint instance.
"""
return {
'hint_content': self.hint_content.to_dict(),
}
@classmethod
def from_dict(cls, hint_dict):
"""Return a Hint domain object from a dict.
Args:
hint_dict: dict. The dict representation of Hint object.
Returns:
Hint. The corresponding Hint domain object.
"""
return cls(SubtitledHtml.from_dict(hint_dict['hint_content']))
def validate(self):
"""Validates all properties of Hint."""
self.hint_content.validate()
class Solution(object):
"""Value object representing a solution.
A solution consists of answer_is_exclusive, correct_answer and an
    explanation. When answer_is_exclusive is True, this indicates that it is
the only correct answer; when it is False, this indicates that it is one
possible answer. correct_answer records an answer that enables the learner
to progress to the next card and explanation is an HTML string containing
an explanation for the solution.
"""
def __init__(
self, interaction_id, answer_is_exclusive,
correct_answer, explanation):
"""Constructs a Solution domain object.
Args:
interaction_id: str. The interaction id.
            answer_is_exclusive: bool. True if it is the only correct answer;
                False if it is one of several possible answers.
correct_answer: str. The correct answer; this answer enables the
learner to progress to the next card.
explanation: SubtitledHtml. Contains text and text id to link audio
translations for the solution's explanation.
"""
self.answer_is_exclusive = answer_is_exclusive
self.correct_answer = (
interaction_registry.Registry.get_interaction_by_id(
interaction_id).normalize_answer(correct_answer))
self.explanation = explanation
def to_dict(self):
"""Returns a dict representing this Solution domain object.
Returns:
dict. A dict mapping all fields of Solution instance.
"""
return {
'answer_is_exclusive': self.answer_is_exclusive,
'correct_answer': self.correct_answer,
'explanation': self.explanation.to_dict(),
}
@classmethod
def from_dict(cls, interaction_id, solution_dict):
"""Return a Solution domain object from a dict.
Args:
interaction_id: str. The interaction id.
solution_dict: dict. The dict representation of Solution object.
Returns:
Solution. The corresponding Solution domain object.
"""
return cls(
interaction_id,
solution_dict['answer_is_exclusive'],
interaction_registry.Registry.get_interaction_by_id(
interaction_id).normalize_answer(
solution_dict['correct_answer']),
SubtitledHtml.from_dict(solution_dict['explanation']))
def validate(self, interaction_id):
"""Validates all properties of Solution.
Args:
interaction_id: str. The interaction id.
Raises:
ValidationError: One or more attributes of the Solution are not
valid.
"""
if not isinstance(self.answer_is_exclusive, bool):
raise utils.ValidationError(
                'Expected answer_is_exclusive to be a bool, received %s'
                % self.answer_is_exclusive)
"""show_spanning_tree.py
supported commands:
* show spanning-tree detail
* show spanning-tree mst detail
* show spanning-tree summary
* show errdisable recovery
* show spanning-tree
* show spanning-tree mst <WORD>
* show spanning-tree vlan <WORD>
* show spanning-tree mst configuration
"""
import re
from genie.metaparser import MetaParser
from genie.metaparser.util.schemaengine import Schema, \
Any, \
Optional, \
Or, \
And, \
Default, \
Use
# import parser utils
from genie.libs.parser.utils.common import Common
class ShowSpanningTreeSummarySchema(MetaParser):
"""Schema for show spanning-tree summary"""
schema = {
Optional('etherchannel_misconfig_guard'): bool,
Optional('extended_system_id'): bool,
Optional('portfast_default'): bool,
'bpdu_guard': bool,
Optional('bpdu_filter'): bool,
Optional('bridge_assurance'): bool,
Optional('loop_guard'): bool,
'uplink_fast': bool,
'backbone_fast': bool,
Optional('root_bridge_for'): str,
Optional('pvst_simulation'): bool,
Optional('pvst_simulation_status'): str,
Optional('platform_pvst_simulation'): bool,
Optional("configured_pathcost"): {
'method': str,
Optional('operational_value'): str,
},
Optional('mode'): {
Any(): { # mstp, pvst, rapid_pvst
Any(): { # <mst_domain>, <pvst_id>
'blocking': int,
'listening': int,
'learning': int,
'forwarding': int,
'stp_active': int,
}
}
},
'total_statistics': {
'blockings': int,
'listenings': int,
'learnings': int,
'forwardings': int,
'stp_actives': int,
Optional('num_of_msts'): int,
Optional('num_of_vlans'): int,
}
}
class ShowSpanningTreeSummary(ShowSpanningTreeSummarySchema):
    """Parser for show spanning-tree summary"""
cli_command = 'show spanning-tree summary'
def cli(self,output=None):
if output is None:
# get output from device
out = self.device.execute(self.cli_command)
else:
out = output
# initial return dictionary
ret_dict = {}
# initial regexp pattern
p1 = re.compile(r'^Switch +is +in +(?P<mode>[\w\-]+) +mode( *\(IEEE +Standard\))?$')
p2 = re.compile(r'^Root +bridge +for: +(?P<root_bridge_for>[\w\-\,\s]+).?$')
#p3 = re.compile(r'^(?P<name>\w+(?: \S+){,5}?) +is '
# '+(?P<value>disabled|enabled)(?: +but +inactive +in (?P<simulation_value>\S+) +mode)?$')
p3 = re.compile(r'^(?P<name>\w+(?: \S+){,5}?) +is +(?P<value>disable|disabled|enabled)'
r'(?: +but (?P<simulation_value>active|inactive) +in +rapid-pvst +mode)?$')
p4 = re.compile(r'^(?P<id>(?!Total)\w+) +(?P<blocking>\d+) +(?P<listening>\d+)'
r' +(?P<learning>\d+) +(?P<forwarding>\d+) +(?P<stp_active>\d+)$')
p5 = re.compile(r'^(?P<num>\d+) +(msts?|vlans?) +(?P<blockings>\d+) +(?P<listenings>\d+)'
r' +(?P<learnings>\d+) +(?P<forwardings>\d+) +(?P<stp_actives>\d+)$')
p6 = re.compile(r'^(?:Configured +)?Pathcost +method +used +is '
r'+(?P<method>\w+)(?: +\(Operational +value +is +(?P<operational_value>\w+)\))?$')
p7 = re.compile(r'Total +(?P<blockings>\d+) +(?P<listenings>\d+)'
r' +(?P<learnings>\d+) +(?P<forwardings>\d+) +(?P<stp_actives>\d+)$')
p8 = re.compile(r'^(?P<root_bridge_for>(?:(?:[\w-]+, +)+)?[\w-]+)$')
key_map = {'EtherChannel misconfig guard': 'etherchannel_misconfig_guard',
'Extended system ID': 'extended_system_id',
'Portfast Default': 'portfast_default',
'PortFast BPDU Guard': 'bpdu_guard',
'PortFast BPDU Guard Default': 'bpdu_guard',
'Portfast Edge BPDU Guard Default': 'bpdu_guard',
'Portfast BPDU Filter Default': 'bpdu_filter',
'Portfast Edge BPDU Filter Default': 'bpdu_filter',
'Loopguard Default': 'loop_guard',
'UplinkFast': 'uplink_fast',
'Bridge Assurance': 'bridge_assurance',
'BackboneFast': 'backbone_fast',
'PVST Simulation': 'pvst_simulation',
'Platform PVST Simulation': 'platform_pvst_simulation'}
for line in out.splitlines():
line = line.strip()
# Switch is in mst mode (IEEE Standard)
m = p1.match(line)
if m:
mode = m.groupdict()['mode'].replace('-', '_')
continue
# Root bridge for: MST0, MST100
m = p2.match(line)
if m:
ret_dict['root_bridge_for'] = m.groupdict()['root_bridge_for']
continue
# VLAN0780, VLAN0801-VLAN0803, VLAN0806, VLAN0808-VLAN0818, VLAN0821-VLAN0822
m = p8.match(line)
if m:
ret_dict['root_bridge_for'] += ', {}'.format(m.groupdict()['root_bridge_for'])
# EtherChannel misconfig guard is disabled
# Extended system ID is enabled
# Portfast Default is disabled
# PortFast BPDU Guard Default is disabled or Portfast Edge BPDU Guard Default
# Portfast BPDU Filter Default is disabled or Portfast Edge BPDU Filter Default
# Loopguard Default is disabled
# UplinkFast is disabled
# BackboneFast is disabled
# PVST Simulation is enabled
# PVST Simulation Default is enabled but inactive in rapid-pvst mode
# Platform PVST Simulation is enabled
m = p3.match(line)
if m:
group = m.groupdict()
if 'PVST Simulation Default' in group['name']:
group['name'] = 'PVST Simulation'
if 'enabled' in group['value'].lower():
if group['simulation_value']:
ret_dict[key_map[group['name'].strip()]] = True
ret_dict['pvst_simulation_status'] = group['simulation_value']
else:
ret_dict[key_map[group['name'].strip()]] = True
else:
ret_dict[key_map[group['name'].strip()]] = False
continue
# VLAN0100 0 1 0 0 1
m = p4.match(line)
if m:
group = m.groupdict()
mode_id = group.pop('id')
mode_dict = ret_dict.setdefault('mode', {})\
.setdefault(mode, {}).setdefault(mode_id, {})
mode_dict.update({k:int(v) for k, v in group.items()})
continue
# 5 vlans 0 5 0 0 5
# 2 msts 6 0 0 10 16
m = p5.match(line)
if m:
group = m.groupdict()
if 'mst' in line:
key = 'num_of_msts'
elif 'vlan' in line:
key = 'num_of_vlans'
ret_dict.setdefault('total_statistics', {})\
.setdefault(key, int(group.pop('num')))
ret_dict.setdefault('total_statistics', {})\
.update({k:int(v) for k, v in group.items()})
continue
# Configured Pathcost method used is short
# Configured Pathcost method used is short (Operational value is long)
# Pathcost method used is long
m = p6.match(line)
if m:
group = m.groupdict()
ret_dict.setdefault('configured_pathcost', {})\
.update({k:v for k, v in group.items() if v})
continue
m = p7.match(line)
if m:
group = m.groupdict()
ret_dict.setdefault('total_statistics', {}) \
.update({k: int(v) for k, v in group.items()})
continue
return ret_dict
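# Illustrative sketch (abbreviated, hypothetical device output; `dev` would be
# a connected or mocked Genie device object):
#
#   sample = '''
#   Switch is in rapid-pvst mode
#   Root bridge for: VLAN0100
#   Extended system ID           is enabled
#   PortFast BPDU Guard Default  is disabled
#   UplinkFast                   is disabled
#   BackboneFast                 is disabled
#   Configured Pathcost method used is short
#   VLAN0100                     0         0        0          1          1
#   1 vlan                       0         0        0          1          1
#   '''
#   ShowSpanningTreeSummary(device=dev).cli(output=sample)
#   # -> {'root_bridge_for': 'VLAN0100', 'extended_system_id': True,
#   #     'bpdu_guard': False, 'uplink_fast': False, 'backbone_fast': False,
#   #     'configured_pathcost': {'method': 'short'},
#   #     'mode': {'rapid_pvst': {'VLAN0100': {'blocking': 0, 'listening': 0,
#   #                                          'learning': 0, 'forwarding': 1,
#   #                                          'stp_active': 1}}},
#   #     'total_statistics': {'num_of_vlans': 1, 'blockings': 0,
#   #                          'listenings': 0, 'learnings': 0,
#   #                          'forwardings': 1, 'stp_actives': 1}}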
class ShowSpanningTreeDetailSchema(MetaParser):
"""Schema for show spanning-tree detail"""
schema = {
Any(): { # mstp, pvst, rapid_pvst
Optional('domain'): str,
Optional('pvst_id'): str,
Optional('name'): str,
Optional('revision'): int,
Optional('max_hop'): int,
'hello_time': int,
'max_age': int,
'forwarding_delay': int,
Optional('hold_count'): int,
Any(): { # mst_instances, vlans
Any(): {
Optional('mst_id'): int,
Optional('vlan'): str,
Optional('vlan_id'): int,
Optional('hello_time'): int,
Optional('max_age'): int,
Optional('forwarding_delay'): int,
Optional('hold_count'): int,
'bridge_priority': int,
'bridge_sysid': int,
'bridge_address': str,
Optional('root_of_spanning_tree'): bool,
'topology_change_flag': bool,
'topology_detected_flag': bool,
'hold_time': int,
'topology_changes': int,
'time_since_topology_change': str,
Optional('topology_from_port'): str,
'hello_time': int,
'max_age': int,
'forwarding_delay': int,
'hold_time': int,
'topology_change_times': int,
'notification_times': int,
'hello_timer': int,
'topology_change_timer': int,
'notification_timer': int,
Optional('aging_timer'): int,
'interfaces': {
Any(): {
'status': str,
'name': str,
'cost': int,
'port_priority': int,
'port_num': int,
'port_identifier': str,
'designated_root_priority': int,
'designated_root_address': str,
'designated_path_cost': int,
'designated_port_id': str,
'designated_bridge_priority': int,
'designated_bridge_address': str,
'number_of_forward_transitions': int,
'message_age': int,
'forward_delay': int,
'hold': int,
'link_type': str,
Optional('boundary'): str,
Optional('peer'): str,
Optional('loop_guard'): bool,
'counters': {
'bpdu_sent': int,
'bpdu_received': int,
}
}
}
},
},
}
}
class ShowSpanningTreeDetail(ShowSpanningTreeDetailSchema):
"""Parser for show spanning-tree detail"""
MODE_NAME_MAP = {'mstp': 'mstp',
'ieee': 'pvst',
'rstp': 'rapid_pvst'}
MODE_INST_MAP = {'mstp': 'mst_instances',
'ieee': 'vlans',
'rstp': 'vlans'}
MODE_KEY_MAP = {'mstp': 'mst_id',
'ieee': 'vlan_id',
'rstp': 'vlan_id'}
cli_command = 'show spanning-tree detail'
def cli(self, output=None):
if output is None:
# get output from device
out = self.device.execute(self.cli_command)
else:
out = output
# initial return dictionary
ret_dict = {}
# initial regexp pattern
p1 = re.compile(r'^(MST|VLAN)?(?P<inst>\w+) +is +executing +the +(?P<mode>[\w\-]+) +'
'compatible +Spanning +Tree +protocol$')
p2 = re.compile(r'^Bridge +Identifier +has +priority +(?P<bridge_priority>\d+), +'
'sysid +(?P<bridge_sysid>\d+), +'
'address +(?P<bridge_address>[\w\.]+)$')
p3 = re.compile(r'^Configured +hello +time +(?P<hello_time>\d+), +'
'max +age +(?P<max_age>\d+), +forward +delay +(?P<forwarding_delay>\d+)(, +'
'(transmit|tranmsit) +hold\-count +(?P<hold_count>\d+))?$')
p4 = re.compile(r'^We +are +the +root +of +the +spanning +tree$')
p5 = re.compile(r'^Topology +change +flag +(?P<topology_change_flag>[\w\s]+), +'
'detected +flag +(?P<topology_detected_flag>[\w\s]+)$')
p6 = re.compile(r'^Number +of +topology +changes +(?P<topology_changes>\d+) +'
'last +change +occurred +(?P<time_since_topology_change>[\w\.\:]+)( +ago)?$')
p7 = re.compile(r'^from +(?P<topology_from_port>[\w\.\/\-]+)$')
p8 = re.compile(r'^Times: +hold +(?P<hold_time>\d+), +'
'topology +change +(?P<topology_change_times>\d+), +'
'notification +(?P<notification_times>\d+)$')
p9 = re.compile(r'^hello +(?P<hello_time>\d+), '
'max +age +(?P<max_age>\d+), '
'+forward +delay +(?P<forwarding_delay>\d+)$')
p10 = re.compile(r'^Timers: +hello +(?P<hello_timer>\d+), +'
'topology +change +(?P<topology_change_timer>\d+), +'
'notification +(?P<notification_timer>\d+)'
'(, +aging +(?P<aging_timer>\d+))?$')
p11 = re.compile(r'^Port +(?P<port_num>\d+) *\((?P<name>[\w\/\-\.]+)\) +'
'of +(?P<inst>\w+) +is +(?P<status>.*)$')
p12 = re.compile(r'^Port +path +cost +(?P<cost>\d+), +'
'Port +priority +(?P<port_priority>\d+), +'
'Port +Identifier +(?P<port_identifier>[\w\.]+)$')
p13 = re.compile(r'^Designated +root +has +priority +(?P<designated_root_priority>\d+), +'
'address +(?P<designated_root_address>[\w\.]+)$')
p14 = re.compile(r'^Designated +bridge +has +priority +(?P<designated_bridge_priority>\d+), +'
'address +(?P<designated_bridge_address>[\w\.]+)$')
p15 = re.compile(r'^Designated +port +id +is +(?P<designated_port_id>[\w\.]+), +'
'designated +path +cost +(?P<designated_path_cost>\d+)'
'( +[\w\s\,]+)?$')
p16 = re.compile(r'^Timers: +message +age +(?P<message_age>\d+), +'
'forward +delay +(?P<forward_delay>\d+), +hold +(?P<hold>\d+)$')
p17 = re.compile(r'^Number +of +transitions +to +forwarding +'
'state: +(?P<number_of_forward_transitions>\d+)$')
p18 = re.compile(r'^Link +type +is +(?P<link_type>[\w\-]+) +by +default'
'(, *(Boundary +(?P<boundary>\w+)|Peer +is +(?P<peer>\w+)))?$')
p19 = re.compile(r'^Loop +guard +is +(?P<loop_guard>\w+) +by +default +on +the +port$')
p20 = re.compile(r'^BPDU: +sent +(?P<bpdu_sent>\d+), +'
'received +(?P<bpdu_received>\d+)$')
for line in out.splitlines():
line = line.strip()
# MST0 is executing the mstp compatible Spanning Tree protocol
m = p1.match(line)
if m:
group = m.groupdict()
mode = group['mode']
mode_dict = ret_dict.setdefault(self.MODE_NAME_MAP[mode], {})
inst_dict = mode_dict.setdefault(self.MODE_INST_MAP[mode], {}).\
setdefault(int(group['inst']), {})
inst_dict[self.MODE_KEY_MAP[mode]] = int(group['inst'])
continue
# Bridge Identifier has priority 32768, sysid 0, address d8b1.90ff.c889
m = p2.match(line)
if m:
group = m.groupdict()
inst_dict['bridge_address'] = group.pop('bridge_address')
inst_dict.update({k:int(v) for k, v in group.items()})
continue
# Configured hello time 10, max age 40, forward delay 30, transmit hold-count 20
# Configured hello time 2, max age 20, forward delay 15, tranmsit hold-count 6
m = p3.match(line)
if m:
group = m.groupdict()
update_dict = {k:int(v) for k, v in group.items() if v}
mode_dict.update(update_dict)
inst_dict.update(update_dict)
continue
# We are the root of the spanning tree
m = p4.match(line)
if m:
inst_dict['root_of_spanning_tree'] = True
continue
# Topology change flag not set, detected flag not set
m = p5.match(line)
if m:
group = m.groupdict()
inst_dict['topology_change_flag'] = False if 'not' in group['topology_change_flag'] else True
inst_dict['topology_detected_flag'] = False if 'not' in group['topology_detected_flag'] else True
continue
# Number of topology changes | |
# hintgen/astTools.py
import ast, copy, pickle
from .tools import log
from .namesets import *
from .display import printFunction
def cmp(a, b):
if type(a) == type(b) == complex:
return (a.real > b.real) - (a.real < b.real)
return (a > b) - (a < b)
def tree_to_str(a):
return repr(pickle.dumps(a))
def str_to_tree(s):
return pickle.loads(eval(s))
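# Illustrative sketch: round-tripping an AST through its pickled string form.
#
#   tree = ast.parse("x = 1")
#   s = tree_to_str(tree)        # repr() of the pickled bytes
#   restored = str_to_tree(s)    # eval() back to bytes, then unpickle
#   printFunction(restored)      # -> same source text as printFunction(tree)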
def builtInName(id):
"""Determines whether the given id is a built-in name"""
if id in builtInNames + exceptionClasses:
return True
elif id in builtInFunctions.keys():
return True
elif id in list(allPythonFunctions.keys()) + supportedLibraries:
return False
def importedName(id, importList):
for imp in importList:
if type(imp) == ast.Import:
for name in imp.names:
if hasattr(name, "asname") and name.asname != None:
if id == name.asname:
return True
else:
if id == name.name:
return True
elif type(imp) == ast.ImportFrom:
if hasattr(imp, "module"):
if imp.module in supportedLibraries:
libMap = libraryMap[imp.module]
for name in imp.names:
if hasattr(name, "asname") and name.asname != None:
if id == name.asname:
return True
else:
if id == name.name:
return True
else:
log("astTools\timportedName\tUnsupported library: " + printFunction(imp), "bug")
else:
log("astTools\timportedName\tWhy no module? " + printFunction(imp), "bug")
return False
def isConstant(x):
"""Determine whether the provided AST is a constant"""
return (type(x) in [ast.Num, ast.Str, ast.Bytes, ast.NameConstant])
def isIterableType(t):
"""Can the given type be iterated over"""
return t in [ dict, list, set, str, bytes, tuple ]
def isStatement(a):
"""Determine whether the given node is a statement (vs an expression)"""
return type(a) in [ ast.Module, ast.Interactive, ast.Expression, ast.Suite,
ast.FunctionDef, ast.ClassDef, ast.Return, ast.Delete,
ast.Assign, ast.AugAssign, ast.For, ast.While,
ast.If, ast.With, ast.Raise, ast.Try,
ast.Assert, ast.Import, ast.ImportFrom, ast.Global,
ast.Expr, ast.Pass, ast.Break, ast.Continue ]
def codeLength(a):
"""Returns the number of characters in this AST"""
if type(a) == list:
return sum([codeLength(x) for x in a])
return len(printFunction(a))
def applyToChildren(a, f):
"""Apply the given function to all the children of a"""
if a == None:
return a
for field in a._fields:
child = getattr(a, field)
if type(child) == list:
i = 0
while i < len(child):
temp = f(child[i])
if type(temp) == list:
child = child[:i] + temp + child[i+1:]
i += len(temp)
else:
child[i] = temp
i += 1
else:
child = f(child)
setattr(a, field, child)
return a
def occursIn(sub, super):
"""Does the first AST occur as a subtree of the second?"""
superStatementTypes = [ ast.Module, ast.Interactive, ast.Suite,
ast.FunctionDef, ast.ClassDef, ast.For,
ast.While, ast.If, ast.With, ast.Try,
ast.ExceptHandler ]
if (not isinstance(super, ast.AST)):
return False
if type(sub) == type(super) and compareASTs(sub, super, checkEquality=True) == 0:
return True
# we know that a statement can never occur in an expression
# (or in a non-statement-holding statement), so cut the search off now to save time.
if isStatement(sub) and type(super) not in superStatementTypes:
return False
for child in ast.iter_child_nodes(super):
if occursIn(sub, child):
return True
return False
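# Illustrative sketch:
#
#   outer = ast.parse("if x:\n    y = 1")
#   inner = outer.body[0].body[0]     # the `y = 1` Assign node
#   occursIn(inner, outer)            # -> True
#   occursIn(outer.body[0], inner)    # -> False (an If cannot occur in an Assign)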
def countOccurances(a, value):
"""How many instances of this node type appear in the AST?"""
if type(a) == list:
return sum([countOccurances(x, value) for x in a])
if not isinstance(a, ast.AST):
return 0
count = 0
for node in ast.walk(a):
if isinstance(node, value):
count += 1
return count
def countVariables(a, id):
"""Count the number of times the given variable appears in the AST"""
if type(a) == list:
return sum([countVariables(x, id) for x in a])
if not isinstance(a, ast.AST):
return 0
count = 0
for node in ast.walk(a):
if type(node) == ast.Name and node.id == id:
count += 1
return count
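# Illustrative sketch:
#
#   tree = ast.parse("x = 1\ny = x + x")
#   countOccurances(tree, ast.Name)   # -> 4 (x, y, x, x)
#   countVariables(tree, "x")         # -> 3 (one assignment target, two uses)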
def gatherAllNames(a, keep_orig=True):
"""Gather all names in the tree (variable or otherwise).
Names are returned along with their original names
(which are used in variable mapping)"""
if type(a) == list:
allIds = set()
for line in a:
allIds |= gatherAllNames(line)
return allIds
if not isinstance(a, ast.AST):
return set()
allIds = set()
for node in ast.walk(a):
if type(node) == ast.Name:
origName = node.originalId if (keep_orig and hasattr(node, "originalId")) else None
allIds |= set([(node.id, origName)])
return allIds
def gatherAllVariables(a, keep_orig=True):
"""Gather all variable names in the tree. Names are returned along
with their original names (which are used in variable mapping)"""
if type(a) == list:
allIds = set()
for line in a:
allIds |= gatherAllVariables(line)
return allIds
if not isinstance(a, ast.AST):
return set()
allIds = set()
for node in ast.walk(a):
if type(node) == ast.Name or type(node) == ast.arg:
currentId = node.id if type(node) == ast.Name else node.arg
# Only take variables
if not (builtInName(currentId) or hasattr(node, "dontChangeName")):
origName = node.originalId if (keep_orig and hasattr(node, "originalId")) else None
if (currentId, origName) not in allIds:
for pair in allIds:
if pair[0] == currentId:
if pair[1] == None:
allIds -= {pair}
allIds |= {(currentId, origName)}
elif origName == None:
pass
else:
log("astTools\tgatherAllVariables\tConflicting originalIds? " + pair[0] + " : " + pair[1] + " , " + origName + "\n" + printFunction(a), "bug")
break
else:
allIds |= {(currentId, origName)}
return allIds
def gatherAllParameters(a, keep_orig=True):
"""Gather all parameters in the tree. Names are returned along
with their original names (which are used in variable mapping)"""
if type(a) == list:
allIds = set()
for line in a:
allIds |= gatherAllVariables(line)
return allIds
if not isinstance(a, ast.AST):
return set()
allIds = set()
for node in ast.walk(a):
if type(node) == ast.arg:
origName = node.originalId if (keep_orig and hasattr(node, "originalId")) else None
allIds |= set([(node.arg, origName)])
return allIds
def gatherAllHelpers(a, restricted_names):
"""Gather all helper function names in the tree that have been anonymized"""
if type(a) != ast.Module:
return set()
helpers = set()
for item in a.body:
if type(item) == ast.FunctionDef:
if not hasattr(item, "dontChangeName") and item.name not in restricted_names: # this got anonymized
origName = item.originalId if hasattr(item, "originalId") else None
helpers |= set([(item.name, origName)])
return helpers
def gatherAllFunctionNames(a):
"""Gather all helper function names in the tree that have been anonymized"""
if type(a) != ast.Module:
return set()
helpers = set()
for item in a.body:
if type(item) == ast.FunctionDef:
origName = item.originalId if hasattr(item, "originalId") else None
helpers |= set([(item.name, origName)])
return helpers
def gatherAssignedVars(targets):
"""Take a list of assigned variables and extract the names/subscripts/attributes"""
if type(targets) != list:
targets = [targets]
newTargets = []
for target in targets:
if type(target) in [ast.Tuple, ast.List]:
newTargets += gatherAssignedVars(target.elts)
elif type(target) in [ast.Name, ast.Subscript, ast.Attribute]:
newTargets.append(target)
else:
log("astTools\tgatherAssignedVars\tWeird Assign Type: " + str(type(target)),"bug")
return newTargets
def gatherAssignedVarIds(targets):
"""Just get the ids of Names"""
vars = gatherAssignedVars(targets)
return [y.id for y in filter(lambda x : type(x) == ast.Name, vars)]
def getAllAssignedVarIds(a):
if not isinstance(a, ast.AST):
return []
ids = []
for child in ast.walk(a):
if type(child) == ast.Assign:
ids += gatherAssignedVarIds(child.targets)
elif type(child) == ast.AugAssign:
ids += gatherAssignedVarIds([child.target])
elif type(child) == ast.For:
ids += gatherAssignedVarIds([child.target])
return ids
def getAllAssignedVars(a):
if not isinstance(a, ast.AST):
return []
vars = []
for child in ast.walk(a):
if type(child) == ast.Assign:
vars += gatherAssignedVars(child.targets)
elif type(child) == ast.AugAssign:
vars += gatherAssignedVars([child.target])
elif type(child) == ast.For:
vars += gatherAssignedVars([child.target])
return vars
def getAllFunctions(a):
"""Collects all the functions in the given module"""
if not isinstance(a, ast.AST):
return []
functions = []
for child in ast.walk(a):
if type(child) == ast.FunctionDef:
functions.append(child.name)
return functions
def getAllImports(a):
"""Gather all imported module names"""
if not isinstance(a, ast.AST):
return []
imports = []
for child in ast.walk(a):
if type(child) == ast.Import:
for alias in child.names:
if alias.name in supportedLibraries:
imports.append(alias.asname if alias.asname != None else alias.name)
else:
log("astTools\tgetAllImports\tUnknown library: " + alias.name, "bug")
elif type(child) == ast.ImportFrom:
if child.module in supportedLibraries:
for alias in child.names: # these are all functions
if alias.name in libraryMap[child.module]:
imports.append(alias.asname if alias.asname != None else alias.name)
else:
log("astTools\tgetAllImports\tUnknown import from name: " + \
child.module + "," + alias.name, "bug")
else:
log("astTools\tgetAllImports\tUnknown library: " + child.module, "bug")
return imports
def getAllImportStatements(a):
if not isinstance(a, ast.AST):
return []
imports = []
for child in ast.walk(a):
if type(child) == ast.Import:
imports.append(child)
elif type(child) == ast.ImportFrom:
imports.append(child)
return imports
def getAllGlobalNames(a):
# Finds all names that can be accessed at the global level in the AST
if type(a) != ast.Module:
return []
names = []
for obj in a.body:
if type(obj) in [ast.FunctionDef, ast.ClassDef]:
names.append(obj.name)
elif type(obj) in [ast.Assign, ast.AugAssign]:
targets = obj.targets if type(obj) == ast.Assign else [obj.target]
for target in obj.targets:
if type(target) == ast.Name:
names.append(target.id)
elif type(target) in [ast.Tuple, ast.List]:
for elt in target.elts:
if type(elt) == ast.Name:
names.append(elt.id)
elif type(obj) in [ast.Import, ast.ImportFrom]:
for module in obj.names:
names.append(module.asname if module.asname != None else module.name)
return names
def doBinaryOp(op, l, r):
"""Perform the given AST binary operation on the values"""
top = type(op)
if top == ast.Add:
return l + r
elif top == ast.Sub:
return l - r
elif top == ast.Mult:
return l * r
elif top == ast.Div:
# Don't bother if this will be a really long float- it won't work properly!
# Also, in Python 3 this is floating division, so perform it accordingly.
val = 1.0 * l / r
if (val * 1e10 % 1.0) != 0:
raise Exception("Repeating Float")
return val
elif top == ast.Mod:
return l % r
elif top == ast.Pow:
return l ** r
elif top == ast.LShift:
return l << r
elif top == ast.RShift:
return l >> r
elif top == ast.BitOr:
return l | r
elif top == ast.BitXor:
return l ^ r
elif top == ast.BitAnd:
return l & r
elif top == ast.FloorDiv:
return l // r
def doUnaryOp(op, val):
"""Perform the given AST unary operation on the value"""
top = type(op)
if top == ast.Invert:
return ~ val
elif top == ast.Not:
return not val
elif top == ast.UAdd:
return val
elif top == ast.USub:
return -val
def doCompare(op, left, right):
"""Perform the given AST comparison on the values"""
top = type(op)
if top == ast.Eq:
return left == right
elif top == ast.NotEq:
return left != right
elif top == ast.Lt:
return left < right
elif top == ast.LtE:
return left <= right
elif top == ast.Gt:
return left > right
elif top == ast.GtE:
return left >= right
elif top == ast.Is:
return left is right
elif top == ast.IsNot:
return left is not right
elif top == ast.In:
return left in right
elif top == ast.NotIn:
return left not in right
def num_negate(op):
top = type(op)
neg = not op.num_negated if hasattr(op, "num_negated") else True
if top == ast.Add:
newOp = ast.Sub()
elif top == ast.Sub:
newOp = ast.Add()
elif top in [ast.Mult, ast.Div, | |
import os
import os.path
import csv
import re
def words(filename):
f = open(os.path.join("data", "rectangle_raw", filename), "r")
for line in f:
for word in line.split():
yield word
def write_dict(dic, filename):
p = os.path.join("data", "rectangle", filename.replace(" ", "_"))
d = os.path.dirname(p)
if not os.path.exists(d):
os.makedirs(d)
print("Create " + p)
f = open(p, "w")
for i in range(-1, len(dic[next(iter(dic))])):
if i == -1:
f.write("ID")
else:
f.write(str(i))
for k in dic.keys():
if i == -1:
f.write("," + k)
else:
f.write("," + str(dic[k][i]))
f.write("\n")
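# Illustrative sketch of the CSV layout produced by write_dict, e.g. for
# dic = {"WIDTH": [30, 50], "HEIGHT": [20, 40]}:
#
#   ID,WIDTH,HEIGHT
#   0,30,20
#   1,50,40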
###############################################################################
def convert_generic(filename, s1="nwh", s2="whpc"):
w = words(filename)
bins = {}
for c in s1:
if c == 'w':
bins["WIDTH"] = []
elif c == 'h':
bins["HEIGHT"] = []
for c in s1:
if c == 'n':
itemtype_number = int(next(w))
elif c == 'w':
bins["WIDTH"].append(int(next(w)))
elif c == 'h':
bins["HEIGHT"].append(int(next(w)))
elif c == 'x':
next(w)
write_dict(bins, filename + "_bins.csv")
items = {}
for c in s2:
if c == 'w':
items["WIDTH"] = []
elif c == 'h':
items["HEIGHT"] = []
elif c == 'p':
items["PROFIT"] = []
elif c == 'c':
items["COPIES"] = []
for i in range(0, itemtype_number):
for c in s2:
if c == 'w':
items["WIDTH"].append(int(next(w)))
elif c == 'h':
items["HEIGHT"].append(int(next(w)))
elif c == 'p':
items["PROFIT"].append(int(next(w)))
elif c == 'c':
items["COPIES"].append(int(next(w)))
elif c == 'x':
next(w)
write_dict(items, filename + "_items.csv")
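# Illustrative sketch of the format-string mini-language used above
# (hypothetical raw file contents). With s1="nwh" the header tokens are read
# as: item-type count, bin width, bin height; with s2="whpc" each item-type
# record is read as: width, height, profit, copies ('x' skips a token).
# Tokens are whitespace-separated, so line layout does not matter.
#
#   Raw file data/rectangle_raw/example/inst.txt:
#       2
#       100 80
#       30 20 5 1
#       50 40 9 2
#
#   convert_generic("example/inst.txt", s1="nwh", s2="whpc")
#   # writes data/rectangle/example/inst.txt_bins.csv   (one 100x80 bin)
#   # and    data/rectangle/example/inst.txt_items.csv  (two item types)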
def convert_vbpp(filename, s1="mn", s2="whpc", s3="", s4="whpc"):
w = words(filename)
for c in s1:
if c == 'n':
itemtype_number = int(next(w))
elif c == 'm':
bintype_number = int(next(w))
elif c == 'x':
next(w)
bins = {}
for c in s2:
if c == 'w':
bins["WIDTH"] = []
elif c == 'h':
bins["HEIGHT"] = []
elif c == 'p':
bins["COST"] = []
elif c == 'c':
bins["COPIES"] = []
for i in range(0, bintype_number):
for c in s2:
if c == 'w':
bins["WIDTH"].append(int(next(w)))
elif c == 'h':
bins["HEIGHT"].append(int(next(w)))
elif c == 'p':
bins["COST"].append(int(next(w)))
elif c == 'c':
bins["COPIES"].append(int(next(w)))
elif c == 'x':
next(w)
write_dict(bins, filename + "_bins.csv")
for c in s3:
if c == 'n':
itemtype_number = int(next(w))
elif c == 'm':
bintype_number = int(next(w))
elif c == 'x':
next(w)
items = {}
for c in s4:
if c == 'w':
items["WIDTH"] = []
elif c == 'h':
items["HEIGHT"] = []
elif c == 'p':
items["PROFIT"] = []
elif c == 'c':
items["COPIES"] = []
for i in range(0, itemtype_number):
for c in s4:
if c == 'w':
items["WIDTH"].append(int(next(w)))
elif c == 'h':
items["HEIGHT"].append(int(next(w)))
elif c == 'p':
items["PROFIT"].append(int(next(w)))
elif c == 'c':
items["COPIES"].append(int(next(w)))
elif c == 'x':
next(w)
write_dict(items, filename + "_items.csv")
def convert_berkey1987(filename):
w = words(filename)
for instance_number in range(0, 50):
bins = {"WIDTH": [], "HEIGHT": []}
items = {"WIDTH": [], "HEIGHT": []}
for _ in range(3):
next(w)
itemtype_number = int(next(w))
for _ in range(3):
next(w)
instance_relative_number = int(next(w))
for _ in range(7):
next(w)
bins["HEIGHT"].append(int(next(w)))
bins["WIDTH"].append(int(next(w)))
next(w)
for i in range(0, itemtype_number):
items["HEIGHT"].append(int(next(w)))
items["WIDTH"].append(int(next(w)))
if i == 0:
next(w)
suffix = (
"_" + str(itemtype_number)
+ "_" + str(instance_relative_number))
write_dict(bins, filename + suffix + "_bins.csv")
write_dict(items, filename + suffix + "_items.csv")
def convert_beasley2004(filename):
w = words(filename)
instance_number = int(next(w))
for instance in range(0, instance_number):
bins = {"WIDTH": [], "HEIGHT": []}
items = {"WIDTH": [], "HEIGHT": [], "PROFIT": [], "COPIES": []}
itemtype_number = int(next(w))
bins["WIDTH"].append(int(next(w)))
bins["HEIGHT"].append(int(next(w)))
for i in range(0, itemtype_number):
items["WIDTH"].append(int(next(w)))
items["HEIGHT"].append(int(next(w)))
next(w)
items["COPIES"].append(int(next(w)))
items["PROFIT"].append(int(next(w)))
suffix = "_" + str(instance + 1)
write_dict(bins, filename + suffix + "_bins.csv")
write_dict(items, filename + suffix + "_items.csv")
def convert_cintra2008(filename):
w = words(filename)
bins = {"WIDTH": [], "HEIGHT": []}
items = {"WIDTH": [], "HEIGHT": [], "COPIES": []}
next(w)
platetype_number = int(next(w))
itemtype_number = int(next(w))
for _ in range(3):
next(w)
bins["WIDTH"].append(int(next(w)))
bins["HEIGHT"].append(int(next(w)))
next(w)
for _ in range(1, platetype_number):
for _ in range(3):
next(w)
for i in range(0, itemtype_number):
items["WIDTH"].append(int(next(w)))
items["HEIGHT"].append(int(next(w)))
items["COPIES"].append(int(next(w)))
next(w)
write_dict(bins, filename + "_1bintype_bins.csv")
write_dict(items, filename + "_1bintype_items.csv")
def convert_egeblad2009(filename):
bins = {"WIDTH": [], "HEIGHT": []}
items = {"WIDTH": [], "HEIGHT": [], "PROFIT": [], "COPIES": []}
f = open("data/rectangle_raw/" + filename, "r")
line = f.readline().split(",")
bins["WIDTH"].append(int(line[1]))
bins["HEIGHT"].append(int(line[2]))
while True:
l = f.readline()
if not l:
break
line = l.split(",")
items["WIDTH"].append(int(line[2]))
items["HEIGHT"].append(int(line[3]))
items["PROFIT"].append(int(line[4]))
items["COPIES"].append(int(line[5]))
write_dict(bins, filename + "_bins.csv")
write_dict(items, filename + "_items.csv")
def convert_silveira2013(filename):
w = words(filename)
bins = {"WIDTH": [], "HEIGHT": []}
items = {"WIDTH": [], "HEIGHT": [], "NEW_STACK": []}
next(w)
next(w)
itemtype_number = int(next(w))
bins["HEIGHT"].append(int(next(w)))
bins["WIDTH"].append(int(next(w)))
while next(w, None):
stack_size = int(next(w))
for i in range(stack_size):
items["HEIGHT"].append(int(next(w)))
items["WIDTH"].append(int(next(w)))
items["NEW_STACK"].append((1 if i == 0 else 0))
write_dict(bins, filename + "_bins.csv")
write_dict(items, filename + "_items.csv")
def convert_afsharian2014(filename):
f = open(os.path.join("data", "rectangle_raw", filename), "r")
instances = {}
instance = None
while True:
line = f.readline()
if not line:
break
line_split = line.split()
if "static SmallObject[]" in line:
instance = line_split[2].split('_')[0]
for d in range(5):
instances[instance + "_D" + str(d)] = {
"bins": {"WIDTH": [], "HEIGHT": []},
"items": {"WIDTH": [], "HEIGHT": [], "PROFIT": []},
"defects": {"BIN": [], "X": [], "Y": [], "WIDTH": [], "HEIGHT": []},
}
continue
if "new Data(" in line:
instance = line.split('"')[1]
continue
if "new SmallObject(" in line:
numbers = re.findall(r'\d+', line)
for d in range(5):
instances[instance + "_D" + str(d)]["items"]["WIDTH"].append(numbers[0])
instances[instance + "_D" + str(d)]["items"]["HEIGHT"].append(numbers[1])
instances[instance + "_D" + str(d)]["items"]["PROFIT"].append(numbers[2])
continue
if "new Defect(" in line:
numbers = re.findall(r'\d+', line)
instances[instance]["defects"]["BIN"].append(0)
instances[instance]["defects"]["X"].append(numbers[0])
instances[instance]["defects"]["Y"].append(numbers[1])
instances[instance]["defects"]["WIDTH"].append(numbers[2])
instances[instance]["defects"]["HEIGHT"].append(numbers[3])
continue
if "}, " in line and "_D1" in instance:
numbers = re.findall(r'\d+', line)
instance = instance.split('_')[0]
for d in range(5):
instances[instance + "_D" + str(d)]["bins"]["WIDTH"].append(numbers[0])
instances[instance + "_D" + str(d)]["bins"]["HEIGHT"].append(numbers[1])
continue
for k, v in instances.items():
for k2, v2 in v.items():
write_dict(v2, filename + "/" + k + "_" + k2 + ".csv")
def convert_roadef2018(filename):
bins = {"WIDTH": [], "HEIGHT": []}
for _ in range(100):
bins["WIDTH"].append(6000)
bins["HEIGHT"].append(3210)
write_dict(bins, filename + "_bins.csv")
with open(os.path.join("data", "rectangle_raw", filename + "_batch.csv"), newline='') as csvfile:
items = {"WIDTH": [], "HEIGHT": [], "NEWSTACK": []}
spamreader = csv.reader(csvfile, delimiter=';', quotechar='|')
first_line = True
row_prec = None
for row in spamreader:
if first_line:
first_line = False
continue
items["WIDTH"].append(int(row[1]))
items["HEIGHT"].append(int(row[2]))
if len(items["NEWSTACK"]) == 0:
new_stack = True
else:
new_stack = (row_prec[3] != row[3])
items["NEWSTACK"].append(int(new_stack))
row_prec = row
write_dict(items, filename + "_items.csv")
with open(os.path.join("data", "rectangle_raw", filename + "_defects.csv"), newline='') as csvfile:
defects = {"BIN": [], "X": [], "Y": [], "WIDTH": [], "HEIGHT": []}
spamreader = csv.reader(csvfile, delimiter=';', quotechar='|')
first_line = True
for row in spamreader:
if first_line:
first_line = False
continue
defects["BIN"].append(int(row[1]))
defects["X"].append(int(float(row[2])))
defects["Y"].append(int(float(row[3])))
defects["WIDTH"].append(int(float(row[4])))
defects["HEIGHT"].append(int(float(row[5])))
write_dict(defects, filename + "_defects.csv")
def convert_martin2019b(filename):
# Note: the files contain values for number of copies, but they are not
# used in the corresponding paper.
w = words(filename)
items = []
bins = {"WIDTH": [], "HEIGHT": []}
bins["WIDTH"].append(int(next(w)))
bins["HEIGHT"].append(int(next(w)))
write_dict(bins, filename + "_bins.csv")
itemtype_number = int(next(w))
    items = {"WIDTH": [], "HEIGHT": [], "PROFIT": []}
for i in range(0, itemtype_number):
items["WIDTH"].append(int(next(w)))
items["HEIGHT"].append(int(next(w)))
items["PROFIT"].append(int(next(w)))
int(next(w))
write_dict(items, filename + "_items.csv")
defect_number = int(next(w))
defects = {"BIN": [], "X": [], "Y": [], "WIDTH": [], "HEIGHT": []}
for i in range(0, defect_number):
x1 = int(next(w))
y1 = int(next(w))
x2 = int(next(w))
y2 = int(next(w))
defects["BIN"].append(0)
defects["X"].append(x1)
defects["Y"].append(y1)
defects["WIDTH"].append(x2 - x1)
defects["HEIGHT"].append(y2 - y1)
write_dict(defects, filename + "_defects.csv")
def convert_long2020(filename):
w = words(filename)
bins = {"WIDTH": [], "HEIGHT": []}
items = {"WIDTH": [], "HEIGHT": [], "COPIES": []}
for _ in range(48):
next(w)
first = True
while next(w, None):
items["COPIES"].append(int(next(w)))
items["HEIGHT"].append(int(next(w)))
items["WIDTH"].append(int(next(w)))
if first:
max1cut = int(next(w))
bins["HEIGHT"].append(int(next(w)))
bins["WIDTH"].append(int(next(w)))
else:
for _ in range(3):
next(w)
first = False
write_dict(bins, filename + "_bins.csv")
write_dict(items, filename + "_items.csv")
p = os.path.join("data", "rectangle", filename.replace(" ", "_"))
with open(p + "_parameters.txt", "w") as params_file:
params_file.write("--max1cut " + str(max1cut) + "\n")
###############################################################################
if __name__ == "__main__":
convert_generic("herz1972/H", "whn", "wh")
for f in ["christofides1977/cgcut" + str(i) + ".txt" for i in range(1, 4)]:
convert_generic(f, "nwh", "whcp")
for f in ["beng1982/BENG" + str(i) for i in range(1, 11)]:
convert_generic(f, "nwh", "xwh")
for f in ["wang1983/" + i for i in ["WANG1", "WANG2", "WANG3"]]:
convert_generic(f, "xnwh", "whc")
convert_generic("wang1983/W", "whn", "whc")
for f in ["wang1983/" + i for i in ["WANGM1", "WANGM2"]]:
convert_vbpp(f, "mn", "whc", "", "whc")
for f in ["beasley1985/gcut" + str(i) + ".txt" for i in range(1, 14)]:
convert_generic(f, "nwh", "whp")
math.exp(
-(X / 2 * (z_co / z_counter) ** 2)
/ (X + abs(z_co * z_counter) * xi * (nu_co + nu_counter))
)
elif xi < xi_critical:
common_factor = -(xi * X / 2) / (
X * abs(z_counter) + (nu_counter * z_counter ** 2 + nu_co * z_co ** 2)
)
gamma_counter = math.exp(common_factor * z_counter ** 2)
gamma_co = math.exp(common_factor * z_co ** 2)
# return the correct value depending on the 'type' argument
if type == "counter":
return gamma_counter
elif type == "co":
return gamma_co
elif type == "mean":
return (gamma_counter ** nu_counter * gamma_co ** nu_co) ** (
1 / (nu_counter + nu_co)
)
else:
raise Exception("Invalid 'type' argument. Enter 'counter'', 'co', or 'mean'")
def diffusion_coefficient_manning(
xi: float,
C_fix: float,
Cs: float,
vol_frac: float,
type: str = "counter",
nu_counter: int = 1,
nu_co: int = 1,
z_counter: int = 1,
z_co: int = -1,
):
"""
Return a diffusion coefficient inside a charged polymer,
according to Manning theory
Args:
xi:
Number representing the Manning parameter for the polymer,
dimensionless.
C_fix:
Number representing the concentration of fixed charges, including sign.
Must be specified in mol/L of water absorbed by the polymer. Note that
monovalent charged groups are assumed.
Cs:
Number representing the concentration of mobile salt inside the polymer.
Must be specified in mol/L of water absorbed by the polymer.
vol_frac:
The volume fraction of water sorbed by the ion exchange membrane.
type:
Specifies whether the counter-ion, co-ion, or the mean diffusion
coefficient is returned. Valid arguments are 'counter' and 'co'; 'mean'
is not currently implemented. Defaults to 'counter' if not specified.
nu_counter:
Stoichiometric coefficient of the counter-ion in the parent salt. Defaults to 1 if not specified.
nu_co:
Stoichiometric coefficient of the co-ion in the parent salt. Defaults to 1 if not specified.
z_counter:
Net charge, including sign, of the counter-ion. Defaults to +1 if not specified. Note that the sign of
z_counter must be opposite to the sign of C_fix.
z_co:
Net charge, including sign, of the co-ion. Defaults to -1 if not specified. Note that the sign of
z_co must be the same as the sign of C_fix.
Returns:
float: The mean or individual ion diffusion coefficient inside the polymer, normalized
by the ion diffusion coefficient in bulk solution (D_mem / D_bulk).
Notes:
When \( \\xi \\gt \\frac{1}{|z_{ct}|} \), the counter-ion diffusion coefficient is given by:
$$
\\frac{\\bar D_{ct}}{D_{ct}} = \\bigg( \\frac{\\frac{X}{z_{ct}^2 \\nu_{ct} \\xi} + 1}
{\\frac{X}{|z_{ct}| \\nu_{ct}} + 1} \\bigg) \\bigg( 1 - \\frac{1}{3} z_{ct}^2 A(\\frac{1}{|z_{ct}|},
\\frac{X}{|z_{ct}| \\xi}\\bigg) \\bigg( \\frac{\\phi_w}{2 - \\phi_w} \\bigg)^2
$$
otherwise, when \( \\xi \\lt \\frac{1}{|z_{ct}|} \):
$$
\\frac{\\bar D_{ct}}{D_{ct}} = \\bigg( 1 - \\frac{1}{3} z_{ct}^2 A(\\frac{1}{|z_{ct}|},
\\frac{X}{|z_{ct}| \\xi}\\bigg) \\bigg( \\frac{\\phi_w}{2 - \\phi_w} \\bigg)^2
$$
In either case, co-ion diffusion coefficient is given by
$$
\\frac{\\bar D_{co}}{D_{co}} = \\bigg( 1 - \\frac{1}{3} z_{co}^2 A(\\frac{1}{|z_{ct}|},
\\frac{X}{|z_{ct}| \\xi}\\bigg) \\bigg( \\frac{\\phi_w}{2 - \\phi_w} \\bigg)^2
$$
where
$$
A = \\sum_{m_1} \\sum_{m_2} \\bigg [ \\pi |z_{ct}|(m_1^2+m_2^2)+|z_{ct}|+ \\frac{(\\nu_{ct}
+ \\nu_{co})|z_{ct} z_{co}||z_{ct}| \\xi}{X} \\bigg]^{-2}
$$
$$
X = \\frac{\\bar C_{co}}{\\bar C_{fix}}
$$
\(\\bar D\) are diffusion coefficients, \( \\bar C_{fix} \) is the fixed charge concentration (including sign),
\( \\xi \) is the Manning parameter, \( \\bar C_{co} \) is the co-ion concentration in the membrane,
and subscripts \(co\) and \(ct\) refer to the co-ion and counter-ion, respectively, and overbars indicate
membrane-phase quantities.
The mean salt diffusion coefficient is given by
$$
\\bar D_s = \\frac{\\bar D_{ct} \\bar D_{co} (z_{ct}^2 \\bar C_{ct} + z_{co}^2 \\bar C_{co} )}
{z_{ct}^2 \\bar D_{ct} \\bar C_{ct} + z_{co}^2 \\bar D_{co} \\bar C_{co} }
$$
References:
<NAME>.; <NAME>.; <NAME>.; <NAME>. Predicting Salt Permeability Coefficients
in Highly Swollen, Highly Charged Ion Exchange Membranes. ACS Appl. Mater. Interfaces 2017, 9 (4), 4044–4056.
<NAME>, <NAME>., <NAME>., <NAME>. Ion Diffusion Coefficients in
Ion Exchange Membranes: Significance of Counter-Ion Condensation. Macromolecules 2018, 51 (15), 5519–5529.
<NAME>. Limiting Laws and Counterion Condensation in Polyelectrolyte Solutions II.
Self-Diffusion of the Small Ions. J. Chem. Phys. 1969, 51 (3), 934–938.
"""
# check to make sure the signs of the input arguments are correct
if C_fix < 0:
if not (z_counter > 0 and z_co < 0):
raise Exception(
"Mismatch between signs of fixed charge, counter-ion, and co-ion. Aborting."
)
elif C_fix >= 0:
if not (z_counter < 0 and z_co > 0):
raise Exception(
"Mismatch between signs of fixed charge, counter-ion, and co-ion. Aborting."
)
# calculate the ratio of fixed charge to mobile salt concentration
X = abs(C_fix / Cs)
# calculate the critical value of the Manning parameter
xi_critical = 1 / abs(z_counter)
# select the appropriate activity coefficient expression based on the value
# of the Manning parameter
if xi >= xi_critical:
A = _A(
1 / abs(z_counter),
X / xi / abs(z_counter),
nu_counter=nu_counter,
nu_co=nu_co,
z_counter=z_counter,
z_co=z_co,
)
D_counter = (
(
(X / (z_counter ** 2 * nu_counter * xi) + 1)
/ (X / (abs(z_counter) * nu_counter) + 1)
)
* (1 - 1 / 3 * z_counter ** 2 * A)
* (vol_frac / (2 - vol_frac)) ** 2
)
elif xi < xi_critical:
A = _A(
xi, X, nu_counter=nu_counter, nu_co=nu_co, z_counter=z_counter, z_co=z_co
)
D_counter = (1 - 1 / 3 * z_counter ** 2 * A) * (vol_frac / (2 - vol_frac)) ** 2
D_co = (1 - 1 / 3 * z_co ** 2 * A) * (vol_frac / (2 - vol_frac)) ** 2
# return the correct value depending on the 'type' argument
if type == "counter":
return D_counter
elif type == "co":
return D_co
else:
raise Exception('Invalid "type" argument. Enter "counter" or "co"')
def _A(x, y, nu_counter=1, nu_co=1, z_counter=1, z_co=-1):
"""
Calculate the function 'A' required for determining diffusion coefficients
according to Manning's counter-ion condensation theory
Args:
x: float, y: float
nu_counter, nu_co : int, optional
Stoichiometric coefficients of the counter-ion and co-ion in the parent
salt. Defaults to 1 if not specified.
z_counter, z_co : int, optional
Net charge, including sign, of the counter-ion and co-ion in the parent
salt. Defaults to +1 and -1 if not specified. Note that the sign of
z_counter must be opposite to the sign of fixed_charge, while the sign
of z_co must match that of fixed_charge.
Returns:
float: The value of the summation function A(x, y).
Notes:
The function A(x,y) is given by [#]_ [#]_ [#]_
$$
\\sum_{m_1} \\sum_{m_2} \\bigg[ \\frac{\\pi}{x} (m_1^2+m_2^2)+|z_g|+ \\frac{(\\nu_g + \\nu_c)|z_g z_c|}{y} \\bigg]^{-2}
$$
When $\\xi$ is greater than the critical value, $x=\\frac{1}{|z_g|}$ and $y=\\frac{X}{\\xi |z_g|}$.
If $\\xi$ is lower than the critical value (counter-ion condensation does not occur), then
$x=\\xi$ and $y=X$.
References:
<NAME>.; <NAME>.; <NAME>.; <NAME>. Predicting Salt
Permeability Coefficients in Highly Swollen, Highly Charged Ion Exchange Membranes.
ACS Appl. Mater. Interfaces 2017, acsami.6b14902.
<NAME>, <NAME>, <NAME>, Specific co-ion sorption and diffusion properties
influence membrane permselectivity, J. Membr. Sci. 563 (2018) 492–504.
doi:10.1016/j.memsci.2018.06.010.
<NAME>.; <NAME>. Elucidating Conductivity-Permselectivity Tradeoffs
in Electrodialysis and Reverse Electrodialysis by Structure-Property Analysis
of Ion-Exchange Membranes. J. Membr. Sci. 2018.
"""
# approximate infinity as this number
n = int(50)
# here are the results using nominal values of xi=2, Cfix=5, Csalt = 0.5, monovalent salt
# _A(1,5,1,1,1,-1)
# n = 10, A=0.35044914820345047
# n = 25, A=0.352641714021430
# n = 50, A=0.35295440167760833
# n = 100, A=0.35303255182051047
# n = 250, A=0.35305443229027905
# n = 1000, A=0.353058339444618
# n = 10000, A=0.35305859636495845
# here are the results using the approximation found in 10.1016/j.memsci.2018.11.045 eq. 14c
# A = (1/z_counter) **
5.1.2
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
self._set_all_groups_to_plane(plane)
def set_all_new_groups_to_plane(self, plane):
"""
Set all groups that are not in any plane to this plane
:param plane: Plane Index to set all groups to
:type plane: int
.. versionadded:: 5.1.2
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
self._set_all_new_groups_to_plane(plane)
def set_def_plane(self, name):
"""
Set the default drawing plane.
:param name: Name
:type name: str
.. versionadded:: 5.1.2
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** 2D drawing to a 3D View will always be placed on the
default drawing plane. If no default drawing plane
has been set, the first valid plane in the view is
used as the default drawing plane.
"""
self._set_def_plane(name.encode())
def set_group_to_plane(self, plane, group):
"""
Set a group to a plane
:param plane: Plane Index to set all groups to
:param group: Name of group to set
:type plane: int
:type group: str
.. versionadded:: 5.1.2
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
self._set_group_to_plane(plane, group.encode())
def set_3dn(self, o3dn):
"""
Set the `GX3DN <geosoft.gxapi.GX3DN>` object for this view
:param o3dn: `GX3DN <geosoft.gxapi.GX3DN>` to set (NULL for 2D view)
:type o3dn: GX3DN
.. versionadded:: 5.1.2
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** To make the view a 2D view, set a `GX3DN <geosoft.gxapi.GX3DN>` of NULL.
"""
self._set_3dn(o3dn)
def get_3d_point_of_view(self, x, y, z, distance, declination, inclination):
"""
Get 3D point of view (values will be `rDUMMY <geosoft.gxapi.rDUMMY>` for 2D views)
:param x: X center
:param y: Y center
:param z: Z center
:param distance: Distance from center
:param declination: Declination, 0 to 360 CW from Y
:param inclination: Inclination, -90 to +90
:type x: float_ref
:type y: float_ref
:type z: float_ref
:type distance: float_ref
:type declination: float_ref
:type inclination: float_ref
.. versionadded:: 9.2
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
x.value, y.value, z.value, distance.value, declination.value, inclination.value = self._get_3d_point_of_view(x.value, y.value, z.value, distance.value, declination.value, inclination.value)
def set_3d_point_of_view(self, x, y, z, distance, declination, inclination):
"""
Set 3D point of view (no effect on 2D views)
:param x: X center
:param y: Y center
:param z: Z center
:param distance: Distance from center
:param declination: Declination, 0 to 360 CW from Y
:param inclination: Inclination, -90 to +90
:type x: float
:type y: float
:type z: float
:type distance: float
:type declination: float
:type inclination: float
.. versionadded:: 9.2
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
self._set_3d_point_of_view(x, y, z, distance, declination, inclination)
def set_plane_clip_ply(self, plane, pply):
"""
Set the Plane Clip Region
:param plane: Plane index
:param pply: Clip Region
:type plane: int
:type pply: GXPLY
.. versionadded:: 5.1.4
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** By default it is the View's Clip Region
"""
self._set_plane_clip_ply(plane, pply)
def set_plane_equation(self, plane, pitch, yaw, roll, x, y, z, sx, sy, sz):
"""
Set the equation of a plane
:param plane: Plane index
:param pitch: Rotation about X (Z toward Y +ve, between -360 and 360)
:param yaw: Rotation about Y (Z toward X +ve, between -360 and 360)
:param roll: Rotation about Z (Y toward X +ve, between -360 and 360)
:param x: X offset of plane
:param y: Y offset of plane
:param z: Z offset of plane
:param sx: X scale
:param sy: Y scale
:param sz: Z scale
:type plane: int
:type pitch: float
:type yaw: float
:type roll: float
:type x: float
:type y: float
:type z: float
:type sx: float
:type sy: float
:type sz: float
.. versionadded:: 5.1.2
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** For a grid with the "Y" axis giving elevation:
use rotations = (-90, 0, 0) for a section with azimuth 90 (E-W)
use rotations = (-90, 0, -90) for a section with azimuth 0 (N-S)
"""
self._set_plane_equation(plane, pitch, yaw, roll, x, y, z, sx, sy, sz)
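    # Illustrative sketch (not from the original source; assumes `view` is an open
    # 3D GXMVIEW with a plane at index 0). Following the rotation note above, an
    # E-W (azimuth 90) vertical section through the origin at unit scale would be:
    #   view.set_plane_equation(0, -90, 0, 0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0)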
def set_plane_surface(self, plane, surface):
"""
Set the surface image of a plane
:param plane: Plane index
:param surface: Optional surface image/grid name, can be empty
:type plane: int
:type surface: str
.. versionadded:: 5.1.2
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
self._set_plane_surface(plane, surface.encode())
def get_plane_surface(self, plane, surface):
"""
Get the surface image of a plane
:param plane: Plane index
:param surface: Optional surface image/grid name, can be empty
:type plane: int
:type surface: str_ref
.. versionadded:: 9.9
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
surface.value = self._get_plane_surface(plane, surface.value.encode())
def set_plane_surf_info(self, plane, sample, base, scale, min, max):
"""
Set the surface information
:param plane: Plane index
:param sample: Sample rate (>=1)
:param base: Base
:param scale: Scale
:param min: Min
:param max: Max
:type plane: int
:type sample: int
:type base: float
:type scale: float
:type min: float
:type max: float
.. versionadded:: 5.1.2
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
self._set_plane_surf_info(plane, sample, base, scale, min, max)
def get_plane_surf_info(self, plane, sample, base, scale, min, max):
"""
Get the surface information
:param plane: Plane index
:param sample: Sample rate (>=1)
:param base: Base
:param scale: Scale
:param min: Min
:param max: Max
:type plane: int
:type sample: int_ref
:type base: float_ref
:type scale: float_ref
:type min: float_ref
:type max: float_ref
.. versionadded:: 9.9
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
sample.value, base.value, scale.value, min.value, max.value = self._get_plane_surf_info(plane, sample.value, base.value, scale.value, min.value, max.value)
# 3D Rendering 2D
def define_plane_3d(self, center_x, center_y, center_z, x_vector_x, x_vector_y, x_vector_z, y_vector_x, y_vector_y, y_vector_z):
"""
Define a 2D drawing plane based on point and normal
:param center_x: Center point X
:param center_y: Center point Y
:param center_z: Center point Z
:param x_vector_x: X Vector X
:param x_vector_y: X Vector Y
:param x_vector_z: X Vector Z
:param y_vector_x: Y Vector X
:param y_vector_y: Y Vector Y
:param y_vector_z: Y Vector Z
:type center_x: float
:type center_y: float
:type center_z: float
:type x_vector_x: float
:type x_vector_y: float
:type x_vector_z: float
:type y_vector_x: float
:type y_vector_y: float
:type y_vector_z: float
.. versionadded:: 5.1.6
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** 2D rendering commands are translated to 3D commands
based on the plane.
"""
self._define_plane_3d(center_x, center_y, center_z, x_vector_x, x_vector_y, x_vector_z, y_vector_x, y_vector_y, y_vector_z)
def define_viewer_axis_3d(self, center_x, center_y, center_z, dir_point_x, dir_point_y, dir_point_z):
"""
Define a 2D drawing plane based on the user's view, oriented
around the given vector.
:param center_x: Center point X
:param center_y: Center point Y
:param center_z: Center point Z
:param dir_point_x: Directional Point X
:param dir_point_y: Directional Point Y
:param dir_point_z: Directional Point Z
:type center_x: float
:type center_y: float
:type center_z: float
:type dir_point_x: float
:type dir_point_y: float
:type dir_point_z: float
.. versionadded:: 5.1.6
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
self._define_viewer_axis_3d(center_x, center_y, center_z, dir_point_x, dir_point_y, dir_point_z)
def define_viewer_plane_3d(self, center_x, center_y, center_z):
"""
Define a 2D drawing plane based on the user's view.
:param center_x: Center point X
:param center_y: Center point Y
:param center_z: Center point Z
:type center_x: float
:type center_y: float
:type center_z: float
.. versionadded:: 5.1.6
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** The plane always faces the viewer. Otherwise this is
identical to the previous method.
"""
self._define_viewer_plane_3d(center_x, center_y, center_z)
# 3D Snapshots
def get_3d_snapshots(self):
"""
Get the list of 3D snapshots in a 3D view.
:returns: `GXLST <geosoft.gxapi.GXLST>` object
:rtype: GXLST
.. versionadded:: 9.9
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** Returns name/guid pairs.
"""
ret_val = self._get_3d_snapshots()
return GXLST(ret_val)
def restore_3d_snapshot(self, guid):
"""
Restore 3D view to specific snapshot state.
:param guid: Snapshot GUID
:type guid: str
.. versionadded:: 9.9
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
self._restore_3d_snapshot(guid.encode())
def capture_3d_snapshot(self, name, description, light_weight, guid):
"""
Capture current 3D view state to a snapshot.
:param name: Snapshot name
:param description: Snapshot description
:param light_weight: Is this a light weight snapshot, i.e. just captures view orientation and type and not group visibility/clipping etc.
:param guid: Snapshot GUID
:type name: str
:type description: str
:type light_weight: bool
:type guid: str_ref
.. versionadded:: 9.9
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
restated restatement restatements
restates restating restaurateur restaurateurs restfully restfulness
restively restiveness restock restocked restocking restocks
restorative restoratives restorer restorers restrictively restroom
restrooms restructurings restudied restudies restudy restudying
resupplied resupplies resupply resupplying resurgent resuscitator
resuscitators retake retaken retakes retaking retaliatory retardant
retardants retardation retell retelling retells retentive
retentiveness rethinking rethinks rethought retinal retinue retinues
retiree retirees retold retook retool retooled retooling retools
retouch retouched retouches retouching retractable retrain retrained
retraining retrains retread retreaded retreading retreads retrench
retrenched retrenches retrenching retrenchment retrenchments retrial
retrials retributive retried retrievable retroactively retrod
retrodden retrofit retrofits retrofitted retrofitting retrograded
retrogrades retrograding retrogress retrogressed retrogresses
retrogressing retrogression retrogressive retrorocket retrorockets
retrospection retrying returnee returnees retyped retypes retyping
reunification reunified reunifies reunify reunifying reupholster
reupholstered reupholstering reupholsters reusable revaluation
revaluations revalue revalued revalues revaluing revealings reveille
reverend reverends reverential revilement reviler revilers revivalist
revivalists revivification revivified revivifies revivify revivifying
revocable revocation revocations revoltingly revolutionist
revolutionists rewindable rewinding rewinds rewire rewired rewires
rewiring reword reworded rewording rewords reworked reworking reworks
rewound rhapsodic rhea rheas rheostat rheostats rhetorically
rhetorician rhetoricians rheum rheumatic rheumatics rheumier rheumiest
rheumy rhinestone rhinestones rhizome rhizomes rho rhodium rhomboid
rhomboids rhombus rhombuses rhythmical rhythmically ribald ribaldry
riboflavin rick ricked rickets ricking ricks ricotta ridgepole
ridgepoles ridiculousness riff riffed riffing riffle riffled riffles
riffling riffraff riffs rifleman riflemen rightfulness rightist
rightists rigidness rigmarole rigmaroles rill rilled rilling rills
rime rimed rimes riming ringer ringers ringmaster ringmasters ringside
ripely riposted ripostes riposting rippers ripsaw ripsaws risible
risibles riskiness ritualism ritualistic ritually ritzier ritziest
ritzy riven riverbed riverbeds riverfront riverfronts riverside
riversides rivulet rivulets roadbed roadbeds roadhouse roadhouses
roadkill roadrunner roadrunners roadshow roadster roadsters roadway
roadways roadwork roadworthy roamer roamers roan roans roaster
roasters robotic robotics robustly rocketry rockiness rococo roebuck
roebucks roentgen roentgens roger rogered rogering rogers roguery
roguishly roil roiled roiling roils roister roistered roisterer
roisterers roistering roisters rollback rollbacks rollerskating
rollick rollicked rollicking rollicks romaine romanticism romanticist
romanticists romper rompers rood roods rooftop rooftops rookeries
rookery roomer roomers roomful roomfuls roominess rootless roseate
rosebud rosebuds rosebush rosebushes rosette rosetted rosettes
rosetting rosewood rosewoods rosily rosin rosined rosiness rosining
rosins rotational rotogravure rotogravures rottenness rotundity
rotundness roue roues roughneck roughnecked roughnecking roughnecks
roughshod roundelay roundelays roundhouse roundhouses roundish roundly
roundup roundups roundworm roundworms roustabout roustabouts rove
roved rover rovers roves roving rowdyism rowel rowels rower rowers
royalist royalists rs rubberier rubberiest rubberneck rubbernecked
rubbernecking rubbernecks rubbery rubbishy rubdown rubdowns rube
rubella rubes rubicund rubrics rucksacks ruddiness rudiment rudiments
ruefully ruggedly ruggedness ruination ruinously rumba rumbaed
rumbaing rumbas rumblings ruminant ruminants rumination ruminations
rumpus rumpuses runabout runabouts runaround runarounds runnel runnels
runoff runoffs rupee rupees rusk rusks russet russeted russets
russetting rustically rusticity rustiness rustproof rustproofed
rustproofing rustproofs rutabaga rutabagas s sabbaticals sable sabled
sables sabling saccharin saccharine sacerdotal sachem sachems sachet
sachets sackcloth sackful sackfuls sacramental sacredly sacredness
sacristan sacristans sacristies sacristy sacrosanct saddlebag
saddlebags sadistically safeness safflower safflowers sagacious
sagacity sago saguaro saguaros sahib sahibs sailboard sailboarded
sailboarding sailboards sailcloth sailfish sailfishes sainthood
saintliness saith saiths salaam salaamed salaaming salaams salacious
salaciously salaciousness salamander salamanders salesclerk
salesclerks salesgirl salesgirls salesmanship saline salines salinity
salivary salivation sallied sallies sallying salmonella salmonellae
salsa salsas saltcellar saltcellars saltine saltines saltiness
saltshaker saltshakers saltwater salubrious salutary salvageable
salver salvers salvo salvos samba sambaed sambaing sambas samovar
samovars sampan sampans samplers samurai sanctification
sanctimoniously sanctum sanctums sandalwood sandbank sandbanks sandbar
sandbars sandblast sandblasted sandblaster sandblasters sandblasting
sandblasts sandbox sandboxes sandcastle sandcastles sander sanders
sandhog sandhogs sandiness sandlot sandlots sandpiper sandpipers
sanely sangfroid sanguinary sanguine sanguined sanguines sanguining
sans sapience sapient sappier sappiest sappy saprophyte saprophytes
sapsucker sapsuckers sarcoma sarcomas sarcophagi sarcophagus sardonic
sardonically sarong sarongs sarsaparilla sarsaparillas sartorial
sartorially sashay sashayed sashaying sashays sass sassafras
sassafrases sassed sasses sassing satanically satanism sate sated
sateen sates satiate satiated satiates satiating satiety sating
satinwood satinwoods satiny satirically satrap satraps saturnine satyr
satyrs saucily sauciness savageness savanna savannas savant savants
savers sawhorse sawhorses sawmill sawmills sawyer sawyers sax saxes
saxophonist saxophonists scabbard scabbards scabbier scabbiest scabby
scabies scabrous scad scads scalawag scalawags scaldings scalene
scallion scallions scalper scalpers scam scammed scamming scamp scampi
scamps scams scandalmonger scandalmongers scandalously scansion
scantily scantiness scapula scapulae scarab scarabs scarceness
scarified scarifies scarify scarifying scat scathingly scatological
scats scatted scatting scavenge scavenged scavenges scavenging
scenically schedulers schema schematic schematically schematics
schemings scherzo scherzos schism schismatic schismatics schisms
schist schizoid schizoids schizophrenics schlemiel schlemiels schlep
schlepped schlepping schleps schlock schlockier schlockiest schlocky
schmaltz schmaltzier schmaltziest schmaltzy schmooze schmoozed
schmoozes schmoozing schmuck schmucks schnapps schnauzer schnauzers
scholastically schoolbook schoolbooks schooldays schoolgirl
schoolgirls schoolhouse schoolhouses schoolmarm schoolmarms
schoolmaster schoolmasters schoolmate schoolmates schoolmistress
schoolmistresses schoolroom schoolrooms schoolwork schoolyard
schoolyards schuss schussed schusses schussing schwa schwas sciatic
sciatica scimitar scimitars scintilla scintillas scintillate
scintillated scintillates scintillating scintillation scion scions
sclerosis sclerotic sclerotics scofflaw scofflaws scoldings scoliosis
sconce sconces scone scones scorcher scorchers scoreboard scoreboards
scorecard scorecards scoreless scorers scornfully scotched scotches
scotching scoutmaster scoutmasters scow scows scrabbled scrabbles
scrabbling scragglier scraggliest scraggly scrambler scramblers
scraper scrapers scrappier scrappiest scrappy scratchiness screechier
screechiest screechy screenings screenplay screenplays screenwriter
screenwriters screwball screwballs scribbler scribblers scrimmage
scrimmaged scrimmages scrimmaging scrimp scrimped scrimping scrimps
scrimshaw scrimshawed scrimshawing scrimshaws scrip scrips scriptural
scrod scrofula scrooge scrooges scrota scrotum scrounger scroungers
scrubber scrubbers scrubbier scrubbiest scrubby scrumptious scrunch
scrunched scrunches scrunching scuba scubaed scubaing scubas scud
scudded scudding scuds scull sculled sculleries scullery sculling
scullion scullions sculls sculpt sculpted sculpting sculpts sculptural
scumbag scumbags scummier scummiest scummy scupper scuppered
scuppering scuppers scurf scurfier scurfiest scurfy scurrilously
scurvier scurviest scurvy scuttlebutt scuzzier scuzziest scuzzy seabed
seabeds seabird seabirds seaboard seaboards seacoast seacoasts
seafarer seafarers seagoing sealant sealants sealer sealers sealskin
seamanship seamier seamiest seamless seamy seance seances seaplane
seaplanes searcher searchers searchingly seascape seascapes seasonally
seaward seawards seaway seaways seaworthier seaworthiest seaworthy
sebaceous secessionist secessionists seclusive secondhand secretariat
secretariats secretively secretiveness secs sectarian sectarianism
sectarians sectional sectionalism sectionals secularism sedately
sedation sedge sedimentation sedition seditious seducer seducers
seductively sedulous seediness seedless seeings seeker seekers
seemlier seemliest seemliness seemly seers seersucker seethings
segregationist segregationists segue segued segueing segues seismic
seismically seismograph seismographic seismographs seismologist
seismologists seismology selectivity selectman selectmen selenium
selfishly selfless selflessly selflessness selfsame sellout sellouts
seltzer selvage selvaged selvages selvaging semaphore semaphored
semaphores semaphoring semi semiannual semiautomatic semiautomatics
semicircular semiconscious semifinalist semifinalists semimonthlies
semimonthly seminal seminarian seminarians semipermeable semiprecious
semiprivate semiprofessional semiprofessionals semis semiskilled
semitone semitones semitrailer semitrailers semitropical semiweeklies
semiweekly senatorial senders senna sensationalist sensationalists
sensationally senselessly senselessness sensitively sensitiveness
sensually sensuously sensuousness sententious sentimentalism
sentimentalist sentimentalists sentimentally sentinel sentinels sepal
sepals separable separatism separatist separatists sepia sepsis septa
septet septets septic septics septuagenarian septuagenarians septum
sepulchral sequencers sequester sequestered sequestering sequesters
sequestration sequestrations sequined sequoia sequoias seraglio
seraglios serape serapes seraph seraphic seraphs sere sered
serendipitous serendipity serenely sereneness serer seres serest serf
serfdom serfs serge serially sering serous serpentine serrated serried
servicewoman servicewomen servility servings servo servomechanism
servomechanisms servos sesame sesames settee settees setup setups
seventieth seventieths severally sewerage sexagenarian sexagenarians
sexier sexiest sexiness sexists sexless sexpot sexpots sextant
sextants sextet sextets sexton sextons sh shabbiness shad shadiness
shadings shadowbox shadowboxed shadowboxes shadowboxing shads shag
shagged shagginess shagging shags shah shahs shakedown shakedowns
shaker shakers shakeup shakeups shakily shakiness shale shallot
shallots shallowness shalt shaman shamans shambled shambling
shamefaced shamefulness shamelessly shandy shanghai shanghaied
shanghaiing shanghais shank shanks shantung shantytown shantytowns
shapeless shapelessly shapelessness shapeliness shard shards
sharecropper sharecroppers sharkskin sharpers sharpshooter
sharpshooters shat shatterproof shavings shearer shearers sheathings
shebang shebangs sheepdog sheepdogs sheepfold sheepfolds sheepishness
sheepskin sheepskins sheeting sheikdom sheikdoms shekel shekels
shellac shellacked shellacking shellacs shenanigan shenanigans
shepherdess shepherdesses shibboleth shibboleths shiftily shiftiness
shiftlessness shill shilled shillelagh shillelaghs shilling shillings
shills shim shimmed shimmery shimmied shimmies shimming shimmy
shimmying shims shinbone shinbones shindig shindigs shiner shiners
shininess shinnied shinnies shinny shinnying shipboard shipboards
shipbuilder shipbuilders shipbuilding shipload shiploads shipmate
shipmates shipper shippers shipwright shipwrights shipyard shipyards
shires shirker shirkers shirr shirred shirring shirrings shirrs
shirtsleeve shirtsleeves shirttail shirttails shirtwaist shirtwaists
shit shits shittier shittiest shitting shitty shivery shocker shockers
shockingly shockproof shoddily shoddiness shoehorn shoehorned
shoehorning shoehorns shoemaker shoemakers shoeshine shoeshines shogun
shoguns shooter shooters shootings shootout shootouts shoplift
shoplifted shoplifting shoplifts shoptalk shopworn shoreline
shorelines shortbread shortcake shortcakes shortchange shortchanged
shortchanges shortchanging shortcut shortcuts shortcutting shortfalls
shorthorn shorthorns shortish shortsighted shortsightedly
shortsightedness shortstop shortstops shortwave shortwaves shovelful
shovelfuls showbiz showboat showboated showboating showboats showerier
showeriest showery showgirl showgirls showily showiness showmanship
showoff showoffs showpiece showpieces showplace showplaces showroom
showrooms shredder shredders shrewdly shrewish shrift shrike shrikes
shrillness shrilly shrinkable shrive shrived shriven shrives shriving
shrubbier shrubbiest shrubby shtick shticks shuckses shuffleboard
shuffleboards shuffler shufflers shush shushed shushes shushing
shutdowns shuteye shutout shutouts shutterbug shutterbugs shuttlecock
shuttlecocked shuttlecocking shuttlecocks shyly shyster shysters
sibilant sibilants sibyl sibyls sickbed sickbeds sickeningly sidearm
sidearms sidebar sidebars sideboard sideboards sideburns sidecar
sidecars sidekick sidekicks sidelight sidelights sidereal sidesaddle
sidesaddles sidesplitting sidestroke sidestroked sidestrokes
sidestroking sideswipe sideswiped sideswipes sideswiping sidewall
sidewalls sierras sifter sifters sightings sightread sightseeing
sightseer sightseers signally signatories signatory signboard
signboards signers signet signets signification significations
signings silage silaged silages silaging silencer silencers silica
silicate silicates siliceous silicone silicosis silkier silkies
silkiest silkworm silkworms silky silverfish silverfishes simian
simians simpatico simper simpered simpering | |
# -*- coding: utf-8 -*-
#
# inventory/categories/api/tests/test_categories_api.py
#
from django.contrib.auth import get_user_model
from rest_framework.reverse import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from inventory.categories.models import Category
from inventory.common.api.tests.base_test import BaseTest
from inventory.projects.models import Membership
UserModel = get_user_model()
class TestCategoryAPI(BaseTest, APITestCase):
DEFAULT_USER = UserModel.ROLE_MAP[UserModel.DEFAULT_USER]
PROJECT_USER = Membership.ROLE_MAP[Membership.PROJECT_USER]
def __init__(self, name):
super().__init__(name)
def setUp(self):
super().setUp()
# Create an InventoryType and Project.
self.in_type = self._create_inventory_type()
members = [
{'user': self.user, 'role_text': self.PROJECT_USER}
]
self.project = self._create_project(self.in_type, members=members)
kwargs = {'public_id': self.project.public_id}
self.project_uri = reverse('project-detail', kwargs=kwargs)
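    # The request_data dicts built in the tests below appear to be keyed by role
    # abbreviation: 'SU' superuser, 'AD' administrator, 'DU' default user,
    # 'POW' project owner, 'PMA' project manager, 'PDU' project user -- one entry
    # per role exercised by the BaseTest permission helpers.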
def get_category_field(self, uri, field):
"""
Get a category and return the value of the provided field.
"""
response = self.client.get(uri, format='json')
return response.data.get(field)
def test_GET_category_list_with_invalid_permissions(self):
"""
Test the category_list endpoint with no permissions.
"""
#self.skipTest("Temporarily skipped")
method = 'get'
category = self._create_category(self.project, "Test Root Category")
uri = reverse('category-list')
self._test_users_with_invalid_permissions(uri, method)
self._test_project_users_with_invalid_permissions(uri, method)
def test_GET_category_list_with_valid_permissions(self):
"""
Test the category_list endpoint with valid permissions.
"""
#self.skipTest("Temporarily skipped")
method = 'get'
category = self._create_category(self.project, "Test Root Category")
uri = reverse('category-list')
self._test_users_with_valid_permissions(
uri, method, default_user=False)
self._test_project_users_with_valid_permissions(uri, method)
def test_POST_category_list_with_invalid_permissions(self):
"""
Test that a POST to category_list fails with invalid permissions.
"""
#self.skipTest("Temporarily skipped")
method = 'post'
uri = reverse('category-list')
data = {}
su = data.setdefault('SU', {})
su['name'] = 'TestCategory-01'
su['project'] = self.project_uri
data.setdefault('AD', su.copy())
data.setdefault('DU', su.copy())
self._test_users_with_invalid_permissions(
uri, method, request_data=data)
data.setdefault('POW', su.copy())
data.setdefault('PMA', su.copy())
data.setdefault('PDU', su.copy())
self._test_project_users_with_invalid_permissions(
uri, method, request_data=data)
def test_POST_category_list_with_valid_permissions(self):
"""
Test that a POST to category_list passes with valid permissions.
"""
#self.skipTest("Temporarily skipped")
method = 'post'
uri = reverse('category-list')
data = {}
su = data.setdefault('SU', {})
su['name'] = 'TestCategory-01'
su['project'] = self.project_uri
ad = data.setdefault('AD', su.copy())
ad['name'] = 'TestCategory-02'
du = data.setdefault('DU', su.copy())
du['name'] = 'TestCategory-03'
self._test_users_with_valid_permissions(
uri, method, request_data=data)
pow = data.setdefault('POW', su.copy())
pow['name'] = 'TestCategory-04'
pma = data.setdefault('PMA', su.copy())
pma['name'] = 'TestCategory-05'
pdu = data.setdefault('PDU', su.copy())
pdu['name'] = 'TestCategory-06'
self._test_project_users_with_valid_permissions(
uri, method, project_user=False, request_data=data)
def test_OPTIONS_category_list_with_invalid_permissions(self):
"""
Test that the method OPTIONS fails with invalid permissions.
"""
#self.skipTest("Temporarily skipped")
method = 'options'
uri = reverse('category-list')
self._test_users_with_invalid_permissions(uri, method)
self._test_project_users_with_invalid_permissions(uri, method)
def test_OPTIONS_category_list_with_valid_permissions(self):
"""
Test that the method OPTIONS brings back the correct data.
"""
method = 'options'
uri = reverse('category-list')
self._test_users_with_valid_permissions(uri, method)
self._test_project_users_with_valid_permissions(uri, method)
def test_GET_category_detail_with_invalid_permissions(self):
"""
Test that a GET on the category_detail fails with invalid permissions.
"""
#self.skipTest("Temporarily skipped")
category = self._create_category(self.project, "Test Root Category")
uri = reverse('category-detail',
kwargs={'public_id': category.public_id})
method = 'get'
self._test_users_with_invalid_permissions(uri, method)
self._test_project_users_with_invalid_permissions(uri, method)
def test_GET_category_detail_with_valid_permissions(self):
"""
Test that a GET to category_detail passes with valid permissions.
"""
#self.skipTest("Temporarily skipped")
category = self._create_category(self.project, "Test Root Category")
uri = reverse('category-detail',
kwargs={'public_id': category.public_id})
method = 'get'
self._test_users_with_valid_permissions(uri, method)
self._test_project_users_with_valid_permissions(uri, method)
def test_PUT_category_detail_with_invalid_permissions(self):
"""
Test that a PUT to category_detail fails with invalid permissions.
"""
#self.skipTest("Temporarily skipped")
category = self._create_category(self.project, "Test Root Category")
uri = reverse('category-detail',
kwargs={'public_id': category.public_id})
method = 'put'
data = {}
su = data.setdefault('SU', {})
su['name'] = 'TestCategory-01'
su['project'] = self.project_uri
data.setdefault('AD', su.copy())
data.setdefault('DU', su.copy())
self._test_users_with_invalid_permissions(
uri, method, request_data=data)
data.setdefault('POW', su.copy())
data.setdefault('PMA', su.copy())
data.setdefault('PDU', su.copy())
self._test_project_users_with_invalid_permissions(
uri, method, request_data=data)
def test_PUT_category_detail_with_valid_permissions(self):
"""
Test that a PUT to category_detail passes with valid permissions.
"""
#self.skipTest("Temporarily skipped")
category = self._create_category(self.project, "Test Root Category")
uri = reverse('category-detail',
kwargs={'public_id': category.public_id})
method = 'put'
data = {}
su = data.setdefault('SU', {})
su['name'] = 'TestCategory-01'
su['project'] = self.project_uri
ad = data.setdefault('AD', su.copy())
ad['name'] = 'TestCategory-02'
du = data.setdefault('DU', su.copy())
du['name'] = 'TestCategory-03'
self._test_users_with_valid_permissions(
uri, method, request_data=data)
pow = data.setdefault('POW', su.copy())
pow['name'] = 'TestCategory-04'
pma = data.setdefault('PMA', su.copy())
pma['name'] = 'TestCategory-05'
pdu = data.setdefault('PDU', su.copy())
pdu['name'] = 'TestCategory-06'
self._test_project_users_with_valid_permissions(
uri, method, project_user=False, request_data=data)
def test_PATCH_category_detail_with_invalid_permissions(self):
"""
Test that a PATCH to category_detail fails with invalid permissions.
"""
#self.skipTest("Temporarily skipped")
category = self._create_category(self.project, "Test Root Category")
uri = reverse('category-detail',
kwargs={'public_id': category.public_id})
method = 'patch'
data = {}
su = data.setdefault('SU', {})
su['name'] = 'TestCategory-01'
su['project'] = self.project_uri
data.setdefault('AD', su.copy())
data.setdefault('DU', su.copy())
self._test_users_with_invalid_permissions(
uri, method, request_data=data)
data.setdefault('POW', su.copy())
data.setdefault('PMA', su.copy())
data.setdefault('PDU', su.copy())
self._test_project_users_with_invalid_permissions(
uri, method, request_data=data)
def test_PATCH_category_detail_with_valid_permissions(self):
"""
Test that a PATCH to category_detail passes with valid permissions.
"""
#self.skipTest("Temporarily skipped")
category = self._create_category(self.project, "Test Root Category")
uri = reverse('category-detail',
kwargs={'public_id': category.public_id})
method = 'patch'
data = {}
su = data.setdefault('SU', {})
su['name'] = 'TestCategory-01'
su['project'] = self.project_uri
ad = data.setdefault('AD', {})
ad['name'] = 'TestCategory-02'
du = data.setdefault('DU', {})
du['name'] = 'TestCategory-03'
self._test_users_with_valid_permissions(
uri, method, request_data=data)
pow = data.setdefault('POW', {})
pow['name'] = 'TestCategory-04'
pma = data.setdefault('PMA', {})
pma['name'] = 'TestCategory-05'
pdu = data.setdefault('PDU', {})
pdu['name'] = 'TestCategory-06'
self._test_project_users_with_valid_permissions(
uri, method, project_user=False, request_data=data)
def test_DELETE_category_detail_with_invalid_permissions(self):
"""
Test that a DELETE to category_detail fails with invalid permissions.
"""
#self.skipTest("Temporarily skipped")
method = 'delete'
category = self._create_category(self.project, "Test Root Category")
uri = reverse('category-detail',
kwargs={'public_id': category.public_id})
self._test_users_with_invalid_permissions(uri, method)
self._test_project_users_with_invalid_permissions(uri, method)
def test_DELETE_category_detail_with_valid_permissions(self):
"""
Test that a DELETE to category_detail passes with valid permissions.
"""
#self.skipTest("Temporarily skipped")
method = 'delete'
# Test SUPERUSER
category = self._create_category(self.project, "Test Root Category")
uri = reverse('category-detail',
kwargs={'public_id': category.public_id})
self._test_superuser_with_valid_permissions(uri, method)
self._test_valid_GET_with_errors(uri)
# Test ADMINISTRATOR
category = self._create_category(self.project, "Test Root Category")
uri = reverse('category-detail',
kwargs={'public_id': category.public_id})
self._test_administrator_with_valid_permissions(uri, method)
self._test_valid_GET_with_errors(uri)
# Test DEFAULT_USER
## This is an invalid test since the DEFAULT_USER has no access.
# Test PROJECT_OWNER
category = self._create_category(self.project, "Test Root Category")
uri = reverse('category-detail',
kwargs={'public_id': category.public_id})
self._test_project_owner_with_valid_permissions(uri, method)
self._test_valid_GET_with_errors(uri)
# Test PROJECT_MANAGER
category = self._create_category(self.project, "Test Root Category")
uri = reverse('category-detail',
kwargs={'public_id': category.public_id})
self._test_project_manager_with_valid_permissions(uri, method)
self._test_valid_GET_with_errors(uri)
# Test PROJECT_USER
## This is an invalid test since the PROJECT_USER has no access.
def test_OPTIONS_category_detail_with_invalid_permissions(self):
"""
Test that the method OPTIONS fails with invalid permissions.
"""
#self.skipTest("Temporarily skipped")
method = 'options'
category = self._create_category(self.project, "Test Root Category")
uri = reverse('category-detail',
kwargs={'public_id': category.public_id})
self._test_users_with_invalid_permissions(uri, method)
self._test_project_users_with_invalid_permissions(uri, method)
def test_OPTIONS_category_detail_with_valid_permissions(self):
"""
Test that the method OPTIONS brings back the correct data.
"""
method = 'options'
category = self._create_category(self.project, "Test Root Category")
uri = reverse('category-detail',
kwargs={'public_id': category.public_id})
self._test_users_with_valid_permissions(uri, method)
self._test_project_users_with_valid_permissions(uri, method)
def test_create_category_twice_to_same_parent(self):
"""
Test that a category is not created twice with the same composite key.
"""
#self.skipTest("Temporarily skipped")
# Create Category one.
uri = reverse('category-list')
new_data = {'name': 'TestCategory-1',
'project': self.project_uri}
response = self.client.post(uri, new_data, format='json')
msg = "Response: {} should be {}, content: {}".format(
response.status_code, status.HTTP_201_CREATED, response.data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED, msg)
# Create Category two.
parent_uri = response.data.get('href')
uri = reverse('category-list')
new_data = {'name': 'TestCategory-2',
'parent': parent_uri,
'project': self.project_uri}
response = self.client.post(uri, new_data, format='json')
msg = "Response: {} should be {}, content: {}".format(
response.status_code, status.HTTP_201_CREATED, response.data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED, msg)
# Create Category two again--should fail.
uri = reverse('category-list')
new_data = {'name': 'TestCategory-2',
'parent': parent_uri,
'project': self.project_uri}
response = self.client.post(uri, new_data, format='json')
msg = "Response: {} should be {}, content: {}".format(
response.status_code, status.HTTP_400_BAD_REQUEST, response.data)
self.assertEqual(
response.status_code, status.HTTP_400_BAD_REQUEST, msg)
def test_delimiter_in_category_name(self):
"""
Test that the delimiter is not in the category name.
"""
#self.skipTest("Temporarily skipped")
# Create Category one.
uri = reverse('category-list')
new_data = {'name': 'Test{}Category-1'.format(
Category.DEFAULT_SEPARATOR),
'project': self.project_uri}
response = self.client.post(uri, new_data, format='json')
msg = "Response: {} should be {}, content: {}".format(
response.status_code, status.HTTP_400_BAD_REQUEST, response.data)
self.assertEqual(
response.status_code, status.HTTP_400_BAD_REQUEST, msg)
self.assertTrue(self._has_error(response, 'name'), msg)
self._test_errors(response, tests={
'name': u"A category name cannot ",
})
def test_category_is_not_parent(self):
"""
Test that this category does not exist in the current tree.
"""
#self.skipTest("Temporarily skipped")
# Create three categories.
name = "Test Category 1"
cat0 = self._create_category(self.project, name=name)
name = "Test Category 2"
cat1 = self._create_category(self.project, name=name, parent=cat0)
name = "Test Category 3"
cat2 = self._create_category(self.project, name=name, parent=cat1)
# Try adding 'Test Category 2' to the tree using the API.
uri = reverse('category-list')
cat2_uri = reverse('category-detail',
kwargs={'public_id': cat2.public_id})
new_data = {'name': "Test Category 2",
'project': self.project_uri,
'parent': cat2_uri}
response = self.client.post(uri, new_data, format='json')
msg = "Response: {} should be {}, content: {}".format(
response.status_code, status.HTTP_400_BAD_REQUEST, response.data)
self.assertEqual(
response.status_code, status.HTTP_400_BAD_REQUEST, msg)
self.assertTrue(self._has_error(response, 'name'), msg)
self._test_errors(response, tests={
'name': u"A category in this tree ",
})
def test_root_level_category_exists(self):
"""
Test that there are no root level categories with this name that
already exist for this owner.
"""
#self.skipTest("Temporarily skipped")
# Create a category.
name = "Duplicate Name"
cat = self._create_category(self.project, name=name)
# Create a category through the API.
new_data = {'name': name,
'project': self.project_uri}
uri = reverse('category-list')
response = self.client.post(uri, new_data, format='json')
msg
# import XML libraries
import xml.etree.ElementTree as ET
import xml.dom.minidom as minidom
import HTMLParser
# Function to create an XML structure
def make_problem_XML(
problem_title='Missing title',
problem_text=False,
label_text='Enter your answer below.',
description_text=False,
answers=[{'correctness': 'true', 'answer': 'Answers are missing'}],
solution_text = '<p>Missing solution</p>',
options = {'problem_type': 'MC'}):
"""
make_problem_XML: a function to create an XML object for an edX problem.
The actual work is done by other functions below,
make_choice_problem_XML() and make_line_problem_XML(),
which use the arguments as listed below.
Arguments:
- problem_title: The title of the problem. Required.
- problem_text: The extended text for the problem, including paragraph tags and other HTML.
This argument is genuinely optional.
- label_text: The action statement for the problem. Should be a single line of text.
This is the instruction for the student and is required.
- description_text: Additional info, like "check all that apply" for those kinds of problems.
This argument is genuinely optional.
- answers: A list of dictionaries as follows:
For Numerical and Text problems:
[{'answer': a correct answer}, {'answer': another correct answer}, {etc}]
For MC and Checkbox problems, each item in the list will become an answer choice:
[{'correctness': 'true' or 'false', 'answer': 'the text for this option'}, {etc}, {etc}]
The text for MC and Checkbox can include LaTeX and images. No hints currently included.
- solution_text: The extended text for the solution, including paragraph tags and other HTML.
- options: A dictionary of options. Currently accepts:
"problem_type", which can be...
"MC": Multiple-choice problems
"Checkbox": Select-all-that-apply. Does partial credit by default.
"Numerical": Numerical problems, with a 5% tolerance
"Text": Text-entry problem
"AnyText": A custom-grader problem that marks any text entered as correct
"showanswer",
"weight",
"rerandomize", and
"max_attempts",
which take the typical values for those arguments in edX
"tolerance" for numerical problems.
Please send a decimal and we'll interpret it as a percentage. 0.1 = 10% tolerance.
Later this may include other problem types, partial credit info, etc.
The default values for these arguments are used for troubleshooting.
Return: an XML element tree.
"""
# Create the tree object with its root element
problem_tag = ET.Element('problem')
problem_tag.set('display_name', problem_title)
problem_tree = ET.ElementTree(problem_tag)
# Add a script tag so our problems can re-render properly
# with a minimum of download burden.
# Relies on having Prism.js available.
script_tag = ET.SubElement(problem_tag, 'script')
script_tag.set('type', 'text/javascript')
script_raw = """
$(document).ready(function(){
console.log('highlighting MATLAB syntax');
$('.language-matlab').each(function(e){
window.Prism.highlightAllUnder(this);
});
});
"""
script_tag.text = script_raw
# Set other problem options. For partial documentation see:
# https://edx.readthedocs.io/projects/edx-open-learning-xml/en/latest/components/problem-components.html
if 'showanswer' in options:
problem_tag.set('showanswer', options['showanswer'])
if 'weight' in options:
problem_tag.set('weight', options['weight'])
if 'rerandomize' in options:
problem_tag.set('rerandomize', options['rerandomize'])
if 'max_attempts' in options:
problem_tag.set('max_attempts', options['max_attempts'])
# Add the problem text
if problem_text is not False:
problem_tag.text = problem_text
# Pass the tree to functions that build the rest of the problem XML.
if options['problem_type'] == 'Numerical' or options['problem_type'] == 'Text':
return make_line_problem_XML(
problem_tree, problem_tag, problem_text, label_text, description_text,
answers, solution_text, options
)
elif options['problem_type'] == 'MC' or options['problem_type'] == 'Checkbox':
return make_choice_problem_XML(
problem_tree, problem_tag, problem_text, label_text, description_text,
answers, solution_text, options
)
elif options['problem_type'] == 'AnyText':
return make_anytext_problem_XML(
problem_tree, problem_tag, problem_text, label_text, description_text,
answers, solution_text, options
)
else:
# Leaving out error messages until we decide which version of Python we're using.
# print 'The ' + str(options['problem_type']) + ' problem type is not currently supported.'
return False
# Function to create the XML structure for MC and checkbox problems
# Parameters are described under make_problem_XML() above.
def make_choice_problem_XML(
problem_tree,
problem_tag,
problem_text=False,
label_text='Enter your answer below.',
description_text=False,
answers=[{'correctness': 'true', 'answer': 'Answers are missing'}],
solution_text = '<p>Missing solution</p>',
options = {'problem_type': 'MC'}):
# Create the structure for the problem.
if options['problem_type'] == 'MC':
type_tag = ET.SubElement(problem_tag, 'multiplechoiceresponse')
type_tag.set('type','MultipleChoice')
elif options['problem_type'] == 'Checkbox':
type_tag = ET.SubElement(problem_tag, 'choiceresponse')
type_tag.set('partial_credit', 'EDC')
# Needs some expansion for various extra credit options.
if 'extra_credit' in options:
type_tag.set('extra_credit', options['extra_credit'])
label_tag = ET.SubElement(type_tag, 'label')
label_tag.text = label_text
if options['problem_type'] == 'Checkbox' and description_text is False:
description_text = 'Check all that apply.'
if description_text is not False:
description_tag = ET.SubElement(type_tag, 'description')
description_tag.text = description_text
if options['problem_type'] == 'MC':
choicegroup_tag = ET.SubElement(type_tag, 'choicegroup')
elif options['problem_type'] == 'Checkbox':
choicegroup_tag = ET.SubElement(type_tag, 'checkboxgroup')
# Iterate over the choices and add them one by one.
for item in answers:
item_tag = ET.SubElement(choicegroup_tag, 'choice')
item_tag.set('correct', item['correctness'])
item_tag.text = item['answer']
if 'hint' in item:
hint_tag = ET.SubElement(item_tag, 'choicehint')
hint_tag.text = item['hint']
# Create the structure for the solution
solution_tag = ET.SubElement(type_tag, 'solution')
solution_div_tag = ET.SubElement(solution_tag, 'div')
solution_div_tag.set('class', 'detailed-solution')
explanation_p_tag = ET.SubElement(solution_div_tag, 'p')
explanation_p_tag.text = 'Explanation'
explanation_p_tag.tail = solution_text
return problem_tree
# Function to create the XML structure for numerical or text problems.
# Parameters are described under make_problem_XML() above.
def make_line_problem_XML(
problem_tree,
problem_tag,
problem_text=False,
label_text='Enter your answer below.',
description_text=False,
answers=[{'answer': '-1'}],
solution_text = '<p>Missing solution</p>',
options = {'problem_type': 'Text'}):
# Create the structure for the problem.
if options['problem_type'] == 'Numerical':
type_tag = ET.SubElement(problem_tag, 'numericalresponse')
if 'tolerance' not in options:
options['tolerance'] = 0.05 # 5% tolerance on numerical problems by default.
else:
type_tag = ET.SubElement(problem_tag, 'stringresponse')
type_tag.set('type', 'ci') # case-insensitive by default.
# Needs some expansion for various extra credit options.
# if 'extra_credit' in options:
# type_tag.set('extra_credit', options['extra_credit'])
type_tag.set('answer', answers[0]['answer'])
label_tag = ET.SubElement(type_tag, 'label')
label_tag.text = label_text
if description_text is not False:
description_tag = ET.SubElement(type_tag, 'description')
description_tag.text = description_text
# Add additional answers if they exist.
if len(answers) > 1:
for item in answers[1:]:  # the first answer is already set as the main answer
additional_answer_tag = ET.SubElement(type_tag, 'additional_answer')
additional_answer_tag.set('answer', item['answer'])
if options['problem_type'] == 'Numerical':
input_tag = ET.SubElement(type_tag, 'formulaequationinput')
tolerance_tag = ET.SubElement(type_tag, 'responseparam')
tolerance_tag.set('type', 'tolerance')
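        # Convert the fractional tolerance (e.g. 0.05) into a percentage string ('5%') for the responseparam default.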
tolerance_tag.set('default', str(int(float(options['tolerance']) * 100)) + '%')
else:
input_tag = ET.SubElement(type_tag, 'textline')
input_tag.set('size', '30')
# Create the structure for the solution
solution_tag = ET.SubElement(type_tag, 'solution')
solution_div_tag = ET.SubElement(solution_tag, 'div')
solution_div_tag.set('class', 'detailed-solution')
explanation_p_tag = ET.SubElement(solution_div_tag, 'p')
explanation_p_tag.text = 'Explanation'
explanation_p_tag.tail = solution_text
return problem_tree
# Function to create the XML structure for "anything is correct" problems.
# Parameters are described under make_problem_XML() above.
def make_anytext_problem_XML(
problem_tree,
problem_tag,
problem_text=False,
label_text='Enter your answer below.',
description_text=False,
answers=[{'correctness': 'true', 'answer': 'Answers are missing'}],
solution_text = '<p>Missing solution</p>',
options = {'problem_type': 'AnyText', 'feedback':'Thank you for your response.'}):
# Insert the python grading script
pythonscript = """
<![CDATA[
def test_text(expect, ans):
if ans:
return True
def hint_fn(answer_ids, student_answers, new_cmap, old_cmap):
aid = answer_ids[0]
hint = ''
hint = '""" + options['feedback'] + """'.format(hint)
new_cmap.set_hint_and_mode(aid,hint,'always')
]]>
"""
script_tag = ET.SubElement(problem_tag, 'script')
script_tag.set('type','loncapa/python')
script_tag.text = pythonscript
# Make the customresponse tag and its sub-tags
type_tag = ET.SubElement(problem_tag, 'customresponse')
type_tag.set('cfn', 'test_text')
type_tag.set('expect', 'anything')
textline_tag = ET.SubElement(type_tag, 'textline')
textline_tag.set('size', '40')
textline_tag.set('correct_answer', 'anything')
textline_tag.set('label', 'Your response')
hintgroup_tag = ET.SubElement(type_tag, 'hintgroup')
hintgroup_tag.set('hintfn', 'hint_fn')
# Create the structure for the solution
solution_tag = ET.SubElement(type_tag, 'solution')
solution_div_tag = ET.SubElement(solution_tag, 'div')
solution_div_tag.set('class', 'detailed-solution')
explanation_p_tag = ET.SubElement(solution_div_tag, 'p')
explanation_p_tag.text = 'Explanation'
explanation_p_tag.tail = solution_text
return problem_tree
def write_problem_file(problem_XML, problem_filename):
"""
write_problem_file: write a complete edX problem XML structure to disk.
Arguments:
- problem_XML: The ElementTree object for the problem.
- problem_filename: The filename.
Return: True if successful, False if not.
Outputs: A pretty-printed XML file at 4 spaces per indent
"""
# HTML entities in the problem text get encoded during the XML-writing step, so we need to decode them here.
parser = HTMLParser.HTMLParser()
    xml_string = minidom.parseString(ET.tostring(problem_XML.getroot())).toprettyxml(indent="    ")
xml_string = parser.unescape(xml_string)
with open(problem_filename, "w") as f:
# We start from character 23 because the XML declaration is an unwanted 22 characters (counting \r).
# I should do this better, but this works for now.
f.writelines(xml_string[23:])
#################
# Testing code
#################
"""
# Make an MC problem
title = 'Sample MC Problem'
text = '<p>test text</p>'
label = 'test label'
answers = [{'answer': 'wrong one', 'correctness': 'false', 'hint':'Don\'t choose the wrong one.'}, {'answer': 'right one', 'correctness': 'true', 'hint':'The right one was right!'}]
solution = '<p>blank solution</p>'
options = {'problem_type': 'MC'}
the_xml = make_problem_XML(
problem_title = title,
problem_text = text,
label_text = label,
answers = answers,
solution_text = solution,
options = options)
write_problem_file(the_xml, 'test_MC_problem.xml')
# Make a checkbox problem
title = 'Sample Checkbox Problem'
text = '<p>test text</p>'
label = 'test label'
answers = [{'answer': 'wrong one', 'correctness': 'false'}, {'answer': 'right one', 'correctness': 'true'}]
solution = '<p>blank solution</p>'
options = {'problem_type': 'Checkbox'}
the_xml = make_problem_XML(
problem_title = title,
problem_text = text,
1)
self.gridLayout_10.addWidget(self.jointAxisGroup_3, 0, 0, 1, 2)
self.analogGB_3 = QtWidgets.QGroupBox(self.joint3tab)
self.analogGB_3.setObjectName("analogGB_3")
self.gridLayout_118 = QtWidgets.QGridLayout(self.analogGB_3)
self.gridLayout_118.setContentsMargins(8, 8, 8, 8)
self.gridLayout_118.setSpacing(5)
self.gridLayout_118.setObjectName("gridLayout_118")
self.analogScaleMax_3 = QtWidgets.QLineEdit(self.analogGB_3)
self.analogScaleMax_3.setObjectName("analogScaleMax_3")
self.gridLayout_118.addWidget(self.analogScaleMax_3, 2, 1, 1, 1)
self.label_28 = QtWidgets.QLabel(self.analogGB_3)
self.label_28.setObjectName("label_28")
self.gridLayout_118.addWidget(self.label_28, 1, 0, 1, 1)
self.analogMinLimit_3 = QtWidgets.QLineEdit(self.analogGB_3)
self.analogMinLimit_3.setObjectName("analogMinLimit_3")
self.gridLayout_118.addWidget(self.analogMinLimit_3, 0, 1, 1, 1)
self.label_29 = QtWidgets.QLabel(self.analogGB_3)
self.label_29.setObjectName("label_29")
self.gridLayout_118.addWidget(self.label_29, 2, 0, 1, 1)
self.analogMaxLimit_3 = QtWidgets.QLineEdit(self.analogGB_3)
self.analogMaxLimit_3.setObjectName("analogMaxLimit_3")
self.gridLayout_118.addWidget(self.analogMaxLimit_3, 1, 1, 1, 1)
self.analogDefault_3 = QtWidgets.QPushButton(self.analogGB_3)
self.analogDefault_3.setObjectName("analogDefault_3")
self.gridLayout_118.addWidget(self.analogDefault_3, 0, 2, 1, 1)
self.label_30 = QtWidgets.QLabel(self.analogGB_3)
self.label_30.setObjectName("label_30")
self.gridLayout_118.addWidget(self.label_30, 0, 0, 1, 1)
self.gridLayout_10.addWidget(self.analogGB_3, 2, 0, 1, 1)
self.groupBox_16 = QtWidgets.QGroupBox(self.joint3tab)
self.groupBox_16.setObjectName("groupBox_16")
self.gridLayout_25 = QtWidgets.QGridLayout(self.groupBox_16)
self.gridLayout_25.setContentsMargins(8, 8, 8, 8)
self.gridLayout_25.setSpacing(5)
self.gridLayout_25.setObjectName("gridLayout_25")
self.label_49 = QtWidgets.QLabel(self.groupBox_16)
self.label_49.setObjectName("label_49")
self.gridLayout_25.addWidget(self.label_49, 0, 0, 1, 1)
self.encoderScale_3 = QtWidgets.QLineEdit(self.groupBox_16)
self.encoderScale_3.setObjectName("encoderScale_3")
self.gridLayout_25.addWidget(self.encoderScale_3, 0, 1, 1, 1)
self.gridLayout_10.addWidget(self.groupBox_16, 3, 0, 1, 1)
self.jointType_3.addTab(self.joint3tab, "")
self.joint4tab = QtWidgets.QWidget()
self.joint4tab.setObjectName("joint4tab")
self.gridLayout_14 = QtWidgets.QGridLayout(self.joint4tab)
self.gridLayout_14.setContentsMargins(8, 8, 8, 8)
self.gridLayout_14.setSpacing(5)
self.gridLayout_14.setObjectName("gridLayout_14")
self.groupBox_17 = QtWidgets.QGroupBox(self.joint4tab)
self.groupBox_17.setObjectName("groupBox_17")
self.gridLayout_21 = QtWidgets.QGridLayout(self.groupBox_17)
self.gridLayout_21.setContentsMargins(10, 10, 10, 10)
self.gridLayout_21.setSpacing(5)
self.gridLayout_21.setObjectName("gridLayout_21")
self.label_98 = QtWidgets.QLabel(self.groupBox_17)
self.label_98.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_98.setObjectName("label_98")
self.gridLayout_21.addWidget(self.label_98, 0, 0, 1, 1)
self.homeSearchVel_4 = QtWidgets.QLineEdit(self.groupBox_17)
self.homeSearchVel_4.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.homeSearchVel_4.setObjectName("homeSearchVel_4")
self.gridLayout_21.addWidget(self.homeSearchVel_4, 2, 1, 1, 1)
self.label_15 = QtWidgets.QLabel(self.groupBox_17)
self.label_15.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_15.setObjectName("label_15")
self.gridLayout_21.addWidget(self.label_15, 4, 0, 1, 1)
self.label_141 = QtWidgets.QLabel(self.groupBox_17)
self.label_141.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_141.setObjectName("label_141")
self.gridLayout_21.addWidget(self.label_141, 5, 0, 1, 1)
self.homeIgnoreLimits_4 = QtWidgets.QCheckBox(self.groupBox_17)
self.homeIgnoreLimits_4.setObjectName("homeIgnoreLimits_4")
self.gridLayout_21.addWidget(self.homeIgnoreLimits_4, 6, 1, 1, 1)
self.homeSequence_4 = QtWidgets.QLineEdit(self.groupBox_17)
self.homeSequence_4.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.homeSequence_4.setObjectName("homeSequence_4")
self.gridLayout_21.addWidget(self.homeSequence_4, 5, 1, 1, 1)
self.homeLatchVel_4 = QtWidgets.QLineEdit(self.groupBox_17)
self.homeLatchVel_4.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.homeLatchVel_4.setObjectName("homeLatchVel_4")
self.gridLayout_21.addWidget(self.homeLatchVel_4, 3, 1, 1, 1)
self.label_100 = QtWidgets.QLabel(self.groupBox_17)
self.label_100.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_100.setObjectName("label_100")
self.gridLayout_21.addWidget(self.label_100, 2, 0, 1, 1)
self.home_4 = QtWidgets.QLineEdit(self.groupBox_17)
self.home_4.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.home_4.setObjectName("home_4")
self.gridLayout_21.addWidget(self.home_4, 0, 1, 1, 1)
self.homeUseIndex_4 = QtWidgets.QCheckBox(self.groupBox_17)
self.homeUseIndex_4.setObjectName("homeUseIndex_4")
self.gridLayout_21.addWidget(self.homeUseIndex_4, 7, 1, 1, 1)
self.label_99 = QtWidgets.QLabel(self.groupBox_17)
self.label_99.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_99.setObjectName("label_99")
self.gridLayout_21.addWidget(self.label_99, 1, 0, 1, 1)
self.homeOffset_4 = QtWidgets.QLineEdit(self.groupBox_17)
self.homeOffset_4.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.homeOffset_4.setObjectName("homeOffset_4")
self.gridLayout_21.addWidget(self.homeOffset_4, 1, 1, 1, 1)
self.homeFinalVelocity_4 = QtWidgets.QLineEdit(self.groupBox_17)
self.homeFinalVelocity_4.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.homeFinalVelocity_4.setObjectName("homeFinalVelocity_4")
self.gridLayout_21.addWidget(self.homeFinalVelocity_4, 4, 1, 1, 1)
self.label_101 = QtWidgets.QLabel(self.groupBox_17)
self.label_101.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_101.setObjectName("label_101")
self.gridLayout_21.addWidget(self.label_101, 3, 0, 1, 1)
self.homeSwitchShared_4 = QtWidgets.QCheckBox(self.groupBox_17)
self.homeSwitchShared_4.setObjectName("homeSwitchShared_4")
self.gridLayout_21.addWidget(self.homeSwitchShared_4, 8, 1, 1, 1)
self.gridLayout_14.addWidget(self.groupBox_17, 1, 1, 1, 1)
self.groupBox_19 = QtWidgets.QGroupBox(self.joint4tab)
self.groupBox_19.setObjectName("groupBox_19")
self.gridLayout_23 = QtWidgets.QGridLayout(self.groupBox_19)
self.gridLayout_23.setContentsMargins(10, 10, 10, 10)
self.gridLayout_23.setSpacing(5)
self.gridLayout_23.setObjectName("gridLayout_23")
self.label_254 = QtWidgets.QLabel(self.groupBox_19)
self.label_254.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_254.setObjectName("label_254")
self.gridLayout_23.addWidget(self.label_254, 0, 0, 1, 1)
self.p_4 = QtWidgets.QLineEdit(self.groupBox_19)
self.p_4.setText("")
self.p_4.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.p_4.setObjectName("p_4")
self.gridLayout_23.addWidget(self.p_4, 0, 1, 1, 1)
self.label_255 = QtWidgets.QLabel(self.groupBox_19)
self.label_255.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_255.setObjectName("label_255")
self.gridLayout_23.addWidget(self.label_255, 0, 2, 1, 1)
self.deadband_4 = QtWidgets.QLineEdit(self.groupBox_19)
self.deadband_4.setText("")
self.deadband_4.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.deadband_4.setObjectName("deadband_4")
self.gridLayout_23.addWidget(self.deadband_4, 0, 3, 1, 1)
self.label_256 = QtWidgets.QLabel(self.groupBox_19)
self.label_256.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_256.setObjectName("label_256")
self.gridLayout_23.addWidget(self.label_256, 1, 0, 1, 1)
self.i_4 = QtWidgets.QLineEdit(self.groupBox_19)
self.i_4.setText("")
self.i_4.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.i_4.setObjectName("i_4")
self.gridLayout_23.addWidget(self.i_4, 1, 1, 1, 1)
self.label_257 = QtWidgets.QLabel(self.groupBox_19)
self.label_257.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_257.setObjectName("label_257")
self.gridLayout_23.addWidget(self.label_257, 1, 2, 1, 1)
self.bias_4 = QtWidgets.QLineEdit(self.groupBox_19)
self.bias_4.setText("")
self.bias_4.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.bias_4.setObjectName("bias_4")
self.gridLayout_23.addWidget(self.bias_4, 1, 3, 1, 1)
self.label_258 = QtWidgets.QLabel(self.groupBox_19)
self.label_258.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_258.setObjectName("label_258")
self.gridLayout_23.addWidget(self.label_258, 2, 0, 1, 1)
self.d_4 = QtWidgets.QLineEdit(self.groupBox_19)
self.d_4.setText("")
self.d_4.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.d_4.setObjectName("d_4")
self.gridLayout_23.addWidget(self.d_4, 2, 1, 1, 1)
self.label_259 = QtWidgets.QLabel(self.groupBox_19)
self.label_259.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_259.setObjectName("label_259")
self.gridLayout_23.addWidget(self.label_259, 2, 2, 1, 1)
self.maxOutput_4 = QtWidgets.QLineEdit(self.groupBox_19)
self.maxOutput_4.setText("")
self.maxOutput_4.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.maxOutput_4.setObjectName("maxOutput_4")
self.gridLayout_23.addWidget(self.maxOutput_4, 2, 3, 1, 1)
self.label_260 = QtWidgets.QLabel(self.groupBox_19)
self.label_260.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_260.setObjectName("label_260")
self.gridLayout_23.addWidget(self.label_260, 3, 0, 1, 1)
self.ff0_4 = QtWidgets.QLineEdit(self.groupBox_19)
self.ff0_4.setText("")
self.ff0_4.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.ff0_4.setObjectName("ff0_4")
self.gridLayout_23.addWidget(self.ff0_4, 3, 1, 1, 1)
self.label_262 = QtWidgets.QLabel(self.groupBox_19)
self.label_262.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_262.setObjectName("label_262")
self.gridLayout_23.addWidget(self.label_262, 4, 0, 1, 1)
self.ff1_4 = QtWidgets.QLineEdit(self.groupBox_19)
self.ff1_4.setText("")
self.ff1_4.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.ff1_4.setObjectName("ff1_4")
self.gridLayout_23.addWidget(self.ff1_4, 4, 1, 1, 1)
self.label_263 = QtWidgets.QLabel(self.groupBox_19)
self.label_263.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_263.setObjectName("label_263")
self.gridLayout_23.addWidget(self.label_263, 5, 0, 1, 1)
self.ff2_4 = QtWidgets.QLineEdit(self.groupBox_19)
self.ff2_4.setText("")
self.ff2_4.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.ff2_4.setObjectName("ff2_4")
self.gridLayout_23.addWidget(self.ff2_4, 5, 1, 1, 1)
self.pidDefault_4 = QtWidgets.QPushButton(self.groupBox_19)
self.pidDefault_4.setObjectName("pidDefault_4")
self.gridLayout_23.addWidget(self.pidDefault_4, 5, 3, 1, 1)
self.maxError_4 = QtWidgets.QLineEdit(self.groupBox_19)
self.maxError_4.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.maxError_4.setObjectName("maxError_4")
self.gridLayout_23.addWidget(self.maxError_4, 3, 3, 1, 1)
self.label_188 = QtWidgets.QLabel(self.groupBox_19)
self.label_188.setObjectName("label_188")
self.gridLayout_23.addWidget(self.label_188, 3, 2, 1, 1)
self.gridLayout_14.addWidget(self.groupBox_19, 1, 0, 1, 1)
self.analogGB_4 = QtWidgets.QGroupBox(self.joint4tab)
self.analogGB_4.setObjectName("analogGB_4")
self.gridLayout_119 = QtWidgets.QGridLayout(self.analogGB_4)
self.gridLayout_119.setContentsMargins(8, 8, 8, 8)
self.gridLayout_119.setSpacing(5)
self.gridLayout_119.setObjectName("gridLayout_119")
self.analogScaleMax_4 = QtWidgets.QLineEdit(self.analogGB_4)
self.analogScaleMax_4.setObjectName("analogScaleMax_4")
self.gridLayout_119.addWidget(self.analogScaleMax_4, 2, 1, 1, 1)
self.label_35 = QtWidgets.QLabel(self.analogGB_4)
self.label_35.setObjectName("label_35")
self.gridLayout_119.addWidget(self.label_35, 1, 0, 1, 1)
self.analogMinLimit_4 = QtWidgets.QLineEdit(self.analogGB_4)
self.analogMinLimit_4.setObjectName("analogMinLimit_4")
self.gridLayout_119.addWidget(self.analogMinLimit_4, 0, 1, 1, 1)
self.label_36 = QtWidgets.QLabel(self.analogGB_4)
self.label_36.setObjectName("label_36")
self.gridLayout_119.addWidget(self.label_36, 2, 0, 1, 1)
self.analogMaxLimit_4 = QtWidgets.QLineEdit(self.analogGB_4)
self.analogMaxLimit_4.setObjectName("analogMaxLimit_4")
self.gridLayout_119.addWidget(self.analogMaxLimit_4, 1, 1, 1, 1)
self.analogDefault_4 = QtWidgets.QPushButton(self.analogGB_4)
self.analogDefault_4.setObjectName("analogDefault_4")
self.gridLayout_119.addWidget(self.analogDefault_4, 0, 2, 1, 1)
self.label_44 = QtWidgets.QLabel(self.analogGB_4)
self.label_44.setObjectName("label_44")
self.gridLayout_119.addWidget(self.label_44, 0, 0, 1, 1)
self.gridLayout_14.addWidget(self.analogGB_4, 2, 0, 1, 1)
self.gridGroupBox9 = QtWidgets.QGroupBox(self.joint4tab)
self.gridGroupBox9.setObjectName("gridGroupBox9")
self.gridLayout_36 = QtWidgets.QGridLayout(self.gridGroupBox9)
self.gridLayout_36.setContentsMargins(8, 8, 8, 8)
self.gridLayout_36.setSpacing(5)
self.gridLayout_36.setObjectName("gridLayout_36")
self.label_207 = QtWidgets.QLabel(self.gridGroupBox9)
self.label_207.setObjectName("label_207")
self.gridLayout_36.addWidget(self.label_207, 0, 0, 1, 1)
self.label_208 = QtWidgets.QLabel(self.gridGroupBox9)
self.label_208.setObjectName("label_208")
self.gridLayout_36.addWidget(self.label_208, 1, 0, 1, 1)
self.label_209 = QtWidgets.QLabel(self.gridGroupBox9)
self.label_209.setObjectName("label_209")
self.gridLayout_36.addWidget(self.label_209, 2, 0, 1, 1)
self.distanceJoint_4 = QtWidgets.QLabel(self.gridGroupBox9)
self.distanceJoint_4.setFrameShape(QtWidgets.QFrame.Box)
self.distanceJoint_4.setText("")
self.distanceJoint_4.setObjectName("distanceJoint_4")
self.gridLayout_36.addWidget(self.distanceJoint_4, 1, 1, 1, 1)
self.timeJoint_4 = QtWidgets.QLabel(self.gridGroupBox9)
self.timeJoint_4.setFrameShape(QtWidgets.QFrame.Box)
self.timeJoint_4.setText("")
self.timeJoint_4.setObjectName("timeJoint_4")
self.gridLayout_36.addWidget(self.timeJoint_4, 0, 1, 1, 1)
self.stepRateJoint_4 = QtWidgets.QLabel(self.gridGroupBox9)
self.stepRateJoint_4.setFrameShape(QtWidgets.QFrame.Box)
self.stepRateJoint_4.setText("")
self.stepRateJoint_4.setObjectName("stepRateJoint_4")
self.gridLayout_36.addWidget(self.stepRateJoint_4, 2, 1, 1, 1)
self.gridLayout_14.addWidget(self.gridGroupBox9, 2, 1, 1, 1)
self.jointAxisGroup_4 = QtWidgets.QGroupBox(self.joint4tab)
self.jointAxisGroup_4.setObjectName("jointAxisGroup_4")
self.gridLayout_22 = QtWidgets.QGridLayout(self.jointAxisGroup_4)
self.gridLayout_22.setContentsMargins(10, 10, 10, 10)
self.gridLayout_22.setSpacing(5)
self.gridLayout_22.setObjectName("gridLayout_22")
self.axisCB_4 = QtWidgets.QComboBox(self.jointAxisGroup_4)
self.axisCB_4.setObjectName("axisCB_4")
self.gridLayout_22.addWidget(self.axisCB_4, 1, 0, 1, 1)
self.label_66 = QtWidgets.QLabel(self.jointAxisGroup_4)
self.label_66.setAlignment(QtCore.Qt.AlignBottom|QtCore.Qt.AlignHCenter)
self.label_66.setWordWrap(True)
self.label_66.setObjectName("label_66")
self.gridLayout_22.addWidget(self.label_66, 0, 3, 1, 1)
self.label_59 = QtWidgets.QLabel(self.jointAxisGroup_4)
self.label_59.setAlignment(QtCore.Qt.AlignBottom|QtCore.Qt.AlignHCenter)
self.label_59.setObjectName("label_59")
self.gridLayout_22.addWidget(self.label_59, 0, 0, 1, 1)
self.minLimit_4 = QtWidgets.QLineEdit(self.jointAxisGroup_4)
self.minLimit_4.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.minLimit_4.setObjectName("minLimit_4")
self.gridLayout_22.addWidget(self.minLimit_4, 1, 3, 1, 1)
self.maxLimit_4 = QtWidgets.QLineEdit(self.jointAxisGroup_4)
self.maxLimit_4.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.maxLimit_4.setObjectName("maxLimit_4")
self.gridLayout_22.addWidget(self.maxLimit_4, 1, 4, 1, 1)
self.label_71 = QtWidgets.QLabel(self.jointAxisGroup_4)
self.label_71.setTextFormat(QtCore.Qt.AutoText)
self.label_71.setAlignment(QtCore.Qt.AlignBottom|QtCore.Qt.AlignHCenter)
self.label_71.setWordWrap(True)
self.label_71.setObjectName("label_71")
self.gridLayout_22.addWidget(self.label_71, 0, 4, 1, 1)
self.label_80 = QtWidgets.QLabel(self.jointAxisGroup_4)
self.label_80.setAlignment(QtCore.Qt.AlignBottom|QtCore.Qt.AlignHCenter)
self.label_80.setWordWrap(True)
self.label_80.setObjectName("label_80")
self.gridLayout_22.addWidget(self.label_80, 0, 5, 1, 1)
self.maxVelocity_4 = QtWidgets.QLineEdit(self.jointAxisGroup_4)
self.maxVelocity_4.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.maxVelocity_4.setObjectName("maxVelocity_4")
self.gridLayout_22.addWidget(self.maxVelocity_4, 1, 5, 1, 1)
self.label_61 = QtWidgets.QLabel(self.jointAxisGroup_4)
self.label_61.setAlignment(QtCore.Qt.AlignBottom|QtCore.Qt.AlignHCenter)
self.label_61.setObjectName("label_61")
self.gridLayout_22.addWidget(self.label_61, 0, 2, 1, 1)
self.label_81 = QtWidgets.QLabel(self.jointAxisGroup_4)
self.label_81.setAlignment(QtCore.Qt.AlignBottom|QtCore.Qt.AlignHCenter)
self.label_81.setWordWrap(True)
self.label_81.setObjectName("label_81")
self.gridLayout_22.addWidget(self.label_81, 0, 6, 1, 1)
self.maxAccel_4 = QtWidgets.QLineEdit(self.jointAxisGroup_4)
self.maxAccel_4.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.maxAccel_4.setObjectName("maxAccel_4")
self.gridLayout_22.addWidget(self.maxAccel_4, 1, 6, 1, 1)
self.label_31 = QtWidgets.QLabel(self.jointAxisGroup_4)
self.label_31.setAlignment(QtCore.Qt.AlignBottom|QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft)
self.label_31.setObjectName("label_31")
self.gridLayout_22.addWidget(self.label_31, 0, 1, 1, 1)
self.axisType_4 = QtWidgets.QLabel(self.jointAxisGroup_4)
self.axisType_4.setFrameShape(QtWidgets.QFrame.Box)
self.axisType_4.setText("")
self.axisType_4.setObjectName("axisType_4")
self.gridLayout_22.addWidget(self.axisType_4, 1, 1, 1, 1)
self.scale_4 = QtWidgets.QLineEdit(self.jointAxisGroup_4)
self.scale_4.setObjectName("scale_4")
self.gridLayout_22.addWidget(self.scale_4, 1, 2, 1, 1)
self.reverse_4 = QtWidgets.QCheckBox(self.jointAxisGroup_4)
self.reverse_4.setObjectName("reverse_4")
self.gridLayout_22.addWidget(self.reverse_4, 1, 7, 1, 1)
self.gridLayout_14.addWidget(self.jointAxisGroup_4, 0, 0, 1, 2)
self.groupBox_20 = QtWidgets.QGroupBox(self.joint4tab)
self.groupBox_20.setObjectName("groupBox_20")
self.gridLayout_24 = QtWidgets.QGridLayout(self.groupBox_20)
self.gridLayout_24.setContentsMargins(8, 8, 8, 8)
self.gridLayout_24.setSpacing(5)
self.gridLayout_24.setObjectName("gridLayout_24")
self.label_57 = QtWidgets.QLabel(self.groupBox_20)
self.label_57.setObjectName("label_57")
self.gridLayout_24.addWidget(self.label_57, 0, 0, 1, 1)
self.encoderScale_4 = QtWidgets.QLineEdit(self.groupBox_20)
self.encoderScale_4.setObjectName("encoderScale_4")
self.gridLayout_24.addWidget(self.encoderScale_4, 0, 1, 1, 1)
self.gridLayout_14.addWidget(self.groupBox_20, 3, 0, 1, 1)
self.jointType_3.addTab(self.joint4tab, "")
self.tab_3 = QtWidgets.QWidget()
self.tab_3.setObjectName("tab_3")
self.gridLayout_15 = QtWidgets.QGridLayout(self.tab_3)
self.gridLayout_15.setContentsMargins(8, 8, 8, 8)
self.gridLayout_15.setSpacing(5)
self.gridLayout_15.setObjectName("gridLayout_15")
self.groupBox_63 = QtWidgets.QGroupBox(self.tab_3)
self.groupBox_63.setObjectName("groupBox_63")
self.gridLayout_110 = QtWidgets.QGridLayout(self.groupBox_63)
self.gridLayout_110.setContentsMargins(10, 10, 10, 10)
self.gridLayout_110.setSpacing(5)
self.gridLayout_110.setObjectName("gridLayout_110")
self.label_759 = QtWidgets.QLabel(self.groupBox_63)
self.label_759.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_759.setObjectName("label_759")
self.gridLayout_110.addWidget(self.label_759, 0, 0, 1, 1)
self.p_5 = QtWidgets.QLineEdit(self.groupBox_63)
self.p_5.setText("")
self.p_5.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.p_5.setObjectName("p_5")
self.gridLayout_110.addWidget(self.p_5, 0, 1, 1, 1)
self.label_760 = QtWidgets.QLabel(self.groupBox_63)
self.label_760.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_760.setObjectName("label_760")
self.gridLayout_110.addWidget(self.label_760, 0, 2, 1, 1)
self.deadband_5 = QtWidgets.QLineEdit(self.groupBox_63)
self.deadband_5.setText("")
self.deadband_5.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.deadband_5.setObjectName("deadband_5")
self.gridLayout_110.addWidget(self.deadband_5, 0, 3, 1, 1)
self.label_761 = QtWidgets.QLabel(self.groupBox_63)
self.label_761.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_761.setObjectName("label_761")
self.gridLayout_110.addWidget(self.label_761, 1, 0, 1, 1)
self.i_5 = QtWidgets.QLineEdit(self.groupBox_63)
self.i_5.setText("")
self.i_5.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.i_5.setObjectName("i_5")
self.gridLayout_110.addWidget(self.i_5, 1, 1, 1, 1)
self.label_762 = QtWidgets.QLabel(self.groupBox_63)
self.label_762.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_762.setObjectName("label_762")
self.gridLayout_110.addWidget(self.label_762, 1, 2, 1, 1)
self.bias_5 = QtWidgets.QLineEdit(self.groupBox_63)
self.bias_5.setText("")
self.bias_5.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.bias_5.setObjectName("bias_5")
self.gridLayout_110.addWidget(self.bias_5, 1, 3, 1, 1)
self.label_763 = QtWidgets.QLabel(self.groupBox_63)
self.label_763.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_763.setObjectName("label_763")
self.gridLayout_110.addWidget(self.label_763, 2, 0, 1, 1)
self.d_5 = QtWidgets.QLineEdit(self.groupBox_63)
self.d_5.setText("")
self.d_5.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.d_5.setObjectName("d_5")
self.gridLayout_110.addWidget(self.d_5, 2, 1, 1, 1)
self.label_764 = QtWidgets.QLabel(self.groupBox_63)
self.label_764.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_764.setObjectName("label_764")
self.gridLayout_110.addWidget(self.label_764, 2, 2, 1, 1)
self.maxOutput_5 = QtWidgets.QLineEdit(self.groupBox_63)
self.maxOutput_5.setText("")
self.maxOutput_5.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.maxOutput_5.setObjectName("maxOutput_5")
self.gridLayout_110.addWidget(self.maxOutput_5, 2, 3, 1, 1)
self.label_765 = QtWidgets.QLabel(self.groupBox_63)
self.label_765.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_765.setObjectName("label_765")
self.gridLayout_110.addWidget(self.label_765, 3, 0, 1, 1)
self.ff0_5 = QtWidgets.QLineEdit(self.groupBox_63)
self.ff0_5.setText("")
self.ff0_5.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.ff0_5.setObjectName("ff0_5")
self.gridLayout_110.addWidget(self.ff0_5, 3, 1, 1, 1)
self.label_766 = QtWidgets.QLabel(self.groupBox_63)
self.label_766.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_766.setObjectName("label_766")
self.gridLayout_110.addWidget(self.label_766, 4, 0, 1, 1)
self.ff1_5 = QtWidgets.QLineEdit(self.groupBox_63)
self.ff1_5.setText("")
self.ff1_5.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.ff1_5.setObjectName("ff1_5")
self.gridLayout_110.addWidget(self.ff1_5, 4, 1, 1, 1)
self.label_767 = QtWidgets.QLabel(self.groupBox_63)
self.label_767.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_767.setObjectName("label_767")
self.gridLayout_110.addWidget(self.label_767, 5, 0, 1, 1)
self.ff2_5 = QtWidgets.QLineEdit(self.groupBox_63)
self.ff2_5.setText("")
self.ff2_5.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.ff2_5.setObjectName("ff2_5")
self.gridLayout_110.addWidget(self.ff2_5, 5, 1, 1, 1)
self.pidDefault_5 = QtWidgets.QPushButton(self.groupBox_63)
self.pidDefault_5.setObjectName("pidDefault_5")
self.gridLayout_110.addWidget(self.pidDefault_5, 5, 3, 1, 1)
self.maxError_5 = QtWidgets.QLineEdit(self.groupBox_63)
self.maxError_5.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.maxError_5.setObjectName("maxError_5")
self.gridLayout_110.addWidget(self.maxError_5, 3, 3, 1, 1)
self.label_768 = QtWidgets.QLabel(self.groupBox_63)
self.label_768.setObjectName("label_768")
self.gridLayout_110.addWidget(self.label_768, 3, 2, 1, 1)
self.gridLayout_15.addWidget(self.groupBox_63, 1, 0, 1, 1)
self.gridGroupBox_4 = QtWidgets.QGroupBox(self.tab_3)
self.gridGroupBox_4.setObjectName("gridGroupBox_4")
self.gridLayout_109 = QtWidgets.QGridLayout(self.gridGroupBox_4)
self.gridLayout_109.setContentsMargins(8, 8, 8, 8)
self.gridLayout_109.setSpacing(5)
self.gridLayout_109.setObjectName("gridLayout_109")
self.stepRateJoint_5 = QtWidgets.QLabel(self.gridGroupBox_4)
self.stepRateJoint_5.setFrameShape(QtWidgets.QFrame.Box)
self.stepRateJoint_5.setText("")
self.stepRateJoint_5.setObjectName("stepRateJoint_5")
self.gridLayout_109.addWidget(self.stepRateJoint_5, 2, 1, 1, 1)
self.label_758 = QtWidgets.QLabel(self.gridGroupBox_4)
self.label_758.setObjectName("label_758")
self.gridLayout_109.addWidget(self.label_758, 2, 0, 1, 1)
self.timeJoint_5 = QtWidgets.QLabel(self.gridGroupBox_4)
self.timeJoint_5.setFrameShape(QtWidgets.QFrame.Box)
self.timeJoint_5.setText("")
self.timeJoint_5.setObjectName("timeJoint_5")
self.gridLayout_109.addWidget(self.timeJoint_5, 0, 1, 1, 1)
self.label_756 = QtWidgets.QLabel(self.gridGroupBox_4)
self.label_756.setObjectName("label_756")
self.gridLayout_109.addWidget(self.label_756, 0, 0, 1, 1)
self.label_757 = QtWidgets.QLabel(self.gridGroupBox_4)
self.label_757.setObjectName("label_757")
self.gridLayout_109.addWidget(self.label_757, 1, 0, 1, 1)
self.distanceJoint_5 = QtWidgets.QLabel(self.gridGroupBox_4)
self.distanceJoint_5.setFrameShape(QtWidgets.QFrame.Box)
self.distanceJoint_5.setText("")
self.distanceJoint_5.setObjectName("distanceJoint_5")
self.gridLayout_109.addWidget(self.distanceJoint_5, 1, 1, 1, 1)
self.gridLayout_15.addWidget(self.gridGroupBox_4, 2, 1, 1, 1)
self.groupBox_62 = QtWidgets.QGroupBox(self.tab_3)
self.groupBox_62.setObjectName("groupBox_62")
self.gridLayout_108 = QtWidgets.QGridLayout(self.groupBox_62)
self.gridLayout_108.setContentsMargins(10, 10, 10, 10)
self.gridLayout_108.setSpacing(5)
self.gridLayout_108.setObjectName("gridLayout_108")
self.label_751 = QtWidgets.QLabel(self.groupBox_62)
self.label_751.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_751.setObjectName("label_751")
self.gridLayout_108.addWidget(self.label_751, 0, 0, 1, 1)
self.homeSearchVel_5 = QtWidgets.QLineEdit(self.groupBox_62)
self.homeSearchVel_5.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.homeSearchVel_5.setObjectName("homeSearchVel_5")
self.gridLayout_108.addWidget(self.homeSearchVel_5, 2, 1, 1, 1)
self.label_16 = QtWidgets.QLabel(self.groupBox_62)
self.label_16.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_16.setObjectName("label_16")
self.gridLayout_108.addWidget(self.label_16, 4, 0, 1, 1)
self.label_752 = QtWidgets.QLabel(self.groupBox_62)
self.label_752.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_752.setObjectName("label_752")
import re
import uuid
import json
class MagicTrait:
'An inventory of magical data references'
def __delattr__(self, name):
'Processes delete properties'
self.__delitem__(name)
def __delitem__(self, key):
'Deletes the item accessed like an array'
# if it exists
if key in self.__dict__.keys():
#remove it
del self.__dict__[key]
def __getattr__(self, name):
        'Processes get_* and set_* type methods, otherwise processes the get property'
def function(*args):
#if the method starts with get
if name[:3] == 'get':
#get_user_name('-')
separator = '_'
if len(args) and isinstance(args[0], str):
separator = args[0]
#determine the key name
key = name.replace('_', separator)
#lowercase and get rid of prefix
key = key[(3 + len(separator)):]
#get attribute or None
return self.__getitem__(key)
# the method has to start with set
#set_user_name('-')
separator = '_'
if len(args) > 1 and isinstance(args[1], str):
separator = args[1]
#determine the key name
key = name.replace('_', separator)
#get rid of prefix
key = key[(3 + len(separator)):]
#if there are no arguments and its a key
if not len(args) and key in self.__dict__.keys():
#remove it
self.__delitem__(key)
else:
#otherwise set it
self.__setitem__(key, args[0])
#either way return this
return self
# if its a function
if name[:3] == 'get' or name[:3] == 'set':
return function
#Lastly return the attribute or None
return self.__getitem__(name)
def __getitem__(self, key):
'Returns the value accessed like an array'
# if key is of invalid type or value, the list values will raise the error
if key in self.__dict__.keys():
return self.__dict__[key]
return None
def __iter__(self):
        'Iterates through the data'
return iter(self.__dict__.items())
def __len__(self):
'Returns the length'
return len(self.__dict__)
def __setattr__(self, name, value):
'Processes set properties'
self.__setitem__(name, value)
def __setitem__(self, key, value):
'Sets the item accessed like an array'
self.__dict__[key] = value
def __str__(self):
'Object to string'
return json.dumps(self.__dict__, indent = 4)
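# Illustrative usage sketch (added for clarity, not part of the original
# library): the object and attribute names below are made up. It shows how
# MagicTrait's dynamic get_*/set_* accessors and array-style access behave.
def _demo_magic_trait():
    'Minimal usage sketch for MagicTrait'
    profile = MagicTrait()
    # set_* is resolved by __getattr__ and stores the value under 'user_name'
    profile.set_user_name('Jane')
    assert profile.get_user_name() == 'Jane'
    # the same value is reachable as a property or like an array
    assert profile.user_name == 'Jane'
    assert profile['user_name'] == 'Jane'
    # deleting goes through __delitem__; missing keys read back as None
    del profile['user_name']
    assert profile.user_name is None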
class DotTrait:
'''
The _dotTrait allows multidimensional data to be
accessed like `foo.bar.zoo` as well as be manipulated
in the same fashion.
'''
def get_dot(self, notation = '', separator = '.'):
'Gets a value given the path in the registry.'
# notation should be a string
if not isinstance(notation, str):
return None
keys = notation.split(separator)
        # notation should be something
if notation == '' or not len(keys):
return self.__dict__
# get the last key, this will be treated separately
        last = keys.pop()
pointer = self.__dict__
# Now parse
for key in keys:
if not isinstance(pointer, dict) or not key in pointer:
return None
            pointer = pointer[key]
# last round
if not isinstance(pointer, dict) or not last in pointer:
return None
return pointer[last]
def is_dot(self, notation = '', separator = '.'):
'Checks to see if a key is set'
# notation should be a string
if not isinstance(notation, str):
return False
keys = notation.split(separator)
        # notation should be something
if notation == '' or not len(keys):
return False
# get the last key, this will be treated separately
        last = keys.pop()
pointer = self.__dict__
# Now parse
for key in keys:
if not isinstance(pointer, dict) or not key in pointer:
return False
            pointer = pointer[key]
# last round
if not isinstance(pointer, dict) or not last in pointer:
return False
return True
def remove_dot(self, notation = '', separator = '.'):
'Removes name space given notation'
# notation should be a string
if not isinstance(notation, str):
return self
keys = notation.split(separator)
        # notation should be something
if notation == '' or not len(keys):
return self
# get the last key, this will be treated separately
        last = keys.pop()
pointer = self.__dict__
# Now parse
for key in keys:
if not isinstance(pointer, dict) or not key in pointer:
return self
            pointer = pointer[key]
# last round
if not isinstance(pointer, dict) or not last in pointer:
return self
del pointer[last]
return self
def set_dot(self, notation, value, separator = '.'):
        'Creates the namespace given the notation and sets the value at that namespace'
if not isinstance(notation, str):
return self
keys = notation.split(separator)
if notation == '' or not len(keys):
return self
        last = keys.pop()
pointer = self.__dict__
# Now parse
for key in keys:
if not key in pointer or not isinstance(pointer[key], dict):
pointer[key] = {}
            pointer = pointer[key]
pointer[last] = value
return self
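# Illustrative usage sketch (added for clarity, not part of the original
# library): DotTrait reads and writes nested dictionaries on the instance's
# __dict__ using dot notation, or any custom separator.
def _demo_dot_trait():
    'Minimal usage sketch for DotTrait'
    data = DotTrait()
    data.set_dot('foo.bar.zoo', 1)
    assert data.get_dot('foo.bar.zoo') == 1
    assert data.is_dot('foo.bar.zoo') is True
    # a custom separator works the same way
    data.set_dot('a/b', 2, '/')
    assert data.get_dot('a/b', '/') == 2
    data.remove_dot('foo.bar.zoo')
    assert data.get_dot('foo.bar.zoo') is None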
class RegistryInterface: #ignore coverage
'''
Registry are designed to easily manipulate data in
preparation to integrate with any multi dimensional
data store.
'''
def exists(self, *args):
'Returns true if the path keys exist in the dataset'
pass
def get(self, *args):
'Returns the exact data given the path keys'
pass
def empty(self, *args):
        'Returns true if the path keys do not exist in the dataset or if the value is empty'
pass
def remove(self, *args):
'Removes the data found in the path keys'
pass
def set(self, *args):
'Sets the given data to given the path keys'
pass
class Registry(MagicTrait, DotTrait, RegistryInterface):
'''
Registry are designed to easily manipulate data in
preparation to integrate with any multi dimensional
data store.
'''
def __init__(self, data = None):
'Sets up the data'
self.set(data)
def exists(self, *args):
'Returns true if the path keys exist in the dataset'
if not len(args):
return not self.empty(*args)
separator = '--' + str(uuid.uuid4().hex) + '--'
        return self.is_dot(separator.join(map(str, args)), separator)
def get(self, *args):
'Returns the exact data given the path keys'
if not len(args):
            return self.__dict__
separator = '--' + str(uuid.uuid4().hex) + '--'
return self.get_dot(separator.join(map(str, args)), separator)
def empty(self, *args):
        'Returns true if the path keys do not exist in the dataset or if the value is empty'
if args is None or not len(args):
return len(self.__dict__) == 0
separator = '--' + str(uuid.uuid4().hex) + '--'
value = self.get_dot(separator.join(map(str, args)), separator)
if value is None:
return True
if isinstance(value, (list, tuple, str)):
return len(value) == 0
return True
def remove(self, *args):
'Removes the data found in the path keys'
if not len(args):
return self
separator = '--' + str(uuid.uuid4().hex) + '--'
return self.remove_dot(separator.join(map(str, args)), separator)
def set(self, *args):
'Sets the given data to given the path keys'
if not len(args):
return self
if len(args) == 1:
if isinstance(args[0], dict):
for key, value in args[0].items():
self.__setitem__(key, value)
return self
separator = '--' + str(uuid.uuid4().hex) + '--'
args = list(args)
value = args.pop()
return self.set_dot(separator.join(map(str, args)), value, separator)
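# Illustrative usage sketch (added for clarity, not part of the original
# library): Registry combines the magic and dot traits, so nested path keys
# are passed as separate arguments. The sample data below is made up.
def _demo_registry():
    'Minimal usage sketch for Registry'
    registry = Registry({'users': {'jane': {'role': 'admin'}}})
    assert registry.exists('users', 'jane') is True
    assert registry.get('users', 'jane', 'role') == 'admin'
    registry.set('users', 'john', 'role', 'guest')
    assert registry.get('users', 'john', 'role') == 'guest'
    registry.remove('users', 'jane')
    assert registry.empty('users', 'jane') is True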
class ModelInterface: #ignore coverage
'''
Models are designed to easily manipulate data in
preparation to integrate with any one dimensional
data store. This is the main model object.
'''
def get(self):
'Returns the entire data'
pass
def set(self, data):
'Sets the entire data'
pass
class Model(MagicTrait, DotTrait, ModelInterface):
'''
Models are designed to easily manipulate data in
preparation to integrate with any one dimensional
data store. This is the main model object.
'''
def __init__(self, data = None):
'Sets up the data'
self.set(data)
def get(self):
'Returns the entire data'
return self.__dict__
def set(self, data):
'Sets the entire data'
if isinstance(data, dict):
for key, value in data.items():
self.__setitem__(key, value)
return self
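# Illustrative usage sketch (added for clarity, not part of the original
# library): Model wraps flat, one-dimensional data and still supports the
# magic accessors from MagicTrait. The sample keys below are made up.
def _demo_model():
    'Minimal usage sketch for Model'
    model = Model({'user_name': 'Jane', 'user_age': 30})
    assert model.get() == {'user_name': 'Jane', 'user_age': 30}
    model.set_user_active(True)
    assert model.get_user_active() is True
    assert model['user_name'] == 'Jane'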
class CollectionInterface: #ignore coverage
'''
    Collections are a manageable list of models. Model
methods called by the collection are simply passed
to each model in the collection. Collections perform
the same functionality as a model, except on a more
massive level. This is the main collection object.
'''
def add(self, model = {}):
'Adds a row to the collection'
pass
def cut(self, index = 'last'):
'Removes a row and reindexes the collection'
pass
def each(self, callback):
'Loops through returned result sets'
pass
def get(self):
'Returns the entire data'
pass
def set(self, collection):
pass
class Collection(CollectionInterface):
'''
    Collections are a manageable list of models. Model
methods called by the collection are simply passed
to each model in the collection. Collections perform
the same functionality as a model, except on a more
massive level. This is the main collection object.
'''
FIRST = 'first'
LAST = 'last'
def __delattr__(self, name):
'Processes delete properties'
self.__delitem__(name)
def __delitem__(self, key):
'Deletes the item accessed like an array'
#if its an integer
if isinstance(key, int):
if key < len(self._list):
del self._list[key]
return
# it is not an integer
# go through each model | |
validate=dict(json={'target': 'active'})),
dict(method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
])
self.assertRaises(
exc.OpenStackCloudException,
self.cloud.node_set_provision_state,
self.fake_baremetal_node['uuid'],
'active',
wait=True,
timeout=300)
self.assert_calls()
def test_node_set_provision_state_wait_provide(self):
self.fake_baremetal_node['provision_state'] = 'manageable'
available_node = self.fake_baremetal_node.copy()
available_node['provision_state'] = 'available'
self.register_uris([
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'provision']),
validate=dict(json={'target': 'provide'})),
dict(method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
dict(method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=available_node),
])
return_value = self.cloud.node_set_provision_state(
self.fake_baremetal_node['uuid'],
'provide',
wait=True)
self.assertSubdict(available_node, return_value)
self.assert_calls()
def test_wait_for_baremetal_node_lock_locked(self):
self.fake_baremetal_node['reservation'] = 'conductor0'
unlocked_node = self.fake_baremetal_node.copy()
unlocked_node['reservation'] = None
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
dict(method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=unlocked_node),
])
self.assertIsNone(
self.cloud.wait_for_baremetal_node_lock(
self.fake_baremetal_node,
timeout=1))
self.assert_calls()
def test_wait_for_baremetal_node_lock_not_locked(self):
self.fake_baremetal_node['reservation'] = None
self.assertIsNone(
self.cloud.wait_for_baremetal_node_lock(
self.fake_baremetal_node,
timeout=1))
# NOTE(dtantsur): service discovery apparently requires 3 calls
self.assertEqual(3, len(self.adapter.request_history))
def test_wait_for_baremetal_node_lock_timeout(self):
self.fake_baremetal_node['reservation'] = 'conductor0'
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
])
self.assertRaises(
exc.OpenStackCloudException,
self.cloud.wait_for_baremetal_node_lock,
self.fake_baremetal_node,
timeout=0.001)
self.assert_calls()
def test_activate_node(self):
self.fake_baremetal_node['provision_state'] = 'active'
self.register_uris([
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'provision']),
validate=dict(json={'target': 'active',
'configdrive': 'http://host/file'})),
dict(method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
])
return_value = self.cloud.activate_node(
self.fake_baremetal_node['uuid'],
configdrive='http://host/file',
wait=True)
self.assertIsNone(return_value)
self.assert_calls()
def test_deactivate_node(self):
self.fake_baremetal_node['provision_state'] = 'available'
self.register_uris([
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'provision']),
validate=dict(json={'target': 'deleted'})),
dict(method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
])
return_value = self.cloud.deactivate_node(
self.fake_baremetal_node['uuid'],
wait=True)
self.assertIsNone(return_value)
self.assert_calls()
def test_register_machine(self):
mac_address = '00:01:02:03:04:05'
nics = [{'mac': mac_address}]
node_uuid = self.fake_baremetal_node['uuid']
# TODO(TheJulia): There is a lot of duplication
        # in testing creation. Surely this should be a helper
# or something. We should fix this.
node_to_post = {
'chassis_uuid': None,
'driver': None,
'driver_info': None,
'name': self.fake_baremetal_node['name'],
'properties': None,
'uuid': node_uuid}
self.fake_baremetal_node['provision_state'] = 'available'
if 'provision_state' in node_to_post:
node_to_post.pop('provision_state')
self.register_uris([
dict(
method='POST',
uri=self.get_mock_url(
resource='nodes'),
json=self.fake_baremetal_node,
validate=dict(json=node_to_post)),
dict(
method='POST',
uri=self.get_mock_url(
resource='ports'),
validate=dict(json={'address': mac_address,
'node_uuid': node_uuid}),
json=self.fake_baremetal_port),
])
return_value = self.cloud.register_machine(nics, **node_to_post)
self.assertDictEqual(self.fake_baremetal_node, return_value)
self.assert_calls()
# TODO(TheJulia): We need to de-duplicate these tests.
# Possibly a dedicated class, although we should do it
# then as we may find differences that need to be
# accounted for newer microversions.
def test_register_machine_enroll(self):
mac_address = '00:01:02:03:04:05'
nics = [{'mac': mac_address}]
node_uuid = self.fake_baremetal_node['uuid']
node_to_post = {
'chassis_uuid': None,
'driver': None,
'driver_info': None,
'name': self.fake_baremetal_node['name'],
'properties': None,
'uuid': node_uuid}
self.fake_baremetal_node['provision_state'] = 'enroll'
manageable_node = self.fake_baremetal_node.copy()
manageable_node['provision_state'] = 'manageable'
available_node = self.fake_baremetal_node.copy()
available_node['provision_state'] = 'available'
self.register_uris([
dict(
method='POST',
uri=self.get_mock_url(
resource='nodes'),
validate=dict(json=node_to_post),
json=self.fake_baremetal_node),
dict(
method='POST',
uri=self.get_mock_url(
resource='ports'),
validate=dict(json={'address': mac_address,
'node_uuid': node_uuid}),
json=self.fake_baremetal_port),
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'provision']),
validate=dict(json={'target': 'manage'})),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=manageable_node),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=manageable_node),
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'provision']),
validate=dict(json={'target': 'provide'})),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=available_node),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=available_node),
])
        # NOTE: When we migrate to a newer microversion, this test
        # may require revision. It was written for microversion
        # ?1.13?, which accidentally got reverted to 1.6 at one
# point during code being refactored soon after the
# change landed. Presently, with the lock at 1.6,
# this code is never used in the current code path.
return_value = self.cloud.register_machine(nics, **node_to_post)
self.assertSubdict(available_node, return_value)
self.assert_calls()
def test_register_machine_enroll_wait(self):
        mac_address = self.fake_baremetal_port['address']
nics = [{'mac': mac_address}]
node_uuid = self.fake_baremetal_node['uuid']
node_to_post = {
'chassis_uuid': None,
'driver': None,
'driver_info': None,
'name': self.fake_baremetal_node['name'],
'properties': None,
'uuid': node_uuid}
self.fake_baremetal_node['provision_state'] = 'enroll'
manageable_node = self.fake_baremetal_node.copy()
manageable_node['provision_state'] = 'manageable'
available_node = self.fake_baremetal_node.copy()
available_node['provision_state'] = 'available'
self.register_uris([
dict(
method='POST',
uri=self.get_mock_url(
resource='nodes'),
validate=dict(json=node_to_post),
json=self.fake_baremetal_node),
dict(
method='POST',
uri=self.get_mock_url(
resource='ports'),
validate=dict(json={'address': mac_address,
'node_uuid': node_uuid}),
json=self.fake_baremetal_port),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'provision']),
validate=dict(json={'target': 'manage'})),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=manageable_node),
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'provision']),
validate=dict(json={'target': 'provide'})),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=available_node),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=available_node),
])
return_value = self.cloud.register_machine(
nics, wait=True, **node_to_post)
self.assertSubdict(available_node, return_value)
self.assert_calls()
def test_register_machine_enroll_failure(self):
mac_address = '00:01:02:03:04:05'
nics = [{'mac': mac_address}]
node_uuid = self.fake_baremetal_node['uuid']
node_to_post = {
'chassis_uuid': None,
'driver': None,
'driver_info': None,
'name': self.fake_baremetal_node['name'],
'properties': None,
'uuid': node_uuid}
self.fake_baremetal_node['provision_state'] = 'enroll'
failed_node = self.fake_baremetal_node.copy()
failed_node['reservation'] = 'conductor0'
failed_node['provision_state'] = 'verifying'
failed_node['last_error'] = 'kaboom!'
self.register_uris([
dict(
method='POST',
uri=self.get_mock_url(
resource='nodes'),
json=self.fake_baremetal_node,
validate=dict(json=node_to_post)),
dict(
method='POST',
uri=self.get_mock_url(
resource='ports'),
validate=dict(json={'address': mac_address,
'node_uuid': node_uuid}),
json=self.fake_baremetal_port),
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'provision']),
validate=dict(json={'target': 'manage'})),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=failed_node),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=failed_node),
])
self.assertRaises(
exc.OpenStackCloudException,
self.cloud.register_machine,
nics,
**node_to_post)
self.assert_calls()
def test_register_machine_enroll_timeout(self):
mac_address = '00:01:02:03:04:05'
nics = [{'mac': mac_address}]
node_uuid = self.fake_baremetal_node['uuid']
node_to_post = {
'chassis_uuid': None,
'driver': None,
'driver_info': None,
'name': self.fake_baremetal_node['name'],
'properties': None,
'uuid': node_uuid}
self.fake_baremetal_node['provision_state'] = 'enroll'
busy_node = self.fake_baremetal_node.copy()
busy_node['reservation'] = 'conductor0'
busy_node['provision_state'] = 'verifying'
self.register_uris([
dict(
method='POST',
uri=self.get_mock_url(
resource='nodes'),
json=self.fake_baremetal_node,
validate=dict(json=node_to_post)),
dict(
method='POST',
uri=self.get_mock_url(
resource='ports'),
validate=dict(json={'address': mac_address,
'node_uuid': node_uuid}),
json=self.fake_baremetal_port),
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'provision']),
validate=dict(json={'target': 'manage'})),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=busy_node),
])
# NOTE(TheJulia): This test shortcircuits the timeout loop
# such that it executes only once. The very last returned
# state to the API is essentially a busy state that we
# want to block on until it has cleared.
self.assertRaises(
exc.OpenStackCloudException,
self.cloud.register_machine,
nics,
timeout=0.001,
lock_timeout=0.001,
**node_to_post)
self.assert_calls()
def test_register_machine_enroll_timeout_wait(self):
mac_address = '00:01:02:03:04:05'
nics = [{'mac': mac_address}]
node_uuid = self.fake_baremetal_node['uuid']
node_to_post = {
'chassis_uuid': None,
'driver': None,
'driver_info': None,
'name': self.fake_baremetal_node['name'],
'properties': None,
'uuid': node_uuid}
self.fake_baremetal_node['provision_state'] = 'enroll'
self.register_uris([
dict(
method='POST',
uri=self.get_mock_url(
resource='nodes'),
json=self.fake_baremetal_node,
validate=dict(json=node_to_post)),
dict(
method='POST',
uri=self.get_mock_url(
resource='ports'),
validate=dict(json={'address': mac_address,
'node_uuid': node_uuid}),
json=self.fake_baremetal_port),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'provision']),
validate=dict(json={'target': 'manage'})),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
])
self.assertRaises(
exc.OpenStackCloudException,
self.cloud.register_machine,
nics,
wait=True,
timeout=0.001,
**node_to_post)
self.assert_calls()
def test_register_machine_port_create_failed(self):
mac_address = '00:01:02:03:04:05'
nics = [{'mac': mac_address}]
node_uuid = self.fake_baremetal_node['uuid']
node_to_post = {
'chassis_uuid': None,
'driver': None,
'driver_info': None,
'name': self.fake_baremetal_node['name'],
'properties': None,
'uuid': node_uuid}
self.fake_baremetal_node['provision_state'] = 'available'
self.register_uris([
dict(
method='POST',
uri=self.get_mock_url(
resource='nodes'),
json=self.fake_baremetal_node,
validate=dict(json=node_to_post)),
dict(
method='POST',
uri=self.get_mock_url(
resource='ports'),
status_code=400,
json={'error': 'invalid'},
validate=dict(json={'address': mac_address,
'node_uuid': node_uuid})),
dict(
method='DELETE',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']])),
])
self.assertRaises(exc.OpenStackCloudException,
self.cloud.register_machine,
nics, **node_to_post)
self.assert_calls()
def test_unregister_machine(self):
mac_address = self.fake_baremetal_port['address']
nics = [{'mac': mac_address}]
port_uuid = self.fake_baremetal_port['uuid']
# NOTE(TheJulia): The two values below should be the same.
port_node_uuid = self.fake_baremetal_port['node_uuid']
port_url_address = 'detail?address=%s' % mac_address
self.fake_baremetal_node['provision_state'] = 'available'
self.register_uris([
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
dict(
method='GET',
uri=self.get_mock_url(
resource='ports',
append=[port_url_address]),
json={'ports': [{'address': mac_address,
'node_uuid': port_node_uuid,
'uuid': port_uuid}]}),
dict(
method='DELETE',
uri=self.get_mock_url(
resource='ports',
append=[self.fake_baremetal_port['uuid']])),
dict(
method='DELETE',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']])),
])
self.cloud.unregister_machine(
nics, self.fake_baremetal_node['uuid'])
self.assert_calls()
def test_unregister_machine_timeout(self):
mac_address = self.fake_baremetal_port['address']
nics = [{'mac': mac_address}]
port_uuid = self.fake_baremetal_port['uuid']
port_node_uuid = self.fake_baremetal_port['node_uuid']
port_url_address = 'detail?address=%s' % mac_address
self.fake_baremetal_node['provision_state'] = 'available'
self.register_uris([
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
dict(
method='GET',
uri=self.get_mock_url(
resource='ports',
append=[port_url_address]),
json={'ports': [{'address': mac_address,
'node_uuid': port_node_uuid,
'uuid': port_uuid}]}),
dict(
method='DELETE',
uri=self.get_mock_url(
resource='ports',
append=[self.fake_baremetal_port['uuid']])),
dict(
method='DELETE',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']])),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
])
self.assertRaises(
exc.OpenStackCloudException,
self.cloud.unregister_machine,
nics,
self.fake_baremetal_node['uuid'],
wait=True,
timeout=0.001)
self.assert_calls()
def test_unregister_machine_locked_timeout(self):
mac_address = self.fake_baremetal_port['address']
nics = [{'mac': mac_address}]
self.fake_baremetal_node['provision_state'] = 'available'
self.fake_baremetal_node['reservation'] = 'conductor99'
self.register_uris([
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
])
self.assertRaises(
exc.OpenStackCloudException,
self.cloud.unregister_machine,
nics,
self.fake_baremetal_node['uuid'],
timeout=0.001)
self.assert_calls()
def test_unregister_machine_retries(self):
mac_address = self.fake_baremetal_port['address']
nics = [{'mac': mac_address}]
port_uuid = self.fake_baremetal_port['uuid']
# NOTE(TheJulia): The two values below should be the same.
port_node_uuid = self.fake_baremetal_port['node_uuid']
port_url_address = 'detail?address=%s' % mac_address
self.fake_baremetal_node['provision_state'] = 'available'
self.register_uris([
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
dict(
method='GET',
uri=self.get_mock_url(
resource='ports',
append=[port_url_address]),
json={'ports': [{'address': mac_address,
'node_uuid': port_node_uuid,
'uuid': port_uuid}]}),
dict(
method='DELETE',
status_code=503,
uri=self.get_mock_url(
resource='ports',
append=[self.fake_baremetal_port['uuid']])),
dict(
method='DELETE',
status_code=409,
uri=self.get_mock_url(
resource='ports',
append=[self.fake_baremetal_port['uuid']])),
dict(
method='DELETE',
uri=self.get_mock_url(
resource='ports',
append=[self.fake_baremetal_port['uuid']])),
dict(
method='DELETE',
status_code=409,
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']])),
dict(
method='DELETE',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']])),
])
self.cloud.unregister_machine(
nics, self.fake_baremetal_node['uuid'])
self.assert_calls()
def test_unregister_machine_unavailable(self):
# This is a list of invalid states that the method
# should fail on.
invalid_states = ['active', 'cleaning', 'clean wait', 'clean failed']
mac_address = self.fake_baremetal_port['address']
nics = [{'mac': mac_address}]
url_list = []
for state in invalid_states:
self.fake_baremetal_node['provision_state'] = state
url_list.append(
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node))
self.register_uris(url_list)
for state in invalid_states:
self.assertRaises(
exc.OpenStackCloudException,
self.cloud.unregister_machine,
nics,
self.fake_baremetal_node['uuid'])
self.assert_calls()
def test_update_machine_patch_no_action(self):
self.register_uris([dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
])
# NOTE(TheJulia): This is just testing mechanics.
update_dict = self.cloud.update_machine(
self.fake_baremetal_node['uuid'])
self.assertIsNone(update_dict['changes'])
self.assertSubdict(self.fake_baremetal_node, update_dict['node'])
self.assert_calls()
def test_attach_port_to_machine(self):
vif_id = '953ccbee-e854-450f-95fe-fe5e40d611ec'
self.register_uris([
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
dict(
method='GET',
"""This module hosts unit tests for the cli."""
import os
from pathlib import Path
import sys
from unittest.mock import MagicMock
from click.testing import CliRunner
from docker.errors import ImageNotFound
import paramiko
import pytest
from yaml.composer import ComposerError
from build_magic import __version__ as version
from build_magic.cli import build_magic
from build_magic.exc import DockerDaemonError
from build_magic.reference import ExitCode
USAGE = """Usage: build-magic [OPTIONS] [ARGS]...
build-magic is an un-opinionated build automation tool. Some potential uses include:
* Building applications across multiple platforms.
* Conducting installation dry runs.
* Automating repeated tasks.
* Deploying and installing artifacts to remote machines.
Examples:
* Archive two files on the local machine.
build-magic tar -czf myfiles.tar.gz file1.txt file2.txt
* Archive two files on the local machine and delete the original files.
build-magic -c build "tar -czf myfiles.tar.gz file1.txt file2.txt" -c execute "rm file1.txt file2.txt"
* Copy two files to a remote machine and archive them.
build-magic -r remote -e user@myhost --copy . -c build "tar -czf myfiles.tar.gz f1.txt f2.txt" f1.txt f2.txt
* Build a project in a Linux container.
build-magic -r docker -e Ubuntu:latest -c execute "configure" -c build "make all"
* Execute multiple commands in a config file.
build-magic -C myconfig.yaml
* Execute a particular stage in a config file.
build-magic -C myconfig.yaml -t build
Use --help for detailed usage of each option.
Visit https://cmmorrow.github.io/build-magic/user_guide/cli_usage/ for a detailed usage description.
"""
@pytest.fixture
def cli():
"""Provides a CliRunner object for invoking cli calls."""
return CliRunner()
@pytest.fixture
def magic_dir(tmp_path_factory):
"""Provides a temporary directory for testing copy/working directory behavior."""
magic = tmp_path_factory.mktemp('build_magic')
return magic
@pytest.fixture
def tmp_file(magic_dir):
"""Provides a test file in the temp directory."""
hello = magic_dir / 'hello.txt'
hello.write_text('hello world')
yield magic_dir
os.remove('hello.txt')
@pytest.fixture
def current_file(magic_dir):
"""Provides a test file in the current directory."""
current = Path().cwd().resolve()
hello = current / 'hello.txt'
hello.write_text('hello world')
yield magic_dir
os.chdir(str(current))
os.remove('hello.txt')
@pytest.fixture
def config_file(magic_dir):
"""Provides a config file in the temp directory."""
if os.sys.platform == 'win32':
filename = 'config_win.yaml'
else:
filename = 'config.yaml'
config = magic_dir / filename
content = Path(__file__).parent.joinpath('files').joinpath(filename).read_text()
config.write_text(content)
yield config
os.remove(magic_dir / filename)
@pytest.fixture
def multi_config(magic_dir):
"""Provides a config file with multiple stage in the temp directory."""
if os.sys.platform == 'win32':
filename = 'multi_win.yaml'
else:
filename = 'multi.yaml'
config = magic_dir / filename
content = Path(__file__).parent.joinpath('files').joinpath(filename).read_text()
config.write_text(content)
yield config
os.remove(magic_dir / filename)
@pytest.fixture
def targets_config(magic_dir):
"""Provides a config file for testing multiple targets in the temp directory."""
filename = 'targets.yaml'
config = magic_dir / filename
content = Path(__file__).parent.joinpath('files').joinpath(filename).read_text()
config.write_text(content)
yield config
os.remove(magic_dir / filename)
@pytest.fixture
def default_config():
"""Provides a default config file in the current directory."""
filename = 'build-magic.yaml'
current = Path().cwd().resolve()
config = current / filename
content = Path(__file__).parent.joinpath('files').joinpath(filename).read_text()
config.write_text(content)
yield config
os.remove(config)
@pytest.fixture
def second_default(magic_dir):
"""Provides an additional default config as an alternative."""
filename = 'build-magic.yml'
current = Path().cwd().resolve()
config = current / filename
content = Path(__file__).parent.joinpath('files').joinpath('build-magic.yaml').read_text()
config.write_text(content)
yield magic_dir
os.chdir(str(current))
os.remove(filename)
@pytest.fixture
def variable_and_default_config(default_config, variables_config):
"""Provides a default and variable config file in the current directory."""
filename = variables_config.name
current = Path().cwd().resolve()
config = current / filename
content = variables_config.read_text()
config.write_text(content)
yield magic_dir
# os.chdir(str(current))
os.remove(current / filename)
@pytest.fixture
def prompt_and_default_config(default_config, prompt_config):
"""Provides a default and prompt config file in the current directory."""
filename = prompt_config.name
current = Path().cwd().resolve()
config = current / filename
content = prompt_config.read_text()
config.write_text(content)
yield magic_dir
os.chdir(str(current))
os.remove(filename)
@pytest.fixture
def parameters_config(magic_dir):
"""Provides a config file with parameters in the temp directory."""
filename = 'parameters.yaml'
config = magic_dir / filename
content = Path(__file__).parent.joinpath('files').joinpath(filename).read_text()
config.write_text(content)
yield config
os.remove(magic_dir / filename)
@pytest.fixture
def variables_config(magic_dir):
"""Provides a config file for testing variable substitution in the temp directory."""
filename = 'variables.yaml'
config = magic_dir / filename
content = Path(__file__).parent.joinpath('files').joinpath(filename).read_text()
config.write_text(content)
yield config
os.remove(magic_dir / filename)
@pytest.fixture
def prompt_config(magic_dir):
"""Provides a config file with a prompt for variable input in the temp directory."""
filename = 'prompt.yaml'
config = magic_dir / filename
content = Path(__file__).parent.joinpath('files').joinpath(filename).read_text()
config.write_text(content)
yield config
os.remove(magic_dir / filename)
@pytest.fixture
def ls():
"""Provides the correct list command for the executing operating system."""
if os.sys.platform == 'win32':
return 'dir'
else:
return 'ls'
@pytest.fixture
def cat():
"""Provides the correct cat command for the executing operating system."""
if os.sys.platform == 'win32':
return 'type'
else:
return 'cat'
@pytest.fixture
def cp():
"""Provides the correct file copy command for the executing operating system."""
if os.sys.platform == 'win32':
return 'copy'
else:
return 'cp'
def test_cli_no_options(cli):
"""Verify that the usage is printed when no options or arguments are provided."""
res = cli.invoke(build_magic)
assert res.exit_code == ExitCode.NO_TESTS
assert res.output == USAGE
def test_cli_help(cli):
"""Verify the help is displayed when given the --help option."""
ref = """Usage: build-magic [OPTIONS] [ARGS]...
An un-opinionated build automation tool.
ARGS - One of three possible uses based on context:
1. If the --copy option is used, each argument in ARGS is a file name in the
copy from directory to copy to the working directory.
2. If there is a config file named build-magic.yaml in the working directory,
ARGS is the name of a stage to execute.
3. ARGS are considered a single command to execute if the --command option
isn't used.
Visit https://cmmorrow.github.io/build-magic/user_guide/cli_usage/ for a
detailed usage description.
Options:
-c, --command <TEXT TEXT>... A directive, command pair to execute.
-C, --config FILENAME The config file to load parameters from.
--copy TEXT Copy files from the specified path.
-e, --environment TEXT The command runner environment to use.
-r, --runner [local|remote|vagrant|docker]
The command runner to use.
--name TEXT The stage name to use.
-t, --target TEXT Run a particular stage in a config file by
name.
--template Generates a config file template in the
current directory.
--wd DIRECTORY The working directory to run commands from.
--continue / --stop Continue to run after failure if True.
-p, --parameter <TEXT TEXT>... Space separated key/value used for runner
specific settings.
-v, --variable <TEXT TEXT>... Space separated key/value config file
variables.
--prompt TEXT Config file variable with prompt for value.
--action [default|cleanup|persist]
The setup and teardown action to perform.
--plain / --fancy Enables basic output. Ideal for logging and
automation.
--quiet Suppresses all output from build-magic.
--verbose Verbose output -- stdout from executed
commands will be printed when complete.
--version Show the version and exit.
--help Show this message and exit.
"""
res = cli.invoke(build_magic, ['--help'])
assert res.exit_code == ExitCode.PASSED
assert res.output == ref
def test_cli_single_command(cli):
"""Verify passing a single single command as arguments works correctly."""
res = cli.invoke(build_magic, ['echo hello world'])
assert res.exit_code == ExitCode.PASSED
def test_cli_multiple_commands(cli, ls):
"""Verify passing multiple commands with the -c and --command options works correctly."""
res = cli.invoke(build_magic, ['-c', 'execute', 'echo hello world', '-c', 'execute', f'{ls}'])
assert res.exit_code == ExitCode.PASSED
res = cli.invoke(build_magic, ['--command', 'execute', 'echo hello world', '--command', 'execute', f'{ls}'])
assert res.exit_code == ExitCode.PASSED
def test_cli_runner(cli, ls):
"""Verify the local runner is used with -r and --runner options works correctly."""
res = cli.invoke(build_magic, ['-r', 'local', f'{ls}'])
assert res.exit_code == ExitCode.PASSED
res = cli.invoke(build_magic, ['--runner', 'local', f'{ls}'])
assert res.exit_code == ExitCode.PASSED
def test_cli_stage_name(cli):
"""Verify the stage --name option works as expected."""
res = cli.invoke(build_magic, ['--name', 'test stage', 'echo hello'])
assert res.exit_code == ExitCode.PASSED
assert 'Starting Stage 1: test stage' in res.output
assert 'Stage 1: test stage - finished with result COMPLETE' in res.output
res = cli.invoke(build_magic, ['--name', 'test stage', '-c', 'execute', 'echo hello'])
assert res.exit_code == ExitCode.PASSED
assert 'Starting Stage 1: test stage' in res.output
assert 'Stage 1: test stage - finished with result COMPLETE' in res.output
def test_cli_invalid_runner(cli):
"""Test the case where an invalid command runner is provided."""
ref = """Usage: build-magic [OPTIONS] [ARGS]...
Try 'build-magic --help' for help.
Error: Invalid value for '--runner' / '-r': 'dummy' is not one of 'local', 'remote', 'vagrant', 'docker'.
"""
res = cli.invoke(build_magic, ['-r', 'dummy', 'ls'])
assert res.exit_code == ExitCode.INPUT_ERROR
assert res.output == ref
def test_cli_docker_missing_environment(cli):
"""Test the case where the docker runner is called without the environment option."""
ref = """Environment must be a Docker image if using the Docker runner.
"""
res = cli.invoke(build_magic, ['-r', 'docker', 'ls'])
assert res.exit_code == ExitCode.INPUT_ERROR
assert res.output == ref
def test_cli_docker_environment_not_found(cli, mocker):
"""Test the case where the requested image is not found."""
mocker.patch('docker.client.DockerClient.containers', new_callable=mocker.PropertyMock)
mocker.patch('docker.client.DockerClient.containers.list', return_value=[])
mocker.patch('docker.client.DockerClient.containers.run', side_effect=ImageNotFound('Not Found'))
res = cli.invoke(build_magic, ['-r', 'docker', '-e', 'centos:7', 'echo', '"hello world"'])
assert 'Setup failed: Not Found' in res.output
def test_cli_docker_container_already_running(cli, mocker):
"""Test the case where a build-magic container is already running."""
mocker.patch('docker.client.DockerClient.containers', new_callable=mocker.PropertyMock)
mocker.patch('docker.client.DockerClient.containers.list', return_value=[MagicMock])
res = cli.invoke(build_magic, ['-r', 'docker', '-e', 'centos:7', 'echo', '"hello world"'])
# controllerClass.py
# coding=utf-8
import os
import re
import time
import socket
import inspect
import threading
import subprocess
import logger
gsmThreadName = 'gsmReceptor'
gprsThreadName = 'gprsReceptor'
wifiThreadName = 'wifiReceptor'
emailThreadName = 'emailReceptor'
ethernetThreadName = 'ethernetReceptor'
bluetoothThreadName = 'bluetoothReceptor'
threadNameList = [gsmThreadName, gprsThreadName, wifiThreadName, ethernetThreadName, bluetoothThreadName, emailThreadName]
class Controller(threading.Thread):
availableGsm = False # Indicates whether GSM mode is available
availableGprs = False # Indicates whether GPRS mode is available
availableWifi = False # Indicates whether WIFI mode is available
availableEthernet = False # Indicates whether ETHERNET mode is available
availableBluetooth = False # Indicates whether BLUETOOTH mode is available
availableEmail = False # Indicates whether EMAIL mode is available
gsmInstance = None
gprsInstance = None
wifiInstance = None
ethernetInstance = None
bluetoothInstance = None
emailInstance = None
isActive = False
def __init__(self, _REFRESH_TIME):
threading.Thread.__init__(self, name = 'ControllerThread')
self.REFRESH_TIME = _REFRESH_TIME
def __del__(self):
self.gsmInstance.isActive = False
self.gprsInstance.isActive = False
self.wifiInstance.isActive = False
self.ethernetInstance.isActive = False
self.bluetoothInstance.isActive = False
self.emailInstance.isActive = False
# Wait for the receiver threads to finish
for receptorThread in threading.enumerate():
if receptorThread.getName() in threadNameList and receptorThread.isAlive():
receptorThread.join()
logger.write('INFO', '[CONTROLLER] Objeto destruido.')
def run(self):
self.isActive = True
while self.isActive:
self.availableGsm = self.verifyGsmConnection()
self.availableGprs = self.verifyGprsConnection()
self.availableWifi = self.verifyWifiConnection()
self.availableEthernet = self.verifyEthernetConnection()
self.availableBluetooth = self.verifyBluetoothConnection()
self.availableEmail = self.verifyEmailConnection()
time.sleep(self.REFRESH_TIME)
logger.write('WARNING', '[CONTROLLER] Función \'%s\' terminada.' % inspect.stack()[0][3])
def verifyGsmConnection(self):
# Build the regular expression
ttyUSBPattern = re.compile('ttyUSB[0-9]+')
lsDevProcess = subprocess.Popen(['ls', '/dev/'], stdout = subprocess.PIPE, stderr = subprocess.PIPE)
lsDevOutput, lsDevError = lsDevProcess.communicate()
ttyUSBDevices = ttyUSBPattern.findall(lsDevOutput)
# Connected USB devices were detected
for ttyUSBx in reversed(ttyUSBDevices):
# If the serial port was never set, the instance is not being used
if self.gsmInstance.serialPort is None:
# If no error occurs during configuration, set the modem to receive SMS and calls
if self.gsmInstance.connect('/dev/' + ttyUSBx):
gsmThread = threading.Thread(target = self.gsmInstance.receive, name = gsmThreadName)
logger.write('INFO', '[GSM] Listo para usarse (' + ttyUSBx + ').')
gsmThread.start()
return True
# If an error occurs during configuration, return 'False'
else:
return False
# If the modem is already active (running), return 'True'
elif self.gsmInstance.isActive:
return True
# We get here if the modem's 'connect' produced an error (and it is still plugged in)
else:
return False
# If there was a previous 'connect()' attempt, successful or not, the port must be cleaned up
if self.gsmInstance.serialPort is not None:
self.gsmInstance.successfulConnection = None
self.gsmInstance.serialPort = None
self.gsmInstance.isActive = False
self.gsmInstance.closePort()
return False
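# Illustrative note (hypothetical listing, not part of the class): the detection
# above simply collects /dev entries matching ttyUSB<N>, e.g.
#   re.compile('ttyUSB[0-9]+').findall('ttyUSB0\nttyACM0\nttyUSB1\n')
# returns ['ttyUSB0', 'ttyUSB1']; each candidate is then tried with connect().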
def verifyGprsConnection(self):
# Build the regular expression
pppPattern = re.compile('ppp[0-9]+')
for networkInterface in os.popen('ip link show').readlines():
# Use 'pppPattern.search(networkInterface)' to look for a match
matchedPattern = pppPattern.search(networkInterface)
# The current interface matches a 'ppp' pattern
if matchedPattern is not None and networkInterface.find("state UNKNOWN") > 0:
# This holds when no configuration attempt was ever made
if self.gprsInstance.localInterface is None:
# Get the interface that matches the pattern
self.gprsInstance.localInterface = matchedPattern.group()
# Get the local IP address assigned statically or via DHCP
commandToExecute = 'ip addr show ' + self.gprsInstance.localInterface + ' | grep inet'
localIPAddress = os.popen(commandToExecute).readline().split()[1].split('/')[0]
# If no error occurs during configuration, start listening on the IP
if self.gprsInstance.connect(localIPAddress):
gprsThread = threading.Thread(target = self.gprsInstance.receive, name = gprsThreadName)
gprsInfo = self.gprsInstance.localInterface + ' - ' + self.gprsInstance.localIPAddress
logger.write('INFO', '[GRPS] Listo para usarse (' + gprsInfo + ').')
gprsThread.start()
return True
# If an error occurs during configuration, return 'False'
else:
return False
# The matched pattern equals the instance's interface
elif matchedPattern.group() == self.gprsInstance.localInterface:
# If no error occurred during configuration, return 'True'
if self.gprsInstance.successfulConnection:
return True
# Otherwise there was an error, return 'False'
else:
return False
# The matched pattern is in use but does not match the instance's interface
else:
continue
# No match found in the current iteration, keep searching
else:
continue
# If we get here, there was an active connection and it was lost
if self.gprsInstance.localInterface is not None:
# Clear all fields of the NETWORK object
self.gprsInstance.successfulConnection = None
self.gprsInstance.localInterface = None
self.gprsInstance.localIPAddress = None
self.gprsInstance.isActive = False
return False
def verifyWifiConnection(self):
# Build the regular expression
wlanPattern = re.compile('wlan[0-9]+')
activeInterfacesList = open('/tmp/activeInterfaces', 'a+').read()
for networkInterface in os.popen('ip link show').readlines():
# Use 'wlanPattern.search(networkInterface)' to look for a match
matchedPattern = wlanPattern.search(networkInterface)
# The current interface matches a 'wlan' pattern
if matchedPattern is not None and networkInterface.find("state UP") > 0:
# The matched pattern is not in use and the instance is not active (it must be enabled)
if matchedPattern.group() not in activeInterfacesList and self.wifiInstance.localInterface is None:
# Get the interface that matches the pattern
self.wifiInstance.localInterface = matchedPattern.group()
# Write the interface to our file to mark it as busy
activeInterfacesFile = open('/tmp/activeInterfaces', 'a+')
activeInterfacesFile.write(self.wifiInstance.localInterface + '\n')
activeInterfacesFile.close()
# Get the local IP address assigned statically or via DHCP
commandToExecute = 'ip addr show ' + self.wifiInstance.localInterface + ' | grep inet'
localIPAddress = os.popen(commandToExecute).readline().split()[1].split('/')[0]
# If no error occurs during configuration, start listening on the IP
if self.wifiInstance.connect(localIPAddress):
wifiThread = threading.Thread(target = self.wifiInstance.receive, name = wifiThreadName)
wifiInfo = self.wifiInstance.localInterface + ' - ' + self.wifiInstance.localIPAddress
logger.write('INFO', '[WIFI] Listo para usarse (' + wifiInfo + ').')
wifiThread.start()
return True
# If an error occurs during configuration, return 'False'
else:
return False
# The matched pattern equals the instance's interface
elif matchedPattern.group() == self.wifiInstance.localInterface:
# If no error occurred during configuration, return 'True'
if self.wifiInstance.successfulConnection:
return True
# Otherwise there was an error, return 'False'
else:
return False
# The matched pattern is in use but does not match the instance's interface
else:
continue
# No match found in the current iteration, keep searching
else:
continue
# If there was a previous 'connect()' attempt, successful or not, the interface must be cleaned up
if self.wifiInstance.localInterface is not None:
localInterface = self.wifiInstance.localInterface
# Clear all fields of the NETWORK object
self.wifiInstance.successfulConnection = None
self.wifiInstance.localInterface = None
self.wifiInstance.localIPAddress = None
self.wifiInstance.isActive = False
# Remove the used network interface from the file
dataToWrite = open('/tmp/activeInterfaces').read().replace(localInterface + '\n', '')
activeInterfacesFile = open('/tmp/activeInterfaces', 'w')
activeInterfacesFile.write(dataToWrite)
activeInterfacesFile.close()
return False
def verifyEthernetConnection(self):
# Build the regular expression
ethPattern = re.compile('eth[0-9]+')
activeInterfacesList = open('/tmp/activeInterfaces', 'a+').read()
for networkInterface in os.popen('ip link show').readlines():
# Use 'ethPattern.search(networkInterface)' to look for a match
matchedPattern = ethPattern.search(networkInterface)
# The current interface matches an 'eth' pattern
if matchedPattern is not None and networkInterface.find("state UP") > 0:
# The matched pattern is not in use and the instance is not active (it must be enabled)
if matchedPattern.group() not in activeInterfacesList and self.ethernetInstance.localInterface is None:
# Get the interface that matches the pattern
self.ethernetInstance.localInterface = matchedPattern.group()
# Write the interface to our file to mark it as busy
activeInterfacesFile = open('/tmp/activeInterfaces', 'a+')
activeInterfacesFile.write(self.ethernetInstance.localInterface + '\n')
activeInterfacesFile.close()
# Get the local IP address assigned statically or via DHCP
commandToExecute = 'ip addr show ' + self.ethernetInstance.localInterface + ' | grep inet'
localIPAddress = os.popen(commandToExecute).readline().split()[1].split('/')[0]
# If no error occurs during configuration, start listening on the IP
if self.ethernetInstance.connect(localIPAddress):
ethernetThread = threading.Thread(target = self.ethernetInstance.receive, name = ethernetThreadName)
ethernetInfo = self.ethernetInstance.localInterface + ' - ' + self.ethernetInstance.localIPAddress
logger.write('INFO', '[ETHERNET] Listo para usarse (' + ethernetInfo + ').')
ethernetThread.start()
return True
# If an error occurs during configuration, return 'False'
else:
return False
# The matched pattern equals the instance's interface
elif matchedPattern.group() == self.ethernetInstance.localInterface:
# If no error occurred during configuration, return 'True'
if self.ethernetInstance.successfulConnection:
return True
# Otherwise there was an error, return 'False'
else:
return False
# The matched pattern is in use but does not match the instance's interface
else:
continue
# No match found in the current iteration, keep searching
else:
continue
# If there was a previous 'connect()' attempt, successful or not, the interface must be cleaned up
if self.ethernetInstance.localInterface is not None:
localInterface = self.ethernetInstance.localInterface
# Clear all fields of the NETWORK object
self.ethernetInstance.successfulConnection = None
self.ethernetInstance.localInterface = None
self.ethernetInstance.localIPAddress = None
self.ethernetInstance.isActive = False
# Remove the used network interface from the file
dataToWrite = open('/tmp/activeInterfaces').read().replace(localInterface + '\n', '')
activeInterfacesFile = open('/tmp/activeInterfaces', 'w')
activeInterfacesFile.write(dataToWrite)
activeInterfacesFile.close()
return False
def verifyBluetoothConnection(self):
activeInterfacesList = open('/tmp/activeInterfaces', 'a+').read()
# Example bluetoothDevices: ['Devices:\n', '\thci0\t00:24:7E:64:7B:4A\n']
bluetoothDevices = os.popen('hcitool dev').readlines()
# Drop the leftmost element ('Devices:\n')
bluetoothDevices.pop(0)
for btDevice in bluetoothDevices:
# Example btDevice: \thci0\t00:24:7E:64:7B:4A\n
btInterface = btDevice.split('\t')[1]
btAddress = btDevice.split('\t')[2].replace('\n', '')
# The interface found is not in use and the instance is not active (it must be enabled)
if btInterface not in activeInterfacesList and self.bluetoothInstance.localInterface is None:
# Take the interface that was found
self.bluetoothInstance.localInterface = btInterface
# Write the interface to our file to mark it as busy
activeInterfacesFile = open('/tmp/activeInterfaces', 'a+')
activeInterfacesFile.write(btInterface + '\n')
activeInterfacesFile.close()
# If no error occurs during configuration, start listening on the MAC
if self.bluetoothInstance.connect(btAddress):
bluetoothThread = threading.Thread(target = self.bluetoothInstance.receive, name = bluetoothThreadName)
bluetoothInfo = self.bluetoothInstance.localInterface + ' - ' + self.bluetoothInstance.localMACAddress
logger.write('INFO', '[BLUETOOTH] Listo para usarse (' + bluetoothInfo + ').')
bluetoothThread.start()
return True
# If an error occurs during configuration, return 'False'
else:
return False
# The interface found equals the instance's interface
elif btInterface == self.bluetoothInstance.localInterface:
# If no error occurred during configuration, return 'True'
if self.bluetoothInstance.successfulConnection:
return True
# Otherwise there was an error, return 'False'
else:
return False
# The interface found is in use but does not match the instance's interface
import torch
import torch.nn as nn
import torch.nn.functional as F
import random
from .neural_blocks import (
SkipConnMLP, UpdateOperator, FourierEncoder, PositionalEncoder, NNEncoder
)
from .utils import (
dir_to_elev_azim, autograd, sample_random_hemisphere, laplace_cdf, load_sigmoid,
)
import src.refl as refl
from .renderers import ( load_occlusion_kind, direct )
import src.march as march
@torch.jit.script
def cumuprod_exclusive(t):
cp = torch.cumprod(t, dim=0)
cp = torch.roll(cp, 1, dims=0)
cp[0, ...] = 1.0
return cp
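# Illustrative sketch (not part of the original module): cumuprod_exclusive is the
# cumulative product along dim 0 shifted by one step, so entry i holds the product
# of all *previous* entries and entry 0 is 1. This is the transmittance term used
# when turning alphas into weights below.
def _example_cumuprod_exclusive():
    t = torch.tensor([[2.0], [3.0], [4.0]])
    # torch.cumprod along dim 0 gives [[2.], [6.], [24.]];
    # the exclusive version gives [[1.], [2.], [6.]].
    return cumuprod_exclusive(t)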
# @torch.jit.script cannot be used here because of tensordot :)
def compute_pts_ts(
rays, near, far, steps, lindisp=False,
perturb: float = 0,
):
r_o, r_d = rays.split([3,3], dim=-1)
device = r_o.device
if lindisp:
t_vals = torch.linspace(0, 1, steps, device=device, dtype=r_o.dtype)
ts = 1/(1/max(near, 1e-10) * (1-t_vals) + 1/far * (t_vals))
else:
ts = torch.linspace(near, far, steps=steps, device=device, dtype=r_o.dtype)
if perturb > 0:
mids = 0.5 * (ts[:-1] + ts[1:])
lower = torch.cat([mids, ts[-1:]])
upper = torch.cat([ts[:1], mids])
rand = torch.rand_like(lower) * perturb
ts = lower + (upper - lower) * rand
pts = r_o.unsqueeze(0) + torch.tensordot(ts, r_d, dims = 0)
return pts, ts, r_o, r_d
# given a set of densities, and distances between the densities,
# compute alphas from them.
@torch.jit.script
def alpha_from_density(
density, ts, r_d,
softplus: bool = True,
):
device=density.device
if softplus: sigma_a = F.softplus(density-1)
else: sigma_a = F.relu(density)
end_val = torch.full_like(ts[..., :1], 1e10)
dists = torch.cat([ts[..., 1:] - ts[..., :-1], end_val], dim=-1)
while len(dists.shape) < 4: dists = dists[..., None]
dists = dists * torch.linalg.norm(r_d, dim=-1)
alpha = 1 - torch.exp(-sigma_a * dists)
weights = alpha * cumuprod_exclusive(1.0 - alpha + 1e-10)
return alpha, weights
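# Worked relation (sketch): with densities sigma_i and step sizes delta_i,
#   alpha_i = 1 - exp(-sigma_i * delta_i)
#   w_i     = alpha_i * prod_{j < i} (1 - alpha_j)
# so the returned `weights` sum to at most 1 along the step dimension and can be
# passed directly to volumetric_integrate.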
# TODO delete these for utils
# sigmoids which shrink or expand the total range to prevent gradient vanishing,
# or prevent it from representing full density items.
# fat sigmoid has no vanishing gradient, but thin sigmoid leads to better outlines.
def fat_sigmoid(v, eps: float = 1e-3): return v.sigmoid() * (1+2*eps) - eps
def thin_sigmoid(v, eps: float = 1e-2): return fat_sigmoid(v, -eps)
def cyclic_sigmoid(v, eps:float=-1e-2,period:int=5):
return ((v/period).sin()+1)/2 * (1+2*eps) - eps
# perform volumetric integration of density with some other quantity
# returns the integrated 2nd value over density at timesteps.
@torch.jit.script
def volumetric_integrate(weights, other):
return torch.sum(weights[..., None] * other, dim=0)
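# End-to-end sketch (toy [steps, B, H, W] shapes assumed, not original code):
# integrate per-sample RGB values against the weights from alpha_from_density.
def _example_volumetric_integrate():
    ts = torch.linspace(0.1, 1.0, 8)      # [steps]
    r_d = torch.ones(1, 4, 4, 3)          # [B, H, W, 3]
    density = torch.rand(8, 1, 4, 4)      # [steps, B, H, W]
    rgb = torch.rand(8, 1, 4, 4, 3)       # [steps, B, H, W, 3]
    _alpha, weights = alpha_from_density(density, ts, r_d)
    return volumetric_integrate(weights, rgb)  # [1, 4, 4, 3]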
# perform volumetric integration but only using some of other's values where the weights
# are big enough.
#
# TODO the computation of `other` itself should be sparse, so that it doesn't need to be
# computed in the first place.
@torch.jit.script
def sparse_volumetric_integrate(weights, other, eps:float=1e-3):
vals = torch.full_like(other, eps)
mask = weights > eps
vals[mask] = other[mask]
return torch.sum(weights[..., None] * vals, dim=0)
# bg functions, need to be here for pickling
def black(_elaz_r_d, _weights): return 0
def white(_, weights): return 1-weights.sum(dim=0).unsqueeze(-1)
# having a random color will probably help prevent any background
def random_color(_elaz_r_d, weights):
# TODO need to think this through more
# This will make it so that there never is a background.
summed = (1-weights.sum(dim=0).unsqueeze(-1))
return torch.rand_like(summed) * summed
class CommonNeRF(nn.Module):
def __init__(
self,
steps: int = 64,
#out_features: int = 3, # 3 is for RGB
t_near: float = 0,
t_far: float = 1,
density_std: float = 0.01,
noise_std: float = 1e-2,
mip = None,
instance_latent_size: int = 0,
per_pixel_latent_size: int = 0,
per_point_latent_size: int = 0,
sigmoid_kind: str = "thin",
bg: str = "black",
record_depth: bool = False,
device="cuda",
):
super().__init__()
self.empty_latent = torch.zeros(1,1,1,1,0, device=device, dtype=torch.float)
self.t_near = t_near
self.t_far = t_far
self.steps = steps
self.mip = mip
self.per_pixel_latent_size = per_pixel_latent_size
self.per_pixel_latent = None
self.instance_latent_size = instance_latent_size
self.instance_latent = None
self.per_pt_latent_size = per_point_latent_size
self.per_pt_latent = None
self.alpha = None
self.noise_std = noise_std
# TODO add activation for using sigmoid or fat sigmoid
self.set_bg(bg)
self.set_sigmoid(sigmoid_kind)
self.record_depth = record_depth
self.depth = None
def forward(self, _x): raise NotImplementedError()
def set_bg(self, bg="black"):
if bg == "black":
self.sky_color = black
elif bg == "white":
self.sky_color = white
elif bg == "mlp":
self.sky_mlp = SkipConnMLP(
in_size=2, out=3, enc=NNEncoder(in_size=2,out=3),
num_layers=3, hidden_size=32, device=self.empty_latent.device, xavier_init=True,
)
self.sky_color = self.sky_from_mlp
elif bg == "random":
self.sky_color = random_color
else:
raise NotImplementedError(f"Unexpected bg: {bg}")
def set_sigmoid(self, kind="thin"): self.feat_act = load_sigmoid(kind)
def sky_from_mlp(self, elaz_r_d, weights):
return (1-weights.sum(dim=0)).unsqueeze(-1) * fat_sigmoid(self.sky_mlp(elaz_r_d))
def total_latent_size(self) -> int:
return self.mip_size() + \
self.per_pixel_latent_size + \
self.instance_latent_size + \
self.per_pt_latent_size
def set_per_pt_latent(self, latent):
assert(latent.shape[-1] == self.per_pt_latent_size), \
f"expected latent in [T, B, H, W, L={self.per_pixel_latent_size}], got {latent.shape}"
assert(len(latent.shape) == 5), \
f"expected latent in [T, B, H, W, L], got {latent.shape}"
self.per_pt_latent = latent
def set_per_pixel_latent(self, latent):
assert(latent.shape[-1] == self.per_pixel_latent_size), \
f"expected latent in [B, H, W, L={self.per_pixel_latent_size}], got {latent.shape}"
assert(len(latent.shape) == 4), \
f"expected latent in [B, H, W, L], got {latent.shape}"
self.per_pixel_latent = latent
def set_instance_latent(self, latent):
assert(latent.shape[-1] == self.instance_latent_size), "expected latent in [B, L]"
assert(len(latent.shape) == 2), "expected latent in [B, L]"
self.instance_latent = latent
# produces a segmentation mask of sorts, using the alpha for occupancy.
def acc(self): return self.alpha.max(dim=0)[0]
def acc_smooth(self): return self.weights.sum(dim=0).unsqueeze(-1)
def set_refl(self, refl):
if hasattr(self, "refl"): self.refl = refl
def depths(self, depths):
with torch.no_grad():
return volumetric_integrate(self.alpha, depths[..., None, None, None])
@property
def nerf(self): return self
def mip_size(self): return 0 if self.mip is None else self.mip.size() * 6
def mip_encoding(self, r_o, r_d, ts):
if self.mip is None: return None
end_val = torch.tensor([1e10], device=ts.device, dtype=ts.dtype)
ts = torch.cat([ts, end_val], dim=-1)
return self.mip(r_o, r_d, ts[..., :-1], ts[..., 1:])
# gets the current latent vector for this NeRF instance
def curr_latent(self, pts_shape) -> ["T", "B", "H", "W", "L_pp + L_inst"]:
curr = self.empty_latent.expand(pts_shape[:-1] + (0,)) if self.per_pt_latent is None \
else self.per_pt_latent
if self.per_pixel_latent is not None:
ppl = self.per_pixel_latent[None, ...].expand(pts_shape[:-1] + (-1,))
curr = torch.cat([curr, ppl], dim=-1)
if self.instance_latent is not None:
il = self.instance_latent[None, :, None, None, :].expand(pts_shape[:-1] + (-1,))
curr = torch.cat([curr, il], dim=-1)
return curr
class TinyNeRF(CommonNeRF):
# No frills, single MLP NeRF
def __init__(
self,
out_features: int = 3,
device="cuda",
**kwargs,
):
super().__init__(**kwargs, device=device)
self.estim = SkipConnMLP(
in_size=3, out=1 + out_features,
latent_size = self.total_latent_size(),
num_layers=6, hidden_size=128,
xavier_init=True,
)
def forward(self, rays):
pts, ts, r_o, r_d = compute_pts_ts(
rays, self.t_near, self.t_far, self.steps,
perturb = 1 if self.training else 0,
)
self.ts = ts
return self.from_pts(pts, ts, r_o, r_d)
def from_pts(self, pts, ts, r_o, r_d):
latent = self.curr_latent(pts.shape)
mip_enc = self.mip_encoding(r_o, r_d, ts)
if mip_enc is not None: latent = torch.cat([latent, mip_enc], dim=-1)
density, feats = self.estim(pts, latent).split([1, 3], dim=-1)
self.alpha, self.weights = alpha_from_density(density, ts, r_d)
return volumetric_integrate(self.weights, self.feat_act(feats)) + \
self.sky_color(None, self.weights)
# A plain old nerf
class PlainNeRF(CommonNeRF):
def __init__(
self,
intermediate_size: int = 32,
out_features: int = 3,
device: torch.device = "cuda",
**kwargs,
):
super().__init__(**kwargs, device=device)
self.latent_size = self.total_latent_size()
self.first = SkipConnMLP(
in_size=3, out=1 + intermediate_size, latent_size=self.latent_size,
enc=FourierEncoder(input_dims=3, device=device),
num_layers = 6, hidden_size = 128, xavier_init=True,
)
self.refl = refl.View(
out_features=out_features,
latent_size=self.latent_size+intermediate_size,
)
def forward(self, rays):
pts, ts, r_o, r_d = compute_pts_ts(
rays, self.t_near, self.t_far, self.steps, perturb = 1 if self.training else 0,
)
self.ts = ts
return self.from_pts(pts, ts, r_o, r_d)
def from_pts(self, pts, ts, r_o, r_d):
latent = self.curr_latent(pts.shape)
mip_enc = self.mip_encoding(r_o, r_d, ts)
# If there is a mip encoding, stack it with the latent encoding.
if mip_enc is not None: latent = torch.cat([latent, mip_enc], dim=-1)
first_out = self.first(pts, latent if latent.shape[-1] != 0 else None)
density = first_out[..., 0]
if self.training and self.noise_std > 0:
density = density + torch.randn_like(density) * self.noise_std
intermediate = first_out[..., 1:]
#n = None
#if self.refl.can_use_normal: n = autograd(pts, density)
view = r_d[None, ...].expand_as(pts)
rgb = self.refl(
x=pts, view=view,
latent=torch.cat([latent, intermediate], dim=-1),
)
self.alpha, self.weights = alpha_from_density(density, ts, r_d)
return volumetric_integrate(self.weights, rgb) + self.sky_color(view, self.weights)
# NeRF with a thin middle layer, for encoding information
class NeRFAE(CommonNeRF):
def __init__(
self,
intermediate_size: int = 32,
out_features: int = 3,
encoding_size: int = 32,
normalize_latent: bool = True,
device="cuda",
**kwargs,
):
super().__init__(**kwargs, device=device)
self.latent_size = self.total_latent_size()
self.encode = SkipConnMLP(
in_size=3, out=encoding_size,
latent_size=self.latent_size,
num_layers=5, hidden_size=128,
enc=FourierEncoder(input_dims=3, device=device),
xavier_init=True,
)
self.density_tform = SkipConnMLP(
in_size=encoding_size, out=1+intermediate_size, latent_size=0,
num_layers=5, hidden_size=64, xavier_init=True,
)
self.refl = refl.View(
out_features=out_features,
latent_size=encoding_size+intermediate_size,
)
self.encoding_size = encoding_size
self.regularize_latent = False
self.normalize_latent = normalize_latent
def set_regularize_latent(self):
self.regularize_latent = True
self.latent_l2_loss = 0
def forward(self, rays):
pts, ts, r_o, r_d = compute_pts_ts(
rays, self.t_near, self.t_far, self.steps,
perturb = 1 if self.training else 0,
)
self.ts = ts
return self.from_pts(pts, ts, r_o, r_d)
# nemo/collections/tts/helpers/common.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import ast
from typing import Tuple
import numpy as np
import torch
from torch import nn
from torch.cuda import amp
from torch.cuda.amp import autocast as autocast
from torch.nn import functional as F
from nemo.collections.tts.helpers.partialconv1d import PartialConv1d as pconv1d
from nemo.collections.tts.helpers.splines import (
piecewise_linear_inverse_transform,
piecewise_linear_transform,
unbounded_piecewise_quadratic_transform,
)
def update_params(config, params):
for param in params:
print(param)
k, v = param.split("=")
try:
v = ast.literal_eval(v)
except:
pass
k_split = k.split('.')
if len(k_split) > 1:
parent_k = k_split[0]
cur_param = ['.'.join(k_split[1:]) + "=" + str(v)]
update_params(config[parent_k], cur_param)
elif k in config and len(k_split) == 1:
print(f"overriding {k} with {v}")
config[k] = v
else:
print("{}, {} params not updated".format(k, v))
def get_mask_from_lengths(lengths):
"""Constructs binary mask from a 1D torch tensor of input lengths
Args:
lengths (torch.tensor): 1D tensor
Returns:
mask (torch.tensor): num_sequences x max_length x 1 binary tensor
"""
max_len = torch.max(lengths).item()
ids = torch.arange(0, max_len, out=torch.cuda.LongTensor(max_len))
mask = (ids < lengths.unsqueeze(1)).bool()
return mask
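# Usage sketch (illustrative): for lengths [2, 3] the mask marks valid frames per
# row. Note the helper builds its index tensor with torch.cuda.LongTensor, so a
# CUDA device is assumed here.
def _example_get_mask_from_lengths():
    lengths = torch.tensor([2, 3], device='cuda')
    # -> [[True, True, False],
    #     [True, True, True ]]
    return get_mask_from_lengths(lengths)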
@torch.jit.script
def fused_add_tanh_sigmoid_multiply(input_a, input_b):
t_act = torch.tanh(input_a)
s_act = torch.sigmoid(input_b)
acts = t_act * s_act
return acts
class ExponentialClass(torch.nn.Module):
def __init__(self):
super(ExponentialClass, self).__init__()
def forward(self, x):
return torch.exp(x)
class LinearNorm(torch.nn.Module):
def __init__(self, in_dim, out_dim, bias=True, w_init_gain='linear'):
super(LinearNorm, self).__init__()
self.linear_layer = torch.nn.Linear(in_dim, out_dim, bias=bias)
torch.nn.init.xavier_uniform_(self.linear_layer.weight, gain=torch.nn.init.calculate_gain(w_init_gain))
def forward(self, x):
return self.linear_layer(x)
class ConvNorm(torch.nn.Module):
def __init__(
self,
in_channels,
out_channels,
kernel_size=1,
stride=1,
padding=None,
dilation=1,
bias=True,
w_init_gain='linear',
use_partial_padding=False,
use_weight_norm=False,
):
super(ConvNorm, self).__init__()
if padding is None:
assert kernel_size % 2 == 1
padding = int(dilation * (kernel_size - 1) / 2)
self.kernel_size = kernel_size
self.dilation = dilation
self.use_partial_padding = use_partial_padding
self.use_weight_norm = use_weight_norm
conv_fn = torch.nn.Conv1d
if self.use_partial_padding:
conv_fn = pconv1d
self.conv = conv_fn(
in_channels,
out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias,
)
torch.nn.init.xavier_uniform_(self.conv.weight, gain=torch.nn.init.calculate_gain(w_init_gain))
if self.use_weight_norm:
self.conv = nn.utils.weight_norm(self.conv)
def forward(self, signal, mask=None):
if self.use_partial_padding:
conv_signal = self.conv(signal, mask)
else:
conv_signal = self.conv(signal)
return conv_signal
class DenseLayer(nn.Module):
def __init__(self, in_dim=1024, sizes=[1024, 1024]):
super(DenseLayer, self).__init__()
in_sizes = [in_dim] + sizes[:-1]
self.layers = nn.ModuleList(
[LinearNorm(in_size, out_size, bias=True) for (in_size, out_size) in zip(in_sizes, sizes)]
)
def forward(self, x):
for linear in self.layers:
x = torch.tanh(linear(x))
return x
class LengthRegulator(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, dur):
output = []
for x_i, dur_i in zip(x, dur):
expanded = self.expand(x_i, dur_i)
output.append(expanded)
output = self.pad(output)
return output
def expand(self, x, dur):
output = []
for i, frame in enumerate(x):
expanded_len = int(dur[i] + 0.5)
expanded = frame.expand(expanded_len, -1)
output.append(expanded)
output = torch.cat(output, 0)
return output
def pad(self, x):
output = []
max_len = max([x[i].size(0) for i in range(len(x))])
for i, seq in enumerate(x):
padded = F.pad(seq, [0, 0, 0, max_len - seq.size(0)], 'constant', 0.0)
output.append(padded)
output = torch.stack(output)
return output
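# Usage sketch (toy tensors, illustrative only): LengthRegulator repeats each
# encoder frame by its rounded duration and zero-pads the batch to equal length.
def _example_length_regulator():
    regulator = LengthRegulator()
    x = torch.arange(4, dtype=torch.float).reshape(1, 2, 2)  # [B=1, T=2, C=2]
    dur = torch.tensor([[2.0, 1.0]])                          # per-frame durations
    # frame 0 is repeated twice and frame 1 once -> output shape [1, 3, 2]
    return regulator(x, dur)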
class ConvLSTMLinear(nn.Module):
def __init__(self, in_dim, out_dim, n_layers=2, n_channels=256, kernel_size=3, p_dropout=0.1):
super(ConvLSTMLinear, self).__init__()
self.out_dim = out_dim
self.dropout = nn.Dropout(p=p_dropout)
convolutions = []
for i in range(n_layers):
conv_layer = ConvNorm(
in_dim if i == 0 else n_channels,
n_channels,
kernel_size=kernel_size,
stride=1,
padding=int((kernel_size - 1) / 2),
dilation=1,
w_init_gain='relu',
)
conv_layer = torch.nn.utils.weight_norm(conv_layer.conv, name='weight')
convolutions.append(conv_layer)
self.convolutions = nn.ModuleList(convolutions)
self.bilstm = nn.LSTM(n_channels, int(n_channels // 2), 1, batch_first=True, bidirectional=True)
lstm_norm_fn_pntr = nn.utils.spectral_norm
self.bilstm = lstm_norm_fn_pntr(self.bilstm, 'weight_hh_l0')
self.bilstm = lstm_norm_fn_pntr(self.bilstm, 'weight_hh_l0_reverse')
self.dense = nn.Linear(n_channels, out_dim)
def run_padded_sequence(self, context, lens):
context_embedded = []
for b_ind in range(context.size()[0]): # TODO: speed up
curr_context = context[b_ind : b_ind + 1, :, : lens[b_ind]].clone()
for conv in self.convolutions:
curr_context = self.dropout(F.relu(conv(curr_context)))
context_embedded.append(curr_context[0].transpose(0, 1))
context = torch.nn.utils.rnn.pad_sequence(context_embedded, batch_first=True)
return context
def run_unsorted_inputs(self, fn, context, lens):
lens_sorted, ids_sorted = torch.sort(lens, descending=True)
unsort_ids = [0] * lens.size(0)
for i in range(len(ids_sorted)):
unsort_ids[ids_sorted[i]] = i
lens_sorted = lens_sorted.long().cpu()
context = context[ids_sorted]
context = nn.utils.rnn.pack_padded_sequence(context, lens_sorted, batch_first=True)
context = fn(context)[0]
context = nn.utils.rnn.pad_packed_sequence(context, batch_first=True)[0]
# map back to original indices
context = context[unsort_ids]
return context
def forward(self, context, lens):
if context.size()[0] > 1:
context = self.run_padded_sequence(context, lens)
else:
for conv in self.convolutions:
context = self.dropout(F.relu(conv(context)))
context = context.transpose(1, 2)
self.bilstm.flatten_parameters()
if lens is not None:
context = self.run_unsorted_inputs(self.bilstm, context, lens)
else:
context = self.bilstm(context)[0]
x_hat = self.dense(context).permute(0, 2, 1)
return x_hat
def infer(self, z, txt_enc, spk_emb):
x_hat = self.forward(txt_enc, spk_emb)['x_hat']
x_hat = self.feature_processing.denormalize(x_hat)
return x_hat
class Encoder(nn.Module):
"""Encoder module:
- Three 1-d convolution banks
- Bidirectional LSTM
"""
def __init__(
self,
encoder_n_convolutions=3,
encoder_embedding_dim=512,
encoder_kernel_size=5,
norm_fn=nn.BatchNorm1d,
lstm_norm_fn=None,
):
super(Encoder, self).__init__()
convolutions = []
for _ in range(encoder_n_convolutions):
conv_layer = nn.Sequential(
ConvNorm(
encoder_embedding_dim,
encoder_embedding_dim,
kernel_size=encoder_kernel_size,
stride=1,
padding=int((encoder_kernel_size - 1) / 2),
dilation=1,
w_init_gain='relu',
use_partial_padding=True,
),
norm_fn(encoder_embedding_dim, affine=True),
)
convolutions.append(conv_layer)
self.convolutions = nn.ModuleList(convolutions)
self.lstm = nn.LSTM(
encoder_embedding_dim, int(encoder_embedding_dim / 2), 1, batch_first=True, bidirectional=True
)
if lstm_norm_fn is not None:
if 'spectral' in lstm_norm_fn:
print("Applying spectral norm to text encoder LSTM")
lstm_norm_fn_pntr = torch.nn.utils.spectral_norm
elif 'weight' in lstm_norm_fn:
print("Applying weight norm to text encoder LSTM")
lstm_norm_fn_pntr = torch.nn.utils.weight_norm
self.lstm = lstm_norm_fn_pntr(self.lstm, 'weight_hh_l0')
self.lstm = lstm_norm_fn_pntr(self.lstm, 'weight_hh_l0_reverse')
@amp.autocast(False)
def forward(self, x, in_lens):
"""
Args:
x (torch.tensor): N x C x L padded input of text embeddings
in_lens (torch.tensor): 1D tensor of sequence lengths
"""
if x.size()[0] > 1:
x_embedded = []
for b_ind in range(x.size()[0]): # TODO: improve speed
curr_x = x[b_ind : b_ind + 1, :, : in_lens[b_ind]].clone()
for conv in self.convolutions:
curr_x = F.dropout(F.relu(conv(curr_x)), 0.5, self.training)
x_embedded.append(curr_x[0].transpose(0, 1))
x = torch.nn.utils.rnn.pad_sequence(x_embedded, batch_first=True)
else:
for conv in self.convolutions:
x = F.dropout(F.relu(conv(x)), 0.5, self.training)
x = x.transpose(1, 2)
# recent amp change -- change in_lens to int
in_lens = in_lens.int().cpu()
# Workaround (mikyas): avoids "RuntimeError: `lengths` array must be sorted in decreasing
# order when `enforce_sorted` is True". Alternatively, pass `enforce_sorted=False` to
# pack_padded_sequence and/or pack_sequence if ONNX exportability is not needed.
in_lens = sorted(in_lens, reverse=True)
x = nn.utils.rnn.pack_padded_sequence(x, in_lens, batch_first=True)
self.lstm.flatten_parameters()
outputs, _ = self.lstm(x)
outputs, _ = nn.utils.rnn.pad_packed_sequence(outputs, batch_first=True)
return outputs
@amp.autocast(False)
def infer(self, x):
for conv in self.convolutions:
x = F.dropout(F.relu(conv(x)), 0.5, self.training)
x = x.transpose(1, 2)
self.lstm.flatten_parameters()
outputs, _ = self.lstm(x)
return outputs
class Invertible1x1ConvLUS(torch.nn.Module):
def __init__(self, c):
super(Invertible1x1ConvLUS, self).__init__()
# Sample a random orthonormal matrix to initialize weights
W = torch.qr(torch.FloatTensor(c, c).normal_())[0]
# Ensure determinant is 1.0 not -1.0
if torch.det(W) < 0:
W[:, 0] = -1 * W[:, 0]
p, lower, upper = torch.lu_unpack(*torch.lu(W))
self.register_buffer('p', p)
# diagonals of lower will always be 1s anyway
lower = torch.tril(lower, -1)
lower_diag = torch.diag(torch.eye(c, c))
self.register_buffer('lower_diag', lower_diag)
self.lower = nn.Parameter(lower)
self.upper_diag = nn.Parameter(torch.diag(upper))
self.upper = nn.Parameter(torch.triu(upper, 1))
@amp.autocast(False)
def forward(self, z, inverse=False):
U = torch.triu(self.upper, 1) + torch.diag(self.upper_diag)
L = torch.tril(self.lower, -1) + torch.diag(self.lower_diag)
W = torch.mm(self.p, torch.mm(L, U))
if inverse:
if not hasattr(self, 'W_inverse'):
# inverse computation
W_inverse = W.float().inverse()
if z.type() == 'torch.cuda.HalfTensor':
W_inverse = W_inverse.half()
self.W_inverse = W_inverse[..., None]
z = F.conv1d(z, self.W_inverse, bias=None, stride=1, padding=0)
return z
else:
W = W[..., None]
z = F.conv1d(z, W, bias=None, stride=1, padding=0)
log_det_W = torch.sum(torch.log(torch.abs(self.upper_diag)))
return z, log_det_W
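# Worked relation (sketch): with W = P L U, where P is a permutation
# (|det P| = 1) and L is unit lower-triangular (det L = 1),
#   log|det W| = sum_i log|U_ii|,
# which is exactly the `log_det_W` computed above from `upper_diag`.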
class Invertible1x1Conv(torch.nn.Module):
"""
The layer outputs both the convolution, and the log determinant
of its weight matrix. If inverse=True it does convolution with
inverse
"""
def __init__(self, c):
super(Invertible1x1Conv, self).__init__()
self.conv = torch.nn.Conv1d(c, c, kernel_size=1, stride=1, padding=0, bias=False)
# Sample a random orthonormal matrix to initialize weights
W = torch.qr(torch.FloatTensor(c, c).normal_())[0]
# Ensure determinant is 1.0 not -1.0
if torch.det(W) < 0:
W[:, 0] = -1 * W[:, 0]
W = W.view(c, c, 1)
self.conv.weight.data = W
def forward(self, z, inverse=False):
# DO NOT apply n_of_groups, as it doesn't account for padded sequences
W = self.conv.weight.squeeze()
if inverse:
if not hasattr(self, 'W_inverse'):
# Inverse computation
W_inverse = W.float().inverse()
if z.type() == 'torch.cuda.HalfTensor':
W_inverse = W_inverse.half()
self.W_inverse = W_inverse[..., None]
z = F.conv1d(z, self.W_inverse, bias=None, stride=1, padding=0)
return z
else:
# Forward computation
log_det_W = torch.logdet(W).clone()
z = self.conv(z)
return z, log_det_W
machine_name: name of machine
Returns:
true if there has been a pole slip at machine
"""
var = "outofstep"
pole_var = self.result.loc[:, (machine_name, var)].values
pole_slip = False
if np.count_nonzero(pole_var) > 0:
pole_slip = True
return pole_slip
def get_rotor_angles_static(self, machine_names=None):
""" Get relative rotor angles from load flow simulations
Returns:
Initial relative rotor angles for all machines
"""
if machine_names is None:
machines = self.app.GetCalcRelevantObjects("*.ElmSym")
else:
machines = []
for machine_name in machine_names:
machine_object = self.app.GetCalcRelevantObjects(
machine_name + ".ElmSym"
)
machines.append(machine_object[0])
rotor_ang = []
phi_ref = 0
for m in machines:
if self.check_if_in_service(m.loc_name):
u_t = m.GetAttribute("n:u1:bus1")
i_t = m.GetAttribute("m:i1:bus1")
r_stator = m.typ_id.rstr
x_q = m.typ_id.xq
phi = np.arctan(u_t + i_t*(r_stator+x_q))*180/np.pi - 90
if self.is_ref(m.loc_name):
rotor_ang.append(0)
phi_ref = phi
else:
rotor_ang.append(phi-phi_ref-m.GetAttribute(
"n:phiurel:bus1"))
return rotor_ang
def get_initial_rotor_angles(self):
""" Get initial relative rotor angles
Returns:
Initial relative rotor angles for all machines
"""
var = "firel"
initial_ang = []
for name, gen in self.gens.items():
if gen.in_service:
pole_slip = self.result.loc[
0, (name, "outofstep")
] # always float
angle = self.result.loc[0, (name, var)] # .values
if type(angle) != type(pole_slip):
angle = angle.replace(",", ".")
angle = float(angle)
initial_ang.append(angle)
else:
initial_ang.append(0)
return initial_ang
# TODO, this method should be generalised and a test made
def get_generator_voltage_angles(self, machine_names=None):
""" Get machine voltage angles
Returns:
Voltage angles for all machines
"""
if machine_names is None:
machines = self.app.GetCalcRelevantObjects("*.ElmSym")
else:
machines = []
for machine_name in machine_names:
machine_object = self.app.GetCalcRelevantObjects(
machine_name + ".ElmSym"
)
machines.append(machine_object[0])
initial_ang = []
for m in machines:
if self.check_if_in_service(m.loc_name):
initial_ang.append(m.GetAttribute("n:phiurel:bus1"))
else:
initial_ang.append(0)
return initial_ang
def get_machines_inertia_list(self):
"""
Function to get an array of all machine inertias, 'M', corresponding to
2HS/omega_0.
Returns:
List with machine name and corresponding inertia
"""
# generator types (end up with H array)
omega_0 = 50
machine_list = self.app.GetCalcRelevantObjects("*.ElmSym")
machine_type = []
machine_name = []
# Identify the machine type
# (GENSAL - salient pole, or GENROU - round pole)
for machine in machine_list:
machine_type.append(machine.typ_id)
machine_name.append(machine.loc_name)
inertias = []
for machine in machine_type:
inertias.append(2 * machine.sgn * machine.h / omega_0)
inertia_list = np.column_stack([machine_name, inertias])
return inertia_list
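# Worked example (hypothetical machine data): for a generator with rated apparent
# power sgn = 100 MVA and inertia constant h = 3 s, the listed value is
# M = 2 * 100 * 3 / 50 = 12, using omega_0 = 50 as above.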
def create_short_circuit(self, target, time, name):
"""Create a three phase short circuit.
Args:
target: Component to short.
time: Start time of the short circuit.
name: Name of the event.
"""
# Get the event folder
evt_folder = self.app.GetFromStudyCase("IntEvt")
# Get event name of events in folder
events = [i.loc_name for i in evt_folder.GetContents("*.EvtShc")]
# Delete existing events with the same name
if name in events:
self.delete_short_circuit(name)
# Create an empty short circuit event
evt_folder.CreateObject("EvtShc", name)
# Get the empty short circuit event
sc = evt_folder.GetContents(name + ".EvtShc")[0]
# Set time, target and type of short circuit
sc.time = time
sc.p_target = target.pf_object
sc.i_shc = 0
def delete_short_circuit(self, name):
"""Delete a short circuit event.
Args:
name: Name of the event.
"""
# Get the event folder
evt_folder = self.app.GetFromStudyCase("IntEvt")
# Find the short circuit and clear event to delete
sc = evt_folder.GetContents(name + ".EvtShc")
scc = evt_folder.GetContents(name + "_clear" + ".EvtShc")
if sc:
sc[0].Delete()
if scc:
scc[0].Delete()
def create_switch_event(self, target, time, name=None):
"""Create a switching event.
Args:
target: Component to switch.
time: When to switch
name: Name of the event.
"""
if not name:
name = target.name + "_switch"
# Get the event folder
evt_folder = self.app.GetFromStudyCase("IntEvt")
# Get event name of events in folder
events = [i.loc_name for i in evt_folder.GetContents("*.EvtSwitch")]
# Delete existing events with the same name
if name in events:
self.delete_switch_event(name)
# Create an empty switch event
evt_folder.CreateObject("EvtSwitch", name)
# Get the empty switch event
sw = evt_folder.GetContents(name + ".EvtSwitch")[0]
# Set time, target and type of short circuit
sw.time = time
sw.p_target = target.pf_object
def delete_switch_event(self, name):
"""Delete a switch event.
Args:
name: Name of the event.
"""
# Get the event folder
evt_folder = self.app.GetFromStudyCase("IntEvt")
# Find the switch event and clear event to delete
sw = evt_folder.GetContents(name + ".EvtSwitch")
sww = evt_folder.GetContents(name + "_clear" + ".EvtSwitch")
if sw:
sw[0].Delete()
if sww:
sww[0].Delete()
def clear_all_events(self):
# Get the event folder
evt_folder = self.app.GetFromStudyCase("IntEvt")
# Get a list of all events
events = evt_folder.GetContents("*")
# Loop through all events and use the correct delete function
for e in events:
evt_name = e.loc_name
evt_class = e.GetClassName()
if evt_class == "EvtSwitch":
self.delete_short_circuit(evt_name)
elif evt_class == "EvtShc":
if evt_name.split("-")[0] == "trip":
self.delete_trip_line_event(evt_name)
else:
self.delete_switch_event(evt_name)
def get_events(self):
""" Return a list of events """
evt_folder = self.app.GetFromStudyCase("IntEvt")
events = [i.loc_name for i in evt_folder.GetContents()]
return events
def get_output_window_content(self):
"""Returns the messages from the power factory output window."""
return self.window.GetContent()
def clear_output_window(self):
"""Clears the output window."""
self.window.Clear()
def run_load_flow(self, balanced=0, power_control=0, slack=0):
"""Method for running a load flow.
Args:
balanced:
0: Three phase balanced load flow.
1: Three phase unbalanced load flow.
2: DC load flow.
power_control:
0: As dispatched
1: According to secondary control
2: According to primary control
3: According to inertias
slack: This is only relevant if power_control is 0
0: By reference machine
1: By load at reference bus
2: By static generator at reference bus
3: By loads
4: By synchronous generators
5: By synchronous generators and static generators
"""
self.ldf.ipot_net = balanced
self.ldf.iopt_aptdist = power_control
self.ldf.iPbalancing = slack
return self.ldf.Execute()
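# Usage sketch (assumes `pfsim` is an instance of this interface class):
#   >>> pfsim.run_load_flow(balanced=0, power_control=0, slack=0)
# runs a balanced AC load flow with "as dispatched" power control; PowerFactory
# conventionally returns 0 from Execute() on success.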
def set_element_OPF_attr(
self, attr, element_type, relative_attr={"Pmin_uc": "P_max", "Pmax_uc": "P_max"}
):
""" Set attributes of element in optimal power flow
Args:
attribute (str)
element_type (str) e.g. *.ElmSym for all generators
"""
for elm in self.app.GetCalcRelevantObjects(element_type):
for k, v in attr.items():
if k in relative_attr.keys():
base_val = getattr(elm, relative_attr[k])
v_mod = np.array(v) * base_val
setattr(elm, k, v_mod.tolist())
else:
setattr(elm, k, v)
def set_generator_OPF_cost(self, cost_dict):
""" Set generator cost attributes for optimal power flow
Args:
cost_dict: double dict
key 1: cost function name
dict 2: generators: list of generator names
ccost: list of segment cost data
cpower: list of segment power
iInterPol: int
0: spline
1: piecewiselinear
2: polynomial
3: hermite
penaltyCost: float
fixedCost: float
"""
for cf, cost_data in cost_dict.items():
if len(cost_data["ccost"]) != len(cost_data["cpower"]):
print("Number of segments for cost and power is not equal!")
gen_set = cost_data["generators"]
for gen_name in gen_set:
relative_attr = ["ccost", "cpower"]
gen = self.app.GetCalcRelevantObjects(gen_name + ".ElmSym")[0]
for k, v in cost_data.items():
if k == "generators":
continue
if k in relative_attr:
v_mod = np.array(v) * gen.P_max
setattr(gen, k, v_mod.tolist())
continue
setattr(gen, k, v)
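# Illustrative cost_dict (hypothetical generator names and values) matching the
# layout described in the docstring above; ccost/cpower are given per unit of
# P_max and scaled inside the method:
#   cost_dict = {
#       "linear_cost": {
#           "generators": ["Gen1", "Gen2"],
#           "ccost": [10.0, 15.0],
#           "cpower": [0.0, 1.0],
#           "iInterPol": 1,      # piecewise linear
#           "penaltyCost": 0.0,
#           "fixedCost": 0.0,
#       },
#   }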
def run_OPF(self, power_flow=0, obj_function='cst', attributes={}):
"""Method for running optimal power flow
Args:
power_flow:
0: AC optimization (interior point method)
1: DC optimization (linear programming (LP))
2: Contingency constrained DC optimization (LP))
obj_function:
los: Minimization of losses (total)
slo: Minimization of losses (selection)
cst: Minimization of cost
shd: Minimization of load shedding
rpr: Maximization of reactive power reserve
dev: Minimization of control variable deviations
Kwargs:
Controls (boolean):
iopt_pd: Generator active power dispatch
iopt_qd: Generator/SVS reactive power dispatch
iopt_trf: Transformer tap positions
iopt_sht: Switchable shunts
iopt_genP: Active power limits of generators
iopt_genQ: Reactive power limits of generators/SVS
iopt_brnch: Branch flow limits (max. loading)
iopt_bus: Voltage limits of busbars/terminals
iopt_add: Boundary flow limits
Soft constraints (boolean):
penaltySoftConstr: Penalty factor for soft constraints (float)
isForceSoftPLims: Enforce soft active power limits of
generators
isForceSoftQLims: Enforce soft reactive power limits of
generators/SVS
isForceSoftLoadingLims: Enforce soft branch flow limits
(max. loading)
isForceSoftVoltageLims: Enforce soft voltage limits of
busbars/terminal
"""
if not hasattr(self, "opf"):
self.opf = self.app.GetFromStudyCase("ComOpf")
self.opf.ipopt_ACDC = power_flow
self.opf.iopt_obj = obj_function
for k, v in attributes.items():
setattr(self.opf, k, v)
return self.opf.Execute()
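# Usage sketch (hypothetical instance `pfsim`): run a DC OPF that minimises cost
# while only dispatching active power and enforcing branch loading limits:
#   >>> pfsim.run_OPF(power_flow=1, obj_function="cst",
#   ...               attributes={"iopt_pd": 1, "iopt_brnch": 1})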
def get_OPF_results(self):
opf_res = {}
gens = self.app.GetCalcRelevantObjects("*.ElmSym")
gen_var = ["c:avgCosts", "c:Pdisp", "c:cst_disp"]
for gen in gens:
gen_name = gen.GetFullName().split("\\")[-1].split(".")[0]
opf_res[gen_name] = {i.split(":")[1]: gen.GetAttribute(i)
for i in gen_var}
loads = self.app.GetCalcRelevantObjects("*.ElmLod")
load_var = ["m:P:bus1", "c:Pmism"]
for load in loads:
load_name = load.GetFullName().split("\\")[-1].split(".")[0]
opf_res[load_name] = {
i.split(":")[1]: load.GetAttribute(i) for i in load_var
}
lines = self.app.GetCalcRelevantObjects("*.ElmLne")
line_var = ["m:P:bus1", "c:loading"]
for line in lines:
if not line.outserv:
line_name = line.GetFullName().split('\\')[-1].split('.')[0]
opf_res[line_name] = {
i.split(':')[1]: line.GetAttribute(i) for i in line_var
}
return opf_res
#!/usr/bin/env python
# Copyright (c) 2020 <NAME>
# SPDX-License-Identifier: MIT
"""
Applies an Unfurl ensemble
For each configuration, run it if required, then record the result
"""
from __future__ import print_function
from .job import runJob
from .support import Status
from . import __version__, initLogging, getHomeConfigPath, DefaultNames
from . import init as initmod
from .util import filterEnv, getPackageDigest
from .localenv import LocalEnv, Project
import click
import sys
import os
import os.path
import traceback
import logging
import functools
import subprocess
import shlex
import json
_latestJobs = [] # for testing
_args = [] # for testing
def option_group(*options):
return lambda func: functools.reduce(lambda a, b: b(a), options, func)
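# For example, option_group(a, b, c)(func) evaluates to c(b(a(func))): each
# click.option decorator is applied in turn, so a group of options defined once
# can be reused on several commands below.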
@click.group()
@click.pass_context
@click.option(
"--home",
envvar="UNFURL_HOME",
type=click.Path(exists=False),
help="Path to .unfurl_home",
)
@click.option("--runtime", envvar="UNFURL_RUNTIME", help="use this runtime")
@click.option(
"--no-runtime",
envvar="UNFURL_NORUNTIME",
default=False,
is_flag=True,
help="ignore runtime settings",
)
@click.option("-v", "--verbose", count=True, help="verbose mode (-vvv for more)")
@click.option(
"-q",
"--quiet",
default=False,
is_flag=True,
help="Only output errors to the stdout",
)
@click.option("--logfile", default=None, help="Log messages to file (at DEBUG level)")
@click.option(
"--tmp",
envvar="UNFURL_TMPDIR",
type=click.Path(exists=True),
help="Directory for saving temporary files",
)
@click.option("--loglevel", envvar="UNFURL_LOGGING", help="log level (overrides -v)")
def cli(ctx, verbose=0, quiet=False, logfile=None, loglevel=None, tmp=None, **kw):
# ensure that ctx.obj exists and is a dict (in case `cli()` is called
# by means other than the `if` block below)
ctx.ensure_object(dict)
ctx.obj.update(kw)
if tmp is not None:
os.environ["UNFURL_TMPDIR"] = tmp
levels = [logging.INFO, 15, logging.DEBUG, 5, 5]
if quiet:
effectiveLogLevel = logging.CRITICAL
else:
# TRACE (5)
effectiveLogLevel = levels[min(verbose, 3)]
if loglevel: # UNFURL_LOGGING overrides command line -v
effectiveLogLevel = dict(CRITICAL=50, ERROR=40, WARNING=30, INFO=20, DEBUG=10)[
loglevel.upper()
]
# verbose: 0 == INFO, -1 == CRITICAL, >= 1 == DEBUG
if effectiveLogLevel == logging.CRITICAL:
verbose = -1
elif effectiveLogLevel >= logging.INFO:
verbose = 0
elif effectiveLogLevel == 15:
effectiveLogLevel = logging.DEBUG # XXX add logging.VERBOSE and start using it
verbose = 1
else: # must be DEBUG
verbose = 2
ctx.obj["verbose"] = verbose
initLogging(effectiveLogLevel, logfile)
jobControlOptions = option_group(
click.option(
"--dryrun",
default=False,
is_flag=True,
help="Do not modify anything, just do a dry run.",
),
click.option(
"--commit",
default=False,
is_flag=True,
help="Commit modified files to the instance repository. (Default: false)",
),
click.option(
"--dirty",
type=click.Choice(["abort", "ok", "auto"]),
default="auto",
help="Action if there are uncommitted changes before run. (Default: auto)",
),
click.option("-m", "--message", help="commit message to use"),
click.option(
"--jobexitcode",
type=click.Choice(["error", "degraded", "never"]),
default="never",
help="Set exit code to 1 if job status is not ok.",
),
)
commonJobFilterOptions = option_group(
click.option("--template", help="TOSCA template to target"),
click.option("--instance", help="instance name to target"),
click.option("--query", help="Run the given expression upon job completion"),
click.option("--trace", default=0, help="Set the query's trace level"),
click.option(
"--output",
type=click.Choice(["text", "json", "none"]),
default="text",
help="How to print summary of job run",
),
click.option("--starttime", help="Set the start time of the job."),
click.option(
"--destroyunmanaged",
default=False,
is_flag=True,
help="include unmanaged instances for consideration when destroying",
),
)
@cli.command(short_help="Run and record an ad-hoc command")
@click.pass_context
# @click.argument("action", default="*:upgrade")
@click.option("--ensemble", default="", type=click.Path(exists=False))
# XXX:
# @click.option(
# "--append", default=False, is_flag=True, help="add this command to the previous"
# )
# @click.option(
# "--replace", default=False, is_flag=True, help="replace the previous command"
# )
@jobControlOptions
@commonJobFilterOptions
@click.option("--host", help="host to run the command on")
@click.option("--operation", help="TOSCA operation to run")
@click.option("--module", help="ansible module to run (default: command)")
@click.argument("cmdline", nargs=-1, type=click.UNPROCESSED)
def run(ctx, instance="root", cmdline=None, **options):
"""
Run an ad-hoc command in the context of the given ensemble.
Use "--" to separate the given command line, for example:
> unfurl run -- echo 'hello!'
If --host or --module is set, the ansible configurator will be used. e.g.:
> unfurl run --host=example.com -- echo 'hello!'
"""
options.update(ctx.obj)
options["instance"] = instance
options["cmdline"] = cmdline
return _run(options.pop("ensemble"), options, ctx.info_name)
def _getRuntime(options, ensemblePath):
runtime = options.get("runtime")
localEnv = None
if not runtime:
localEnv = LocalEnv(ensemblePath, options.get("home"))
runtime = localEnv.getRuntime()
return runtime, localEnv
def _run(ensemble, options, workflow=None):
if workflow:
options["workflow"] = workflow
if not options.get("no_runtime"):
runtime, localEnv = _getRuntime(options, ensemble)
if runtime and runtime != ".":
if not localEnv:
localEnv = LocalEnv(ensemble, options.get("home"))
return _runRemote(runtime, options, localEnv)
return _runLocal(ensemble, options)
def _venv(runtime, env):
if env is None:
env = os.environ.copy()
# see virtualenv activate
env.pop("PYTHONHOME", None) # unset if set
env["VIRTUAL_ENV"] = runtime
env["PATH"] = os.path.join(runtime, "bin") + os.pathsep + env.get("PATH", "")
return env
def _remoteCmd(runtime, cmdLine, localEnv):
context = localEnv.getContext()
kind, sep, rest = runtime.partition(":")
if context.get("environment"):
addOnly = kind == "docker"
env = filterEnv(localEnv.mapValue(context["environment"]), addOnly=addOnly)
else:
env = None
if kind == "venv":
pipfileLocation, sep, unfurlLocation = rest.partition(":")
return (
_venv(pipfileLocation, env),
["python", "-m", "unfurl", "--no-runtime"] + cmdLine,
False,
)
# elif docker: docker $container -it run $cmdline
else:
# treat as shell command
cmd = shlex.split(runtime)
return env, cmd + ["--no-runtime"] + cmdLine, True
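# Example (hypothetical path): a runtime of "venv:/srv/unfurl-venv" produces the
# command ["python", "-m", "unfurl", "--no-runtime", ...] run with
# VIRTUAL_ENV=/srv/unfurl-venv and its bin/ directory prepended to PATH; any
# other runtime string (e.g. "docker run --rm unfurl") is split with shlex and
# executed as a shell command with "--no-runtime" inserted before the original
# arguments.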
def _runRemote(runtime, options, localEnv):
logger = logging.getLogger("unfurl")
logger.debug('running command remotely on "%s"', runtime)
cmdLine = _args or sys.argv[1:]
if _args:
print("TESTING: running remote with _args %s" % _args)
env, remote, shell = _remoteCmd(runtime, cmdLine, localEnv)
rv = subprocess.call(remote, env=env, shell=shell)
if options.get("standalone_mode") is False:
return rv
else:
sys.exit(rv)
def _runLocal(ensemble, options):
logger = logging.getLogger("unfurl")
job = runJob(ensemble, options)
_latestJobs.append(job)
if not job:
click.echo("Unable to create job")
elif job.unexpectedAbort:
click.echo("Job unexpected aborted")
if options.get("verbose", 0) > 0:
raise job.unexpectedAbort
else:
jsonSummary = {}
summary = options.get("output")
logger.debug(job.summary())
if summary == "text":
click.echo(job.summary())
elif summary == "json":
jsonSummary = job.jsonSummary()
query = options.get("query")
if query:
result = job.runQuery(query, options.get("trace"))
if summary == "json":
jsonSummary["query"] = query
jsonSummary["result"] = result
else:
click.echo("query: " + query)
click.echo(result)
if jsonSummary:
click.echo(json.dumps(jsonSummary))
if not job or (
"jobexitcode" in options
and options["jobexitcode"] != "never"
and Status[options["jobexitcode"]] <= job.status
):
if options.get("standalone_mode") is False:
return 1
else:
sys.exit(1)
else:
return 0
# XXX update help text sans "configurations"
deployFilterOptions = option_group(
click.option(
"--add", default=True, is_flag=True, help="run newly added configurations"
),
click.option(
"--update",
default=True,
is_flag=True,
help="run configurations that whose spec has changed but don't require a major version change",
),
click.option(
"--repair",
type=click.Choice(["error", "degraded", "missing", "none"]),
default="error",
help="re-run configurations that are in an error or degraded state",
),
click.option(
"--upgrade",
default=False,
is_flag=True,
help="run configurations with major version changes or whose spec has changed",
),
click.option(
"--force",
default=False,
is_flag=True,
help="(re)run operation regardless of instance's status or state",
),
click.option(
"--prune",
default=False,
is_flag=True,
help="destroy instances that are no longer used",
),
)
@cli.command()
@click.pass_context
@click.argument("ensemble", default="", type=click.Path(exists=False))
@commonJobFilterOptions
@deployFilterOptions
@jobControlOptions
def deploy(ctx, ensemble=None, **options):
"""
Deploy the given ensemble
"""
options.update(ctx.obj)
return _run(ensemble, options, ctx.info_name)
@cli.command(short_help="Check the status of each instance")
@click.pass_context
@click.argument("ensemble", default="", type=click.Path(exists=False))
@commonJobFilterOptions
@jobControlOptions
def check(ctx, ensemble=None, **options):
"""
Check and update the status of the ensemble's instances
"""
options.update(ctx.obj)
return _run(ensemble, options, ctx.info_name)
@cli.command(short_help="Run the discover workflow.")
@click.pass_context
@click.argument("ensemble", default="", type=click.Path(exists=False))
@commonJobFilterOptions
@jobControlOptions
def discover(ctx, ensemble=None, **options):
"""
Run the "discover" workflow which updates the ensemble's spec by probing its live instances.
"""
options.update(ctx.obj)
return _run(ensemble, options, ctx.info_name)
@cli.command()
@click.pass_context
@click.argument("ensemble", default="", type=click.Path(exists=False))
@commonJobFilterOptions
@jobControlOptions
def undeploy(ctx, ensemble=None, **options):
"""
Destroy what was deployed.
"""
options.update(ctx.obj)
return _run(ensemble, options, ctx.info_name)
@cli.command()
@click.pass_context
@click.argument("ensemble", default="", type=click.Path(exists=False))
@commonJobFilterOptions
@jobControlOptions
def stop(ctx, ensemble=None, **options):
"""
Stop running instances.
"""
options.update(ctx.obj)
return _run(ensemble, options, ctx.info_name)
@cli.command(short_help="Print the given deployment plan")
@click.pass_context
@click.argument("ensemble", default="", type=click.Path(exists=False))
@commonJobFilterOptions
@deployFilterOptions
@click.option("--workflow", default="deploy", help="plan workflow (default: deploy)")
def plan(ctx, ensemble=None, **options):
"Print the given deployment plan"
options.update(ctx.obj)
options["planOnly"] = True
# XXX show status and task to run including preview of generated templates, cmds to run etc.
return _run(ensemble, options)
@cli.command(short_help="Create a new unfurl project or ensemble")
@click.pass_context
@click.argument("projectdir", default="", type=click.Path(exists=False))
@click.option(
"--mono", default=False, is_flag=True, help="Create one repository for the project."
)
@click.option(
"--existing",
default=False,
is_flag=True,
help="Add project to nearest existing repository.",
)
@click.option(
"--submodule",
default=False,
is_flag=True,
help="Set the ensemble repository as a git submodule.",
)
@click.option(
"--empty", default=False, is_flag=True, help="Don't create a default ensemble."
)
@click.option(
"--template",
type=click.Path(exists=True),
help="Absolute path to a directory of project templates.",
)
def init(ctx, projectdir, **options):
"""
Create a new project or, if [project_dir] exists or is inside a project, create a new ensemble"""
options.update(ctx.obj)
if not projectdir:
# if adding a project to an existing repository use '.unfurl' as the default name
if options.get("existing"):
projectdir = DefaultNames.ProjectDirectory
else: # otherwise use the current directory
projectdir = "."
projectPath = Project.findPath(projectdir)
if projectPath:
# dest is already in a project, so create a new ensemble in it instead of a new project
projectPath = os.path.dirname(projectPath) # strip out unfurl.yaml
# if projectPath is deeper than projectDir (happens if it is .unfurl) set projectDir to that
if len(os.path.abspath(projectPath)) > len(os.path.abspath(projectdir)):
projectdir = projectPath
message = initmod.clone(projectPath, projectdir, **options)
click.echo(message)
return
if os.path.exists(projectdir):
if not os.path.isdir(projectdir):
raise click.ClickException(
'Can not create project in "'
+ projectdir
+ '": file already exists with that name'
)
elif os.listdir(projectdir):
raise click.ClickException(
'Can not create project in "' + projectdir + '": folder is not empty'
)
"""
Base BZT classes
Copyright 2019 BlazeMeter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import codecs
import logging
import hashlib
import os
import time
from collections import namedtuple
from bzt import TaurusConfigError
from bzt import ToolError
from bzt.utils import numeric_types, Environment, RequiredTool, PIPE, SoapUIScriptConverter
from bzt.utils import to_json, BetterDict, ensure_is_dict, dehumanize_time
from .templates import FileLister
from .dicts import Scenario
from .names import EXEC, SCENARIO
class EngineModule(object):
"""
Base class for any BZT engine module
:type engine: Engine
:type settings: BetterDict
"""
def __init__(self):
self.log = logging.getLogger('')
self.engine = None
self.settings = BetterDict()
self.parameters = BetterDict()
def prepare(self):
"""
Preparation stage, at which configuration is being read, configs
and tools being prepared. All long preparations and checks should be
made here, to make `startup` stage as fast as possible.
"""
pass
def startup(self):
"""
Startup should be as fast as possible. Launch background processes,
do some API calls for initiation of actual work. Consider making all
checks and preparations on `prepare` stage.
"""
pass
def check(self):
"""
Check if work should be finished
:rtype: bool
:return: True if should be finished
"""
return False
def shutdown(self):
"""
Stop all processes that were started in `startup` stage.
Should also be as fast as possible, deferring all long operations to
`post_process` stage.
"""
pass
def post_process(self):
"""
Do all possibly long analysis and processing on run results
"""
pass
def _should_run(self):
"""
Returns True if provisioning matches run-at
"""
prov = self.engine.config.get(Provisioning.PROV)
runat = self.parameters.get("run-at", None)
if runat is not None and prov != runat:
self.log.debug("Should not run because of non-matching prov: %s != %s", prov, runat)
return False
return True
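# Example: with engine config {"provisioning": "local"} and module parameters
# {"run-at": "cloud"}, prov != runat, so this returns False and the module is
# skipped for this run.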
class Provisioning(EngineModule):
"""
Base class for any provisioning type. Provisioning is the way to
get the resources that will run the job. For example, local provisioning
means using local machine to run executors, remote means using
remote machines with BZT API nodes on them.
:type executors: list[ScenarioExecutor]
"""
PROV = "provisioning"
def __init__(self):
super(Provisioning, self).__init__()
self.extend_configs = False
self.executors = []
self.disallow_empty_execution = True
def prepare(self):
"""
Preparation in provisioning begins with reading executions list
and instantiating ScenarioExecutor classes for them
"""
super(Provisioning, self).prepare()
exc = TaurusConfigError("No 'execution' is configured. Did you forget to pass config files?")
executions = self.engine.config.get(EXEC, [])
if not executions and self.disallow_empty_execution:
raise exc
for execution in executions:
instance = self.engine.instantiate_module(execution.get("executor"))
instance.provisioning = self
instance.execution = execution
self.executors.append(instance)
class Reporter(EngineModule):
"""
This type of module is responsible for
in-test and post-test results analysis
"""
REP = "reporting"
def should_run(self):
return self._should_run()
class Service(EngineModule):
"""
This type of module is responsible for
in-test and post-test results analysis
"""
SERV = "services"
def should_run(self):
return self._should_run()
class Aggregator(EngineModule):
def __init__(self, is_functional):
super(Aggregator, self).__init__()
self.is_functional = is_functional
@staticmethod
def converter(data):
return data
class ScenarioExecutor(EngineModule):
"""
:type provisioning: engine.Provisioning
:type execution: BetterDict
"""
EXEC = EXEC # backward compatibility
RAMP_UP = "ramp-up"
HOLD_FOR = "hold-for"
CONCURR = "concurrency"
THRPT = "throughput"
STEPS = "steps"
LOAD_FMT = namedtuple("LoadSpec", "concurrency throughput ramp_up hold iterations duration steps")
def __init__(self):
super(ScenarioExecutor, self).__init__()
self.env = Environment(log=self.log)
self.provisioning = None
self.execution = BetterDict() # FIXME: why have this field if we have `parameters` from base class?
self._cached_scenario = None
self.label = None
self.widget = None
self.reader = None
self.stdout = None
self.stderr = None
self.delay = None
self.start_time = None
self.preprocess_args = lambda x: None
def _get_tool(self, tool, **kwargs):
instance = tool(env=self.env, log=self.log, http_client=self.engine.get_http_client(), **kwargs)
assert isinstance(instance, RequiredTool)
return instance
def has_results(self):
if self.reader and self.reader.buffer:
return True
else:
return False
def get_script_path(self, required=False, scenario=None):
"""
:type required: bool
:type scenario: Scenario
"""
if scenario is None:
scenario = self.get_scenario()
if required:
exc = TaurusConfigError("You must provide script for %s" % self)
script = scenario.get(Scenario.SCRIPT, exc)
else:
script = scenario.get(Scenario.SCRIPT)
if script:
script = self.engine.find_file(script)
scenario[Scenario.SCRIPT] = script
return script
def get_scenario(self, name=None):
"""
Returns scenario dict, extract if scenario is inlined
:return: DictOfDicts
"""
if name is None and self._cached_scenario is not None:
return self._cached_scenario
scenarios = self.engine.config.get("scenarios", force_set=True)
label = self._get_scenario_label(name, scenarios)
exc = TaurusConfigError("Scenario '%s' not found in scenarios: %s" % (label, scenarios.keys()))
scenario_dict = scenarios.get(label, exc)
scenario_obj = Scenario(self.engine, scenario_dict)
if self.engine.provisioning.extend_configs:
script = self.get_script_path(required=False, scenario=scenario_dict)
if script and script.lower().endswith('xml'):
script_content = ''
try:
with codecs.open(script, encoding="UTF-8") as fds:
script_content = fds.read()
except UnicodeDecodeError:
pass
if "con:soapui-project" in script_content:
scenario_obj = self._convert_soap_scenario(scenario_obj, script)
if name is None:
self._cached_scenario = scenario_obj
return scenario_obj
def _convert_soap_scenario(self, scenario_obj, script):
self.log.info("SoapUI project detected")
new_scenario_name, scenario_dict = self._extract_scenario_from_soapui(scenario_obj, script)
self.engine.config["scenarios"].merge({new_scenario_name: scenario_dict})
prev_scenario_name = self.execution["scenario"]
self.execution["scenario"] = new_scenario_name
for execution in self.engine.config.get(EXEC):
if execution.get(SCENARIO) == prev_scenario_name:
execution[SCENARIO] = new_scenario_name
return Scenario(self.engine, scenario_dict)
def _get_scenario_label(self, name, scenarios):
if name is None: # get current scenario
exc = TaurusConfigError("Scenario is not found in execution: %s" % self.execution)
label = self.execution.get('scenario', exc)
is_script = isinstance(label, str) and label not in scenarios and \
os.path.exists(self.engine.find_file(label))
if isinstance(label, list):
msg = "Invalid content of scenario, list type instead of dict or string: %s"
raise TaurusConfigError(msg % label)
if isinstance(label, dict) or is_script:
self.log.debug("Extract %s into scenarios" % label)
if isinstance(label, str):
scenario = BetterDict.from_dict({Scenario.SCRIPT: label})
else:
scenario = label
path = self.get_script_path(scenario=Scenario(self.engine, scenario))
if path:
label = os.path.basename(path)
if not path or label in scenarios:
hash_str = str(hashlib.md5(to_json(scenario).encode()).hexdigest())
label = 'autogenerated_' + hash_str[-10:]
scenarios[label] = scenario
self.execution['scenario'] = label
self.label = label
else: # get scenario by name
label = name
return label
def _extract_scenario_from_soapui(self, base_scenario, script_path):
test_case = base_scenario.get("test-case", None)
converter = SoapUIScriptConverter(self.log)
conv_config = converter.convert_script(script_path)
conv_scenarios = conv_config["scenarios"]
scenario_name, conv_scenario = converter.find_soapui_test_case(test_case, conv_scenarios)
new_name = scenario_name
counter = 1
while new_name in self.engine.config["scenarios"]:
new_name = scenario_name + ("-%s" % counter)
counter += 1
if new_name != scenario_name:
self.log.info("Scenario name '%s' is already taken, renaming to '%s'", scenario_name, new_name)
scenario_name = new_name
merged_scenario = BetterDict.from_dict(conv_scenario)
merged_scenario.merge(base_scenario.data)
for field in [Scenario.SCRIPT, "test-case"]:
if field in merged_scenario:
merged_scenario.pop(field)
return scenario_name, merged_scenario
def get_raw_load(self):
prov_type = self.engine.config.get(Provisioning.PROV)
for param in (ScenarioExecutor.THRPT, ScenarioExecutor.CONCURR):
ensure_is_dict(self.execution, param, prov_type)
throughput = self.execution.get(ScenarioExecutor.THRPT).get(prov_type, None)
concurrency = self.execution.get(ScenarioExecutor.CONCURR).get(prov_type, None)
iterations = self.execution.get("iterations", None)
steps = self.execution.get(ScenarioExecutor.STEPS, None)
hold = self.execution.get(ScenarioExecutor.HOLD_FOR, None)
ramp_up = self.execution.get(ScenarioExecutor.RAMP_UP, None)
return self.LOAD_FMT(concurrency=concurrency, ramp_up=ramp_up, throughput=throughput, hold=hold,
iterations=iterations, duration=None, steps=steps)
def get_load(self):
"""
Helper method to read load specification
"""
def eval_int(value):
try:
return int(value)
except (ValueError, TypeError):
return value
def eval_float(value):
try:
return float(value)
except (ValueError, TypeError):
return value
raw_load = self.get_raw_load()
iterations = eval_int(raw_load.iterations)
ramp_up = raw_load.ramp_up
throughput = eval_float(raw_load.throughput or 0)
concurrency = eval_int(raw_load.concurrency or 0)
steps = eval_int(raw_load.steps)
hold = dehumanize_time(raw_load.hold or 0)
if ramp_up is None:
duration = hold
else:
ramp_up = dehumanize_time(raw_load.ramp_up)
duration = hold + ramp_up
if not iterations:
if duration:
iterations = 0 # infinite
else:
iterations = 1
msg = ''
if not isinstance(concurrency, numeric_types + (type(None),)):
msg += "Invalid concurrency value[%s]: %s " % (type(concurrency).__name__, concurrency)
if not isinstance(throughput, numeric_types + (type(None),)):
msg += "Invalid throughput value[%s]: %s " % (type(throughput).__name__, throughput)
if not isinstance(steps, numeric_types + (type(None),)):
msg += "Invalid throughput value[%s]: %s " % (type(steps).__name__, steps)
if not isinstance(iterations, numeric_types + (type(None),)):
msg += "Invalid throughput value[%s]: %s " % (type(iterations).__name__, iterations)
if msg:
raise TaurusConfigError(msg)
return self.LOAD_FMT(concurrency=concurrency, ramp_up=ramp_up, throughput=throughput, hold=hold,
iterations=iterations, duration=duration, steps=steps)
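# Worked example (hypothetical execution values): ramp-up "1m" and hold-for
# "2m30s" dehumanize to 60 and 150 seconds, giving duration = 210; since a
# duration is set and no iteration count is configured, iterations becomes 0
# (i.e. unlimited).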
def get_resource_files(self):
files_list = []
if isinstance(self, FileLister):
files_list.extend(self.resource_files())
files_list.extend(self.execution.get("files", []))
return files_list
def __repr__(self):
return "%s/%s" % (self.execution.get("executor", None), self.label if self.label else id(self))
def prepare(self):
super(ScenarioExecutor, self).prepare()
self.env.set(self.execution.get("env"))
def _execute(self, args, **kwargs):
self.preprocess_args(args)
# for compatibility with other executors
kwargs["stdout"] = kwargs.get("stdout", self.stdout) or PIPE
kwargs["stderr"] = kwargs.get("stderr", self.stderr) or PIPE
kwargs["cwd"] = kwargs.get("cwd", None)
kwargs["env"] = self.env
self.start_time = time.time()
try:
process = self.engine.start_subprocess(args=args, **kwargs)
except OSError as exc:
# surface subprocess start failures as a tool error
raise ToolError("Failed to start %s: %s" % (self, exc))
import pandas, numpy
import functools
from plotly import graph_objs as go
from plotly.subplots import make_subplots
X_FIGURE_BUFFER = 0.03
from ...viz import colors, _pick_color
from ... import styles
import logging
_logger = logging.getLogger('EMAT.explore')
def _y_maximum(fig):
return sum(t.y for t in fig.select_traces()).max()
def fig_existing_lines(fig):
lines = []
if 'shapes' in fig['layout']:
for s in fig['layout']['shapes']:
if s['type'] == 'line':
lines.append(s)
return lines
def embolden(text, bold=True):
if text is None:
return None
if bold:
if "<b>" in text:
return text
else:
return f"<b>{text}</b>" if text else text
else:
if "<b>" not in text:
return text
else:
return text.replace("<b>","").replace("</b>","")
def new_histogram_figure(
selection,
data_column,
bins=20,
*,
marker_line_width=None,
selected_color=None,
unselected_color=None,
title_text=None,
on_select=None, # lambda *a: self._on_select_from_histogram(*a,name=col)
on_deselect=None, # lambda *a: self._on_deselect_from_histogram(*a,name=col)
figure_class=None,
box=None,
ref_point=None,
):
"""
Create a new histogram figure for use with the visualizer.
Args:
selection (pandas.Series):
The currently selected subset of the data.
data_column (pandas.Series):
The column of data used to define the histogram.
bins (int or array-like):
The number of histogram bins, or the precomputed bin edges.
marker_line_width:
selected_color:
unselected_color:
title_text:
on_select:
on_deselect:
figure_class ({go.FigureWidget, go.Figure}, optional):
The class type of figure to generate. If not given,
a go.FigureWidget is created.
Returns:
go.FigureWidget or go.Figure
"""
if unselected_color is None:
unselected_color = colors.DEFAULT_BASE_COLOR
if selected_color is None:
selected_color = colors.DEFAULT_HIGHLIGHT_COLOR
if figure_class is None:
figure_class = go.FigureWidget
if bins is None:
bins = 20
bar_heights, bar_x = numpy.histogram(data_column, bins=bins)
bins_left = bar_x[:-1]
bins_width = bar_x[1:] - bar_x[:-1]
bar_heights_select, bar_x = numpy.histogram(data_column[selection], bins=bar_x)
fig = figure_class(
data=[
go.Bar(
x=bins_left,
y=bar_heights_select,
width=bins_width,
name='Inside',
marker_color=selected_color,
marker_line_width=marker_line_width,
hoverinfo='skip',
),
go.Bar(
x=bins_left,
y=bar_heights - bar_heights_select,
width=bins_width,
name='Outside',
marker_color=unselected_color,
marker_line_width=marker_line_width,
hoverinfo='skip',
),
],
layout=dict(
barmode='stack',
showlegend=False,
margin=styles.figure_margins,
yaxis_showticklabels=False,
title_text=title_text,
title_x=0.5,
title_xanchor='center',
selectdirection='h',
dragmode='select',
**styles.figure_dims,
),
)
# fig._bins = bins
# fig._figure_kind = 'histogram'
if on_select is not None:
fig.data[1].on_selection(on_select)
if on_deselect is not None:
fig.data[1].on_deselect(on_deselect)
_y_max = _y_maximum(fig)
fig.layout.yaxis.range = (
-_y_max * 0.03,
_y_max * 1.05,
)
x_range = (
fig.data[0].x[0] - (fig.data[0].width[0] / 2),
fig.data[0].x[-1] + (fig.data[0].width[-1] / 2),
)
x_width = x_range[1] - x_range[0]
# When ref_point is outside the range, expand the
# range to include it.
if ref_point is not None:
if ref_point < x_range[0]:
x_range = (ref_point, x_range[1])
elif ref_point > x_range[1]:
x_range = (x_range[0], ref_point)
# Set the plotted range slightly wider than the
# actual range of the data, to accommodate drawing
# a box just beyond the range if needed.
fig.layout.xaxis.range = (
x_range[0] - x_width * X_FIGURE_BUFFER,
x_range[1] + x_width * X_FIGURE_BUFFER,
)
col = getattr(data_column, 'name', None)
fig = add_boxes_to_figure(box, col, fig, ref_point=ref_point)
return fig
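# Minimal usage sketch (assumes a pandas Series `values` and a boolean Series
# `inside` on the same index marking the current selection):
#   fig = new_histogram_figure(inside, values, bins=10,
#                              title_text="Example measure",
#                              figure_class=go.Figure)
#   fig.show()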
def update_histogram_figure(
fig,
selection,
data_column,
rerange_y=False,
box=None,
ref_point=None,
selected_color=None,
unselected_color=None,
):
"""
Update an existing figure used in the visualizer.
Args:
fig:
selection:
data_column:
Returns:
fig
"""
bins = list(fig['data'][0]['x'])
bins.append(fig['data'][0]['x'][-1] + fig['data'][0]['width'][-1])
bar_heights, bar_x = numpy.histogram(data_column, bins=bins)
bar_heights_select, bar_x = numpy.histogram(data_column[selection], bins=bar_x)
fig['data'][0]['y'] = bar_heights_select
fig['data'][1]['y'] = bar_heights - bar_heights_select
if rerange_y:
_y_max = numpy.max(bar_heights)
fig['layout']['yaxis']['range'] = (
-_y_max * 0.03,
_y_max * 1.05,
)
existing_lines = fig_existing_lines(fig) if ref_point is None else []
col = getattr(data_column, 'name', None)
fig = add_boxes_to_figure(box, col, fig, ref_point=ref_point, existing_shapes=existing_lines)
if unselected_color is not None:
fig['data'][1]['marker']['color'] = unselected_color
if selected_color is not None:
fig['data'][0]['marker']['color'] = selected_color
return fig
def interpret_histogram_selection(name, selection_range, box, data, scope):
select_min, select_max = selection_range
min_value, max_value = None, None
# Extract min and max from scope if possible
if scope is not None and name not in scope.get_measure_names():
min_value = scope[name].min
max_value = scope[name].max
# Extract min and max from .data if still missing
if min_value is None:
min_value = data[name].min()
if max_value is None:
max_value = data[name].max()
close_to_max_value = max_value - 0.03 * (max_value - min_value)
close_to_min_value = min_value + 0.03 * (max_value - min_value)
_logger.debug("name: %s limits: %f - %f", name, close_to_min_value, close_to_max_value)
if select_min <= close_to_min_value:
select_min = None
if select_max >= close_to_max_value:
select_max = None
_logger.debug("name: %s final range: %f - %f", name, select_min or numpy.nan, select_max or numpy.nan)
box.set_bounds(name, select_min, select_max)
return box
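# Worked example (hypothetical scope bounds 0..100): the snap thresholds are
# close_to_min_value = 3 and close_to_max_value = 97, so a dragged selection of
# (1.5, 60) is stored as bounds (None, 60) -- the lower edge falls within 3% of
# the minimum and is treated as unbounded.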
def new_frequencies_figure(
selection,
data_column,
labels,
*,
marker_line_width=None,
selected_color=None,
unselected_color=None,
title_text=None,
on_select=None, # lambda *a: self._on_select_from_freq(*a, name=col)
on_deselect=None, # lambda *a: self._on_deselect_from_histogram(*a, name=col)
on_click=None,
figure_class=None,
label_name_map=None,
box=None,
ref_point=None,
):
if unselected_color is None:
unselected_color = colors.DEFAULT_BASE_COLOR
if selected_color is None:
selected_color = colors.DEFAULT_HIGHLIGHT_COLOR
if figure_class is None:
figure_class = go.FigureWidget
if label_name_map is None:
label_name_map = {True: 'True', False: 'False'}
v = data_column.astype(
pandas.CategoricalDtype(categories=labels, ordered=False)
).cat.codes
bar_heights, bar_x = numpy.histogram(v, bins=numpy.arange(0, len(labels) + 1))
bar_heights_select, _ = numpy.histogram(v[selection], bins=numpy.arange(0, len(labels) + 1))
original_labels = labels
labels = [label_name_map.get(i, i) for i in labels]
fig = figure_class(
data=[
go.Bar(
x=labels,
y=bar_heights_select,
name='Inside',
marker_color=selected_color,
marker_line_width=marker_line_width,
hoverinfo='none',
),
go.Bar(
x=labels,
y=bar_heights - bar_heights_select,
name='Outside',
marker_color=unselected_color,
marker_line_width=marker_line_width,
hoverinfo='none',
),
],
layout=dict(
barmode='stack',
showlegend=False,
margin=styles.figure_margins,
yaxis_showticklabels=False,
title_text=title_text,
title_x=0.5,
title_xanchor='center',
selectdirection='h',
dragmode='select',
**styles.figure_dims,
meta=dict(x_tick_values=original_labels),
),
)
if on_select is not None:
fig.data[1].on_selection(on_select)
if on_deselect is not None:
fig.data[1].on_deselect(on_deselect)
if on_click is not None:
fig.data[0].on_click(on_click)
fig.data[1].on_click(on_click)
_y_max = _y_maximum(fig)
fig.layout.yaxis.range = (
-_y_max * 0.03,
_y_max * 1.05,
)
x_range = (
-0.5,
len(fig.data[0].x) - 0.5
)
x_width = x_range[1] - x_range[0]
fig.layout.xaxis.range = (
x_range[0] - x_width * X_FIGURE_BUFFER,
x_range[1] + x_width * X_FIGURE_BUFFER,
)
col = getattr(data_column, 'name', None)
fig = add_boxes_to_figure(box, col, fig, ref_point=ref_point)
return fig
def update_frequencies_figure(
fig,
selection,
data_column,
rerange_y=False,
box=None,
ref_point=None,
selected_color=None,
unselected_color=None,
):
labels = list(fig['layout']['meta']['x_tick_values'])
v = data_column.astype(
pandas.CategoricalDtype(categories=labels, ordered=False)
).cat.codes
bar_heights, bar_x = numpy.histogram(v, bins=numpy.arange(0, len(labels) + 1))
bar_heights_select, _ = numpy.histogram(v[selection], bins=numpy.arange(0, len(labels) + 1))
fig['data'][0]['y'] = bar_heights_select
fig['data'][1]['y'] = bar_heights - bar_heights_select
if rerange_y:
_y_max = numpy.max(bar_heights)
fig['layout']['yaxis']['range'] = (
-_y_max * 0.03,
_y_max * 1.05,
)
existing_lines = fig_existing_lines(fig) if ref_point is None else []
col = getattr(data_column, 'name', None)
fig = add_boxes_to_figure(box, col, fig, ref_point=ref_point, existing_shapes=existing_lines)
if unselected_color is not None:
fig['data'][1]['marker']['color'] = unselected_color
if selected_color is not None:
fig['data'][0]['marker']['color'] = selected_color
return fig
def add_boxes_to_figure(box, col, fig, ref_point=None, existing_shapes=None):
if existing_shapes is None:
existing_shapes = []
box_shapes = []
ref_shapes = []
_y_max = sum(t['y'] for t in fig['data']).max()
y_range = (
-_y_max * 0.02,
_y_max * 1.04,
)
if box is not None and col in box.thresholds:
x_lo, x_hi = None, None
thresh = box.thresholds.get(col)
if isinstance(thresh, Bounds):
x_lo, x_hi = thresh
if isinstance(thresh, set):
x_lo, x_hi = [], []
for tickval, ticktext in enumerate(fig['layout']['meta']['x_tick_values']):
if ticktext in thresh:
x_lo.append(tickval - 0.45)
x_hi.append(tickval + 0.45)
try:
x_range = (
fig['data'][0]['x'][0] - (fig['data'][0]['width'][0] / 2),
fig['data'][0]['x'][-1] + (fig['data'][0]['width'][-1] / 2),
)
except (TypeError, KeyError):
x_range = (
-0.5,
len(fig['data'][0]['x']) + 0.5
)
x_width = x_range[1] - x_range[0]
if x_lo is None:
x_lo = x_range[0] - x_width * 0.02
if x_hi is None:
x_hi = x_range[1] + x_width * 0.02
if not isinstance(x_lo, list):
x_lo = [x_lo]
if not isinstance(x_hi, list):
x_hi = [x_hi]
y_lo, y_hi = None, None
y_width = y_range[1] - y_range[0]
if y_lo is None:
y_lo = y_range[0] - y_width * 0
if y_hi is None:
y_hi = y_range[1] + y_width * 0
if not isinstance(y_lo, list):
y_lo = [y_lo]
if not isinstance(y_hi, list):
y_hi = [y_hi]
x_pairs = list(zip(x_lo, x_hi))
y_pairs = list(zip(y_lo, y_hi))
box_shapes.extend([
# Rectangle background color
go.layout.Shape(
type="rect",
xref="x1",
yref="y1",
x0=x_pair[0],
y0=y_pair[0],
x1=x_pair[1],
y1=y_pair[1],
line=dict(
width=0,
),
fillcolor=colors.DEFAULT_BOX_BG_COLOR,
opacity=0.2,
layer="below",
)
for x_pair in x_pairs
for y_pair in y_pairs
])
box_shapes.extend([
# Rectangle reference to the axes
go.layout.Shape(
type="rect",
xref="x1",
yref="y1",
x0=x_pair[0],
y0=y_pair[0],
x1=x_pair[1],
y1=y_pair[1],
line=dict(
width=2,
color=colors.DEFAULT_BOX_LINE_COLOR,
),
fillcolor='rgba(0,0,0,0)',
opacity=1.0,
)
for x_pair in x_pairs
for y_pair in y_pairs
])
if ref_point is not None:
try:
label_values = list(fig['layout']['meta']['x_tick_values'])
label_text = list(fig['data'][0]['x'])
except:
pass
else:
for x_val, x_txt in zip(label_values, label_text):
ref_point_ = str(ref_point).lower()
if ref_point == x_val or ref_point_ == str(x_val).lower() or ref_point_ == str(x_txt).lower():
ref_point = x_txt
break
ref_shapes.append(
go.layout.Shape(
type="line",
xref="x1",
yref="y1",
x0=ref_point,
y0=y_range[0],
x1=ref_point,
y1=y_range[1],
**colors.DEFAULT_REF_LINE_STYLE,
)
)
if 'title' in fig['layout']:
if box_shapes:
fig['layout']['title']['font']['color'] = colors.DEFAULT_BOX_LINE_COLOR
fig['layout']['title']['text'] = embolden(fig['layout']['title']['text'], True)
else:
fig['layout']['title']['font']['color'] = None
fig['layout']['title']['text'] = embolden(fig['layout']['title']['text'], False)
fig['layout']['shapes'] = existing_shapes + ref_shapes + box_shapes
return fig
from ...viz.perturbation import perturb_categorical
def axis_info(x, range_padding=0.0, epsilon=None, refpoint=None):
if hasattr(x, 'dtype'):
if epsilon is None:
s_ = x.size * 0.01
s_ = s_ / (1 + s_)
epsilon = 0.05 + 0.20 * s_
if isinstance(x.dtype, pandas.CategoricalDtype):
x_categories = x.cat.categories
x_ticktext = list(x_categories)
x_tickvals = list(range(len(x_ticktext)))
x_range = [-epsilon - range_padding, x_tickvals[-1] + epsilon + range_padding]
return x_ticktext, x_tickvals, x_range
if numpy.issubdtype(x.dtype, numpy.bool_):
x_range = [-epsilon - range_padding, 1 + epsilon + range_padding]
return ["False", "True"], [0,1], x_range
x_range = [x.min(), x.max()]
if refpoint is not None:
if refpoint < x_range[0]:
x_range[0] = refpoint
if refpoint > x_range[1]:
x_range[1] = refpoint
x_span = x_range[1] - x_range[0]
if x_span <= 0: x_span = 1
x_range = [x_range[0] - x_span*0.07, x_range[1] + x_span*0.07]
return None, None, x_range
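# Worked example (hypothetical data): for a categorical column with 100 rows and
# categories ["a", "b", "c"], epsilon = 0.05 + 0.20 * (1.0 / 2.0) = 0.15, so the
# function returns ticktext ["a", "b", "c"], tickvals [0, 1, 2] and an x-range
# of [-0.15, 2.15] (widened further by any requested range_padding).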
def perturb_categorical_df(df, col=None, suffix="perturb"):
if col is None:
cols = list(df.columns)
else:
cols = [col]
for i in cols:
if f"_{i}_{suffix}" in df.columns: continue
x, x_ticktext, x_tickvals, x_range, _ = perturb_categorical(df[i],
add_variance=(suffix=="perturb"))
if x_ticktext is not None:
df[f"_{i}_{suffix}"] = x
if col is not None:
if f"_{col}_{suffix}" in df.columns:
return df[f"_{col}_{suffix}"]
else:
return df[col]
return df
def _get_or_none(mapping, key):
if mapping is None:
return None
return mapping.get(key, None)
def new_splom_figure(
scope,
data,
rows="LX",
cols="M",
use_gl=True,
mass=250,
row_titles='top',
size=150,
selection=None,
box=None,
refpoint=None,
figure_class=None,
on_select=None, # lambda *a: self._on_select_from_histogram(*a,name=col)
on_deselect=None, # lambda *a: self._on_deselect_from_histogram(*a,name=col)
selected_color=None,
unselected_color=None,
marker_size=3,
):
if unselected_color is None:
unselected_color = colors.DEFAULT_BASE_COLOR
if selected_color is None:
selected_color = colors.DEFAULT_HIGHLIGHT_COLOR
selected_color_str = ", ".join(str(int(i)) for i in colors.interpret_color(selected_color))
unselected_color_str = ", ".join(str(int(i)) for i in colors.interpret_color(unselected_color))
def _make_axis_list(j):
if isinstance(j, str):
if set('XLM').issuperset(j.upper()):
use = []
for i in j.upper():
if i=='X':
use += scope.get_uncertainty_names()
elif i=='L':
use += scope.get_lever_names()
if i=='M':
use += scope.get_measure_names()
return use
return [j]
return j
rows = _make_axis_list(rows)
cols = _make_axis_list(cols)
rows = [i for i in rows if i in data.columns]
cols = [i for i in cols if i in data.columns]
row_titles_top = (row_titles=='top')
subplot_titles = []
specs = []
for rownum, row in enumerate(rows, start=1):
specs.append([])
for colnum, col in enumerate(cols, start=1):
specs[-1].append({
# "type": "xy",
# 'l':0.03,
# 'r':0.03,
# 't':0.03,
# 'b':0.03,
})
if colnum == 1 and row_titles_top:
subplot_titles.append(scope.tagged_shortname(row))
else:
subplot_titles.append(None)
if len(cols)==0 or len(rows)==0:
fig = go.Figure()
else:
fig = make_subplots(
rows=len(rows), cols=len(cols),
shared_xaxes=True,
shared_yaxes=True,
vertical_spacing=(0.18 if row_titles_top else 0.1)/len(rows),
horizontal_spacing=0.1/len(cols),
subplot_titles=subplot_titles,
specs=specs,
)
if row_titles_top:
for rowtitle in fig['layout']['annotations']:
rowtitle['x'] = 0
rowtitle['xanchor'] = 'left'
if size is not None:
fig['layout']['height'] = size * len(rows) + 75
fig['layout']['width'] = size * len(cols) + 100
Scatter = go.Scattergl if use_gl else go.Scatter
marker_opacity = _splom_marker_opacity(
data.index,
selection,
mass=mass,
)
if selection is None:
marker_color = None
else:
marker_color = pandas.Series(data=unselected_color, index=data.index)
marker_color[selection] = selected_color
experiment_name = "Experiment"
if data.index.name:
experiment_name = data.index.name
n = 0
extra_y_ax = len(rows) * len(cols)
for rownum, row in enumerate(rows, start=1):
for colnum, col in enumerate(cols, start=1):
n += 1
x = perturb_categorical_df(data, col)
y = perturb_categorical_df(data, row)
x_ticktext, x_tickvals, x_range = axis_info(data[col], range_padding=0.3,
refpoint=_get_or_none(refpoint, col))
y_ticktext, y_tickvals, y_range = axis_info(data[row], range_padding=0.3,
refpoint=_get_or_none(refpoint, row))
if row == col:
extra_y_ax += 1
import scipy.stats
try:
kde0 = scipy.stats.gaussian_kde(data[~selection][row])
kde1 = scipy.stats.gaussian_kde(data[selection][row])
except TypeError:
kde0 = scipy.stats.gaussian_kde(data[~selection][row].cat.codes)
kde1 = scipy.stats.gaussian_kde(data[selection][row].cat.codes)
x_fill = numpy.linspace(*x_range, 200)
y_0 = kde0(x_fill)
y_1 = kde1(x_fill)
topline = max(y_0.max(), y_1.max())
y_range_kde = (-0.07 * topline, 1.07 * topline)
layout_updates = {}
layout_updates[f'yaxis{extra_y_ax}'] = dict(
domain=fig['layout'][f'yaxis{n}']['domain'],
anchor='free',
showticklabels=False,
range=y_range_kde,
)
fig.update_layout(**layout_updates)
fig.add_trace(
go.Scatter(
x=[],
y=[],
mode='markers',
showlegend=False,
),
row=rownum, col=colnum,
)
fig.add_trace(
go.Scatter(
x=x_fill,
y=y_0,
yaxis=f"y{extra_y_ax}",
xaxis=f"x{n}",
showlegend=False,
line_color=f'rgb({unselected_color_str})',
fill='tozeroy',
)
)
fig.add_trace(
go.Scatter(
x=x_fill,
y=y_1,
yaxis=f"y{extra_y_ax}",
xaxis=f"x{n}",
showlegend=False,
line_color=f'rgb({selected_color_str})',
fill='tozeroy',
)
)
else:
if marker_color is None:
color = _pick_color(scope, row, col)
else:
color = marker_color
if x_ticktext is not None or y_ticktext is not None:
hovertemplate = (
f'<b>{scope.shortname(row)}</b>: %{{meta[1]}}<br>' +
f'<b>{scope.shortname(col)}</b>: %{{meta[2]}}' +
f'<extra>{experiment_name} %{{meta[0]}}</extra>'
)
meta = data[[row,col]].reset_index().to_numpy()
else:
hovertemplate = (
f'<b>{scope.shortname(row)}</b>: %{{y}}<br>' +
f'<b>{scope.shortname(col)}</b>: %{{x}}' +
f'<extra>{experiment_name} %{{meta}}</extra>'
)
meta = data.index
fig.add_trace(
Scatter(
x=x,
y=y,
mode='markers',
marker=dict(
size=marker_size,
opacity=marker_opacity,
color=color,
),
showlegend=False,
hovertemplate=hovertemplate,
meta=meta,
),
row=rownum, col=colnum,
)
if on_select is not None:
fig.data[-1].on_selection(functools.partial(on_select, col, row))
if on_deselect is not None:
fig.data[-1].on_deselect(functools.partial(on_deselect, col, row))
if box is not None:
shapes | |
3 3 9
4 4 16
5 5 25
6 6 36
7 7 49
8 8 64
9 9 81
>>> dff = df[df.x<=2]
>>> dff
# x y
0 0 0
1 1 1
2 2 4
>>> dff = dff.filter(dff.x >=7, mode="or")
>>> dff
# x y
0 0 0
1 1 1
2 2 4
3 7 49
4 8 64
5 9 81
"""
df = self.copy()
df.select(expression, name=FILTER_SELECTION_NAME, mode=mode)
df._cached_filtered_length = None  # invalidate cached length
df._filter_filled = False
# WARNING: this is a special case where we create a new filter
# the cache mask chunks still hold references to views on the old
# mask, and this new mask will be filled when required
df._selection_masks[FILTER_SELECTION_NAME] = vaex.superutils.Mask(int(df._length_unfiltered))
return df
def __getitem__(self, item):
"""Convenient way to get expressions, (shallow) copies of a few columns, or to apply filtering.
Example:
>>> df['Lz'] # the expression 'Lz'
>>> df['Lz/2'] # the expression 'Lz/2'
>>> df[["Lz", "E"]] # a shallow copy with just two columns
>>> df[df.Lz < 0] # a shallow copy with the filter Lz < 0 applied
"""
if isinstance(item, int):
names = self.get_column_names()
return [self.evaluate(name, item, item+1, array_type='python')[0] for name in names]
elif isinstance(item, six.string_types):
if hasattr(self, item) and isinstance(getattr(self, item), Expression):
return getattr(self, item)
# if item in self.virtual_columns:
# return Expression(self, self.virtual_columns[item])
# if item in self._virtual_expressions:
# return self._virtual_expressions[item]
if item not in self.column_names:
self.validate_expression(item)
item = vaex.utils.valid_expression(self.get_column_names(), item)
return Expression(self, item) # TODO we'd like to return the same expression if possible
elif isinstance(item, Expression):
expression = item.expression
return self.filter(expression)
elif isinstance(item, (tuple, list)):
df = self
if isinstance(item[0], slice):
df = df[item[0]]
if len(item) > 1:
if isinstance(item[1], int):
name = self.get_column_names()[item[1]]
return df[name]
elif isinstance(item[1], slice):
names = self.get_column_names().__getitem__(item[1])
return df[names]
for expression in item:
if expression not in self.column_names:
self.validate_expression(expression)
df = self.copy(column_names=item)
return df
elif isinstance(item, slice):
start, stop, step = item.start, item.stop, item.step
start = start or 0
stop = stop or len(self)
if start < 0:
start = len(self)+start
if stop < 0:
stop = len(self)+stop
stop = min(stop, len(self))
assert step in [None, 1]
if self.filtered:
self._fill_filter_mask()
mask = self._selection_masks[FILTER_SELECTION_NAME]
startf, stopf = mask.indices(start, stop-1) # -1 since it is inclusive
assert startf != -1
assert stopf != -1
stopf = stopf+1 # +1 to make it inclusive
start, stop = startf, stopf
df = self.trim()
df.set_active_range(start, stop)
return df.trim()
def __delitem__(self, item):
'''Alias of df.drop(item, inplace=True)'''
if item in self.columns:
name = item
if name in self._depending_columns(columns_exclude=[name]):
raise ValueError(f'Oops, you are trying to remove column {name} while other columns depend on it (use .drop instead)')
self.drop([item], inplace=True)
def _real_drop(self, item):
'''Removes a (virtual) column from the DataFrame.
Note: this does not check if the column is used in a virtual expression or in the filter\
and may lead to issues. It is safer to use :meth:`drop`.
'''
if isinstance(item, Expression):
name = item.expression
else:
name = item
if name in self.columns:
del self.columns[name]
self.column_names.remove(name)
elif name in self.virtual_columns:
del self.virtual_columns[name]
del self._virtual_expressions[name]
self.column_names.remove(name)
else:
matches = difflib.get_close_matches(name, self.get_column_names(hidden=True))
msg = "Column or variable %r does not exist." % name
if matches:
msg += ' Did you mean: ' + " or ".join(map(repr, matches))
raise KeyError(msg)
self.signal_column_changed.emit(self, name, "delete")
if hasattr(self, name):
try:
if isinstance(getattr(self, name), Expression):
delattr(self, name)
except:
pass
@docsubst
def drop(self, columns, inplace=False, check=True):
"""Drop columns (or a single column).
:param columns: List of columns or a single column name
:param inplace: {inplace}
:param check: When true, it will check if the column is used in virtual columns or the filter, and hide it instead.
"""
columns = _ensure_list(columns)
columns = _ensure_strings_from_expressions(columns)
df = self if inplace else self.copy()
depending_columns = df._depending_columns(columns_exclude=columns)
for column in columns:
if check and column in depending_columns:
df._hide_column(column)
else:
df._real_drop(column)
return df
def _hide_column(self, column):
'''Hides a column by prefixing the name with \'__\''''
column = _ensure_string_from_expression(column)
new_name = self._find_valid_name('__' + column)
self._rename(column, new_name)
return new_name
def _find_valid_name(self, initial_name):
'''Finds a non-colliding name by optional postfixing'''
return vaex.utils.find_valid_name(initial_name, used=self.get_column_names(hidden=True))
def _depending_columns(self, columns=None, columns_exclude=None, check_filter=True):
'''Find all depending columns for a set of columns (default all), minus the excluded ones'''
columns = set(columns or self.get_column_names(hidden=True))
if columns_exclude:
columns -= set(columns_exclude)
depending_columns = set()
for column in columns:
expression = self[str(column)]
depending_columns |= expression.variables()
depending_columns -= set(columns)
if check_filter:
if self.filtered:
selection = self.get_selection(FILTER_SELECTION_NAME)
depending_columns |= selection._depending_columns(self)
return depending_columns
def iterrows(self):
columns = self.get_column_names()
for i in range(len(self)):
yield i, {key: self.evaluate(key, i, i+1, array_type='python')[0] for key in columns}
#return self[i]
def __iter__(self):
"""Iterator over the column names."""
return iter(list(self.get_column_names()))
def _root_nodes(self):
"""Returns a list of string which are the virtual columns that are not used in any other virtual column."""
# these lists (~used as ordered set) keep track of leafes and root nodes
# root nodes
root_nodes = []
leafes = []
def walk(node):
# this function recursively walks the expression graph
if isinstance(node, six.string_types):
# we end up at a leaf
leafes.append(node)
if node in root_nodes: # so it cannot be a root node
root_nodes.remove(node)
else:
node_repr, fname, fobj, deps = node
if node_repr in self.virtual_columns:
# we encountered a virtual column, similar behaviour to a leaf
leafes.append(node_repr)
if node_repr in root_nodes:
root_nodes.remove(node_repr)
                # recursive part
for dep in deps:
walk(dep)
for column in self.virtual_columns.keys():
if column not in leafes:
root_nodes.append(column)
node = self[column]._graph()
            # we don't do the virtual column itself, just its dependencies
node_repr, fname, fobj, deps = node
for dep in deps:
walk(dep)
return root_nodes
def _graphviz(self, dot=None):
"""Return a graphviz.Digraph object with a graph of all virtual columns"""
from graphviz import Digraph
dot = dot or Digraph(comment='whole dataframe')
root_nodes = self._root_nodes()
for column in root_nodes:
self[column]._graphviz(dot=dot)
return dot
@docsubst
@stat_1d
def _agg(self, aggregator, binners=tuple(), delay=False, progress=None):
"""
:param delay: {delay}
:return: {return_stat_scalar}
"""
tasks, result = aggregator.add_tasks(self, binners, progress=progress)
return self._delay(delay, result)
def _binner(self, expression, limits=None, shape=None, selection=None, progress=None, delay=False):
expression = str(expression)
if limits is not None and not isinstance(limits, (tuple, str)):
limits = tuple(limits)
if expression in self._categories:
N = self._categories[expression]['N']
min_value = self._categories[expression]['min_value']
binner = self._binner_ordinal(expression, N, min_value)
binner = vaex.promise.Promise.fulfilled(binner)
else:
@delayed
def create_binner(limits):
return self._binner_scalar(expression, limits, shape)
binner = create_binner(self.limits(expression, limits, selection=selection, progress=progress, delay=True))
return self._delay(delay, binner)
def _binner_scalar(self, expression, limits, shape):
dtype = self.data_type(expression)
return BinnerScalar(expression, limits[0], limits[1], shape, dtype)
def _binner_ordinal(self, expression, ordinal_count, min_value=0, invert=False):
dtype = self.data_type(expression)
return BinnerOrdinal(expression, min_value, ordinal_count, invert, dtype)
def _binner_hash(self, expression, hash_map_unique):
dtype = self.data_type(expression)
return BinnerHash(expression, hash_map_unique, dtype)
def _create_binners(self, binby, limits, shape, selection=None, progress=None, delay=False):
if isinstance(binby, (list, tuple)):
binbys = binby
else:
binbys = [binby]
binbys = _ensure_strings_from_expressions(binbys)
for expression in binbys:
if expression:
self.validate_expression(expression)
binners = []
if len(binbys):
limits = _expand_limits(limits, len(binbys))
else:
limits = []
shapes = _expand_shape(shape, len(binbys))
for binby, limits1, shape in zip(binbys, limits, shapes):
binners.append(self._binner(binby, limits1, shape, selection, progress=progress, delay=True))
@delayed
def finish(*binners):
return binners
return self._delay(delay, finish(*binners))
@docsubst
def rolling(self, window, trim=False, column=None, fill_value=None, edge="right"):
'''Create a :py:data:`vaex.rolling.Rolling` rolling window object
:param int window: Size of the rolling window.
:param bool trim: {trim}
:param str or list[str] column: Column name or column names of columns affected (None for all)
:param any fill_value: Scalar value to use for data outside of existing rows.
:param str edge: Where the edge of the rolling window is for the current row.
'''
columns = self.get_column_names() if column is None else (column if _issequence(column) else [column])
from .rolling import Rolling
return Rolling(self, window, trim=trim, columns=columns, fill_value=fill_value, edge=edge)
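    # A minimal usage sketch for rolling(); this only shows constructing the
    # Rolling object, the aggregations available on it are defined in
    # vaex.rolling and are not assumed here:
    #   rolled = df.rolling(window=3, column='x', fill_value=0, edge='right')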
DataFrame.__hidden__ = {}
hidden = [name for name, func in vars(DataFrame).items() if getattr(func, '__hidden__', False)]
for name in hidden:
DataFrame.__hidden__[name] = getattr(DataFrame, name)
delattr(DataFrame, name)
del hidden
class ColumnProxy(collections.abc.MutableMapping):
def __init__(self, df):
self.df = df
@property
def dataset(self):
return self.df.dataset
def __delitem__(self, item):
assert item in self.dataset
self.df._dataset = self.dataset.dropped(item)
def __len__(self):
return len(self.dataset)
def __setitem__(self, item, value):
        if isinstance(self.dataset,
hresult = SCardIntroduceCardType(hcontext, znewcardName,
znewcardPrimGuid, znewcardPrimGuid + znewcardSecGuid,
znewcardATR, znewcardMask)
if hresult != SCARD_S_SUCCESS:
        raise error('Failed to introduce card type: ' + SCardGetErrorMessage(hresult))
...
"""
return _scard.SCardIntroduceCardType(hcontext, cardname, primaryprovider, providerlist, atr, mask)
def SCardIntroduceReader(hcontext, readername, devicename):
r"""
SCardIntroduceReader( hcontext, readername, devicename) -> SCARDRETCODE
Parameters
----------
hcontext: context handle return from SCardEstablishContext()
readername: card reader name
devicename: card reader device name
Introduces a reader to the smart card subsystem.
Windows only, not supported by PCSC lite wrapper.
from smartcard.scard import *
...
dummyreader = readers[0] + ' dummy'
hresult = SCardIntroduceReader(hcontext, dummyreader, readers[0])
if hresult != SCARD_S_SUCCESS:
        raise error('Unable to introduce reader: ' + dummyreader + ' : ' + SCardGetErrorMessage(hresult))
...
"""
return _scard.SCardIntroduceReader(hcontext, readername, devicename)
def SCardIntroduceReaderGroup(hcontext, groupname):
r"""
SCardIntroduceReaderGroup( hcontext, groupname) -> SCARDRETCODE
Parameters
----------
hcontext: context handle return from SCardEstablishContext()
groupname: card reader group name
Introduces a reader group to the smart card subsystem. However, the
reader group is not created until the group is specified when adding
a reader to the smart card database.
Windows only, not supported by PCSC lite wrapper.
from smartcard.scard import *
hresult, hcontext = SCardEstablishContext(SCARD_SCOPE_USER)
hresult = SCardIntroduceReaderGroup(hcontext, 'SCard$MyOwnGroup')
if hresult != SCARD_S_SUCCESS:
        raise error('Unable to introduce reader group: ' + SCardGetErrorMessage(hresult))
hresult = SCardAddReaderToGroup(hcontext, 'SchlumbergerSema Reflex USB v.2 0', 'SCard$MyOwnGroup')
if hresult != SCARD_S_SUCCESS:
        raise error('Unable to add reader to group: ' + SCardGetErrorMessage(hresult))
"""
return _scard.SCardIntroduceReaderGroup(hcontext, groupname)
def SCardListInterfaces(hcontext, cardname):
r"""
SCardListInterfaces( hcontext, cardname) -> SCARDRETCODE
Parameters
----------
hcontext: context handle return from SCardEstablishContext()
cardname: friendly name of a card
Provides a list of interfaces supplied by a given card. The caller
supplies the name of a smart card previously introduced to the subsystem,
and receives the list of interfaces supported by the card
Windows only, not supported by PCSC lite wrapper.
from smartcard.scard import *
hresult, hcontext = SCardEstablishContext(SCARD_SCOPE_USER)
hresult, interfaces = SCardListInterfaces(hcontext, 'Schlumberger Cryptoflex 8k v2')
if hresult != SCARD_S_SUCCESS:
        raise error('Failed to list interfaces: ' + SCardGetErrorMessage(hresult))
...
"""
return _scard.SCardListInterfaces(hcontext, cardname)
def SCardListCards(hcontext, atr, providerlist):
r"""
SCardListCards( hcontext, byte[] atr, GUID[] providerlist) -> SCARDRETCODE
Parameters
----------
hcontext: context handle return from SCardEstablishContext()
atr: card ATR
providerlist: list of GUIDs of interfaces supported by smart card
Searches the smart card database and provides a list of named cards
previously introduced to the system by the user. The caller specifies an
ATR string, a set of interface identifiers (GUIDs), or both. If both an
ATR string and an identifier array are supplied, the cards returned will
match the ATR string supplied and support the interfaces specified.
Windows only, not supported by PCSC lite wrapper.
from smartcard.scard import *
...
slbCryptoFlex8kv2ATR = [ 0x3B, 0x95, 0x15, 0x40, 0x00, 0x68, 0x01, 0x02, 0x00, 0x00 ]
hresult, card = SCardListCards(hcontext, slbCryptoFlex8kv2ATR, [])
    if hresult != SCARD_S_SUCCESS:
        raise error('Failure to locate Schlumberger Cryptoflex 8k v2 card: ' + SCardGetErrorMessage(hresult))
hresult, cards = SCardListCards(hcontext, [], [])
if hresult != SCARD_S_SUCCESS:
        raise error('Failure to list cards: ' + SCardGetErrorMessage(hresult))
    print('Cards: ', cards)
...
"""
return _scard.SCardListCards(hcontext, atr, providerlist)
def SCardLocateCards(hcontext, cards, readerstatelist):
r"""
SCardLocateCards( hcontext, cards, tuple[] readerstatelist) -> SCARDRETCODE
Parameters
----------
hcontext: context handle return from SCardEstablishContext()
cards: a list of cards to locate
readerstatelist: in input/output, a list of reader state tuple (readername, state, atr)
Searches the readers listed in the readerstate parameter for a card
with an ATR string that matches one of the card names specified in
mszCards, returning immediately with the result.
Windows only, not supported by PCSC lite wrapper.
from smartcard.scard import *
hresult, hcontext = SCardEstablishContext(SCARD_SCOPE_USER)
hresult, readers = SCardListReaders(hcontext, [])
readerstates = []
cards = ['Schlumberger Cryptoflex 4k', 'Schlumberger Cryptoflex 8k', 'Schlumberger Cryptoflex 8k v2']
    for i in range(len(readers)):
        readerstates += [(readers[i], SCARD_STATE_UNAWARE)]
    hresult, newstates = SCardLocateCards(hcontext, cards, readerstates)
    for i in newstates:
        reader, eventstate, atr = i
        print(reader, end=' ')
        for b in atr:
            print('0x%.2X' % b, end=' ')
        print()
        if eventstate & SCARD_STATE_ATRMATCH:
            print('Card found')
        if eventstate & SCARD_STATE_EMPTY:
            print('Reader empty')
        if eventstate & SCARD_STATE_PRESENT:
            print('Card present in reader')
...
"""
return _scard.SCardLocateCards(hcontext, cards, readerstatelist)
def SCardRemoveReaderFromGroup(hcontext, readername, groupname):
r"""
SCardRemoveReaderFromGroup( hcontext, readername, groupname) -> SCARDRETCODE
Parameters
----------
hcontext: context handle return from SCardEstablishContext()
readername: card reader name
groupname: card reader group name
Removes a reader from an existing reader group. This function has no
    effect on the reader.
Windows only, not supported by PCSC lite wrapper.
from smartcard.scard import *
hresult, hcontext = SCardEstablishContext(SCARD_SCOPE_USER)
hresult = SCardRemoveReaderFromGroup(hcontext, 'SchlumbergerSema Reflex USB v.2 0', 'SCard$MyOwnGroup')
if hresult != SCARD_S_SUCCESS:
        raise error('Unable to remove reader from group: ' + SCardGetErrorMessage(hresult))
...
"""
return _scard.SCardRemoveReaderFromGroup(hcontext, readername, groupname)
def SCardIsValidContext(hcontext):
r"""
SCardIsValidContext( hcontext) -> SCARDRETCODE
Parameters
----------
hcontext: context handle return from SCardEstablishContext()
This function determines whether a smart card context handle is still
valid. After a smart card context handle has been set by
SCardEstablishContext(), it may become not valid if the resource manager
service has been shut down.
from smartcard.scard import *
hresult, hcontext = SCardEstablishContext(SCARD_SCOPE_USER)
hresult = SCardIsValidContext(hcontext)
if hresult != SCARD_S_SUCCESS:
        raise error('Invalid context: ' + SCardGetErrorMessage(hresult))
...
"""
return _scard.SCardIsValidContext(hcontext)
def SCardGetAttrib(hcard, dwAttrId):
r"""
SCardGetAttrib( hcard, dwAttrId) -> SCARDRETCODE
Parameters
----------
hcard: card handle return from SCardConnect()
dwAttrId: value of attribute to get
    This function gets an attribute from the IFD Handler.
The possible attributes are:
======================================== ======= =======
    Attribute                                Windows PCSC
                                                     lite
======================================== ======= =======
SCARD_ATTR_ASYNC_PROTOCOL_TYPES Y
SCARD_ATTR_ATR_STRING Y Y
SCARD_ATTR_CHANNEL_ID Y Y
SCARD_ATTR_CHARACTERISTICS Y Y
SCARD_ATTR_CURRENT_BWT Y Y
SCARD_ATTR_CURRENT_CLK Y Y
SCARD_ATTR_CURRENT_CWT Y Y
SCARD_ATTR_CURRENT_D Y Y
SCARD_ATTR_CURRENT_EBC_ENCODING Y Y
SCARD_ATTR_CURRENT_F Y Y
SCARD_ATTR_CURRENT_IFSC Y Y
SCARD_ATTR_CURRENT_IFSD Y Y
SCARD_ATTR_CURRENT_IO_STATE Y Y
SCARD_ATTR_CURRENT_N Y Y
SCARD_ATTR_CURRENT_PROTOCOL_TYPE Y Y
SCARD_ATTR_CURRENT_W Y Y
SCARD_ATTR_DEFAULT_CLK Y Y
SCARD_ATTR_DEFAULT_DATA_RATE Y Y
SCARD_ATTR_DEVICE_FRIENDLY_NAME_A Y Y
SCARD_ATTR_DEVICE_FRIENDLY_NAME_W Y Y
SCARD_ATTR_DEVICE_IN_USE Y Y
SCARD_ATTR_DEVICE_SYSTEM_NAME_A Y Y
SCARD_ATTR_DEVICE_SYSTEM_NAME_W Y Y
SCARD_ATTR_DEVICE_UNIT Y Y
SCARD_ATTR_ESC_AUTHREQUEST Y Y
SCARD_ATTR_ESC_CANCEL Y Y
SCARD_ATTR_ESC_RESET Y Y
SCARD_ATTR_EXTENDED_BWT Y Y
SCARD_ATTR_ICC_INTERFACE_STATUS Y Y
SCARD_ATTR_ICC_PRESENCE Y Y
SCARD_ATTR_ICC_TYPE_PER_ATR Y Y
SCARD_ATTR_MAXINPUT Y Y
SCARD_ATTR_MAX_CLK Y Y
SCARD_ATTR_MAX_DATA_RATE Y Y
SCARD_ATTR_MAX_IFSD Y Y
SCARD_ATTR_POWER_MGMT_SUPPORT Y Y
SCARD_ATTR_SUPRESS_T1_IFS_REQUEST Y Y
SCARD_ATTR_SYNC_PROTOCOL_TYPES Y
SCARD_ATTR_USER_AUTH_INPUT_DEVICE Y Y
SCARD_ATTR_USER_TO_CARD_AUTH_DEVICE Y Y
SCARD_ATTR_VENDOR_IFD_SERIAL_NO Y Y
SCARD_ATTR_VENDOR_IFD_TYPE Y Y
SCARD_ATTR_VENDOR_IFD_VERSION Y Y
SCARD_ATTR_VENDOR_NAME Y Y
======================================== ======= =======
Not all the dwAttrId values listed above may be implemented in the IFD
Handler you are using. And some dwAttrId values not listed here may be
implemented.
from smartcard.scard import *
... establish context and connect to card ...
hresult, attrib = SCardGetAttrib(hcard, SCARD_ATTR_ATR_STRING)
if hresult == SCARD_S_SUCCESS:
for j in attrib:
            print('0x%.2X' % j, end=' ')
...
"""
return _scard.SCardGetAttrib(hcard, dwAttrId)
def SCardSetAttrib(hcard, dwAttrId, ATTRIBUTESIN):
r"""
SCardSetAttrib( hcard, dwAttrId, BYTELIST * ATTRIBUTESIN) -> SCARDRETCODE
Parameters
----------
hcard: card handle return from SCardConnect()
dwAttrId: value of attribute to get
ATTRIBUTESIN: BYTELIST *
This function sets an attribute from the IFD Handler. Not all
attributes are supported by all readers nor can they be set at all
times.
The possible attributes are:
======================================== ======= =======
    Attribute                                Windows PCSC
                                                     lite
======================================== ======= =======
SCARD_ATTR_ASYNC_PROTOCOL_TYPES Y
SCARD_ATTR_ATR_STRING Y Y
SCARD_ATTR_CHANNEL_ID Y Y
SCARD_ATTR_CHARACTERISTICS Y Y
SCARD_ATTR_CURRENT_BWT Y Y
SCARD_ATTR_CURRENT_CLK Y Y
SCARD_ATTR_CURRENT_CWT Y Y
SCARD_ATTR_CURRENT_D Y Y
SCARD_ATTR_CURRENT_EBC_ENCODING Y Y
SCARD_ATTR_CURRENT_F Y Y
SCARD_ATTR_CURRENT_IFSC Y Y
SCARD_ATTR_CURRENT_IFSD Y Y
SCARD_ATTR_CURRENT_IO_STATE Y Y
SCARD_ATTR_CURRENT_N Y Y
SCARD_ATTR_CURRENT_PROTOCOL_TYPE Y Y
SCARD_ATTR_CURRENT_W Y Y
SCARD_ATTR_DEFAULT_CLK Y Y
SCARD_ATTR_DEFAULT_DATA_RATE Y Y
SCARD_ATTR_DEVICE_FRIENDLY_NAME_A Y Y
SCARD_ATTR_DEVICE_FRIENDLY_NAME_W Y Y
SCARD_ATTR_DEVICE_IN_USE Y Y
SCARD_ATTR_DEVICE_SYSTEM_NAME_A Y Y
SCARD_ATTR_DEVICE_SYSTEM_NAME_W Y Y
    SCARD_ATTR_DEVICE_UNIT                   Y       Y
from time import sleep
from functools import wraps, partial
import logging
from pydbus import SystemBus, Variant
from .bzutils import BluezInterfaceObject
from .object_manager import BluezObjectManager
from . import error as bz
from .pydbus_backfill import ProxyMethodAsync
from gi.repository.GLib import Error as GLibError
from xml.etree import ElementTree as ET
class Adapter(BluezInterfaceObject):
iface = "org.bluez.{}1".format(__qualname__)
intro_xml = """<?xml version="1.0" ?>
<!DOCTYPE node
PUBLIC '-//freedesktop//DTD D-BUS Object Introspection 1.0//EN'
'http://www.freedesktop.org/standards/dbus/1.0/introspect.dtd'>
<node>
<interface name="org.freedesktop.DBus.Introspectable">
<method name="Introspect">
<arg direction="out" name="xml" type="s"/>
</method>
</interface>
<interface name="org.bluez.Adapter1">
<method name="StartDiscovery"/>
<method name="SetDiscoveryFilter">
<arg direction="in" name="properties" type="a{sv}"/>
</method>
<method name="StopDiscovery"/>
<method name="RemoveDevice">
<arg direction="in" name="device" type="o"/>
</method>
<method name="GetDiscoveryFilters">
<arg direction="out" name="filters" type="as"/>
</method>
<property access="read" name="Address" type="s"/>
<property access="read" name="AddressType" type="s"/>
<property access="read" name="Name" type="s"/>
<property access="readwrite" name="Alias" type="s"/>
<property access="read" name="Class" type="u"/>
<property access="readwrite" name="Powered" type="b"/>
<property access="readwrite" name="Discoverable" type="b"/>
<property access="readwrite" name="DiscoverableTimeout" type="u"/>
<property access="readwrite" name="Pairable" type="b"/>
<property access="readwrite" name="PairableTimeout" type="u"/>
<property access="read" name="Discovering" type="b"/>
<property access="read" name="UUIDs" type="as"/>
<property access="read" name="Modalias" type="s"/>
</interface>
<interface name="org.freedesktop.DBus.Properties">
<method name="Get">
<arg direction="in" name="interface" type="s"/>
<arg direction="in" name="name" type="s"/>
<arg direction="out" name="value" type="v"/>
</method>
<method name="Set">
<arg direction="in" name="interface" type="s"/>
<arg direction="in" name="name" type="s"/>
<arg direction="in" name="value" type="v"/>
</method>
<method name="GetAll">
<arg direction="in" name="interface" type="s"/>
<arg direction="out" name="properties" type="a{sv}"/>
</method>
<signal name="PropertiesChanged">
<arg name="interface" type="s"/>
<arg name="changed_properties" type="a{sv}"/>
<arg name="invalidated_properties" type="as"/>
</signal>
</interface>
<interface name="org.bluez.GattManager1">
<method name="RegisterApplication">
<arg direction="in" name="application" type="o"/>
<arg direction="in" name="options" type="a{sv}"/>
</method>
<method name="UnregisterApplication">
<arg direction="in" name="application" type="o"/>
</method>
</interface>
<interface name="org.bluez.LEAdvertisingManager1">
<method name="RegisterAdvertisement">
<arg direction="in" name="advertisement" type="o"/>
<arg direction="in" name="options" type="a{sv}"/>
</method>
<method name="UnregisterAdvertisement">
<arg direction="in" name="service" type="o"/>
</method>
<property access="read" name="ActiveInstances" type="y"/>
<property access="read" name="SupportedInstances" type="y"/>
<property access="read" name="SupportedIncludes" type="as"/>
<property access="read" name="SupportedSecondaryChannels" type="as"/>
</interface>
<interface name="org.bluez.Media1">
<method name="RegisterEndpoint">
<arg direction="in" name="endpoint" type="o"/>
<arg direction="in" name="properties" type="a{sv}"/>
</method>
<method name="UnregisterEndpoint">
<arg direction="in" name="endpoint" type="o"/>
</method>
<method name="RegisterPlayer">
<arg direction="in" name="player" type="o"/>
<arg direction="in" name="properties" type="a{sv}"/>
</method>
<method name="UnregisterPlayer">
<arg direction="in" name="player" type="o"/>
</method>
<method name="RegisterApplication">
<arg direction="in" name="application" type="o"/>
<arg direction="in" name="options" type="a{sv}"/>
</method>
<method name="UnregisterApplication">
<arg direction="in" name="application" type="o"/>
</method>
</interface>
<interface name="org.bluez.NetworkServer1">
<method name="Register">
<arg direction="in" name="uuid" type="s"/>
<arg direction="in" name="bridge" type="s"/>
</method>
<method name="Unregister">
<arg direction="in" name="uuid" type="s"/>
</method>
</interface>
</node>
"""
introspection = ET.fromstring(intro_xml)
@staticmethod
def list():
l = []
for c in BluezObjectManager.get_childs(only_direct=True):
try:
name = c.split("/")[-1]
l.append(Adapter(name))
except:
pass
return l
@classmethod
def from_obj(cls, obj):
return cls(obj.split("/")[-1])
@bz.convertBluezError
def __init__(self, name):
try:
super().__init__("/org/bluez/{}".format(name), name)
except (bz.BluezDoesNotExistError, bz.DBusUnknownObjectError):
raise bz.BluezDoesNotExistError(
"Adapter not found '{}'".format(name)
) from None
try:
if not bz.getBluezPropOrRaise(self._proxy, "Powered"):
self._proxy.Powered = True
except (bz.BluezDoesNotExistError, bz.DBusUnknownObjectError):
raise bz.BluezDoesNotExistError(
"Adapter not found '{}'".format(name)
) from None
@bz.convertBluezError
def scan(self, enable=True, filters=None):
"""
enable: enable/disable scanning
filters: dict with scan filters, see bluez 'SetDiscoveryFilter' API:
'UUIDs': list with UUID strings
'Transport': string 'le', 'bredr' or 'auto'
"""
if enable:
if filters and isinstance(filters, dict):
# convert to Variants (for supported)
if "UUIDs" in filters and not isinstance(filters["UUIDs"], Variant):
filters["UUIDs"] = Variant("as", filters["UUIDs"])
if "Transport" in filters and not isinstance(
filters["Transport"], Variant
):
filters["Transport"] = Variant("s", filters["Transport"])
bz.callBluezFunction(self._proxy.SetDiscoveryFilter, filters)
try:
bz.callBluezFunction(self._proxy.StartDiscovery)
except bz.BluezInProgressError:
pass
else:
try:
bz.callBluezFunction(self._proxy.StopDiscovery)
except bz.BluezFailedError:
pass
return bz.getBluezPropOrNone(self._proxy, "Discovering", fail_ret=False)
@property
def scanning(self):
return bz.getBluezPropOrNone(self._proxy, "Discovering", fail_ret=False)
@bz.convertBluezError
def devices(self):
"""
returns list with all scanned/connected/paired devices
"""
l = []
for obj in BluezObjectManager.get_childs(self.obj, only_direct=True):
try:
l.append(Device(adapter=self, obj=obj))
except:
pass
return l
def onDeviceAdded(self, func, *args, init=False, **kwargs):
"""
Registers callback for new device added/discovered
func: callback function(device: Device, properties: dict, *args, **kwargs)
init: set to True, to call func on all already existing devices
"""
om = BluezObjectManager.get()
if func:
def device_added(self_adapter, added_obj, added_if, *cbargs, **cbkwargs):
if Device.iface in added_if:
addr = None
properties = added_if[Device.iface]
if "Address" in added_if[Device.iface]:
addr = added_if[Device.iface]["Address"]
device = Device(self_adapter, addr=addr, obj=added_obj)
if "filter_interfaces" in cbkwargs:
del cbkwargs["filter_interfaces"]
self.logger.debug(
"call device_added: func: %s(%s,%s,%s)",
str(func),
str(device),
str(cbargs),
str(cbkwargs),
)
func(device, properties, *cbargs, **cbkwargs)
self.logger.debug(
"add device_added: func: %s(%s,%s,%s)",
str(device_added),
str(self),
str(args),
str(kwargs),
)
om.onObjectAdded(
self, device_added, *args, filter_interfaces=Device.iface, **kwargs
)
if init:
dev_objs = om.childs(self, only_direct=True)
for dobj in dev_objs:
try:
dev = Device(self, obj=dobj)
props = dev.properties
except:
continue
func(dev, props, *args, **kwargs)
else:
om.onObjectAdded(self, None)
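    # A minimal usage sketch for onDeviceAdded(); the callback name and body are
    # hypothetical and `adapter` is an Adapter instance:
    #   def on_new_device(device, properties):
    #       print(device.name, properties.get("RSSI"))
    #   adapter.onDeviceAdded(on_new_device, init=True)  # init=True also fires for already known devices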
def onDeviceRemoved(self, device, func, *args, **kwargs):
"""
Registers callback for device removed (either removed explicitly or scanning cache timeout)
func: callback function(adapter: Bluez.Adapter, device Bluez.Device, *args, **kwargs)
"""
om = BluezObjectManager.get()
if func:
def device_removed_cb(removed_device, removed_if, *cbargs, **cbkwargs):
if Device.iface in removed_if:
if "filter_interfaces" in cbkwargs:
del cbkwargs["filter_interfaces"]
self.logger.debug(
"call device_removed_cb: func: %s(%s,%s,%s)",
str(func),
str(removed_device),
str(cbargs),
str(cbkwargs),
)
func(self, device, *cbargs, **cbkwargs)
self.logger.debug(
"add device_removed_cb: func: %s(%s,%s,%s)",
str(device_removed_cb),
str(device),
str(args),
str(kwargs),
)
om.onObjectRemoved(
device,
device_removed_cb,
*args,
filter_interfaces=Device.iface,
**kwargs,
)
else:
om.onObjectRemoved(device, None, filter_interface=None)
@bz.convertBluezError
def paired_devices(self):
devs = self.devices()
paired_devs = []
for dev in devs:
try:
if dev.paired:
paired_devs.append(dev)
except Exception:
pass
return paired_devs
@bz.convertBluezError
def remove_device(self, dev_obj):
"""
remove device: disconnect, remove pairing keys, delete gatt db cache (in bluez)
"""
if not dev_obj:
raise ValueError("dev_obj argument is not valid")
if len(dev_obj) == 17 and len(dev_obj.split(":")) == 6:
dev_obj = "{}/dev_{}".format(self.obj, dev_obj.upper().replace(":", "_"))
elif not (
len(dev_obj) == 37
and dev_obj.startswith("/org/bluez/hci")
and len(dev_obj.split("/")) == 5
):
raise ValueError("dev_obj argument is not valid: {}".format(dev_obj))
try:
bz.callBluezFunction(self._proxy.RemoveDevice, dev_obj)
except bz.BluezDoesNotExistError:
pass
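    # A minimal usage sketch for remove_device(); it accepts either a MAC address
    # or a full device object path (values are illustrative only):
    #   adapter.remove_device('AA:BB:CC:DD:EE:FF')
    #   adapter.remove_device('/org/bluez/hci0/dev_AA_BB_CC_DD_EE_FF')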
def clear(self):
"""
remove all signal subscriptions and delete proxy
"""
BluezObjectManager.get().onObjectAdded(self, None)
self.obj = None
class Device(BluezInterfaceObject):
iface = "org.bluez.{}1".format(__qualname__)
intro_xml = """<?xml version="1.0" ?>
<!DOCTYPE node
PUBLIC '-//freedesktop//DTD D-BUS Object Introspection 1.0//EN'
'http://www.freedesktop.org/standards/dbus/1.0/introspect.dtd'>
<node>
<interface name="org.freedesktop.DBus.Introspectable">
<method name="Introspect">
<arg direction="out" name="xml" type="s"/>
</method>
</interface>
<interface name="org.bluez.Device1">
<method name="Disconnect"/>
<method name="Connect"/>
<method name="ConnectProfile">
<arg direction="in" name="UUID" type="s"/>
</method>
<method name="DisconnectProfile">
<arg direction="in" name="UUID" type="s"/>
</method>
<method name="Pair"/>
<method name="CancelPairing"/>
<property access="read" name="Address" type="s"/>
<property access="read" name="AddressType" type="s"/>
<property access="read" name="Name" type="s"/>
<property access="readwrite" name="Alias" type="s"/>
<property access="read" name="Class" type="u"/>
<property access="read" name="Appearance" type="q"/>
<property access="read" name="Icon" type="s"/>
<property access="read" name="Paired" type="b"/>
<property access="readwrite" name="Trusted" type="b"/>
<property access="readwrite" name="Blocked" type="b"/>
<property access="read" name="LegacyPairing" type="b"/>
<property access="read" name="RSSI" type="n"/>
<property access="read" name="MTU" type="n"/>
<property access="read" name="Connected" type="b"/>
<property access="read" name="UUIDs" type="as"/>
<property access="read" name="Modalias" type="s"/>
<property access="read" name="Adapter" type="o"/>
<property access="read" name="ManufacturerData" type="a{qv}"/>
<property access="read" name="ServiceData" type="a{sv}"/>
<property access="read" name="TxPower" type="n"/>
<property access="read" name="ServicesResolved" type="b"/>
</interface>
<interface name="org.freedesktop.DBus.Properties">
<method name="Get">
<arg direction="in" name="interface" type="s"/>
<arg direction="in" name="name" type="s"/>
<arg direction="out" name="value" type="v"/>
</method>
<method name="Set">
<arg direction="in" name="interface" type="s"/>
<arg direction="in" name="name" type="s"/>
<arg direction="in" name="value" type="v"/>
</method>
<method name="GetAll">
<arg direction="in" name="interface" type="s"/>
<arg direction="out" name="properties" type="a{sv}"/>
</method>
<signal name="PropertiesChanged">
<arg name="interface" type="s"/>
<arg name="changed_properties" type="a{sv}"/>
<arg name="invalidated_properties" type="as"/>
</signal>
</interface>
</node>"""
introspection = ET.fromstring(intro_xml)
@bz.convertBluezError
def __init__(self, adapter=None, addr=None, obj=None):
if not obj and (not adapter and not addr):
raise ValueError("Either 'obj' or 'adapter' and 'addr' must be given")
if adapter and addr:
if isinstance(adapter, str):
tmp_obj = "/org/bluez/{}/dev_{}".format(
adapter, addr.upper().replace(":", "_")
)
adapter = Adapter(adapter)
else:
tmp_obj = "{}/dev_{}".format(
adapter.obj, addr.upper().replace(":", "_")
)
if obj and tmp_obj != obj:
raise ValueError(
"'obj' and 'adapter' and 'addr' given, but do not match"
)
obj = tmp_obj
super().__init__(obj, addr)
if obj and not addr:
if addr:
self.name = addr
else:
self.name = self._getBluezPropOrNone("Address")
if not self.name:
try:
self.name = obj.split("/")[4][4:].replace("_", ":")
except Exception:
pass
if not adapter and obj:
adapter = Adapter(obj.split("/")[3])
self.adapter = adapter
@bz.convertBluezError
def pair(self):
try:
return bz.callBluezFunction(self._proxy.Pair)
except bz.BluezAlreadyExistsError:
self.logger.warning("Already paired: %s", str(self))
return self.paired
return False
@property
def paired(self):
return self._getBluezPropOrNone("Paired", fail_ret=False)
@property
def connected(self):
return self._getBluezPropOrNone("Connected", fail_ret=False)
@bz.convertBluezError
def connect_async(self, done_cb, err_cb, data, timeout=30):
if done_cb:
def _done_cb(obj, res, user_data):
done_cb(self, res, user_data)
else:
_done_cb = None
if err_cb:
def _err_cb(obj, res, user_data):
try:
bz.getDBusError(res)
except Exception as e:
res = e
err_cb(self, res, user_data)
else:
_err_cb = None
self._proxy.ConnectAsync(_done_cb, _err_cb, data, timeout=timeout)
@bz.convertBluezError
def connect(self):
try:
self._proxy.Connect(timeout=30)
except Exception as e:
self.logger.error(str(e))
pass
@bz.convertBluezError
def disconnect(self):
try:
bz.callBluezFunction(self._proxy.Disconnect)
except bz.BluezInProgressError:
return not self.connected
except bz.DBusUnknownObjectError:
pass
return False
@bz.convertBluezError
def remove(self):
if self.obj:
ad_name = self.obj.split("/")[3]
try:
ad = Adapter(ad_name)
ad.remove_device(self.obj)
except bz.BluezError:
pass
# @bz.convertBluezError
# def address(self):
# return self.name
@property
def address(self):
        return self.name
cls._rest_interaction(rest_communication.patch_entry, *args, **kwargs)
class GenerateHamiltonInputEPP(StepEPP):
# define the rows and columns in the input plate (standard 96 well plate pattern)
plate_rows = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']
plate_columns = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12']
csv_column_headers = None
output_file_name = None
def __init__(self, argv=None):
""" additional argument required for the location of the Hamilton input file so def __init__ customised."""
super().__init__(argv)
self.hamilton_input = self.cmd_args.hamilton_input
self.shared_drive_path = os.path.abspath(self.cmd_args.shared_drive_path)
assert self.csv_column_headers is not None, 'csv_column_headers needs to be set by the child class'
assert self.output_file_name is not None, 'output_file_name needs to be set by the child class'
assert self._max_nb_input_containers is not None, 'number of permitted input containers needs to be set ' \
'by the child class'
assert self._max_nb_output_containers is not None, 'number of permitted output containers needs to be set ' \
'by the child class'
@staticmethod
def add_args(argparser):
argparser.add_argument(
'-i', '--hamilton_input', type=str, required=True, help='Hamilton input file generated by the LIMS'
)
argparser.add_argument(
'-d', '--shared_drive_path', type=str, required=True,
help='Shared drive path location for Hamilton input file'
)
@staticmethod
def write_csv(filename, csv_array):
"""Write the list of list to the file provided as a csv file"""
with open(filename, 'w', newline='') as csvFile:
writer = csv.writer(csvFile)
writer.writerows(csv_array)
@cached_property
def input_container_names(self):
"""The name of containers from input artifacts.
Disregards standards containers as these are not stored correctly in the LIMS.
        Standards are identified by a sample well location of 1:1."""
containers = set()
for art in self.artifacts:
# Check to see if artifact has a container before retrieving the container.
# Artifacts that are not samples will not have containers.
if art.container and art.location[1] != '1:1':
containers.add(art.container.name)
return sorted(containers)
def rsb_barcode(self):
# find the lot number, i.e. barcode, of the RSB reagent.
RSB_template = "LP[0-9]{7}-RSB"
reagent_lots = list(self.process.step.reagent_lots)
rsb_barcode = None
for lot in reagent_lots:
if re.match(RSB_template, lot.lot_number):
rsb_barcode = lot.lot_number
if not rsb_barcode:
raise InvalidStepError(message='Please assign RSB lot before generating Hamilton input.')
return rsb_barcode
@property
def shared_drive_file_path(self):
return os.path.join(self.shared_drive_path, self.output_file_name)
def _generate_csv_dict(self):
"""Provides the lines to write to the csv files in a dictionary
where the key is a well position such as 'A:1' and the value is the line of the csv. """
raise NotImplementedError
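    # Sketch of the dictionary shape a child class is expected to return here
    # (sample names and fields are hypothetical):
    #   {'A:1': ['sample_1', 'source_plate_1', 'A1'], 'B:1': ['sample_2', 'source_plate_1', 'B1']}
    # Keys must be 'row:column' strings so generate_csv_array() can order them by
    # column then row; each value is one line of the csv as a list of fields.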
def generate_csv_array(self):
"""
Generate the csv array from the implemented csv dictionary.
It sorts the csv lines by column (self.plate_columns) then row (self.plate_rows)
"""
csv_dict = self._generate_csv_dict()
if self.csv_column_headers:
csv_rows = [self.csv_column_headers]
else:
csv_rows = []
counter = 0
for column in self.plate_columns:
for row in self.plate_rows:
if row + ":" + column in csv_dict.keys():
csv_rows.append(csv_dict[row + ":" + column])
counter += 1
if counter == 0:
raise InvalidStepError("No valid keys present in csv_dict. Key format must be row:column e.g. A:1.")
return csv_rows
def _run(self):
"""Generic run that check the number of input and output container
then creates the two csv files ('-hamilton_input.csv' and the one on the shared drive)."""
csv_array = self.generate_csv_array()
# Create and write the Hamilton input file, this must have the hamilton_input argument as the prefix as
# this is used by Clarity LIMS to recognise the file and attach it to the step
self.write_csv(self.hamilton_input + '-hamilton_input.csv', csv_array)
self.write_csv(self.shared_drive_file_path, csv_array)
class Autoplacement(StepEPP):
"""Script for performing autoplacement of samples. If 1 input and 1 output then 1:1 placement. If multiple input plates then
takes all samples from each plate before next plate Loops through all inputs and assigns them to the next available space
by column-row in the output plate"""
output_plate_layout_rows = None
output_plate_layout_columns = None
input_plate_layout_columns = None
input_plate_layout_rows = None
def __init__(self, argv=None):
super().__init__(argv)
assert self.output_plate_layout_rows is not None, 'output_plate_layout_rows needs to be set by the child class'
assert self.output_plate_layout_columns is not None, 'output_plate_layout_columns needs to be set by the child class'
assert self.input_plate_layout_rows is not None, 'input_plate_layout_rows needs to be set by the child class'
assert self.input_plate_layout_columns is not None, 'input_plate_layout_columns needs to be set by the child class'
def generate_input_container_nested_dict(self):
        # loop through the inputs, assemble a nested dictionary {container: {input.location: output}}; this can then be
# queried in the order container-row-column so the order of the inputs in the Hamilton input file is
# as efficient as possible.
nested_dict = {}
for art in self.artifacts:
# obtain outputs for the input that are analytes, assume step is not configured to allow replicates
# so will always work with output[0]
output = self.process.outputs_per_input(art.id, Analyte=True)[0]
# add the input_location_output_dict to the input_container_nested dict
if art.container not in nested_dict.keys():
nested_dict[art.container] = {}
# start assembling one of the variables needed to use the set_placement function
# note that .location returns a tuple with the container entity and the well location as the string in position [1]
nested_dict[art.container][art.location[1]] = output
return nested_dict
def generate_output_placement(self, output_container):
output_plate_layout = [(row + ":" + column) for column, row in
itertools.product(self.output_plate_layout_columns, self.output_plate_layout_rows)]
placement = []
# obtain the dictionary containing the source information
input_container_nested_dict = self.generate_input_container_nested_dict()
        # create a counter so each available well in the output plate is used only once (e.g. 24 wells available in the 96 well plate)
well_counter = 0
# loop through the input containers and place the samples in row-column order - this makes pipetting as efficient
# as possible, particularly if only 1 input plate so 1:1 pipetting
for container in sorted(input_container_nested_dict, key=lambda x: x.name):
for column in self.input_plate_layout_columns:
for row in self.input_plate_layout_rows:
# populate list of tuples for set_placements if well exists in input plate
if row + ":" + column in input_container_nested_dict[container]:
placement.append((input_container_nested_dict[container][row + ":" + column],
(output_container, output_plate_layout[well_counter])))
well_counter += 1
return placement
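    # The placement list built above has the shape expected by Clarity's
    # set_placements, e.g. (artifact and container objects are illustrative):
    #   [(input_artifact_1, (output_container, 'A:1')),
    #    (input_artifact_2, (output_container, 'B:1'))]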
def _run(self):
        # updating the container requires a list variable containing the containers; only one container will be present in the step
        # because the container has not yet been fully populated, it must be obtained from the step rather than from the output
output_container_list = self.process.step.placements.get_selected_containers()
# need a list of tuples for set_placements
output_placement = self.generate_output_placement(output_container_list[0])
# push the output locations to the LIMS
self.process.step.set_placements(output_container_list, output_placement)
class ParseSpectramaxEPP(StepEPP):
_use_load_config = False # prevent the loading of the config
    # define the starting well for data parsing, e.g. if the standards occupy the first 24 wells, parsing should start from well A4.
    # Do not use a semi-colon to separate the row and column.
starting_well = None
def __init__(self, argv=None):
""" additional argument required for the location of the Hamilton input file so def __init__ customised."""
super().__init__(argv)
self.spectramax_file = self.cmd_args.spectramax_file
self.sample_concs = {}
self.plate_names = []
self.plates = defaultdict(dict)
assert self.starting_well is not None, 'starting_well needs to be set by the child class'
@staticmethod
def add_args(argparser):
argparser.add_argument('--spectramax_file', type=str, required=True,
help='Spectramax output file from the step')
def parse_spectramax_file(self):
f = self.open_or_download_file(self.spectramax_file, encoding='utf-16', crlf=True)
encountered_unknowns = False
in_unknowns = False
for line in f:
if line.startswith('Group: Unknowns'):
assert not in_unknowns
in_unknowns = True
encountered_unknowns = True
elif line.startswith('~End'):
in_unknowns = False
elif in_unknowns:
if line.startswith('Sample') or line.startswith('Group Summaries'):
pass
else:
split_line = line.split('\t')
self.sample_concs[int(split_line[0])] = (split_line[1], float(split_line[3]))
elif line.startswith('Plate:') and encountered_unknowns:
self.plate_names.append(line.split('\t')[1])
if self.sample_concs[1][0] != self.starting_well:
raise AssertionError(
'Badly formed spectramax file: first well for samples is %s but expected to be %s'
% (str(self.sample_concs[1][0]), str(self.starting_well))
)
self.debug('Found %s samples and %s plates', len(self.sample_concs), len(self.plate_names))
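    # Sketch of the spectramax sections this parser relies on (tab-separated,
    # values are illustrative only):
    #   Group: Unknowns
    #   Sample  Wells  ...  Concentration
    #   1       A4     ...  52.3
    #   ~End
    #   Plate:  Plate1 ...
    # Column 0 is the sample index, column 1 the well coordinate and column 3
    # the concentration read by parse_spectramax_file().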
def assign_samples_to_plates(self):
plate_idx = -1
plate_name = None
for i in sorted(self.sample_concs): # go through in ascending order...
coord, conc = self.sample_concs[i]
if coord == self.starting_well: # ... and take the variable starting_well coord as the start of a new plate
plate_idx += 1
plate_name = self.plate_names[plate_idx]
if coord in self.plates[plate_name]:
raise AssertionError(
'Badly formed spectramax file: tried to add coord %s for sample %s to plate %s' % (
coord, i, plate_name
)
)
self.plates[plate_name][coord] = conc
def _add_plates_to_step(self):
# populates the artifacts with the data from result file based on plate and well position. Data uploaded to LIMS with put batch
raise NotImplementedError
def _run(self):
self.parse_spectramax_file()
self.assign_samples_to_plates()
batch_artifacts = self._add_plates_to_step()
self.lims.put_batch(list(batch_artifacts))
def get_workflow_stage(lims, workflow_name, stage_name=None):
workflows = [w for w in lims.get_workflows() if w.name == workflow_name]
    if
self._offset == self._failure:
self._expected.append('[0-9a-zA-Z_]')
self._offset = index13
if address12 is FAILURE:
address12 = TreeNode(self._input[self._offset:self._offset], self._offset, [])
self._offset = self._offset
else:
address12 = FAILURE
if address12 is not FAILURE:
elements5.append(address12)
else:
elements5 = None
self._offset = index12
else:
elements5 = None
self._offset = index12
if elements5 is None:
address0 = FAILURE
else:
address0 = TreeNode(self._input[index12:self._offset], index12, elements5)
self._offset = self._offset
if address0 is FAILURE:
self._offset = index1
index14, elements6 = self._offset, []
address13 = FAILURE
chunk12, max12 = None, self._offset + 4
if max12 <= self._input_size:
chunk12 = self._input[self._offset:max12]
if chunk12 == 'that':
address13 = TreeNode(self._input[self._offset:self._offset + 4], self._offset, [])
self._offset = self._offset + 4
else:
address13 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('\'that\'')
if address13 is not FAILURE:
elements6.append(address13)
address14 = FAILURE
index15 = self._offset
chunk13, max13 = None, self._offset + 1
if max13 <= self._input_size:
chunk13 = self._input[self._offset:max13]
if chunk13 is not None and Grammar.REGEX_11.search(chunk13):
address14 = TreeNode(self._input[self._offset:self._offset + 1], self._offset, [])
self._offset = self._offset + 1
else:
address14 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('[0-9a-zA-Z_]')
self._offset = index15
if address14 is FAILURE:
address14 = TreeNode(self._input[self._offset:self._offset], self._offset, [])
self._offset = self._offset
else:
address14 = FAILURE
if address14 is not FAILURE:
elements6.append(address14)
else:
elements6 = None
self._offset = index14
else:
elements6 = None
self._offset = index14
if elements6 is None:
address0 = FAILURE
else:
address0 = TreeNode(self._input[index14:self._offset], index14, elements6)
self._offset = self._offset
if address0 is FAILURE:
self._offset = index1
index16, elements7 = self._offset, []
address15 = FAILURE
chunk14, max14 = None, self._offset + 2
if max14 <= self._input_size:
chunk14 = self._input[self._offset:max14]
if chunk14 == 'at':
address15 = TreeNode(self._input[self._offset:self._offset + 2], self._offset, [])
self._offset = self._offset + 2
else:
address15 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('\'at\'')
if address15 is not FAILURE:
elements7.append(address15)
address16 = FAILURE
index17 = self._offset
chunk15, max15 = None, self._offset + 1
if max15 <= self._input_size:
chunk15 = self._input[self._offset:max15]
if chunk15 is not None and Grammar.REGEX_12.search(chunk15):
address16 = TreeNode(self._input[self._offset:self._offset + 1], self._offset, [])
self._offset = self._offset + 1
else:
address16 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('[0-9a-zA-Z_]')
self._offset = index17
if address16 is FAILURE:
address16 = TreeNode(self._input[self._offset:self._offset], self._offset, [])
self._offset = self._offset
else:
address16 = FAILURE
if address16 is not FAILURE:
elements7.append(address16)
else:
elements7 = None
self._offset = index16
else:
elements7 = None
self._offset = index16
if elements7 is None:
address0 = FAILURE
else:
address0 = TreeNode(self._input[index16:self._offset], index16, elements7)
self._offset = self._offset
if address0 is FAILURE:
self._offset = index1
index18, elements8 = self._offset, []
address17 = FAILURE
chunk16, max16 = None, self._offset + 5
if max16 <= self._input_size:
chunk16 = self._input[self._offset:max16]
if chunk16 == 'after':
address17 = TreeNode(self._input[self._offset:self._offset + 5], self._offset, [])
self._offset = self._offset + 5
else:
address17 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('\'after\'')
if address17 is not FAILURE:
elements8.append(address17)
address18 = FAILURE
index19 = self._offset
chunk17, max17 = None, self._offset + 1
if max17 <= self._input_size:
chunk17 = self._input[self._offset:max17]
if chunk17 is not None and Grammar.REGEX_13.search(chunk17):
address18 = TreeNode(self._input[self._offset:self._offset + 1], self._offset, [])
self._offset = self._offset + 1
else:
address18 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('[0-9a-zA-Z_]')
self._offset = index19
if address18 is FAILURE:
address18 = TreeNode(self._input[self._offset:self._offset], self._offset, [])
self._offset = self._offset
else:
address18 = FAILURE
if address18 is not FAILURE:
elements8.append(address18)
else:
elements8 = None
self._offset = index18
else:
elements8 = None
self._offset = index18
if elements8 is None:
address0 = FAILURE
else:
address0 = TreeNode(self._input[index18:self._offset], index18, elements8)
self._offset = self._offset
if address0 is FAILURE:
self._offset = index1
index20, elements9 = self._offset, []
address19 = FAILURE
chunk18, max18 = None, self._offset + 7
if max18 <= self._input_size:
chunk18 = self._input[self._offset:max18]
if chunk18 == 'restart':
address19 = TreeNode(self._input[self._offset:self._offset + 7], self._offset, [])
self._offset = self._offset + 7
else:
address19 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('\'restart\'')
if address19 is not FAILURE:
elements9.append(address19)
address20 = FAILURE
index21 = self._offset
chunk19, max19 = None, self._offset + 1
if max19 <= self._input_size:
chunk19 = self._input[self._offset:max19]
if chunk19 is not None and Grammar.REGEX_14.search(chunk19):
address20 = TreeNode(self._input[self._offset:self._offset + 1], self._offset, [])
self._offset = self._offset + 1
else:
address20 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('[0-9a-zA-Z_]')
self._offset = index21
if address20 is FAILURE:
address20 = TreeNode(self._input[self._offset:self._offset], self._offset, [])
self._offset = self._offset
else:
address20 = FAILURE
if address20 is not FAILURE:
elements9.append(address20)
else:
elements9 = None
self._offset = index20
else:
elements9 = None
self._offset = index20
if elements9 is None:
address0 = FAILURE
else:
address0 = TreeNode(self._input[index20:self._offset], index20, elements9)
self._offset = self._offset
if address0 is FAILURE:
self._offset = index1
index22, elements10 = self._offset, []
address21 = FAILURE
chunk20, max20 = None, self._offset + 6
if max20 <= self._input_size:
chunk20 = self._input[self._offset:max20]
if chunk20 == 'before':
address21 = TreeNode(self._input[self._offset:self._offset + 6], self._offset, [])
self._offset = self._offset + 6
else:
address21 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('\'before\'')
if address21 is not FAILURE:
elements10.append(address21)
address22 = FAILURE
index23 = self._offset
chunk21, max21 = None, self._offset + 1
if max21 <= self._input_size:
chunk21 = self._input[self._offset:max21]
if chunk21 is not None and Grammar.REGEX_15.search(chunk21):
address22 = TreeNode(self._input[self._offset:self._offset + 1], self._offset, [])
self._offset = self._offset + 1
else:
address22 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('[0-9a-zA-Z_]')
self._offset = index23
if address22 is FAILURE:
address22 = TreeNode(self._input[self._offset:self._offset], self._offset, [])
self._offset = self._offset
else:
address22 = FAILURE
if address22 is not FAILURE:
elements10.append(address22)
else:
elements10 = None
self._offset = index22
else:
elements10 = None
self._offset = index22
if elements10 is None:
address0 = FAILURE
else:
address0 = TreeNode(self._input[index22:self._offset], index22, elements10)
self._offset = self._offset
if address0 is FAILURE:
self._offset = index1
index24, elements11 = self._offset, []
address23 = FAILURE
chunk22, max22 = None, self._offset + 4
if max22 <= self._input_size:
chunk22 = self._input[self._offset:max22]
if chunk22 == 'when':
address23 = TreeNode(self._input[self._offset:self._offset + 4], self._offset, [])
self._offset = self._offset + 4
else:
address23 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('\'when\'')
if address23 is not FAILURE:
elements11.append(address23)
address24 = FAILURE
index25 = self._offset
chunk23, max23 = None, self._offset + 1
if max23 <= self._input_size:
chunk23 = self._input[self._offset:max23]
if chunk23 is not None and Grammar.REGEX_16.search(chunk23):
address24 = TreeNode(self._input[self._offset:self._offset + 1], self._offset, [])
self._offset = self._offset + 1
else:
address24 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('[0-9a-zA-Z_]')
self._offset = index25
if address24 is FAILURE:
address24 = TreeNode(self._input[self._offset:self._offset], self._offset, [])
self._offset = self._offset
else:
address24 = FAILURE
if address24 is not FAILURE:
elements11.append(address24)
else:
elements11 = None
self._offset = index24
else:
elements11 = None
self._offset = index24
if elements11 is None:
address0 = FAILURE
else:
address0 = TreeNode(self._input[index24:self._offset], index24, elements11)
self._offset = self._offset
if address0 is FAILURE:
self._offset = index1
index26, elements12 = self._offset, []
address25 = FAILURE
chunk24, max24 = None, self._offset + 7
if max24 <= self._input_size:
chunk24 = self._input[self._offset:max24]
if chunk24 == 'attempt':
address25 = TreeNode(self._input[self._offset:self._offset + 7], self._offset, [])
self._offset = self._offset + 7
else:
address25 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('\'attempt\'')
if address25 is not FAILURE:
elements12.append(address25)
address26 = FAILURE
index27 = self._offset
chunk25, max25 = None, self._offset + 1
if max25 <= self._input_size:
chunk25 = self._input[self._offset:max25]
if chunk25 is not None and Grammar.REGEX_17.search(chunk25):
address26 = TreeNode(self._input[self._offset:self._offset + 1], self._offset, [])
self._offset = self._offset + 1
else:
address26 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
| |
  def getRequirementUseCases(self,reqName):
    ucs = self.responseList('call requirementUseCases(:req)',{'req':reqName},'MySQL error getting redmine use cases');
if len(ucs) == 0:
ucs.append('None')
return ucs
def getRequirementBacklog(self,reqName):
bis = self.responseList('call requirementBacklog(:req)',{'req':reqName},'MySQL error getting backlog items');
if len(bis) == 0:
bis.append('None')
return bis
def environmentRequirements(self,envName):
return self.responseList('call requirementNames(:env)',{'env':envName},'MySQL error getting requirements associated with environment ' + envName)
def deleteTags(self,tagObjt,tagDim):
self.updateDatabase('call deleteTags(:obj,:dim)',{'obj':tagObjt,'dim':tagDim},'MySQL error deleting tags')
def deleteDataFlowTags(self,dfName,fromType,fromName,toType,toName,envName):
self.updateDatabase('call deleteDataFlowTags(:dfName, :fromType, :fromName, :toType, :toName, :envName)',{'dfName' : dfName, 'fromType' : fromType, 'fromName' : fromName, 'toType' : toType, 'toName' : toName, 'envName' : envName},'MySQL error deleting data flow tags')
def addTags(self,dimObjt,dimName,tags):
self.deleteTags(dimObjt,dimName)
curs = self.conn.connection().connection.cursor()
for tag in set(tags):
try:
curs.execute('call addTag(%s,%s,%s)',[dimObjt,tag,dimName])
except OperationalError as e:
        raise DatabaseProxyException('MySQL error adding tag (message: ' + format(e) + ')')
except DatabaseError as e:
raise DatabaseProxyException('MySQL error adding ' + dimName + ' ' + dimObjt + ' tag ' + tag + ': ' + format(e))
curs.close()
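  # A minimal usage sketch for addTags(); the object name and dimension are
  # hypothetical, and any existing tags on the object are replaced first:
  #   db.addTags('Web portal', 'asset', ['confidentiality', 'personal data'])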
def addDataFlowTags(self,dfName,fromType,fromName,toType,toName,envName,tags):
self.deleteDataFlowTags(dfName,fromType,fromName,toType,toName,envName)
curs = self.conn.connection().connection.cursor()
for tag in set(tags):
try:
curs.execute('call addDataFlowTag(%s,%s,%s,%s,%s,%s,%s)',[dfName,fromType,fromName,toType,toName,envName,tag])
except OperationalError as e:
        raise DatabaseProxyException('MySQL error adding dataflow tag (message: ' + format(e) + ')')
except DatabaseError as e:
raise DatabaseProxyException('MySQL error adding dataflow tag ' + tag + ': ' + format(e))
curs.close()
def getTags(self,dimObjt,dimName):
return self.responseList('call getTags(:obj,:name)',{'obj':dimObjt,'name':dimName},'MySQL error getting tags')
def getDataFlowTags(self,dfName,fromType,fromName,toType,toName,envName):
return self.responseList('call getDataFlowTags(:dfName,:fromType,:fromName,:toType,:toName,:envName)',{'dfName':dfName,'fromType':fromType,'fromName':fromName,'toType':toType,'toName':toName,'envName':envName},'MySQL error getting data flow tags')
def deleteTag(self,tagId): self.deleteObject(tagId,'tag')
def componentView(self,cvName):
interfaces = self.responseList('call componentViewInterfaces(:cv)',{'cv':cvName},'MySQL error getting component view interfaces')
connectors = self.componentViewConnectors(cvName)
return (interfaces,connectors)
def componentViewConnectors(self,cvName):
return self.responseList('call componentViewConnectors(:cv)',{'cv':cvName},'MySQL error getting component view connectors')
def addComponentToView(self,cId,cvId): self.updateDatabase('call addComponentToView(:cId,:cvId)',{'cId':cId,'cvId':cvId},'MySQL error adding component to view')
def addComponent(self,parameters,cvId = -1):
componentId = self.newId()
componentName = parameters.name()
componentDesc = parameters.description()
structure = parameters.structure()
requirements = parameters.requirements()
goals = parameters.goals()
assocs = parameters.associations()
session = self.updateDatabase('call addComponent(:id,:name,:desc)',{'id':componentId,'name':componentName,'desc':componentDesc},'MySQL error adding component',None,False)
if cvId != -1:
self.updateDatabase('call addComponentToView(:compId,:cvId)',{'compId':componentId,'cvId':cvId},'MySQL error adding component to view',session,False)
self.commitDatabase(session)
for ifName,ifType,arName,pName in parameters.interfaces():
self.addComponentInterface(componentId,ifName,ifType,arName,pName)
self.addComponentStructure(componentId,structure)
self.addComponentRequirements(componentId,requirements)
self.addComponentGoals(componentId,goals)
self.addComponentAssociations(componentId,assocs)
def updateComponent(self,parameters,cvId = -1):
componentId = parameters.id()
componentName = parameters.name()
componentDesc = parameters.description()
structure = parameters.structure()
requirements = parameters.requirements()
goals = parameters.goals()
assocs = parameters.associations()
session = self.updateDatabase('call deleteComponentComponents(:comp)',{'comp':componentId},'MySQL error deleting component components',None,False)
if (componentId != -1):
self.updateDatabase('call updateComponent(:id,:name,:desc)',{'id':componentId,'name':componentName,'desc':componentDesc},'MySQL error updating component',session)
else:
componentId = self.newId()
self.updateDatabase('call addComponent(:id,:name,:desc)',{'id':componentId,'name':componentName,'desc':componentDesc},'MySQL error adding component',session,False)
if cvId != -1:
self.updateDatabase('call addComponentToView(:compId,:cvId)',{'compId':componentId,'cvId':cvId},'MySQL error adding component to view',session,False)
self.commitDatabase(session)
for ifName,ifType,arName,pName in parameters.interfaces():
self.addComponentInterface(componentId,ifName,ifType,arName,pName)
self.addComponentStructure(componentId,structure)
self.addComponentRequirements(componentId,requirements)
self.addComponentGoals(componentId,goals)
self.addComponentAssociations(componentId,assocs)
def addComponentInterface(self,componentId,ifName,ifType,arName,pName):
self.updateDatabase('call addComponentInterface(:compId,:ifName,:ifType,:arName,:pName)',{'compId':componentId,'ifName':ifName,'ifType':ifType,'arName':arName,'pName':pName},'MySQL error adding component interface')
def addConnector(self,parameters):
connId = self.newId()
cName = parameters.name()
cvName = parameters.view()
fromName = parameters.fromName()
fromRole = parameters.fromRole()
fromIf = parameters.fromInterface()
toName = parameters.toName()
toIf = parameters.toInterface()
toRole = parameters.toRole()
conAsset = parameters.asset()
pName = parameters.protocol()
arName = parameters.accessRight()
self.updateDatabase('call addConnector(:connId,:cvName,:cName,:fName,:fRole,:fIf,:tName,:tIf,:tRole,:conAsset,:pName,:arName)',{'connId':connId,'cvName':cvName,'cName':cName,'fName':fromName,'fRole':fromRole,'fIf':fromIf,'tName':toName,'tIf':toIf,'tRole':toRole,'conAsset':conAsset,'pName':pName,'arName':arName},'MySQL error adding connector')
def getInterfaces(self,dimObjt,dimName):
rows = self.responseList('call getInterfaces(:obj,:name)',{'obj':dimObjt,'name':dimName},'MySQL error getting interfaces')
ifs = []
for ifName,ifTypeId,arName,prName in rows:
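# An ifTypeId of 1 denotes a required interface; any other value is treated as provided.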
ifType = 'provided'
if (ifTypeId == 1): ifType = 'required'
ifs.append((ifName,ifType,arName,prName))
return ifs
def addInterfaces(self,dimObjt,dimName,ifs):
try:
self.deleteInterfaces(dimObjt,dimName)
for ifName,ifType,arName,pName in ifs:
self.addInterface(dimObjt,ifName,ifType,arName,pName,dimName)
except OperationalError as e:
exceptionText = 'MySQL error adding interfaces to ' + dimName + ' ' + dimObjt + ' (message:' + format(e) + ')'
raise DatabaseProxyException(exceptionText)
except DatabaseError as e:
id,msg = e
exceptionText = 'MySQL error adding interfaces to ' + dimName + ' ' + dimObjt + ' (id:' + str(id) + ',message:' + msg + ')'
raise DatabaseProxyException(exceptionText)
def deleteInterfaces(self,ifName,ifDim):
self.updateDatabase('call deleteInterfaces(:name,:dim)',{'name':ifName,'dim':ifDim},'MySQL error deleting interfaces')
def addInterface(self,ifObjt,ifName,ifType,arName,pName,ifDim):
self.updateDatabase('call addInterface(:ifObj,:ifName,:ifType,:arName,:pName,:ifDim)',{'ifObj':ifObjt,'ifName':ifName,'ifType':ifType,'arName':arName,'pName':pName,'ifDim':ifDim},'MySQL error adding interface')
def addComponentStructure(self,componentId,componentStructure):
for headAsset,headAdornment,headNav,headNry,headRole,tailRole,tailNry,tailNav,tailAdornment,tailAsset in componentStructure:
self.addComponentAssetAssociation(componentId,headAsset,headAdornment,headNav,headNry,headRole,tailRole,tailNry,tailNav,tailAdornment,tailAsset)
def addComponentAssetAssociation(self,componentId,headAsset,headAdornment,headNav,headNry,headRole,tailRole,tailNry,tailNav,tailAdornment,tailAsset):
assocId = self.newId()
self.updateDatabase('call addComponentStructure(:aId,:cId,:hAss,:hAd,:hNav,:hNry,:hRole,:tRole,:tNry,:tNav,:tAd,:tAss)',{'aId':assocId,'cId':componentId,'hAss':headAsset,'hAd':headAdornment,'hNav':headNav,'hNry':headNry,'hRole':headRole,'tRole':tailRole,'tNry':tailNry,'tNav':tailNav,'tAd':tailAdornment,'tAss':tailAsset},'MySQL error adding component asset association')
def componentStructure(self,componentId):
return self.responseList('call getComponentStructure(:comp)',{'comp':componentId},'MySQL error getting structure for component')
def addComponentRequirements(self,componentId,componentRequirements):
for idx,reqName in enumerate(componentRequirements):
self.addComponentRequirement(idx+1,componentId,reqName)
def addComponentRequirement(self,reqLabel,componentId,reqName): self.updateDatabase('call addComponentRequirement(:reqLbl,:comp,:req)',{'reqLbl':reqLabel,'comp':componentId,'req':reqName},'MySQL error adding component requirement')
def getComponentViews(self,constraintId = -1):
cvRows = self.responseList('call getComponentView(:cons)',{'cons':constraintId},'MySQL error getting component view')
cvs = {}
for cvId,cvName,cvSyn in cvRows:
viewComponents = self.componentViewComponents(cvId)
components = []
for componentId,componentName,componentDesc in viewComponents:
componentInterfaces = self.componentInterfaces(componentId)
componentStructure = self.componentStructure(componentId)
componentReqs = self.componentRequirements(componentId)
componentGoals = self.componentGoals(componentId)
goalAssocs = self.componentGoalAssociations(componentId)
comParameters = ComponentParameters(componentName,componentDesc,componentInterfaces,componentStructure,componentReqs,componentGoals,goalAssocs)
comParameters.setId(componentId)
components.append(comParameters)
connectors = self.componentViewConnectors(cvName)
asm = self.attackSurfaceMetric(cvName)
parameters = ComponentViewParameters(cvName,cvSyn,[],[],[],[],[],components,connectors,asm)
cv = ObjectFactory.build(cvId,parameters)
cvs[cvName] = cv
return cvs
def componentRequirements(self,componentId):
return self.responseList('call getComponentRequirements(:comp)',{'comp':componentId},'MySQL error getting component requirements')
def componentInterfaces(self,componentId):
rows = self.responseList('call componentInterfaces(:comp)',{'comp':componentId},'MySQL error getting component interfaces')
ifs = []
for compName,ifName,ifTypeId,arName,prName in rows:
ifType = 'provided'
if (ifTypeId == 1):
ifType = 'required'
ifs.append((ifName,ifType,arName,prName))
return ifs
def addComponentView(self,parameters):
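# Adds the view record, then creates any referenced value types, roles, template
# assets, template requirements, and template goals that do not already exist,
# before adding (or merging) components and finally their connectors.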
parameters.validate();
cvId = self.newId()
cvName = parameters.name()
cvSyn = parameters.synopsis()
cvValueTypes = parameters.metricTypes()
cvRoles = parameters.roles()
cvAssets = parameters.assets()
cvReqs = parameters.requirements()
cvGoals = parameters.goals()
cvComs = parameters.components()
cvCons = parameters.connectors()
self.updateDatabase('call addComponentView(:id,:name,:syn)',{'id':cvId,'name':cvName,'syn':cvSyn},'MySQL error adding component view')
for vtParameters in cvValueTypes:
vtId = self.existingObject(vtParameters.name(),vtParameters.type())
if vtId == -1:
self.addValueType(vtParameters)
for rParameters in cvRoles:
rId = self.existingObject(rParameters.name(),'role')
if rId == -1:
self.addRole(rParameters)
for taParameters in cvAssets:
taId = self.existingObject(taParameters.name(),'template_asset')
if taId == -1:
self.addTemplateAsset(taParameters)
for trParameters in cvReqs:
trId = self.existingObject(trParameters.name(),'template_requirement')
if trId == -1:
self.addTemplateRequirement(trParameters)
for tgParameters in cvGoals:
tgId = self.existingObject(tgParameters.name(),'template_goal')
if tgId == -1:
self.addTemplateGoal(tgParameters)
for comParameters in cvComs:
cId = self.existingObject(comParameters.name(),'component')
if cId == -1:
self.addComponent(comParameters,cvId)
else:
comParameters.setId(cId)
self.addComponentToView(cId,cvId)
self.mergeComponent(comParameters)
for conParameters in cvCons:
self.addConnector(conParameters)
return cvId
def updateComponentView(self,parameters):
parameters.validate();
cvId = parameters.id()
cvName = parameters.name()
cvSyn = parameters.synopsis()
cvAssets = parameters.assets()
cvReqs = parameters.requirements()
cvComs = parameters.components()
cvCons = parameters.connectors()
session = self.updateDatabase('call deleteComponentViewComponents(:id)',{'id':cvId},'MySQL error deleting component view components',None,False)
self.updateDatabase('call updateComponentView(:id,:name,:syn)',{'id':cvId,'name':cvName,'syn':cvSyn},'MySQL error updating component view',session)
for taParameters in cvAssets:
self.updateTemplateAsset(taParameters)
for trParameters in cvReqs: self.updateTemplateRequirement(trParameters)
for comParameters in cvComs:
self.addComponent(comParameters,cvId)
for conParameters in cvCons:
self.addConnector(conParameters)
return cvId
def deleteComponentView(self,cvId):
self.deleteObject(cvId,'component_view')
def componentViewComponents(self,cvId):
return self.responseList('call getComponents(:id)',{'id':cvId},'MySQL error getting components')
def componentViewWeaknesses(self,cvName,envName):
rows = self.responseList('call componentViewWeaknesses(:cv,:env)',{'cv':cvName,'env':envName},'MySQL error getting component view weaknesses')
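# Partition weakness targets into threat-keyed and vulnerability-keyed dictionaries.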
thrDict = {}
vulDict = {}
for cName,taName,aName,targetName,targetType in rows:
t = None
if targetType == 'threat':
if targetName not in thrDict: t = WeaknessTarget(targetName)
else: t = thrDict[targetName]
t.addTemplateAsset(taName)
t.addAsset(aName)
t.addComponent(cName)
thrDict[targetName] = t
else:
if targetName not in vulDict:
t = WeaknessTarget(targetName)
else:
t = vulDict[targetName]
t.addTemplateAsset(taName)
t.addAsset(aName)
t.addComponent(cName)
vulDict[targetName] = t
return (thrDict,vulDict)
def componentAssets(self,cvName,reqName = ''):
return self.responseList('call componentAssets(:cv,:req)',{'cv':cvName,'req':reqName},'MySQL error getting component assets')
def componentGoalAssets(self,cvName,goalName = ''): return self.responseList('call componentGoalAssets(:cv,:goal)',{'cv':cvName,'goal':goalName},'MySQL error getting component goal assets')
def existingObject(self,objtName,dimName):
argDict = {'objt':objtName,'dim':dimName}
callTxt = 'call existing_object(:objt,:dim)'
if (dimName == 'persona_characteristic' or dimName == 'task_characteristic'):
callTxt = 'call existing_characteristic(:objt,:dim)'
return self.responseList(callTxt,argDict,'MySQL error checking existence of object')[0]
def situateComponentView(self,cvName,envName,acDict,assetParametersList,targets,obstructParameters):
for assetParameters in assetParametersList:
assetName = assetParameters.name()
assetId = self.existingObject(assetName,'asset')
if assetId == -1:
assetId = self.addAsset(assetParameters)
for cName in acDict[assetName]:
self.situateComponentAsset(cName,assetId)
self.situateComponentViewRequirements(cvName)
self.situateComponentViewGoals(cvName,envName)
self.situateComponentViewGoalAssociations(cvName,envName)
for target in targets: self.addComponentViewTargets(target,envName)
for op in obstructParameters: self.addGoalAssociation(op)
def situateComponentAsset(self,componentName,assetId):
self.updateDatabase('call situateComponentAsset(:ass,:comp)',{'ass':assetId,'comp':componentName},'MySQL error situating component asset')
def addComponentViewTargets(self,target,envName):
session = self.conn()
for componentName in target.components():
self.updateDatabase('call addComponentTarget(:comp,:asset,:name,:effectiveness,:rationale,:env)',{'comp':componentName,'asset':target.asset(),'name':target.name(),'effectiveness':target.effectiveness(),'rationale':target.rationale(),'env':envName},'MySQL error adding component target',session,False)
self.commitDatabase(session)
def assetComponents(self,assetName,envName):
return self.responseList('call assetComponents(:ass,:env)',{'ass':assetName,'env':envName},'MySQL error getting asset components')
def addTemplateRequirement(self,parameters):
reqId = self.newId()
reqName = parameters.name()
reqAsset = parameters.asset()
reqType = parameters.type()
reqDesc = parameters.description()
reqRat = parameters.rationale()
reqFC = parameters.fitCriterion()
self.updateDatabase('call addTemplateRequirement(:id,:name,:asset,:type,:desc,:rat,:fc)',{'id':reqId,'name':reqName,'asset':reqAsset,'type':reqType,'desc':reqDesc,'rat':reqRat,'fc':reqFC},'MySQL error adding template requirement')
return reqId
def updateTemplateRequirement(self,parameters):
reqId = parameters.id()
reqName = parameters.name()
reqAsset = parameters.asset()
reqType = parameters.type()
reqDesc = parameters.description()
reqRat = parameters.rationale()
reqFC = parameters.fitCriterion()
self.updateDatabase('call updateTemplateRequirement(:id,:name,:asset,:type,:desc,:rat,:fc)',{'id':reqId,'name':reqName,'asset':reqAsset,'type':reqType,'desc':reqDesc,'rat':reqRat,'fc':reqFC},'MySQL error updating template requirement')
def getTemplateRequirements(self,constraintId = -1):
rows = self.responseList('call getTemplateRequirements(:const)',{'const':constraintId},'MySQL error getting template requirements')
templateReqs = {}
for reqId,reqName,assetName,reqType,reqDesc,reqRat,reqFC in rows:
parameters = TemplateRequirementParameters(reqName,assetName,reqType,reqDesc,reqRat,reqFC)
templateReq = ObjectFactory.build(reqId,parameters)
templateReqs[reqName] = templateReq
return templateReqs
def deleteTemplateRequirement(self,reqId):
self.deleteObject(reqId,'template_requirement')
def componentViewRequirements(self,cvName): return self.responseList('call componentViewRequirements(:cv)',{'cv':cvName},'MySQL error getting component view requirements')
def componentViewGoals(self,cvName): return self.responseList('call componentViewGoals(:cv)',{'cv':cvName},'MySQL error getting component view goals')
def situateComponentViewRequirements(self,cvName):
self.updateDatabase('call situateComponentViewRequirements(:cv)',{'cv':cvName},'MySQL error situating component view requirements')
def getComponents(self,constraintId = -1):
componentRows = self.responseList('call getAllComponents(:const)',{'const':constraintId},'MySQL error getting components')
components = {}
for componentId,componentName,componentDesc in componentRows:
componentInterfaces = self.componentInterfaces(componentId)
componentStructure = self.componentStructure(componentId)
componentReqs = self.componentRequirements(componentId)
componentGoals = self.componentGoals(componentId)
assocs = self.componentGoalAssociations(componentId)
comParameters = ComponentParameters(componentName,componentDesc,componentInterfaces,componentStructure,componentReqs,componentGoals,assocs)
comParameters.setId(componentId)
component = ObjectFactory.build(componentId,comParameters)
components[componentName] = component
return components
def personasImpact(self,cvName,envName):
rows = self.responseList('call personasImpact(:cv,:env)',{'cv':cvName,'env':envName},'MySQL error getting personas impact')
pImpact = []
for c1,c2 in rows: pImpact.append((c1,str(c2)))
return pImpact
def taskUseCases(self,taskName): return self.responseList('call taskUseCases(:task)',{'task':taskName},'MySQL error getting task use cases')
def usecaseComponents(self,ucName): return self.responseList('call usecaseComponents(:useCase)',{'useCase':ucName},'MySQL error getting use case components')
def attackSurfaceMetric(self,cvName):
return self.responseList('call attackSurfaceMetric(:cv)',{'cv':cvName},'MySQL error getting attack surface metric')
be configured.
Only packets addressed to these ports will be forwarded to the backends
configured with this forwarding rule.
Only one of ports, portRange, or allPorts may be specified;
the three are mutually exclusive.
You may specify a maximum of up to 5 ports, which can be non-contiguous.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
:param pulumi.Input[str] region: A reference to the region where the regional forwarding rule resides.
This field is not applicable to global forwarding rules.
:param pulumi.Input[str] service_label: An optional prefix to the service name for this Forwarding Rule.
If specified, will be the first label of the fully qualified service
name.
The label must be 1-63 characters long, and comply with RFC1035.
Specifically, the label must be 1-63 characters long and match the
regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first
character must be a lowercase letter, and all following characters
must be a dash, lowercase letter, or digit, except the last
character, which cannot be a dash.
This field is only used for INTERNAL load balancing.
:param pulumi.Input[str] subnetwork: The subnetwork that the load balanced IP should belong to for this
Forwarding Rule. This field is only used for INTERNAL load balancing.
If the network specified is in auto subnet mode, this field is
optional. However, if the network is in custom subnet mode, a
subnetwork must be specified.
:param pulumi.Input[str] target: The URL of the target resource to receive the matched traffic.
The target must live in the same region as the forwarding rule.
The forwarded traffic must be of a type appropriate to the target
object.
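A minimal illustrative sketch tying these parameters together (the resource
names below are hypothetical, not part of the provider API):
```python
import pulumi_gcp as gcp

# Hypothetical target pool; TCP traffic on port 80 is forwarded to it.
example_pool = gcp.compute.TargetPool("examplePool")
example_rule = gcp.compute.ForwardingRule("exampleRule",
    target=example_pool.id,
    ip_protocol="TCP",
    port_range="80")
```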
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Optional[ForwardingRuleArgs] = None,
opts: Optional[pulumi.ResourceOptions] = None):
"""
A ForwardingRule resource specifies which pool
of target virtual machines to forward a packet to if it matches the given
[IPAddress, IPProtocol, portRange] tuple.
To get more information about ForwardingRule, see:
* [API documentation](https://cloud.google.com/compute/docs/reference/v1/forwardingRules)
* How-to Guides
* [Official Documentation](https://cloud.google.com/compute/docs/load-balancing/network/forwarding-rules)
## Example Usage
### Internal Http Lb With Mig Backend
```python
import pulumi
import pulumi_gcp as gcp
# Internal HTTP load balancer with a managed instance group backend
# VPC network
ilb_network = gcp.compute.Network("ilbNetwork", auto_create_subnetworks=False,
opts=pulumi.ResourceOptions(provider=google_beta))
# proxy-only subnet
proxy_subnet = gcp.compute.Subnetwork("proxySubnet",
ip_cidr_range="10.0.0.0/24",
region="europe-west1",
purpose="INTERNAL_HTTPS_LOAD_BALANCER",
role="ACTIVE",
network=ilb_network.id,
opts=pulumi.ResourceOptions(provider=google_beta))
# backend subnet
ilb_subnet = gcp.compute.Subnetwork("ilbSubnet",
ip_cidr_range="10.0.1.0/24",
region="europe-west1",
network=ilb_network.id,
opts=pulumi.ResourceOptions(provider=google_beta))
# health check
default_region_health_check = gcp.compute.RegionHealthCheck("defaultRegionHealthCheck",
region="europe-west1",
http_health_check=gcp.compute.RegionHealthCheckHttpHealthCheckArgs(
port_specification="USE_SERVING_PORT",
),
opts=pulumi.ResourceOptions(provider=google_beta))
# instance template
instance_template = gcp.compute.InstanceTemplate("instanceTemplate",
machine_type="e2-small",
tags=["http-server"],
network_interfaces=[gcp.compute.InstanceTemplateNetworkInterfaceArgs(
network=ilb_network.id,
subnetwork=ilb_subnet.id,
access_configs=[gcp.compute.InstanceTemplateNetworkInterfaceAccessConfigArgs()],
)],
disks=[gcp.compute.InstanceTemplateDiskArgs(
source_image="debian-cloud/debian-10",
auto_delete=True,
boot=True,
)],
metadata={
"startup-script": \"\"\"#! /bin/bash
set -euo pipefail
export DEBIAN_FRONTEND=noninteractive
apt-get update
apt-get install -y nginx-light jq
NAME=$(curl -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/hostname")
IP=$(curl -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ip")
METADATA=$(curl -f -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/attributes/?recursive=True" | jq 'del(.["startup-script"])')
cat <<EOF > /var/www/html/index.html
<pre>
Name: $NAME
IP: $IP
Metadata: $METADATA
</pre>
EOF
\"\"\",
},
opts=pulumi.ResourceOptions(provider=google_beta))
# MIG
mig = gcp.compute.RegionInstanceGroupManager("mig",
region="europe-west1",
versions=[gcp.compute.RegionInstanceGroupManagerVersionArgs(
instance_template=instance_template.id,
name="primary",
)],
base_instance_name="vm",
target_size=2,
opts=pulumi.ResourceOptions(provider=google_beta))
# backend service
default_region_backend_service = gcp.compute.RegionBackendService("defaultRegionBackendService",
region="europe-west1",
protocol="HTTP",
load_balancing_scheme="INTERNAL_MANAGED",
timeout_sec=10,
health_checks=[default_region_health_check.id],
backends=[gcp.compute.RegionBackendServiceBackendArgs(
group=mig.instance_group,
balancing_mode="UTILIZATION",
capacity_scaler=1,
)],
opts=pulumi.ResourceOptions(provider=google_beta))
# URL map
default_region_url_map = gcp.compute.RegionUrlMap("defaultRegionUrlMap",
region="europe-west1",
default_service=default_region_backend_service.id,
opts=pulumi.ResourceOptions(provider=google_beta))
# HTTP target proxy
default_region_target_http_proxy = gcp.compute.RegionTargetHttpProxy("defaultRegionTargetHttpProxy",
region="europe-west1",
url_map=default_region_url_map.id,
opts=pulumi.ResourceOptions(provider=google_beta))
# forwarding rule
google_compute_forwarding_rule = gcp.compute.ForwardingRule("googleComputeForwardingRule",
region="europe-west1",
ip_protocol="TCP",
load_balancing_scheme="INTERNAL_MANAGED",
port_range="80",
target=default_region_target_http_proxy.id,
network=ilb_network.id,
subnetwork=ilb_subnet.id,
network_tier="PREMIUM",
opts=pulumi.ResourceOptions(provider=google_beta,
depends_on=[proxy_subnet]))
# allow all access from IAP and health check ranges
fw_iap = gcp.compute.Firewall("fw-iap",
direction="INGRESS",
network=ilb_network.id,
source_ranges=[
"172.16.58.3/22",
"172.16.31.10/16",
"192.168.3.11/20",
],
allows=[gcp.compute.FirewallAllowArgs(
protocol="tcp",
)],
opts=pulumi.ResourceOptions(provider=google_beta))
# allow http from proxy subnet to backends
fw_ilb_to_backends = gcp.compute.Firewall("fw-ilb-to-backends",
direction="INGRESS",
network=ilb_network.id,
source_ranges=["10.0.0.0/24"],
target_tags=["http-server"],
allows=[gcp.compute.FirewallAllowArgs(
protocol="tcp",
ports=[
"80",
"443",
"8080",
],
)],
opts=pulumi.ResourceOptions(provider=google_beta))
# test instance
vm_test = gcp.compute.Instance("vm-test",
zone="europe-west1-b",
machine_type="e2-small",
network_interfaces=[gcp.compute.InstanceNetworkInterfaceArgs(
network=ilb_network.id,
subnetwork=ilb_subnet.id,
)],
boot_disk=gcp.compute.InstanceBootDiskArgs(
initialize_params=gcp.compute.InstanceBootDiskInitializeParamsArgs(
image="debian-cloud/debian-10",
),
),
opts=pulumi.ResourceOptions(provider=google_beta))
```
### Internal Tcp Udp Lb With Mig Backend
```python
import pulumi
import pulumi_gcp as gcp
# Internal TCP/UDP load balancer with a managed instance group backend
# VPC
ilb_network = gcp.compute.Network("ilbNetwork", auto_create_subnetworks=False,
opts=pulumi.ResourceOptions(provider=google_beta))
# backed subnet
ilb_subnet = gcp.compute.Subnetwork("ilbSubnet",
ip_cidr_range="10.0.1.0/24",
region="europe-west1",
network=ilb_network.id,
opts=pulumi.ResourceOptions(provider=google_beta))
# health check
default_region_health_check = gcp.compute.RegionHealthCheck("defaultRegionHealthCheck",
region="europe-west1",
http_health_check=gcp.compute.RegionHealthCheckHttpHealthCheckArgs(
port=80,
),
opts=pulumi.ResourceOptions(provider=google_beta))
# instance template
instance_template = gcp.compute.InstanceTemplate("instanceTemplate",
machine_type="e2-small",
tags=[
"allow-ssh",
"allow-health-check",
],
network_interfaces=[gcp.compute.InstanceTemplateNetworkInterfaceArgs(
network=ilb_network.id,
subnetwork=ilb_subnet.id,
access_configs=[gcp.compute.InstanceTemplateNetworkInterfaceAccessConfigArgs()],
)],
disks=[gcp.compute.InstanceTemplateDiskArgs(
source_image="debian-cloud/debian-10",
auto_delete=True,
boot=True,
)],
metadata={
"startup-script": \"\"\"#! /bin/bash
set -euo pipefail
export DEBIAN_FRONTEND=noninteractive
apt-get update
apt-get install -y nginx-light jq
NAME=$(curl -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/hostname")
IP=$(curl -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ip")
METADATA=$(curl -f -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/attributes/?recursive=True" | jq 'del(.["startup-script"])')
cat <<EOF > /var/www/html/index.html
<pre>
Name: $NAME
IP: $IP
Metadata: $METADATA
</pre>
EOF
\"\"\",
},
opts=pulumi.ResourceOptions(provider=google_beta))
# MIG
mig = gcp.compute.RegionInstanceGroupManager("mig",
region="europe-west1",
versions=[gcp.compute.RegionInstanceGroupManagerVersionArgs(
instance_template=instance_template.id,
name="primary",
)],
base_instance_name="vm",
target_size=2,
opts=pulumi.ResourceOptions(provider=google_beta))
# backend service
default_region_backend_service = gcp.compute.RegionBackendService("defaultRegionBackendService",
region="europe-west1",
protocol="TCP",
load_balancing_scheme="INTERNAL",
health_checks=[default_region_health_check.id],
backends=[gcp.compute.RegionBackendServiceBackendArgs(
group=mig.instance_group,
balancing_mode="CONNECTION",
)],
opts=pulumi.ResourceOptions(provider=google_beta))
# forwarding rule
google_compute_forwarding_rule = gcp.compute.ForwardingRule("googleComputeForwardingRule",
backend_service=default_region_backend_service.id,
region="europe-west1",
ip_protocol="TCP",
load_balancing_scheme="INTERNAL",
all_ports=True,
allow_global_access=True,
network=ilb_network.id,
subnetwork=ilb_subnet.id,
opts=pulumi.ResourceOptions(provider=google_beta))
# allow all access from health check ranges
fw_hc = gcp.compute.Firewall("fwHc",
direction="INGRESS",
network=ilb_network.id,
source_ranges=[
"172.16.58.3/22",
"172.16.31.10/16",
"192.168.3.11/20",
],
allows=[gcp.compute.FirewallAllowArgs(
protocol="tcp",
)],
source_tags=["allow-health-check"],
opts=pulumi.ResourceOptions(provider=google_beta))
# allow communication within the subnet
fw_ilb_to_backends = gcp.compute.Firewall("fwIlbToBackends",
direction="INGRESS",
network=ilb_network.id,
source_ranges=["10.0.1.0/24"],
allows=[
gcp.compute.FirewallAllowArgs(
protocol="tcp",
),
gcp.compute.FirewallAllowArgs(
protocol="udp",
),
gcp.compute.FirewallAllowArgs(
protocol="icmp",
),
],
opts=pulumi.ResourceOptions(provider=google_beta))
# allow SSH
fw_ilb_ssh = gcp.compute.Firewall("fwIlbSsh",
direction="INGRESS",
network=ilb_network.id,
allows=[gcp.compute.FirewallAllowArgs(
protocol="tcp",
ports=["22"],
)],
source_tags=["allow-ssh"],
opts=pulumi.ResourceOptions(provider=google_beta))
# test instance
vm_test = gcp.compute.Instance("vmTest",
zone="europe-west1-b",
machine_type="e2-small",
network_interfaces=[gcp.compute.InstanceNetworkInterfaceArgs(
network=ilb_network.id,
subnetwork=ilb_subnet.id,
)],
boot_disk=gcp.compute.InstanceBootDiskArgs(
initialize_params=gcp.compute.InstanceBootDiskInitializeParamsArgs(
image="debian-cloud/debian-10",
),
),
opts=pulumi.ResourceOptions(provider=google_beta))
```
### Forwarding Rule Externallb
```python
import pulumi
import pulumi_gcp as gcp
hc = gcp.compute.RegionHealthCheck("hc",
check_interval_sec=1,
timeout_sec=1,
region="us-central1",
tcp_health_check=gcp.compute.RegionHealthCheckTcpHealthCheckArgs(
port=80,
),
opts=pulumi.ResourceOptions(provider=google_beta))
backend = gcp.compute.RegionBackendService("backend",
region="us-central1",
load_balancing_scheme="EXTERNAL",
health_checks=[hc.id],
opts=pulumi.ResourceOptions(provider=google_beta))
# Forwarding rule for External Network Load Balancing using Backend Services
default = gcp.compute.ForwardingRule("default",
region="us-central1",
port_range="80",
backend_service=backend.id,
opts=pulumi.ResourceOptions(provider=google_beta))
```
### Forwarding Rule Global Internallb
```python
import pulumi
import pulumi_gcp as gcp
hc = gcp.compute.HealthCheck("hc",
check_interval_sec=1,
timeout_sec=1,
tcp_health_check=gcp.compute.HealthCheckTcpHealthCheckArgs(
port=80,
))
backend = gcp.compute.RegionBackendService("backend",
region="us-central1",
health_checks=[hc.id])
default_network = gcp.compute.Network("defaultNetwork", auto_create_subnetworks=False)
default_subnetwork = gcp.compute.Subnetwork("defaultSubnetwork",
ip_cidr_range="10.0.0.0/16",
region="us-central1",
network=default_network.id)
# Forwarding rule for Internal Load Balancing
default_forwarding_rule = gcp.compute.ForwardingRule("defaultForwardingRule",
region="us-central1",
load_balancing_scheme="INTERNAL",
backend_service=backend.id,
all_ports=True,
allow_global_access=True,
network=default_network.name,
subnetwork=default_subnetwork.name)
```
### Forwarding Rule Basic
```python
import pulumi
import pulumi_gcp as gcp
default_target_pool = gcp.compute.TargetPool("defaultTargetPool")
default_forwarding_rule = gcp.compute.ForwardingRule("defaultForwardingRule",
target=default_target_pool.id,
port_range="80")
```
### Forwarding Rule L3 Default
```python
import pulumi
import pulumi_gcp as gcp
health_check = gcp.compute.RegionHealthCheck("healthCheck",
region="us-central1",
tcp_health_check=gcp.compute.RegionHealthCheckTcpHealthCheckArgs(
port=80,
),
opts=pulumi.ResourceOptions(provider=google_beta))
service = gcp.compute.RegionBackendService("service",
region="us-central1",
health_checks=[health_check.id],
protocol="UNSPECIFIED",
load_balancing_scheme="EXTERNAL",
opts=pulumi.ResourceOptions(provider=google_beta))
fwd_rule = gcp.compute.ForwardingRule("fwdRule",
backend_service=service.id,
ip_protocol="L3_DEFAULT",
all_ports=True,
opts=pulumi.ResourceOptions(provider=google_beta))
```
### Forwarding Rule Internallb
```python
import pulumi
import pulumi_gcp as gcp
hc = gcp.compute.HealthCheck("hc",
check_interval_sec=1,
timeout_sec=1,
tcp_health_check=gcp.compute.HealthCheckTcpHealthCheckArgs(
port=80,
))
backend = gcp.compute.RegionBackendService("backend",
region="us-central1",
health_checks=[hc.id])
default_network = gcp.compute.Network("defaultNetwork", auto_create_subnetworks=False)
default_subnetwork = gcp.compute.Subnetwork("defaultSubnetwork",
ip_cidr_range="10.0.0.0/16",
region="us-central1",
network=default_network.id)
# Forwarding rule for Internal Load Balancing
default_forwarding_rule = gcp.compute.ForwardingRule("defaultForwardingRule",
region="us-central1",
load_balancing_scheme="INTERNAL",
backend_service=backend.id,
all_ports=True,
network=default_network.name,
subnetwork=default_subnetwork.name)
```
### Forwarding Rule Http Lb
```python
import pulumi
import pulumi_gcp as gcp
debian_image = gcp.compute.get_image(family="debian-9",
project="debian-cloud")
default_network = gcp.compute.Network("defaultNetwork",
auto_create_subnetworks=False,
routing_mode="REGIONAL",
opts=pulumi.ResourceOptions(provider=google_beta))
default_subnetwork = gcp.compute.Subnetwork("defaultSubnetwork",
ip_cidr_range="10.1.2.0/24",
region="us-central1",
network=default_network.id,
opts=pulumi.ResourceOptions(provider=google_beta))
instance_template = gcp.compute.InstanceTemplate("instanceTemplate",
machine_type="e2-medium",
network_interfaces=[gcp.compute.InstanceTemplateNetworkInterfaceArgs(
network=default_network.id,
subnetwork=default_subnetwork.id,
)],
disks=[gcp.compute.InstanceTemplateDiskArgs(
source_image=debian_image.self_link,
auto_delete=True,
boot=True,
)],
tags=[
"allow-ssh",
"load-balanced-backend",
],
opts=pulumi.ResourceOptions(provider=google_beta))
rigm = gcp.compute.RegionInstanceGroupManager("rigm",
region="us-central1",
versions=[gcp.compute.RegionInstanceGroupManagerVersionArgs(
instance_template=instance_template.id,
name="primary",
)],
base_instance_name="internal-glb",
target_size=1,
opts=pulumi.ResourceOptions(provider=google_beta))
fw1 = gcp.compute.Firewall("fw1",
network=default_network.id,
source_ranges=["10.1.2.0/24"],
allows=[
gcp.compute.FirewallAllowArgs(
protocol="tcp",
),
gcp.compute.FirewallAllowArgs(
protocol="udp",
),
gcp.compute.FirewallAllowArgs(
protocol="icmp",
),
],
direction="INGRESS",
opts=pulumi.ResourceOptions(provider=google_beta))
fw2 = gcp.compute.Firewall("fw2",
network=default_network.id,
source_ranges=["0.0.0.0/0"],
allows=[gcp.compute.FirewallAllowArgs(
protocol="tcp",
ports=["22"],
)],
target_tags=["allow-ssh"],
direction="INGRESS",
opts=pulumi.ResourceOptions(provider=google_beta,
depends_on=[fw1]))
fw3 = gcp.compute.Firewall("fw3",
network=default_network.id,
source_ranges=[
"172.16.58.3/22",
"172.16.31.10/16",
],
allows=[gcp.compute.FirewallAllowArgs(
protocol="tcp",
)],
target_tags=["load-balanced-backend"],
direction="INGRESS",
opts=pulumi.ResourceOptions(provider=google_beta,
depends_on=[fw2]))
fw4 = gcp.compute.Firewall("fw4",
network=default_network.id,
source_ranges=["10.129.0.0/26"],
target_tags=["load-balanced-backend"],
allows=[
gcp.compute.FirewallAllowArgs(
protocol="tcp",
ports=["80"],
),
gcp.compute.FirewallAllowArgs(
protocol="tcp",
ports=["443"],
),
gcp.compute.FirewallAllowArgs(
protocol="tcp",
ports=["8000"],
),
],
direction="INGRESS",
opts=pulumi.ResourceOptions(provider=google_beta,
depends_on=[fw3]))
default_region_health_check = gcp.compute.RegionHealthCheck("defaultRegionHealthCheck",
region="us-central1",
http_health_check=gcp.compute.RegionHealthCheckHttpHealthCheckArgs(
port_specification="USE_SERVING_PORT",
),
opts=pulumi.ResourceOptions(provider=google_beta,
depends_on=[fw4]))
default_region_backend_service = gcp.compute.RegionBackendService("defaultRegionBackendService",
load_balancing_scheme="INTERNAL_MANAGED",
backends=[gcp.compute.RegionBackendServiceBackendArgs(
group=rigm.instance_group,
balancing_mode="UTILIZATION",
capacity_scaler=1,
)],
region="us-central1",
protocol="HTTP",
timeout_sec=10,
health_checks=[default_region_health_check.id],
opts=pulumi.ResourceOptions(provider=google_beta))
default_region_url_map = gcp.compute.RegionUrlMap("defaultRegionUrlMap",
region="us-central1",
default_service=default_region_backend_service.id,
opts=pulumi.ResourceOptions(provider=google_beta))
default_region_target_http_proxy = gcp.compute.RegionTargetHttpProxy("defaultRegionTargetHttpProxy",
region="us-central1",
url_map=default_region_url_map.id,
opts=pulumi.ResourceOptions(provider=google_beta))
proxy = gcp.compute.Subnetwork("proxy",
ip_cidr_range="10.129.0.0/26",
region="us-central1",
network=default_network.id,
purpose="INTERNAL_HTTPS_LOAD_BALANCER",
role="ACTIVE",
opts=pulumi.ResourceOptions(provider=google_beta))
# Forwarding rule for Internal Load Balancing
default_forwarding_rule = gcp.compute.ForwardingRule("defaultForwardingRule",
region="us-central1",
ip_protocol="TCP",
load_balancing_scheme="INTERNAL_MANAGED",
port_range="80",
target=default_region_target_http_proxy.id,
network=default_network.id,
subnetwork=default_subnetwork.id,
network_tier="PREMIUM",
opts=pulumi.ResourceOptions(provider=google_beta,
depends_on=[proxy]))
```
### Forwarding Rule Regional Http Xlb
```python
import pulumi
import pulumi_gcp as gcp
debian_image = gcp.compute.get_image(family="debian-9",
project="debian-cloud")
default_network = gcp.compute.Network("defaultNetwork",
auto_create_subnetworks=False,
routing_mode="REGIONAL",
opts=pulumi.ResourceOptions(provider=google_beta))
| |
###############################################################################
# Name: ed_pages.py #
# Purpose: The main editor notebook #
# Author: <NAME> <<EMAIL>> #
# Copyright: (c) 2008 <NAME> <<EMAIL>> #
# License: wxWindows License #
###############################################################################
"""
This class implements Editra's main notebook control.
@summary: Editra's main notebook class
"""
__author__ = "<NAME> <<EMAIL>>"
__svnid__ = "$Id: ed_pages.py 60523 2009-05-05 18:49:31Z CJP $"
__revision__ = "$Revision: 60523 $"
#--------------------------------------------------------------------------#
# Dependencies
import os
import glob
import cPickle
import wx
# Editra Libraries
import ed_glob
from profiler import Profile_Get
import ed_editv
import syntax.synglob as synglob
import syntax.syntax as syntax
import ed_search
import util
import ed_msg
import ed_txt
import ed_mdlg
import ebmlib
import eclib
from extern import flatnotebook as FNB
#--------------------------------------------------------------------------#
# Globals
_ = wx.GetTranslation
#--------------------------------------------------------------------------#
class EdPages(FNB.FlatNotebook):
"""Editras editor buffer botebook
@todo: allow for tab styles to be configurable (maybe)
"""
def __init__(self, parent, id_num):
"""Initialize a notebook with a blank text control in it
@param parent: parent window of the notebook
@param id_num: this notebook's id
"""
FNB.FlatNotebook.__init__(self, parent, id_num,
style=FNB.FNB_FF2 |
FNB.FNB_X_ON_TAB |
FNB.FNB_SMART_TABS |
FNB.FNB_DROPDOWN_TABS_LIST |
FNB.FNB_ALLOW_FOREIGN_DND
)
# Notebook attributes
self.LOG = wx.GetApp().GetLog()
self.DocMgr = ed_editv.EdEditorView.DOCMGR
self._searchctrl = ed_search.SearchController(self, self.GetCurrentCtrl)
self._searchctrl.SetLookinChoices(Profile_Get('SEARCH_LOC',
default=list()))
self._searchctrl.SetFileFilters(Profile_Get('SEARCH_FILTER', default=''))
self.pg_num = -1 # Track new pages (aka untitled docs)
self.control = None
self.frame = self.GetTopLevelParent() # MainWindow
self._index = dict() # image list index
self._ses_load = False
self._menu = None
# Set Additional Style Parameters
self.SetNonActiveTabTextColour(wx.Colour(102, 102, 102))
ed_icon = ed_glob.CONFIG['SYSPIX_DIR'] + u"editra.png"
self.SetNavigatorIcon(wx.Bitmap(ed_icon, wx.BITMAP_TYPE_PNG))
# Setup the ImageList and the default image
imgl = wx.ImageList(16, 16)
txtbmp = wx.ArtProvider.GetBitmap(str(synglob.ID_LANG_TXT), wx.ART_MENU)
self._index[synglob.ID_LANG_TXT] = imgl.Add(txtbmp)
robmp = wx.ArtProvider.GetBitmap(str(ed_glob.ID_READONLY), wx.ART_MENU)
self._index[ed_glob.ID_READONLY] = imgl.Add(robmp)
self.SetImageList(imgl)
# Notebook Events
self.Bind(FNB.EVT_FLATNOTEBOOK_PAGE_CHANGING, self.OnPageChanging)
self.Bind(FNB.EVT_FLATNOTEBOOK_PAGE_CHANGED, self.OnPageChanged)
self.Bind(FNB.EVT_FLATNOTEBOOK_PAGE_CLOSING, self.OnPageClosing)
self.Bind(FNB.EVT_FLATNOTEBOOK_PAGE_CLOSED, self.OnPageClosed)
self.Bind(wx.stc.EVT_STC_CHANGE, self.OnUpdatePageText)
self._pages.Bind(wx.EVT_LEFT_UP, self.OnLeftUp)
self._pages.Bind(wx.EVT_LEFT_DCLICK, self.OnLeftDClick)
self._pages.Bind(wx.EVT_MIDDLE_UP, self.OnMClick)
self.Bind(FNB.EVT_FLATNOTEBOOK_PAGE_CONTEXT_MENU, self.OnTabMenu)
self.Bind(wx.EVT_MENU, self.OnMenu)
self.Bind(wx.EVT_IDLE, self.OnIdle)
# Message handlers
ed_msg.Subscribe(self.OnThemeChanged, ed_msg.EDMSG_THEME_CHANGED)
ed_msg.Subscribe(self.OnThemeChanged, ed_msg.EDMSG_THEME_NOTEBOOK)
ed_msg.RegisterCallback(self.OnDocPointerRequest, ed_msg.EDREQ_DOCPOINTER)
# Add a blank page
self.NewPage()
#---- End Init ----#
def __del__(self):
ed_msg.Unsubscribe(self.OnThemeChanged)
ed_msg.UnRegisterCallback(self.OnDocPointerRequest)
#---- Function Definitions ----#
def _HandleEncodingError(self, control):
"""Handle trying to reload the file the file with a different encoding
Until it suceeds or gives up.
@param control: stc
@return: bool
"""
# Loop while the load fails prompting to try a new encoding
tried = None
fname = control.GetFileName().strip(os.sep)
fname = fname.split(os.sep)[-1]
while True:
doc = control.GetDocument()
doc.ClearLastError()
if tried is None:
enc = doc.GetEncoding()
if enc is None:
enc = ed_txt.DEFAULT_ENCODING
else:
enc = tried
msg = _("The correct encoding of '%s' could not be determined.\n\n"
"Choose an encoding and select Ok to open the file with the chosen encoding.\n"
"Click Cancel to abort opening the file") % fname
# On some systems it seems that default encoding ends up being
# None so default to utf-8 for choices.
if enc is None:
enc = 'utf_8'
dlg = eclib.EncodingDialog(self, msg=msg,
title=_("Choose an Encoding"),
default=enc)
bmp = wx.ArtProvider.GetBitmap(str(ed_glob.ID_DOCPROP),
wx.ART_OTHER)
if bmp.IsOk():
dlg.SetBitmap(bmp)
dlg.CenterOnParent()
result = dlg.ShowModal()
enc = dlg.GetEncoding()
dlg.Destroy()
# Don't want to open it in another encoding
if result == wx.ID_CANCEL:
return False
else:
control.SetEncoding(enc)
tried = enc
ok = control.LoadFile(control.GetFileName())
if ok:
return True
else:
# Artificially add a short pause, because if it's not there
# the dialog will be shown again so fast it won't seem
# like reloading the file was even tried.
wx.Sleep(1)
def _NeedOpen(self, path):
"""Check if a file needs to be opened. If the file is already open in
the notebook a dialog will be opened to ask if the user wants to reopen
the file again. If the file is not open and exists or the user chooses
to reopen the file again the function will return True else it will
return False.
@param path: file to check for
@return: bool
"""
result = wx.ID_YES
if self.HasFileOpen(path):
mdlg = wx.MessageDialog(self,
_("File is already open in an existing "
"page.\nDo you wish to open it again?"),
_("Open File") + u"?",
wx.YES_NO | wx.NO_DEFAULT | \
wx.ICON_INFORMATION)
result = mdlg.ShowModal()
mdlg.Destroy()
if result == wx.ID_NO:
self.GotoPage(path)
elif os.path.exists(path) and not os.path.isfile(path):
result = wx.ID_NO
else:
pass
return result == wx.ID_YES
def AddPage(self, page, text=u'', select=True, imgId=-1):
"""Add a page to the notebook"""
if not len(text):
self.pg_num += 1
text = _("Untitled - %d") % self.pg_num
page.SetTabLabel(text)
super(EdPages, self).AddPage(page, text, select, imgId)
sel = self.GetSelection()
self.EnsureVisible(sel)
self.UpdateIndexes()
def DocDuplicated(self, path):
"""Check for if the given path is open elswhere and duplicate the
docpointer.
@param path: string
"""
doc = ed_msg.RequestResult(ed_msg.EDREQ_DOCPOINTER, [self, path])
if hasattr(doc, 'GetDocPointer'):
self.OpenDocPointer(doc.GetDocPointer(), doc.GetDocument())
return True
else:
return False
def GetCurrentCtrl(self):
"""Returns the control of the currently selected
page in the notebook.
@return: window object contained in current page or None
"""
if hasattr(self, 'control'):
return self.control
else:
return None
def GetFileNames(self):
"""Gets the name of all open files in the notebook
@return: list of file names
"""
rlist = list()
for buff in self.GetTextControls():
fname = buff.GetFileName()
if fname != wx.EmptyString:
rlist.append(fname)
return rlist
def GetFindDialog(self):
"""Get the active find dialog or None if one is not active
@return: FindDialog or None
"""
return self._searchctrl.GetDialog()
def GetMenuHandlers(self):
"""Get the (id, evt_handler) tuples that this window should
handle.
@return: list of tuples
"""
rlist = [(ed_glob.ID_FIND, self._searchctrl.OnShowFindDlg),
(ed_glob.ID_FIND_REPLACE, self._searchctrl.OnShowFindDlg),
(ed_glob.ID_FIND_NEXT, self._searchctrl.OnFind),
(ed_glob.ID_FIND_PREVIOUS, self._searchctrl.OnFind),
(ed_glob.ID_FIND_SELECTED, self._searchctrl.OnFindSelected)]
return rlist
def GetUiHandlers(self):
"""Get the update ui handlers that this window supplies
@return: list of tuples
"""
return [(ed_glob.ID_FIND_NEXT, self._searchctrl.OnUpdateFindUI),
(ed_glob.ID_FIND_PREVIOUS, self._searchctrl.OnUpdateFindUI)]
def InsertPage(self, index, page, text, select=True, imageId=-1):
"""Insert a page into the notebook"""
super(EdPages, self).InsertPage(index, page, text, select, imageId)
self.UpdateIndexes()
def SaveSessionFile(self, session):
"""Save the current open files to the given session file
@param session: path to session file
@return: tuple (error desc, error msg) or None
"""
try:
f_handle = open(session, 'wb')
except (IOError, OSError), msg:
return (_("Error Loading Session File"), unicode(msg))
try:
sdata = dict(win1=self.GetFileNames())
cPickle.dump(sdata, f_handle)
finally:
f_handle.close()
return None
def LoadSessionFile(self, session):
"""Load files from saved session data in profile
@param session: session filename
@return: tuple (error desc, error msg), or None if no error
"""
self._ses_load = True
if os.path.exists(session):
try:
f_handle = open(session)
except IOError:
f_handle = None
else:
f_handle = None
# Invalid file
if f_handle is None:
return (_("Invalid File"), _("Session file doesn't exist."))
# Load and validate file
try:
try:
flist = cPickle.load(f_handle)
# TODO: Extend in future to support loading sessions
# for multiple windows.
flist = flist.get('win1', list())
for item in flist:
if type(item) not in (unicode, str):
raise TypeError('Invalid item in unpickled sequence')
except (cPickle.UnpicklingError, TypeError):
return (_('Invalid file'),
_('Selected file is not a valid session file'))
finally:
f_handle.close()
if not len(flist):
return (_("Empty File"), _("Session file is empty."))
# Close current files
self.CloseAllPages()
missingfns = []
for loadfn in flist:
if os.path.exists(loadfn) and os.access(loadfn, os.R_OK):
self.OpenPage(os.path.dirname(loadfn),
os.path.basename(loadfn))
# Give feedback as files are loaded
self.Update()
else:
missingfns.append(loadfn)
if missingfns:
rmsg = (_("Missing session files"),
_("Some files in saved session could not be found on disk:\n")+
u'\n'.join(missingfns))
return rmsg
self._ses_load = False
if self.GetPageCount() == 0:
self.NewPage()
return None
def NewPage(self):
"""Create a new notebook page with a blank text control
@postcondition: a new page with an untitled document is opened
"""
self.Freeze()
self.control = ed_editv.EdEditorView(self, wx.ID_ANY)
self.control.SetEncoding(Profile_Get('ENCODING'))
self.LOG("[ed_pages][evt] New Page Created ID: %d" % self.control.GetId())
self.AddPage(self.control)
self.SetPageImage(self.GetSelection(), str(self.control.GetLangId()))
# Set the control up to the preferred default lexer
dlexer = Profile_Get('DEFAULT_LEX', 'str', 'Plain Text')
ext_reg = syntax.ExtensionRegister()
ext_lst = ext_reg.get(dlexer, ['txt', ])
self.control.FindLexer(ext_lst[0])
# Set the modified callback notifier
doc = self.control.GetDocument()
doc.AddModifiedCallback(self.control.FireModified)
self.Thaw()
def OnMenu(self, evt):
"""Handle context menu events
@param evt: wx.MenuEvent
"""
ctab = self.GetCurrentPage()
if ctab is not None:
ctab.OnTabMenu(evt)
else:
evt.Skip()
def OnDocPointerRequest(self, args):
"""Get a buffer that has the same file open as the requested path.
@param args: [sender, path]
@return: EdEditorView reference or ed_msg.NullValue
"""
sender, path = args
if sender != self:
for buf in self.GetTextControls():
if buf.GetFileName() == path:
return buf
return ed_msg.NullValue()
def OnLeftDClick(self, evt):
"""Handle left double clicks and open new tab when in empty area.
@param evt: wx.EVT_LEFT_DCLICK
"""
where, tabIdx = self._pages.HitTest(evt.GetPosition())
if where == FNB.FNB_NOWHERE:
self.NewPage()
elif where == FNB.FNB_TAB:
# Maximize Editor
self.GetTopLevelParent().OnMaximizeEditor(None)
else:
evt.Skip()
| |
import frappe
from frappe import _
@frappe.whitelist()
def create_verify(doc, method):
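# For the check type named in doc.name1, fetch the matching "Verify <check>"
# document for this applicant if it already exists (otherwise create a new one),
# copy across the common applicant/customer fields, mark it "Allocation Pending",
# and save it with permissions ignored.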
applicant = frappe.get_doc("Applicant", doc.applicant_id)
# frappe.errprint(doc)
if doc.name1 == "Employment Check1":
emp1_id = frappe.db.get_value("Verify Employment Check1", {
"applicant_id": doc.applicant_id})
if emp1_id:
verify_emp1 = frappe.get_doc("Verify Employment Check1", emp1_id)
else:
verify_emp1 = frappe.new_doc("Verify Employment Check1")
verify_emp1.update({
"applicant_id": doc.applicant_id,
"customer": doc.customer,
"checks_group": doc.checks_group,
"applicant_name": doc.applicant_name,
"employment_check1_id": doc.name,
"status": "Allocation Pending",
"client_tat": doc.tat,
"emp_code": applicant.client_employee_code
})
verify_emp1.save(ignore_permissions=True)
if doc.name1 == "Employment Check2":
emp2_id = frappe.db.get_value("Verify Employment Check2", {
"applicant_id": doc.applicant_id})
if emp2_id:
verify_emp2 = frappe.get_doc("Verify Employment Check2", emp2_id)
else:
verify_emp2 = frappe.new_doc("Verify Employment Check2")
verify_emp2.update({
"applicant_id": doc.applicant_id,
"customer": doc.customer,
"checks_group": doc.checks_group,
"applicant_name": doc.applicant_name,
"employment_check2_id": doc.name,
"status": "Allocation Pending",
"client_tat": doc.tat,
"emp_code": applicant.client_employee_code
})
verify_emp2.save(ignore_permissions=True)
if doc.name1 == "Employment Check3":
emp3_id = frappe.db.get_value("Verify Employment Check3", {
"applicant_id": doc.applicant_id})
if emp3_id:
verify_emp3 = frappe.get_doc("Verify Employment Check3", emp3_id)
else:
verify_emp3 = frappe.new_doc("Verify Employment Check3")
verify_emp3.update({
"employment_check3_id": doc.name,
"applicant_id": doc.applicant_id,
"customer": doc.customer,
"checks_group": doc.checks_group,
"applicant_name": doc.applicant_name,
"status": "Allocation Pending",
"client_tat": doc.tat,
"emp_code": applicant.client_employee_code
})
verify_emp3.save(ignore_permissions=True)
if doc.name1 == "Employment Check4":
emp4_id = frappe.db.get_value("Verify Employment Check4", {
"applicant_id": doc.applicant_id})
if emp4_id:
verify_emp4 = frappe.get_doc("Verify Employment Check4", emp4_id)
else:
verify_emp4 = frappe.new_doc("Verify Employment Check4")
verify_emp4.update({
"employment_check4_id": doc.name,
"applicant_id": doc.applicant_id,
"customer": doc.customer,
"checks_group": doc.checks_group,
"applicant_name": doc.applicant_name,
"status": "Allocation Pending",
"client_tat": doc.tat,
"emp_code": applicant.client_employee_code
})
verify_emp4.save(ignore_permissions=True)
if doc.name1 == "Education Check1":
edu1_id = frappe.db.get_value("Verify Education Check1", {
"applicant_id": doc.applicant_id})
if edu1_id:
verify_edu1 = frappe.get_doc("Verify Education Check1", edu1_id)
else:
verify_edu1 = frappe.new_doc("Verify Education Check1")
verify_edu1.update({
"education_check1_id": doc.name,
"applicant_id": doc.applicant_id,
"customer": doc.customer,
"checks_group": doc.checks_group,
"applicant_name": doc.applicant_name,
"status": "Allocation Pending",
"client_tat": doc.tat,
"emp_code": applicant.client_employee_code
})
verify_edu1.save(ignore_permissions=True)
if doc.name1 == "Education Check2":
edu2_id = frappe.db.get_value("Verify Education Check2", {
"applicant_id": doc.applicant_id})
if edu2_id:
verify_edu2 = frappe.get_doc("Verify Education Check2", edu2_id)
else:
verify_edu2 = frappe.new_doc("Verify Education Check2")
verify_edu2.update({
"education_check2_id": doc.name,
"applicant_id": doc.applicant_id,
"customer": doc.customer,
"checks_group": doc.checks_group,
"applicant_name": doc.applicant_name,
"status": "Allocation Pending",
"client_tat": doc.tat,
"emp_code": applicant.client_employee_code
})
verify_edu2.save(ignore_permissions=True)
if doc.name1 == "Education Check3":
edu3_id = frappe.db.get_value("Verify Education Check3", {
"applicant_id": doc.applicant_id})
if edu3_id:
verify_edu3 = frappe.get_doc("Verify Education Check3", edu3_id)
else:
verify_edu3 = frappe.new_doc("Verify Education Check3")
verify_edu3.update({
"education_check3_id": doc.name,
"applicant_id": doc.applicant_id,
"customer": doc.customer,
"checks_group": doc.checks_group,
"applicant_name": doc.applicant_name,
"status": "Allocation Pending",
"client_tat": doc.tat,
"emp_code": applicant.client_employee_code
})
verify_edu3.save(ignore_permissions=True)
if doc.name1 == "Education Check4":
edu4_id = frappe.db.get_value("Verify Education Check4", {
"applicant_id": doc.applicant_id})
if edu4_id:
verify_edu4 = frappe.get_doc("Verify Education Check4", edu4_id)
else:
verify_edu4 = frappe.new_doc("Verify Education Check4")
verify_edu4.update({
"education_check4_id": doc.name,
"applicant_id": doc.applicant_id,
"customer": doc.customer,
"checks_group": doc.checks_group,
"applicant_name": doc.applicant_name,
"status": "Allocation Pending",
"client_tat": doc.tat,
"emp_code": applicant.client_employee_code
})
verify_edu4.save(ignore_permissions=True)
if doc.name1 == "Reference Check1":
ref1_id = frappe.db.get_value("Verify Reference Check1", {
"applicant_id": doc.applicant_id})
if ref1_id:
verify_ref1 = frappe.get_doc("Verify Reference Check1", ref1_id)
else:
verify_ref1 = frappe.new_doc("Verify Reference Check1")
verify_ref1.update({
"reference_check1_id": doc.name,
"applicant_id": doc.applicant_id,
"customer": doc.customer,
"checks_group": doc.checks_group,
"applicant_name": doc.applicant_name,
"status": "Allocation Pending",
"client_tat": doc.tat,
"emp_code": applicant.client_employee_code
})
verify_ref1.save(ignore_permissions=True)
if doc.name1 == "Reference Check2":
ref2_id = frappe.db.get_value("Verify Reference Check2", {
"applicant_id": doc.applicant_id})
if ref2_id:
verify_ref2 = frappe.get_doc("Verify Reference Check2", ref2_id)
else:
verify_ref2 = frappe.new_doc("Verify Reference Check2")
verify_ref2.update({
"reference_check2_id": doc.name,
"applicant_id": doc.applicant_id,
"customer": doc.customer,
"checks_group": doc.checks_group,
"applicant_name": doc.applicant_name,
"status": "Allocation Pending",
"client_tat": doc.tat,
"emp_code": applicant.client_employee_code
})
verify_ref2.save(ignore_permissions=True)
if doc.name1 == "Reference Check3":
ref3_id = frappe.db.get_value("Verify Reference Check3", {
"applicant_id": doc.applicant_id})
if ref3_id:
verify_ref3 = frappe.get_doc("Verify Reference Check3", ref3_id)
else:
verify_ref3 = frappe.new_doc("Verify Reference Check3")
verify_ref3.update({
"reference_check3_id": doc.name,
"applicant_id": doc.applicant_id,
"customer": doc.customer,
"checks_group": doc.checks_group,
"applicant_name": doc.applicant_name,
"status": "Allocation Pending",
"client_tat": doc.tat,
"emp_code": applicant.client_employee_code
})
verify_ref3.save(ignore_permissions=True)
if doc.name1 == "Reference Check4":
ref4_id = frappe.db.get_value("Verify Reference Check4", {
"applicant_id": doc.applicant_id})
if ref4_id:
verify_ref4 = frappe.get_doc("Verify Reference Check4", ref4_id)
else:
verify_ref4 = frappe.new_doc("Verify Reference Check4")
verify_ref4.update({
"reference_check4_id": doc.name,
"applicant_id": doc.applicant_id,
"customer": doc.customer,
"checks_group": doc.checks_group,
"applicant_name": doc.applicant_name,
"status": "Allocation Pending",
"client_tat": doc.tat,
"emp_code": applicant.client_employee_code
})
verify_ref4.save(ignore_permissions=True)
if doc.name1 == "Address Check1":
add1_id = frappe.db.get_value("Verify Address Check1", {
"applicant_id": doc.applicant_id})
if add1_id:
verify_add1 = frappe.get_doc("Verify Address Check1", add1_id)
else:
verify_add1 = frappe.new_doc("Verify Address Check1")
verify_add1.update({
"address_check1_id": doc.name,
"applicant_id": doc.applicant_id,
"customer": doc.customer,
"checks_group": doc.checks_group,
"applicant_name": doc.applicant_name,
"status": "Allocation Pending",
"client_tat": doc.tat,
"emp_code": applicant.client_employee_code
})
verify_add1.save(ignore_permissions=True)
if doc.name1 == "Address Check2":
add2_id = frappe.db.get_value("Verify Address Check2", {
"applicant_id": doc.applicant_id})
if add2_id:
verify_add2 = frappe.get_doc("Verify Address Check2", add2_id)
else:
verify_add2 = frappe.new_doc("Verify Address Check2")
verify_add2.update({
"applicant_id": doc.applicant_id,
"customer": doc.customer,
"checks_group": doc.checks_group,
"applicant_name": doc.applicant_name,
"address_check2_id": doc.name,
"status": "Allocation Pending",
"client_tat": doc.tat,
"emp_code": applicant.client_employee_code
})
verify_add2.save(ignore_permissions=True)
if doc.name1 == "Address Check3":
add3_id = frappe.db.get_value("Verify Address Check3", {
"applicant_id": doc.applicant_id})
if add3_id:
verify_add3 = frappe.get_doc("Verify Address Check3", add3_id)
else:
verify_add3 = frappe.new_doc("Verify Address Check3")
verify_add3.update({
"address_check3_id": doc.name,
"applicant_id": doc.applicant_id,
"customer": doc.customer,
"checks_group": doc.checks_group,
"applicant_name": doc.applicant_name,
"status": "Allocation Pending",
"client_tat": doc.tat,
"emp_code": applicant.client_employee_code
})
verify_add3.save(ignore_permissions=True)
if doc.name1 == "Address Check4":
add4_id = frappe.db.get_value("Verify Address Check4", {
"applicant_id": doc.applicant_id})
if add4_id:
verify_add4 = frappe.get_doc("Verify Address Check4", add4_id)
else:
verify_add4 = frappe.new_doc("Verify Address Check4")
verify_add4.update({
"address_check4_id": doc.name,
"applicant_id": doc.applicant_id,
"customer": doc.customer,
"checks_group": doc.checks_group,
"applicant_name": doc.applicant_name,
"status": "Allocation Pending",
"client_tat": doc.tat,
"emp_code": applicant.client_employee_code
})
verify_add4.save(ignore_permissions=True)
if doc.name1 == "Family Check1":
fmly1_id = frappe.db.get_value("Verify Family Check1", {
"applicant_id": doc.applicant_id})
if fmly1_id:
verify_fmly1 = frappe.get_doc("Verify Family Check1", fmly1_id)
else:
verify_fmly1 = frappe.new_doc("Verify Family Check1")
verify_fmly1.update({
"family_check1_id": doc.name,
"applicant_id": doc.applicant_id,
"customer": doc.customer,
"checks_group": doc.checks_group,
"applicant_name": doc.applicant_name,
"status": "Allocation Pending",
"client_tat": doc.tat,
"emp_code": applicant.client_employee_code
})
verify_fmly1.save(ignore_permissions=True)
if doc.name1 == "Family Check2":
fmly2_id = frappe.db.get_value("Verify Family Check2", {
"applicant_id": doc.applicant_id})
if fmly2_id:
verify_fmly2 = frappe.get_doc("Verify Family Check2", fmly2_id)
else:
verify_fmly2 = frappe.new_doc("Verify Family Check2")
verify_fmly2.update({
"family_check2_id": doc.name,
"applicant_id": doc.applicant_id,
"customer": doc.customer,
"checks_group": doc.checks_group,
"applicant_name": doc.applicant_name,
"status": "Allocation Pending",
"client_tat": doc.tat,
"emp_code": applicant.client_employee_code
})
verify_fmly2.save(ignore_permissions=True)
if doc.name1 == "Family Check3":
fmly3_id = frappe.db.get_value("Verify Family Check3", {
"applicant_id": doc.applicant_id})
if fmly3_id:
verify_fmly3 = frappe.get_doc("Verify Family Check3", fmly3_id)
else:
verify_fmly3 = frappe.new_doc("Verify Family Check3")
verify_fmly3.update({
"family_check3_id": doc.name,
"applicant_id": doc.applicant_id,
"customer": doc.customer,
"checks_group": doc.checks_group,
"applicant_name": doc.applicant_name,
"status": "Allocation Pending",
"client_tat": doc.tat,
"emp_code": applicant.client_employee_code
})
verify_fmly3.save(ignore_permissions=True)
if doc.name1 == "Family Check4":
fmly4_id = frappe.db.get_value("Verify Family Check4", {
"applicant_id": doc.applicant_id})
if fmly4_id:
verify_fmly4 = frappe.get_doc("Verify Family Check4", fmly4_id)
else:
verify_fmly4 = frappe.new_doc("Verify Family Check4")
verify_fmly4.update({
"family_check4_id": doc.name,
"applicant_id": doc.applicant_id,
"customer": doc.customer,
"checks_group": doc.checks_group,
"applicant_name": doc.applicant_name,
"status": "Allocation Pending",
"client_tat": doc.tat,
"emp_code": applicant.client_employee_code
})
verify_fmly4.save(ignore_permissions=True)
if doc.name1 == "Civil Check":
# frappe.errprint("hi")
cvl_id = frappe.db.get_value("Verify Civil Check", {
"applicant_id": doc.applicant_id})
if cvl_id:
verify_cvl = frappe.get_doc("Verify Civil Check", cvl_id)
else:
verify_cvl = frappe.new_doc("Verify Civil Check")
verify_cvl.update({
"civil_check_id": doc.name,
"applicant_id": doc.applicant_id,
"customer": doc.customer,
"checks_group": doc.checks_group,
"applicant_name": doc.applicant_name,
"status": "Allocation Pending",
"client_tat": doc.tat,
"emp_code": applicant.client_employee_code
})
verify_cvl.save(ignore_permissions=True)
if doc.name1 == "Criminal Check":
cmv_id = frappe.db.get_value("Verify Criminal Check", {
"applicant_id": doc.applicant_id})
if cmv_id:
verify_cmv = frappe.get_doc("Verify Criminal Check", cmv_id)
else:
verify_cmv = frappe.new_doc("Verify Criminal Check")
verify_cmv.update({
"criminal_check_id": doc.name,
"applicant_id": doc.applicant_id,
"customer": doc.customer,
"checks_group": doc.checks_group,
"applicant_name": doc.applicant_name,
"status": "Allocation Pending",
"client_tat": doc.tat,
"emp_code": applicant.client_employee_code
})
verify_cmv.save(ignore_permissions=True)
if doc.name1 == "ID Check1":
pv_id = frappe.db.get_value(
"Verify ID Check1", {"applicant_id": doc.applicant_id})
if pv_id:
verify_pv = frappe.get_doc("Verify ID Check1", pv_id)
else:
verify_pv = frappe.new_doc("Verify ID Check1")
verify_pv.update({
"id_check": doc.id_check,
"id_check_id": doc.name,
"applicant_id": doc.applicant_id,
"customer": doc.customer,
"checks_group": doc.checks_group,
"applicant_name": doc.applicant_name,
"status": "Allocation Pending",
"client_tat": doc.tat,
"emp_code": applicant.client_employee_code
})
verify_pv.save(ignore_permissions=True)
if doc.name1 == "ID Check2":
acv_id = frappe.db.get_value(
"Verify ID Check2", {"applicant_id": doc.applicant_id})
if acv_id:
verify_acv = frappe.get_doc("Verify ID Check2", acv_id)
else:
verify_acv = frappe.new_doc("Verify ID Check2")
verify_acv.update({
"id_check": doc.id_check,
"id_check_id": doc.name,
"applicant_id": doc.applicant_id,
"customer": doc.customer,
"checks_group": doc.checks_group,
"applicant_name": doc.applicant_name,
"status": "Allocation Pending",
"client_tat": doc.tat,
"emp_code": applicant.client_employee_code
})
verify_acv.save(ignore_permissions=True)
if doc.name1 == "ID Check3":
dlv_id = frappe.db.get_value(
"Verify ID Check3", {"applicant_id": doc.applicant_id})
if dlv_id:
verify_dlv = frappe.get_doc("Verify ID Check3", dlv_id)
else:
verify_dlv = frappe.new_doc("Verify ID Check3")
verify_dlv.update({
"id_check": doc.id_check,
"id_check_id": doc.name,
"applicant_id": doc.applicant_id,
"customer": doc.customer,
"checks_group": doc.checks_group,
"applicant_name": doc.applicant_name,
"status": "Allocation Pending",
"client_tat": doc.tat,
"emp_code": applicant.client_employee_code
})
verify_dlv.save(ignore_permissions=True)
if doc.name1 == "ID Check4":
pass_id = frappe.db.get_value(
"Verify ID Check4", {"applicant_id": doc.applicant_id})
if pass_id:
verify_pass = frappe.get_doc("Verify ID Check4", pass_id)
else:
verify_pass = frappe.new_doc("Verify ID Check4")
verify_pass.update({
"id_check": doc.id_check,
"id_check_id": doc.name,
"applicant_id": doc.applicant_id,
"customer": doc.customer,
"checks_group": doc.checks_group,
"applicant_name": doc.applicant_name,
"status": "Allocation Pending",
"client_tat": doc.tat,
"emp_code": applicant.client_employee_code
})
verify_pass.save(ignore_permissions=True)
if doc.name1 == "ID Check5":
rcv_id = frappe.db.get_value(
"Verify ID Check5", {"applicant_id": doc.applicant_id})
if rcv_id:
verify_rcv = frappe.get_doc("Verify ID Check5", rcv_id)
else:
verify_rcv = frappe.new_doc("Verify ID Check5")
verify_rcv.update({
"id_check": doc.id_check,
"id_check_id": doc.name,
"applicant_id": doc.applicant_id,
				"customer": doc.customer,
				"checks_group": doc.checks_group,
				"applicant_name": doc.applicant_name,
				"status": "Allocation Pending",
				"client_tat": doc.tat,
				"emp_code": applicant.client_employee_code
			})
			verify_rcv.save(ignore_permissions=True)
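# Refactor sketch (hedged; CHECK_DOCTYPES and upsert_verify_doc are hypothetical names): the
# repeated "Verify <X> Check" blocks above differ only in the target doctype and the link
# field, so a mapping-driven helper using the same frappe calls could replace them. ID checks
# would additionally set "id_check": doc.id_check.
#
#   CHECK_DOCTYPES = {
#       "Address Check4": ("Verify Address Check4", "address_check4_id"),
#       "Family Check1": ("Verify Family Check1", "family_check1_id"),
#       "Civil Check": ("Verify Civil Check", "civil_check_id"),
#       "ID Check5": ("Verify ID Check5", "id_check_id"),
#       # ... one entry per check type
#   }
#
#   def upsert_verify_doc(doc, applicant):
#       doctype, id_field = CHECK_DOCTYPES[doc.name1]
#       existing = frappe.db.get_value(doctype, {"applicant_id": doc.applicant_id})
#       verify = frappe.get_doc(doctype, existing) if existing else frappe.new_doc(doctype)
#       verify.update({
#           id_field: doc.name,
#           "applicant_id": doc.applicant_id,
#           "customer": doc.customer,
#           "checks_group": doc.checks_group,
#           "applicant_name": doc.applicant_name,
#           "status": "Allocation Pending",
#           "client_tat": doc.tat,
#           "emp_code": applicant.client_employee_code,
#       })
#       verify.save(ignore_permissions=True)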
import math
import numpy as np
import torch
from torch import optim
from torch import nn
import torch.utils.data
from torch.nn import (
BatchNorm1d,
Dropout,
LeakyReLU,
Linear,
Module,
ReLU,
Sequential,
Sigmoid,
)
from torch.autograd import Variable
import warnings
from .data_sampler import DataSampler
from ctgan.synthesizers import CTGANSynthesizer
from snsynth.preprocessors.data_transformer import BaseTransformer
from .privacy_utils import weights_init, pate, moments_acc
class Discriminator(Module):
def __init__(self, input_dim, discriminator_dim, loss, pac=10):
super(Discriminator, self).__init__()
torch.cuda.manual_seed(0)
torch.manual_seed(0)
dim = input_dim * pac
# print ('now dim is {}'.format(dim))
self.pac = pac
self.pacdim = dim
seq = []
for item in list(discriminator_dim):
seq += [Linear(dim, item), LeakyReLU(0.2), Dropout(0.5)]
dim = item
seq += [Linear(dim, 1)]
if loss == "cross_entropy":
seq += [Sigmoid()]
self.seq = Sequential(*seq)
def dragan_penalty(self, real_data, device="cpu", pac=10, lambda_=10):
# real_data = torch.from_numpy(real_data).to(device)
alpha = (
torch.rand(real_data.shape[0], 1, device=device)
.squeeze()
.expand(real_data.shape[0])
)
delta = torch.normal(
mean=0.0, std=float(pac), size=real_data.shape, device=device
) # 0.5 * real_data.std() * torch.rand(real_data.shape)
x_hat = Variable(
(alpha * real_data.T + (1 - alpha) * (real_data + delta).T).T,
requires_grad=True,
)
pred_hat = self(x_hat.float())
gradients = torch.autograd.grad(
outputs=pred_hat,
inputs=x_hat,
grad_outputs=torch.ones(pred_hat.size(), device=device),
create_graph=True,
retain_graph=True,
only_inputs=True,
)[0]
dragan_penalty = lambda_ * ((gradients.norm(2, dim=1) - 1) ** 2).mean()
return dragan_penalty
def forward(self, input):
assert input.size()[0] % self.pac == 0
return self.seq(input.view(-1, self.pacdim))
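# Usage sketch (hypothetical shapes, not from the original source): the `pac` argument packs
# `pac` consecutive samples into one discriminator row, so the batch size must be divisible
# by pac and the effective input width is input_dim * pac (see forward() above).
#
#   disc = Discriminator(input_dim=30, discriminator_dim=(256, 256), loss="cross_entropy", pac=10)
#   x = torch.randn(500, 30)   # 500 % 10 == 0
#   scores = disc(x)           # shape (50, 1): one score per pack of 10 samples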
class Residual(Module):
def __init__(self, i, o):
super(Residual, self).__init__()
self.fc = Linear(i, o)
self.bn = BatchNorm1d(o)
self.relu = ReLU()
def forward(self, input):
out = self.fc(input)
out = self.bn(out)
out = self.relu(out)
return torch.cat([out, input], dim=1)
class Generator(Module):
def __init__(self, embedding_dim, generator_dim, data_dim):
super(Generator, self).__init__()
dim = embedding_dim
seq = []
for item in list(generator_dim):
seq += [Residual(dim, item)]
dim += item
seq.append(Linear(dim, data_dim))
self.seq = Sequential(*seq)
def forward(self, input):
data = self.seq(input)
return data
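# Shape sketch (illustrative numbers): each Residual block concatenates its output with its
# input, so the width grows by `item` at every step before the final Linear maps to data_dim,
# e.g. embedding_dim=128 with generator_dim=(256, 256) gives 128 -> 384 -> 640 -> data_dim.
#
#   gen = Generator(embedding_dim=128, generator_dim=(256, 256), data_dim=50)
#   z = torch.randn(16, 128)
#   out = gen(z)               # shape (16, 50)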
class PATECTGAN(CTGANSynthesizer):
def __init__(
self,
embedding_dim=128,
generator_dim=(256, 256),
discriminator_dim=(256, 256),
generator_lr=2e-4,
generator_decay=1e-6,
discriminator_lr=2e-4,
discriminator_decay=1e-6,
batch_size=500,
discriminator_steps=1,
log_frequency=False,
verbose=False,
epochs=300,
pac=1,
cuda=True,
epsilon=1,
binary=False,
regularization=None,
loss="cross_entropy",
teacher_iters=5,
student_iters=5,
sample_per_teacher=1000,
delta=None,
noise_multiplier=1e-3,
preprocessor_eps=1,
moments_order=100,
category_epsilon_pct=0.1,
):
assert batch_size % 2 == 0
self._embedding_dim = embedding_dim
self._generator_dim = generator_dim
self._discriminator_dim = discriminator_dim
self._generator_lr = generator_lr
self._generator_decay = generator_decay
self._discriminator_lr = discriminator_lr
self._discriminator_decay = discriminator_decay
self._batch_size = batch_size
self._discriminator_steps = discriminator_steps
self._log_frequency = log_frequency
self._verbose = verbose
self._epochs = epochs
self.pac = pac
self.preprocessor_eps = preprocessor_eps
self.epsilon = epsilon - preprocessor_eps
self._category_epsilon_pct = category_epsilon_pct
self.verbose = verbose
self.loss = loss
# PATE params
self.regularization = regularization if self.loss != "wasserstein" else "dragan"
self.teacher_iters = teacher_iters
self.student_iters = student_iters
self.pd_cols = None
self.pd_index = None
self.binary = binary
self.sample_per_teacher = sample_per_teacher
self.noise_multiplier = noise_multiplier
self.moments_order = moments_order
self.delta = delta
if not cuda or not torch.cuda.is_available():
device = "cpu"
elif isinstance(cuda, str):
device = cuda
else:
device = "cuda"
self._device = torch.device(device)
if self._log_frequency:
warnings.warn(
"log_frequency is selected. This may result in oversampling frequent "
"categories, which could cause privacy leaks."
)
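    # Construction sketch (parameter values are illustrative, not a recommendation): the total
    # epsilon passed in is split -- `preprocessor_eps` is reserved for the transformer and a
    # further `category_epsilon_pct` share goes to the conditional data sampler, so the GAN
    # itself trains on what remains.
    #
    #   synth = PATECTGAN(epsilon=3.0, preprocessor_eps=1.0, batch_size=500, cuda=False)
    #   # synth.train(df, categorical_columns=["gender", "zip"])  # df: a hypothetical DataFrame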
def train(
self,
data,
categorical_columns=None,
ordinal_columns=None,
update_epsilon=None,
transformer=BaseTransformer,
continuous_columns_lower_upper=None,
):
if update_epsilon:
self.epsilon = update_epsilon - self.preprocessor_eps
for col in categorical_columns:
if str(data[col].dtype).startswith("float"):
                raise ValueError(
                    "It looks like you are passing in a vector of continuous values "
                    f"to a categorical column at [{col}]. "
                    "Please discretize and pass in categorical columns with "
                    "unsigned integer or string category names."
                )
sample_per_teacher = (
self.sample_per_teacher if self.sample_per_teacher < len(data) else 1000
)
self.num_teachers = int(len(data) / sample_per_teacher) + 1
self._transformer = transformer(self.preprocessor_eps)
self._transformer.fit(
data,
discrete_columns=categorical_columns,
continuous_columns_lower_upper=continuous_columns_lower_upper,
)
train_data = self._transformer.transform(data)
data_partitions = np.array_split(train_data, self.num_teachers)
data_dim = self._transformer.output_dimensions
sampler_eps = 0.0
if categorical_columns and self._category_epsilon_pct:
sampler_eps = self.epsilon * self._category_epsilon_pct
per_col_sampler_eps = sampler_eps / len(categorical_columns)
self.epsilon = self.epsilon - sampler_eps
else:
per_col_sampler_eps = None
self.cond_generator = DataSampler(
train_data,
self._transformer.output_info_list,
self._log_frequency,
per_column_epsilon=per_col_sampler_eps,
)
spent = self.cond_generator.total_spent
if spent > sampler_eps and not np.isclose(spent, sampler_eps):
raise AssertionError(
f"The data sampler used {spent} epsilon and was budgeted for {sampler_eps}"
)
# create conditional generator for each teacher model
# Note: Previously, there existed a ConditionalGenerator object in CTGAN
# - that functionality has been subsumed by DataSampler, but switch is
# essentially 1 for 1
# don't need to count eps for each teacher, because these are disjoint partitions
cached_probs = self.cond_generator.discrete_column_category_prob
cond_generator = [
DataSampler(
d,
self._transformer.output_info_list,
self._log_frequency,
per_column_epsilon=None,
discrete_column_category_prob=cached_probs,
)
for d in data_partitions
]
self._generator = Generator(
self._embedding_dim + self.cond_generator.dim_cond_vec(),
self._generator_dim,
data_dim,
).to(self._device)
discriminator = Discriminator(
data_dim + self.cond_generator.dim_cond_vec(),
self._discriminator_dim,
self.loss,
self.pac,
).to(self._device)
student_disc = discriminator
student_disc.apply(weights_init)
teacher_disc = [discriminator for i in range(self.num_teachers)]
for i in range(self.num_teachers):
teacher_disc[i].apply(weights_init)
optimizerG = optim.Adam(
self._generator.parameters(),
lr=self._generator_lr,
betas=(0.5, 0.9),
weight_decay=self._generator_decay,
)
optimizer_s = optim.Adam(student_disc.parameters(), lr=2e-4, betas=(0.5, 0.9))
optimizer_t = [
optim.Adam(
teacher_disc[i].parameters(),
lr=self._discriminator_lr,
betas=(0.5, 0.9),
weight_decay=self._discriminator_decay,
)
for i in range(self.num_teachers)
]
noise_multiplier = self.noise_multiplier
alphas = torch.tensor(
[0.0 for i in range(self.moments_order)], device=self._device
)
l_list = 1 + torch.tensor(range(self.moments_order), device=self._device)
eps = torch.zeros(1)
mean = torch.zeros(self._batch_size, self._embedding_dim, device=self._device)
std = mean + 1
real_label = 1
fake_label = 0
criterion = nn.BCELoss() if (self.loss == "cross_entropy") else self.w_loss
if self.verbose:
print(
"using loss {} and regularization {}".format(
self.loss, self.regularization
)
)
iteration = 0
if self.delta is None:
self.delta = 1 / (train_data.shape[0] * np.sqrt(train_data.shape[0]))
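        # Moments-accountant note (explanatory; numbers are illustrative): the loop below
        # converts the accumulated log-moments `alphas` into an (epsilon, delta) guarantee via
        # eps = min over orders l of (alphas[l] + log(1/delta)) / l, and stops once the next
        # round would push eps past the requested budget. For example, alphas = [0.5, 1.2] at
        # orders l = [1, 2] with delta = 1e-5 gives eps = min(12.0, 6.4) ~= 6.4.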
while eps.item() < self.epsilon:
iteration += 1
eps = min((alphas - math.log(self.delta)) / l_list)
if eps.item() > self.epsilon:
if iteration == 1:
raise ValueError(
"Inputted epsilon parameter is too small to"
+ " create a private dataset. Try increasing epsilon and rerunning."
)
break
# train teacher discriminators
for t_2 in range(self.teacher_iters):
for i in range(self.num_teachers):
partition_data = data_partitions[i]
data_sampler = DataSampler(
partition_data,
self._transformer.output_info_list,
self._log_frequency,
per_column_epsilon=None,
discrete_column_category_prob=cached_probs,
)
fakez = torch.normal(mean, std=std).to(self._device)
condvec = cond_generator[i].sample_condvec(self._batch_size)
if condvec is None:
c1, m1, col, opt = None, None, None, None
real = data_sampler.sample_data(self._batch_size, col, opt)
else:
c1, m1, col, opt = condvec
c1 = torch.from_numpy(c1).to(self._device)
m1 = torch.from_numpy(m1).to(self._device)
fakez = torch.cat([fakez, c1], dim=1)
perm = np.arange(self._batch_size)
np.random.shuffle(perm)
real = data_sampler.sample_data(
self._batch_size, col[perm], opt[perm]
)
c2 = c1[perm]
fake = self._generator(fakez)
fakeact = self._apply_activate(fake)
real = torch.from_numpy(real.astype("float32")).to(self._device)
if c1 is not None:
fake_cat = torch.cat([fakeact, c1], dim=1)
real_cat = torch.cat([real, c2], dim=1)
else:
real_cat = real
fake_cat = fake
optimizer_t[i].zero_grad()
y_all = torch.cat(
[teacher_disc[i](fake_cat), teacher_disc[i](real_cat)]
)
label_fake = torch.full(
(int(self._batch_size / self.pac), 1),
fake_label,
dtype=torch.float,
device=self._device,
)
label_true = torch.full(
(int(self._batch_size / self.pac), 1),
real_label,
dtype=torch.float,
device=self._device,
)
labels = torch.cat([label_fake, label_true])
error_d = criterion(y_all.squeeze(), labels.squeeze())
error_d.backward()
if self.regularization == "dragan":
pen = teacher_disc[i].dragan_penalty(
real_cat, device=self._device
)
pen.backward(retain_graph=True)
optimizer_t[i].step()
###
# train student discriminator
for t_3 in range(self.student_iters):
data_sampler = DataSampler(
train_data,
self._transformer.output_info_list,
self._log_frequency,
per_column_epsilon=None,
discrete_column_category_prob=cached_probs,
)
fakez = torch.normal(mean=mean, std=std)
condvec = self.cond_generator.sample_condvec(self._batch_size)
if condvec is None:
c1, m1, col, opt = None, None, None, None
real = data_sampler.sample_data(self._batch_size, col, opt)
else:
c1, m1, col, opt = condvec
c1 = torch.from_numpy(c1).to(self._device)
m1 = torch.from_numpy(m1).to(self._device)
fakez = torch.cat([fakez, c1], dim=1)
perm = np.arange(self._batch_size)
np.random.shuffle(perm)
real = data_sampler.sample_data(
self._batch_size, col[perm], opt[perm]
)
c2 = c1[perm]
fake = self._generator(fakez)
fakeact = self._apply_activate(fake)
if c1 is not None:
fake_cat = torch.cat([fakeact, c1], dim=1)
else:
fake_cat = fakeact
fake_data = fake_cat
###
predictions, votes = pate(
fake_data, teacher_disc, noise_multiplier, device=self._device
)
output = student_disc(fake_data.detach())
# update moments accountant
alphas = alphas + moments_acc(
self.num_teachers,
votes,
noise_multiplier,
l_list,
device=self._device,
)
loss_s = criterion(
output.squeeze(), predictions.float().to(self._device).squeeze()
)
optimizer_s.zero_grad()
loss_s.backward()
if self.regularization == "dragan":
vals = torch.cat([predictions, fake_data], axis=1)
ordered = vals[vals[:, 0].sort()[1]]
data_list = torch.split(
ordered, predictions.shape[0] - int(predictions.sum().item())
)
synth_cat = torch.cat(data_list[1:], axis=0)[:, 1:]
pen = student_disc.dragan_penalty(synth_cat, device=self._device)
pen.backward(retain_graph=True)
optimizer_s.step()
# print ('iterator {i}, student discriminator loss is {j}'.format(i=t_3, j=loss_s))
# train generator
fakez = torch.normal(mean=mean, std=std)
condvec = self.cond_generator.sample_condvec(self._batch_size)
if condvec is None:
c1, m1, col, opt = None, None, None, None
else:
c1, m1, col, opt = condvec
c1 = torch.from_numpy(c1).to(self._device)
m1 = torch.from_numpy(m1).to(self._device)
fakez = torch.cat([fakez, c1], dim=1)
fake = self._generator(fakez)
fakeact = self._apply_activate(fake)
if c1 is not None:
y_fake = student_disc(torch.cat([fakeact, c1], dim=1))
else:
y_fake = student_disc(fakeact)
if condvec is None:
cross_entropy = 0
else:
cross_entropy = self._cond_loss(fake, c1, m1)
if self.loss == "cross_entropy":
label_g = torch.full(
(int(self._batch_size / self.pac), 1),
real_label,
dtype=torch.float,
device=self._device,
)
loss_g = criterion(y_fake.squeeze(), label_g.float().squeeze())
loss_g = loss_g + cross_entropy
else:
loss_g = -torch.mean(y_fake) + cross_entropy
optimizerG.zero_grad()
            loss_g.backward()
            optimizerG.step()
# tellor-io/algorandTellorV2
from time import time
import pytest
from algosdk import constants
from algosdk import encoding
from algosdk.algod import AlgodClient
from algosdk.error import AlgodHTTPError
from algosdk.logic import get_application_address
from conftest import App
from src.scripts.scripts import Scripts
from src.utils.accounts import Accounts
from src.utils.util import getAppGlobalState
def test_2_feeds(scripts: Scripts, deployed_contract: App, client: AlgodClient):
"""
Medianizer -- ensure that medianizer functions
    with fewer than 5 feeds
"""
value = 3500
timestamp = int(time() - 3599)
median = (3500 + 3550) / 2
for i in range(2):
scripts.feed_app_id = deployed_contract.feed_ids[i]
feed_id = scripts.feed_app_id
scripts.feed_app_address = get_application_address(feed_id)
scripts.stake()
query_id = "1"
scripts.report(query_id, value, timestamp)
value += 50
timestamp += 10
state = getAppGlobalState(client, deployed_contract.medianizer_id)
state = getAppGlobalState(client, deployed_contract.medianizer_id)
assert state[b"median"] == median
assert state[b"median_timestamp"] == pytest.approx(time(), 200)
def test_accuracy_bytes_slicing(scripts: Scripts, deployed_contract: App, client: AlgodClient):
"""
After a report is submitted, the `last_value` global var
should contain an accurate value and timestamp
"""
scripts.feed_app_id = deployed_contract.feed_ids[0]
feed_id = scripts.feed_app_id
scripts.feed_app_address = get_application_address(feed_id)
scripts.stake()
query_id = b"1"
value = 40000
timestamp = int(time() - 500)
scripts.report(query_id, value, timestamp)
state = getAppGlobalState(client, feed_id)
last_value_and_timestamp = state[b"last_value"]
assert len(last_value_and_timestamp) == 16
on_chain_timestamp = last_value_and_timestamp[:8]
on_chain_value = last_value_and_timestamp[8:]
assert int.from_bytes(on_chain_value, "big") == value
assert int.from_bytes(on_chain_timestamp, "big") == timestamp
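# Packing sketch (mirrors the slicing asserted above): `last_value` is 16 bytes, an 8-byte
# big-endian timestamp followed by an 8-byte big-endian value.
#
#   packed = timestamp.to_bytes(8, "big") + value.to_bytes(8, "big")
#   assert len(packed) == 16
#   assert int.from_bytes(packed[:8], "big") == timestamp
#   assert int.from_bytes(packed[8:], "big") == value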
def test_early_withdraw_attempt(scripts: Scripts, deployed_contract: App, client: AlgodClient):
"""
Shouldn't be able to withdraw stake from contract
before the 1 day interval
"""
scripts.feed_app_id = deployed_contract.feed_ids[0]
feed_id = scripts.feed_app_id
scripts.feed_app_address = get_application_address(feed_id)
scripts.stake()
state = getAppGlobalState(client, feed_id)
assert state[b"staking_status"] == 1
scripts.request_withdraw()
res = scripts.withdraw_dry(timestamp=int(time()) + 86000) # 1 day minus 400 seconds
assert res["txns"][0]["app-call-messages"][1] == "REJECT"
def test_median_computation(scripts: Scripts, deployed_contract: App, client: AlgodClient):
"""
Medianizer -- deploy 5 feeds, submit to 5 feeds,
ensure median from contract matches median calculated from APIs
"""
value = 3500
timestamp = int(time() - 500)
median_time = timestamp + 20
median = 3600
for i in deployed_contract.feed_ids:
scripts.feed_app_id = i
feed_id = scripts.feed_app_id
scripts.feed_app_address = get_application_address(feed_id)
scripts.stake()
query_id = "1"
scripts.report(query_id, value, timestamp)
value += 50
timestamp += 10
state = getAppGlobalState(client, deployed_contract.medianizer_id)
state = getAppGlobalState(client, deployed_contract.medianizer_id)
assert state[b"median"] == median
assert state[b"median_timestamp"] == median_time
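# Worked check (values from the test above): the five feeds report 3500, 3550, 3600, 3650
# and 3700 at timestamps t, t+10, ..., t+40, so the on-chain median is 3600 and the median
# timestamp is t+20, matching `median` and `median_time`.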
def test_median_update(scripts: Scripts, deployed_contract: App, client: AlgodClient):
"""
    If the median is updated, the timestamp of the median is the
timestamp of the API call
"""
value = 3500
timestamp = int(time() - 500)
timestamps = []
for i in range(3):
scripts.feed_app_id = deployed_contract.feed_ids[i]
feed_id = scripts.feed_app_id
scripts.feed_app_address = get_application_address(feed_id)
scripts.stake()
query_id = "1"
scripts.report(query_id, value, timestamp)
timestamps.append(timestamp)
value += 50
timestamp += 10
state = getAppGlobalState(client, deployed_contract.medianizer_id)
state = getAppGlobalState(client, deployed_contract.medianizer_id)
assert state[b"median_timestamp"] == pytest.approx(timestamps[1], 200)
def test_not_staked_report_attempt(scripts: Scripts, deployed_contract: App, client: AlgodClient):
"""
Accounts should not be permitted to report
    if they have not sent a stake to the contract
"""
state = getAppGlobalState(client, deployed_contract.feed_ids[0])
assert state[b"staking_status"] == 0
assert state[b"reporter_address"] == b""
query_id = "1"
value = 3500
timestamp = int(time())
with pytest.raises(AlgodHTTPError):
scripts.report(query_id, value, timestamp) # expect failure/reversion
def test_old_timestamp(scripts: Scripts, deployed_contract: App):
"""
Timestamp older than an hour should be rejected
"""
scripts.feed_app_id = deployed_contract.feed_ids[0]
feed_id = scripts.feed_app_id
scripts.feed_app_address = get_application_address(feed_id)
scripts.stake()
query_id = "1"
value = 3500
timestamp = int(time() - 3610)
with pytest.raises(AlgodHTTPError):
scripts.report(query_id, value, timestamp)
def test_only_one_staker(scripts: Scripts, accounts: Accounts, deployed_contract: App, client: AlgodClient):
"""
    An account can't replace another account as the reporter;
    in other words, a second account can't stake
    if another account is already staked.
"""
scripts.feed_app_id = deployed_contract.feed_ids[0]
feed_id = scripts.feed_app_id
scripts.feed_app_address = get_application_address(feed_id)
scripts.stake()
state = getAppGlobalState(client, feed_id)
assert state[b"reporter_address"] == encoding.decode_address(accounts.reporter.getAddress())
scripts.reporter = accounts.bad_actor
with pytest.raises(AlgodHTTPError):
scripts.stake()
def test_overflow_in_create(scripts: Scripts, accounts: Accounts):
"""
Contract deployment should revert if
bytes inputs are longer than 128 bytes
"""
too_long_query_id = "a" * 129
query_data = "my query_id is invalid because it is >128 bytes in length"
with pytest.raises(AlgodHTTPError):
scripts.deploy_tellor_flex(
query_id=too_long_query_id,
query_data=query_data,
timestamp_freshness=3600,
multisigaccounts_sk=accounts.multisig_signers_sk,
)
def test_report_after_request_withdraw(scripts: Scripts, deployed_contract: App):
"""
reporter can't report after requesting to withdraw
"""
scripts.feed_app_id = deployed_contract.feed_ids[0]
feed_id = scripts.feed_app_id
scripts.feed_app_address = get_application_address(feed_id)
scripts.stake()
scripts.request_withdraw()
query_id = "1"
value = 3500
timestamp = int(time())
with pytest.raises(AlgodHTTPError):
scripts.report(query_id, value, timestamp)
def test_report_wrong_query_id(scripts: Scripts, deployed_contract: App, client: AlgodClient):
"""
    Reporter should not be able to report to the wrong
    query_id. The transaction should revert if they pass
    a different query_id to report().
"""
scripts.feed_app_id = deployed_contract.feed_ids[0]
feed_id = scripts.feed_app_id
scripts.feed_app_address = get_application_address(feed_id)
scripts.stake()
state = getAppGlobalState(client, feed_id)
assert state[b"staking_status"] == 1
assert state[b"query_id"] == b"1"
query_id = b"2"
value = 3500
timestamp = int(time() - 1000)
with pytest.raises(AlgodHTTPError):
scripts.report(query_id, value, timestamp)
def test_reporting_after_requesting_withdraw(scripts: Scripts, deployed_contract: App, client: AlgodClient):
"""
Reporter can't report once withdraw requested
"""
scripts.feed_app_id = deployed_contract.feed_ids[0]
feed_id = scripts.feed_app_id
scripts.feed_app_address = get_application_address(feed_id)
scripts.stake()
scripts.request_withdraw()
state = getAppGlobalState(client, feed_id)
assert state[b"staking_status"] == 2
query_id = b"1"
value = b"the data I put on-chain 1234"
timestamp = int(time() - 1000)
with pytest.raises(AlgodHTTPError):
scripts.report(query_id, value, timestamp)
def test_reporting_without_staking(scripts: Scripts, deployed_contract: App, client: AlgodClient):
"""
Can't report if not staked
"""
scripts.feed_app_id = deployed_contract.feed_ids[0]
feed_id = scripts.feed_app_id
scripts.feed_app_address = get_application_address(feed_id)
state = getAppGlobalState(client, feed_id)
assert state[b"staking_status"] == 0
assert state[b"query_id"] == b"1"
query_id = state[b"query_id"]
value = 3500
timestamp = int(time() - 1000)
with pytest.raises(AlgodHTTPError):
scripts.report(query_id, value, timestamp)
def test_reporter_clearing_algo_from_contract(scripts: Scripts, deployed_contract: App, client: AlgodClient):
"""
Reporter shouldn't empty contract of all ALGO on claiming a tip
"""
tip_amt = 300000
# get app id and app address
scripts.feed_app_id = deployed_contract.feed_ids[0]
feed_id = scripts.feed_app_id
scripts.feed_app_address = get_application_address(feed_id)
app_address = scripts.feed_app_address
# get app balance before any staking
app_balance_b4_staking = client.account_info(app_address).get("amount")
# assert that app doesn't have any balance initially
assert app_balance_b4_staking == 0
# add tip to the contract
scripts.tip(tip_amt)
# check app balance after a tip has been added
app_balance_after_tipping = client.account_info(app_address).get("amount")
# assert app balance is same as tip amount after a tip is added
assert app_balance_after_tipping == tip_amt
# reporter adds a stake to the app
scripts.stake()
# get state of the after reporter stakes
state = getAppGlobalState(client, feed_id)
stake_amt = state[b"stake_amount"]
# check app balance after reporter adds stake
app_balance_after_staking = client.account_info(app_address).get("amount")
# app balance should equal the tip amount plus stake amount
assert app_balance_after_staking == tip_amt + stake_amt
query_id = b"1"
value = 3500
timestamp = int(time() - 1000)
# reporter submits value and is tipped instantaneously
scripts.report(query_id, value, timestamp)
# get app balance after reporter submits a value
app_balance_after_report = client.account_info(app_address).get("amount")
# app balance should be reduced by only the tip amount after reporter takes the tip
assert app_balance_after_report == pytest.approx((tip_amt + stake_amt - tip_amt - constants.MIN_TXN_FEE * 3), 400)
#
def test_reporter_double_stake(scripts: Scripts, deployed_contract: App, client: AlgodClient):
"""
    An account shouldn't be able to stake twice
"""
scripts.feed_app_id = deployed_contract.feed_ids[0]
feed_id = scripts.feed_app_id
scripts.feed_app_address = get_application_address(feed_id)
scripts.stake()
state = getAppGlobalState(client, feed_id)
assert state[b"staking_status"] == 1 # if 1, account is now staked
with pytest.raises(AlgodHTTPError):
scripts.stake()
def test_reporter_tip_receipt(scripts: Scripts, accounts: Accounts, deployed_contract: App, client: AlgodClient):
"""
Reporter receives correct tip amount after multiple consecutive tips
"""
tip_amt = 300000
# get app id and app address
scripts.feed_app_id = deployed_contract.feed_ids[0]
feed_id = scripts.feed_app_id
scripts.feed_app_address = get_application_address(feed_id)
# add tip to the contract multiple times
scripts.tip(tip_amt)
scripts.tip(tip_amt)
scripts.tip(tip_amt)
# reporter adds a stake to the app
scripts.stake()
# get reporter balance before any reporting
reporter_balance_b4_staking = client.account_info(accounts.reporter.getAddress()).get("amount")
query_id = b"1"
value = 3500
timestamp = int(time() - 1000)
# reporter submits value and is tipped instantaneously
scripts.report(query_id, value, timestamp)
    # get reporter balance after submitting a value
reporter_balance_after_report = client.account_info(accounts.reporter.getAddress()).get("amount")
# reporter balance should increase by 3 times the tip amount minus 2% fee
tip_amt = (tip_amt * 98) / 100
assert reporter_balance_after_report == pytest.approx(
(reporter_balance_b4_staking + (tip_amt * 3) - constants.MIN_TXN_FEE), 400
)
def test_request_withdraw_without_staking(scripts: Scripts, deployed_contract: App, client: AlgodClient):
"""
Shouldn't be able to request a withdraw without staking
"""
scripts.feed_app_id = deployed_contract.feed_ids[0]
feed_id = scripts.feed_app_id
scripts.feed_app_address = get_application_address(feed_id)
state = getAppGlobalState(client, deployed_contract.feed_ids[0])
assert state[b"staking_status"] == 0
assert state[b"reporter_address"] == b""
with pytest.raises(AlgodHTTPError):
scripts.request_withdraw()
def test_stake_amount(scripts: Scripts, deployed_contract: App, client: AlgodClient):
"""
Reporter should only be able to stake
with the amount set in the contract
"""
scripts.feed_app_id = deployed_contract.feed_ids[0]
feed_id = scripts.feed_app_id
scripts.feed_app_address = get_application_address(feed_id)
state = getAppGlobalState(client, feed_id)
stake_amount = state[b"stake_amount"]
beam.
d : float
Diameter of an incoming parallel beam.
plot_fan : bool or int
Plot a fan of rays.
axis : matplotlib.Axis
label : str
Label of the plotted line
Keyword Arguments
-----------------
kws passed to plt.plot(**pltkws)
Returns
-------
        matplotlib.figure
"""
try:
heights = iter(h)
except TypeError:
heights = [h]
if plot_statics:
ax = self.plot_statics(axis=axis)
else:
ax = self.plot_axis
        if not any([a in pltkws for a in ['c', 'col', 'color']]):
            cycle_colors = True
        else:
            cycle_colors = False
        n = 1 * plot_fan
for h in heights:
if parallel:
d = d or 1.0
hs = list(linspace(h - d/2, h + d/2, n if n > 1 else 2))
while True:
if cycle_colors:
pltkws['color'] = next(ax._get_lines.prop_cycler)['color']
try:
hin = hs.pop(0)
dist, r1 = trace_ray((hin, 0), self.sequence)
ax.plot(dist, r1[0, :], label=label or f'r = ({hin:1.2f}, 0)', **pltkws)
self.set_max_y(max(abs(r1[0, :])))
except:
# even
break
try:
dist, r2 = trace_ray((hs.pop(-1), 0), self.sequence)
ax.plot(dist, r2[0, :], **pltkws)
self.set_max_y(max(abs(r2[0, :])))
except:
# odd
break
else:
if self.has_aperture():
# get distance and aperture of first aperture
_, d, aperture = get_first_aperture(self.sequence)
a1, a2 = get_angle_lim(h, d, aperture)
else:
try:
_, d = get_lens_pos(self.sequence)[0]
except:
d = inf
a1 = -h/d
a2 = 0
if n > 0:
angles = list(linspace(a1, a2, n))
else:
angles = [a1, a2]
while True:
if cycle_colors:
pltkws['color'] = next(ax._get_lines.prop_cycler)['color']
try:
ang = angles.pop(0)
ray1 = (h, ang)
dist, r1 = trace_ray(ray1, self.sequence)
ax.plot(dist, r1[0, :], label=label or f'r =({h:1.2f}, {ang:1.2f})', **pltkws)
self.set_max_y(max(abs(r1[0, :])))
except:
# even
break
try:
ray2 = (h, angles.pop(-1))
dist, r2 = trace_ray(ray2, self.sequence)
ax.plot(dist, r2[0, :], **pltkws)
self.set_max_y(max(abs(r2[0, :])))
except:
# odd
break
return ax
def plot_ray(self, ray, axis=None, **pltkws):
"""
Plot a ray.
Arguments
---------
ray : tuple(height, angle)
Returns
-------
lines : plotted lines in axis
"""
if not self._statics_drawn:
ax = self.plot_statics(axis=axis)
else:
ax = self.plot_axis
if not any([a in pltkws for a in ['c', 'col', 'color']]):
pltkws['color'] = next(ax._get_lines.prop_cycler)['color']
if not 'label' in pltkws:
label = f'r =({ray[0]:1.2f}, {ray[1]:1.2f})'
else:
label = pltkws.pop('label')
dist, r = trace_ray(ray, self.sequence)
lines = ax.plot(dist, r[0, :], label=label, **pltkws)
self.set_max_y(max(abs(r[0, :])))
return lines
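    # Usage sketch (hypothetical instance and values; plot_ray and adjust_ylims are the
    # methods defined in this class):
    #
    #   system.plot_ray((1.0, 0.0), color="C0")   # ray starting at height 1.0 with angle 0
    #   system.adjust_ylims()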
@property
def lens_positions(self):
pos = list(zip(*get_lens_pos(self.sequence)))[1]
return pos
def print_ope_sequence(self):
"""
Return a list of names of the OPEs in the sequence.
"""
n = len(self.sequence)
print('|'.join(f'{idx:^7d}' for idx in range(n)))
print('|'.join(f'{name:^7s}' for name in self.names))
@property
def names(self):
return [ope.name for ope in self.sequence]
@property
def aperture_positions(self):
pos = list(zip(*get_aperture_pos(self.sequence)))[1]
return pos
@property
def aperture_sizes(self):
""" Return the aperture (half) sizes."""
aps = [ope.aperture for ope in self.sequence if ope.has_aperture()]
return aps
def adjust_ylims(self):
"""
Adjusts the y limits of the plot according to the apertures
and rays.
"""
max_y = self.max_y
max_a = get_max_aperture(self.sequence)
y = max([max_y, max_a])
self.plot_axis.set_ylim([-0.9 * y, 0.9 * y])
def get_idx_aperture_stop(self):
"""
Find the optical element that defines the aperture stop of the
system.
Returns the index of the OPE in the sequence.
"""
if get_max_aperture(self.sequence) is None:
print('No apertures found.')
out = None
else:
angle = 1e-10
ctr = 0
while True:
ctr += 1
_, rays = trace_ray((0, angle), self.sequence)
if all(isfinite(rays[0])):
break
else:
angle = angle / 2
if ctr > 100:
raise Exception('Unable to trace ray through sequence.')
ratio = 0.0
for idx, ope in enumerate(self.sequence):
ratio_ = abs(rays[0, idx+1]) / ope.aperture
if ratio_ > ratio:
ratio = ratio_
out = idx
return out
def has_aperture(self):
if any(ope.has_aperture() for ope in self.sequence):
return True
else:
return False
def get_aperture_stop_position(self, verbose=False):
"""
        Reduce the sequence up to the aperture stop and return
        the aperture stop's position (distance from the start).
"""
_, d = get_aperture_pos(self.sequence[:self.get_idx_aperture_stop() + 1])[-1]
if verbose:
print(f'aperture stop position = {d:1.2f}')
return d
def get_aperture_stop_size(self, verbose=False):
"""
Return the (half) aperture stop size.
"""
a = self.sequence[self.get_idx_aperture_stop()].aperture
if verbose:
print(f'aperture half-diameter = {a:1.2f}')
return a
def indicate_aperture_stop(self, axis=None, color='orangered', verbose=False):
"""
Draw the aperture stop in the ray tracing diagram.
"""
if axis:
ax = axis
else:
ax = self.plot_axis
plt_kws = dict(linewidth=2.0, linestyle='-', color=color)
x = self.get_aperture_stop_position(verbose=verbose)
a = self.get_aperture_stop_size(verbose=verbose)
y_max = self._get_y_max(verbose=verbose)
ax.plot([x, x], [a, y_max], **plt_kws)
ax.plot([x, x], [-y_max, -a], **plt_kws)
    def calc_entrance_pupil_position(self, verbose=False):
        """
        Return the entrance pupil position by imaging the aperture stop
        back through the sequence of OPEs preceding it.
        """
sequence_prec = self.sequence[:self.get_idx_aperture_stop()]
d_ap = self.get_aperture_stop_position(verbose=verbose)
x = d_ap
mag = 1.0
for idx, lens_pos in get_lens_pos(sequence_prec)[::-1]:
# object distance
d_obj = x - lens_pos
# image distance
d_img = get_image_pos(d_obj, sequence_prec[idx].focal_length)
if verbose > 1:
print(f'imaging lens position = {lens_pos:1.2f}')
print(f'x_before = {x:1.2f}')
print(f'd_obj = {d_obj:1.2f}')
print(f'd_img = {d_img:1.2f}')
x = lens_pos - d_img
mag = mag * d_img / d_obj
if verbose > 1:
print(f'x_after = {x:1.2f}')
return x
def calc_entrance_pupil_size(self, verbose=False):
"""
Return the size of the entrance pupil.
"""
sequence_prec = self.sequence[:self.get_idx_aperture_stop()]
d_ap = self.get_aperture_stop_position(verbose=verbose)
x = d_ap
mag = 1.0
for idx, lens_pos in get_lens_pos(sequence_prec)[::-1]:
# object distance
d_obj = x - lens_pos
# image distance
d_img = get_image_pos(d_obj, sequence_prec[idx].focal_length)
if verbose > 1:
print(f'imaging lens position = {lens_pos:1.2f}')
print(f'magnification = {mag:1.2f}')
print(f'd_obj = {d_obj:1.2f}')
print(f'd_img = {d_img:1.2f}')
x = lens_pos - d_img
mag = mag * d_img / d_obj
if verbose > 1:
print(f'magnification_after = {mag:1.2f}')
en_pupil = self.get_aperture_stop_size() * abs(mag)
return en_pupil
def draw_entrance_pupil(self, axis=None, color='orangered', verbose=False):
"""
Draw the apparent entrance pupil.
"""
if axis:
ax = axis
else:
ax = self.plot_axis
x = self.calc_entrance_pupil_position(verbose=verbose)
a = self.calc_entrance_pupil_size(verbose=verbose)
y_max = self.max_y
plt_kws = dict(linewidth=1.2, linestyle='-', color=color)
ax.plot([x, x], [a, y_max], **plt_kws)
ax.plot([x, x], [-y_max, -a], **plt_kws)
def _get_y_max(self, verbose=False):
"""
Return maximum y_value in plot.
"""
if get_max_aperture(self.sequence):
y_max = 2 * get_max_aperture(self.sequence)
else:
y_max = 2 * self.max_y
if verbose > 1:
print(f'y_max = {y_max:1.2f}')
return y_max
def get_NA(self):
"""
Return the NA of the optical system.
This is the entrance pupil size divided by the entrance pupil
distance.
"""
d = self.calc_entrance_pupil_position()
a = self.calc_entrance_pupil_size()
return a/d
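    # Worked example (hypothetical numbers): with an entrance pupil half-diameter of 5
    # at a distance of 100 from the object, get_NA() returns 5 / 100 = 0.05, the paraxial
    # (small-angle) approximation of the numerical aperture.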
def trace_backward(self, idx, height, no_apertures=False, extra_distance=100.0, plot=True):
"""
        Trace a ray backward through the optical system
starting at the given index.
Returns
-------
(positions, rays1, rays2)
"""
# get only the sequence until the aperture and reverse it
if no_apertures:
seq_back = remove_apertures(self.sequence[:idx][::-1])
else:
seq_back = self.sequence[:idx][::-1]
seq_back.append(OPE(d=extra_distance))
# trace a ray from the given height or the edge of the
# aperture with angle zero toward the next lense
positions, rays1 = trace_ray((height, 0), seq_back)
pos_back = get_pos_at_idx(idx, self.sequence) - positions
if plot:
pltkws = dict(linestyle='--', color='Grey')
self.plot_axis.plot(pos_back, rays1[0], **pltkws)
## trace a ray from the given height or the edge of the
## aperture toward the center of the next lens
# get index and position of the first element in the backward
# sequence
try:
_, d = get_lens_pos(seq_back)[0]
except IndexError:
            print('Nothing to trace.')
return None
positions, rays2 = trace_ray((height, -height / d), seq_back)
if plot:
self.plot_axis.plot(pos_back, rays2[0], **pltkws)
return (pos_back, rays1, rays2)
def trace_forward(self, idx, height, no_apertures=False, extra_distance=100.0, plot=True):
"""
        Trace a ray through the optical system starting at the given
index.
Returns
-------
(positions, rays1, rays2)
"""
# get the sequence until the aperture and reverse it
if no_apertures:
seq = remove_apertures(self.sequence[idx:])
else:
seq = self.sequence[idx:]
pos_offset = get_pos_at_idx(idx - 1, self.sequence)
seq.append(OPE(d=extra_distance))
# trace a ray from the given height or the edge of the
# aperture with angle zero toward the next lens
positions, rays1 = trace_ray((height, 0), seq)
positions += pos_offset
if plot:
pltkws = dict(linestyle='--', color='Grey')
self.plot_axis.plot(positions, rays1[0], **pltkws)
## trace a ray from the given height or the edge of the
## aperture toward the center of the next lens
# get index and position of the first element in the backward
# sequence
try:
_, d = get_lens_pos(seq)[0]
except IndexError:
            print('Nothing to trace.')
return None
positions, rays2 = trace_ray((height, -height / d), seq)
positions += pos_offset
if plot:
self.plot_axis.plot(positions, rays2[0], **pltkws)
return (positions, rays1, rays2)
def _check_aperture_stop_rays_backward(self, extra_distance=100.0):
"""
Re-trace the aperture stop.
"""
idx = self.get_idx_aperture_stop()
instance_uuid, None,
mountpoint, 'ro')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual("in-use", vol['status'])
self.assertEqual('attached', attachment['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertEqual(instance_uuid, attachment['instance_uuid'])
self.assertIsNone(attachment['attached_host'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
expected = dict(readonly='True', attached_mode='ro')
ret = {}
for item in admin_metadata:
ret.update({item['key']: item['value']})
self.assertDictMatch(expected, ret)
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
conn_info = self.volume.initialize_connection(self.context,
volume_id, connector)
self.assertEqual('ro', conn_info['data']['access_mode'])
self.assertRaises(exception.VolumeAttached,
self.volume.delete_volume,
self.context,
volume_id,
volume=volume)
self.volume.detach_volume(self.context, volume_id, attachment['id'])
vol = db.volume_get(self.context, volume_id)
self.assertEqual('available', vol['status'])
self.volume.delete_volume(self.context, volume_id, volume=volume)
self.assertRaises(exception.VolumeNotFound,
db.volume_get,
self.context,
volume_id)
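    # Style note (sketch using the same variables as the tests below): the repeated
    # admin-metadata flattening could be written as a dict comprehension, e.g.
    #
    #   ret = {item['key']: item['value'] for item in admin_metadata}
    #   self.assertDictMatch(expected, ret)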
def test_detach_invalid_attachment_id(self):
"""Make sure if the attachment id isn't found we raise."""
attachment_id = "notfoundid"
volume = tests_utils.create_volume(self.context,
admin_metadata={'readonly': 'True'},
multiattach=False,
**self.volume_params)
self.volume.detach_volume(self.context, volume['id'],
attachment_id)
volume = db.volume_get(self.context, volume['id'])
self.assertEqual('available', volume['status'])
instance_uuid = '12345678-1234-5678-1234-567812345678'
attached_host = 'fake_host'
mountpoint = '/dev/fake'
tests_utils.attach_volume(self.context, volume['id'],
instance_uuid, attached_host,
mountpoint)
self.volume.detach_volume(self.context, volume['id'],
attachment_id)
volume = db.volume_get(self.context, volume['id'])
self.assertEqual('in-use', volume['status'])
def test_detach_no_attachments(self):
self.volume_params['status'] = 'detaching'
volume = tests_utils.create_volume(self.context,
admin_metadata={'readonly': 'True'},
multiattach=False,
**self.volume_params)
self.volume.detach_volume(self.context, volume['id'])
volume = db.volume_get(self.context, volume['id'])
self.assertEqual('available', volume['status'])
def test_run_attach_detach_volume_for_instance_no_attachment_id(self):
"""Make sure volume can be attached and detached from instance."""
mountpoint = "/dev/sdf"
# attach volume to the instance then to detach
instance_uuid = '12345678-1234-5678-1234-567812345678'
instance_uuid_2 = '12345678-4321-8765-4321-567812345678'
volume = tests_utils.create_volume(self.context,
admin_metadata={'readonly': 'True'},
multiattach=True,
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id, volume=volume)
attachment = self.volume.attach_volume(self.context, volume_id,
instance_uuid, None,
mountpoint, 'ro')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertEqual('attached', attachment['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertEqual(instance_uuid, attachment['instance_uuid'])
self.assertIsNone(attachment['attached_host'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
expected = dict(readonly='True', attached_mode='ro')
ret = {}
for item in admin_metadata:
ret.update({item['key']: item['value']})
self.assertDictMatch(expected, ret)
attachment2 = self.volume.attach_volume(self.context, volume_id,
instance_uuid_2, None,
mountpoint, 'ro')
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
conn_info = self.volume.initialize_connection(self.context,
volume_id, connector)
self.assertEqual('ro', conn_info['data']['access_mode'])
self.assertRaises(exception.VolumeAttached,
self.volume.delete_volume,
self.context,
volume_id,
volume=volume)
self.assertRaises(exception.InvalidVolume,
self.volume.detach_volume,
self.context, volume_id)
self.volume.detach_volume(self.context, volume_id, attachment['id'])
vol = db.volume_get(self.context, volume_id)
self.assertEqual('in-use', vol['status'])
self.volume.detach_volume(self.context, volume_id, attachment2['id'])
vol = db.volume_get(self.context, volume_id)
self.assertEqual('available', vol['status'])
attachment = self.volume.attach_volume(self.context, volume_id,
instance_uuid, None,
mountpoint, 'ro')
vol = db.volume_get(self.context, volume_id)
self.assertEqual('in-use', vol['status'])
self.volume.detach_volume(self.context, volume_id)
vol = db.volume_get(self.context, volume_id)
self.assertEqual('available', vol['status'])
self.volume.delete_volume(self.context, volume_id, volume=volume)
self.assertRaises(exception.VolumeNotFound,
db.volume_get,
self.context,
volume_id)
def test_run_attach_detach_multiattach_volume_for_instances(self):
"""Make sure volume can be attached to multiple instances."""
mountpoint = "/dev/sdf"
# attach volume to the instance then to detach
instance_uuid = '12345678-1234-5678-1234-567812345678'
volume = tests_utils.create_volume(self.context,
admin_metadata={'readonly': 'True'},
multiattach=True,
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id, volume=volume)
attachment = self.volume.attach_volume(self.context, volume_id,
instance_uuid, None,
mountpoint, 'ro')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertTrue(vol['multiattach'])
self.assertEqual('attached', attachment['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertEqual(instance_uuid, attachment['instance_uuid'])
self.assertIsNone(attachment['attached_host'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
expected = dict(readonly='True', attached_mode='ro')
ret = {}
for item in admin_metadata:
ret.update({item['key']: item['value']})
self.assertDictMatch(expected, ret)
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
conn_info = self.volume.initialize_connection(self.context,
volume_id, connector)
self.assertEqual('ro', conn_info['data']['access_mode'])
instance2_uuid = '12345678-1234-5678-1234-567812345000'
mountpoint2 = "/dev/sdx"
attachment2 = self.volume.attach_volume(self.context, volume_id,
instance2_uuid, None,
mountpoint2, 'ro')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertTrue(vol['multiattach'])
self.assertEqual('attached', attachment2['attach_status'])
self.assertEqual(mountpoint2, attachment2['mountpoint'])
self.assertEqual(instance2_uuid, attachment2['instance_uuid'])
self.assertIsNone(attachment2['attached_host'])
self.assertNotEqual(attachment, attachment2)
self.assertRaises(exception.VolumeAttached,
self.volume.delete_volume,
self.context,
volume_id,
volume=volume)
self.volume.detach_volume(self.context, volume_id, attachment['id'])
vol = db.volume_get(self.context, volume_id)
self.assertEqual('in-use', vol['status'])
self.assertRaises(exception.VolumeAttached,
self.volume.delete_volume,
self.context,
volume_id,
volume=volume)
self.volume.detach_volume(self.context, volume_id, attachment2['id'])
vol = db.volume_get(self.context, volume_id)
self.assertEqual('available', vol['status'])
self.volume.delete_volume(self.context, volume_id, volume=volume)
self.assertRaises(exception.VolumeNotFound,
db.volume_get,
self.context,
volume_id)
    def test_run_attach_twice_multiattach_volume_for_instances(self):
        """Make sure attaching a multiattach volume twice from the same
        instance does not create a second attachment.
        """
mountpoint = "/dev/sdf"
# attach volume to the instance then to detach
instance_uuid = '12345678-1234-5678-1234-567812345699'
volume = tests_utils.create_volume(self.context,
admin_metadata={'readonly': 'True'},
multiattach=True,
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id, volume=volume)
attachment = self.volume.attach_volume(self.context, volume_id,
instance_uuid, None,
mountpoint, 'ro')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertTrue(vol['multiattach'])
self.assertEqual('attached', attachment['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertEqual(instance_uuid, attachment['instance_uuid'])
self.assertIsNone(attachment['attached_host'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
expected = dict(readonly='True', attached_mode='ro')
ret = {}
for item in admin_metadata:
ret.update({item['key']: item['value']})
self.assertDictMatch(expected, ret)
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
conn_info = self.volume.initialize_connection(self.context,
volume_id, connector)
self.assertEqual('ro', conn_info['data']['access_mode'])
mountpoint2 = "/dev/sdx"
attachment2 = self.volume.attach_volume(self.context, volume_id,
instance_uuid, None,
mountpoint2, 'ro')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertTrue(vol['multiattach'])
self.assertIsNone(attachment2)
self.assertRaises(exception.VolumeAttached,
self.volume.delete_volume,
self.context,
volume_id,
volume=volume)
def test_attach_detach_not_multiattach_volume_for_instances(self):
"""Make sure volume can't be attached to more than one instance."""
mountpoint = "/dev/sdf"
# attach volume to the instance then to detach
instance_uuid = '12345678-1234-5678-1234-567812345678'
volume = tests_utils.create_volume(self.context,
admin_metadata={'readonly': 'True'},
multiattach=False,
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id, volume=volume)
attachment = self.volume.attach_volume(self.context, volume_id,
instance_uuid, None,
mountpoint, 'ro')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertFalse(vol['multiattach'])
self.assertEqual('attached', attachment['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertEqual(instance_uuid, attachment['instance_uuid'])
self.assertIsNone(attachment['attached_host'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
expected = dict(readonly='True', attached_mode='ro')
ret = {}
for item in admin_metadata:
ret.update({item['key']: item['value']})
self.assertDictMatch(expected, ret)
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
conn_info = self.volume.initialize_connection(self.context,
volume_id, connector)
self.assertEqual('ro', conn_info['data']['access_mode'])
instance2_uuid = '12345678-1234-5678-1234-567812345000'
mountpoint2 = "/dev/sdx"
self.assertRaises(exception.InvalidVolume,
self.volume.attach_volume,
self.context,
volume_id,
instance2_uuid,
None,
mountpoint2, 'ro')
self.assertRaises(exception.VolumeAttached,
self.volume.delete_volume,
self.context,
volume_id,
volume=volume)
self.volume.detach_volume(self.context, volume_id, attachment['id'])
vol = db.volume_get(self.context, volume_id)
self.assertEqual('available', vol['status'])
self.volume.delete_volume(self.context, volume_id, volume=volume)
self.assertRaises(exception.VolumeNotFound,
db.volume_get,
self.context,
volume_id)
def test_run_attach_detach_volume_for_host(self):
"""Make sure volume can be attached and detached from host."""
mountpoint = "/dev/sdf"
volume = tests_utils.create_volume(
self.context,
admin_metadata={'readonly': 'False'},
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id, volume=volume)
attachment = self.volume.attach_volume(self.context, volume_id, None,
'fake_host', mountpoint, 'rw')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertEqual('attached', attachment['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertIsNone(attachment['instance_uuid'])
# sanitized, conforms to RFC-952 and RFC-1123 specs.
self.assertEqual('fake-host', attachment['attached_host'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
expected = dict(readonly='False', attached_mode='rw')
ret = {}
for item in admin_metadata:
ret.update({item['key']: item['value']})
self.assertDictMatch(expected, ret)
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
conn_info = self.volume.initialize_connection(self.context,
volume_id, connector)
self.assertEqual('rw', conn_info['data']['access_mode'])
self.assertRaises(exception.VolumeAttached,
self.volume.delete_volume,
self.context,
volume_id,
volume=volume)
self.volume.detach_volume(self.context, volume_id, attachment['id'])
vol = db.volume_get(self.context, volume_id)
self.assertEqual("available", vol['status'])
self.volume.delete_volume(self.context, volume_id, volume=volume)
self.assertRaises(exception.VolumeNotFound,
db.volume_get,
self.context,
volume_id)
def test_run_attach_detach_multiattach_volume_for_hosts(self):
"""Make sure volume can be attached and detached from hosts."""
mountpoint = "/dev/sdf"
volume = tests_utils.create_volume(
self.context,
admin_metadata={'readonly': 'False'},
multiattach=True,
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id, volume=volume)
attachment = self.volume.attach_volume(self.context, volume_id, None,
'fake_host', mountpoint, 'rw')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertTrue(vol['multiattach'])
self.assertEqual('attached', attachment['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertIsNone(attachment['instance_uuid'])
# sanitized, conforms to RFC-952 and RFC-1123 specs.
self.assertEqual('fake-host', attachment['attached_host'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
expected = dict(readonly='False', attached_mode='rw')
ret = {}
for item in admin_metadata:
ret.update({item['key']: item['value']})
self.assertDictMatch(expected, ret)
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
conn_info = self.volume.initialize_connection(self.context,
volume_id, connector)
self.assertEqual('rw', conn_info['data']['access_mode'])
mountpoint2 = "/dev/sdx"
attachment2 = self.volume.attach_volume(self.context, volume_id, None,
'fake_host2', mountpoint2,
'rw')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertEqual('attached', attachment2['attach_status'])
self.assertEqual(mountpoint2, attachment2['mountpoint'])
self.assertIsNone(attachment2['instance_uuid'])
# sanitized, conforms to RFC-952 and RFC-1123 specs.
self.assertEqual('fake-host2', attachment2['attached_host'])
self.assertRaises(exception.VolumeAttached,
self.volume.delete_volume,
self.context,
volume_id,
volume=volume)
self.volume.detach_volume(self.context, volume_id, attachment['id'])
vol = db.volume_get(self.context, volume_id)
self.assertEqual("in-use", vol['status'])
self.volume.detach_volume(self.context, volume_id, attachment2['id'])
vol = db.volume_get(self.context, volume_id)
self.assertEqual("available", vol['status'])
self.volume.delete_volume(self.context, volume_id, volume=volume)
self.assertRaises(exception.VolumeNotFound,
db.volume_get,
self.context,
volume_id)
    def test_run_attach_twice_multiattach_volume_for_hosts(self):
        """Make sure attaching a multiattach volume twice from the same
        host does not create a second attachment.
        """
mountpoint = "/dev/sdf"
volume = tests_utils.create_volume(
self.context,
admin_metadata={'readonly': 'False'},
multiattach=True,
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id, volume=volume)
attachment = self.volume.attach_volume(self.context, volume_id, None,
'fake_host', mountpoint, 'rw')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertTrue(vol['multiattach'])
self.assertEqual('attached', attachment['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertIsNone(attachment['instance_uuid'])
# sanitized, conforms to RFC-952 and RFC-1123 specs.
self.assertEqual('fake-host', attachment['attached_host'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
expected = dict(readonly='False', attached_mode='rw')
ret = {}
for item in admin_metadata:
ret.update({item['key']: item['value']})
self.assertDictMatch(expected, ret)
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
conn_info = self.volume.initialize_connection(self.context,
volume_id, connector)
self.assertEqual('rw', conn_info['data']['access_mode'])
mountpoint2 = "/dev/sdx"
attachment2 = self.volume.attach_volume(self.context, volume_id, None,
'fake_host', mountpoint2,
'rw')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertIsNone(attachment2)
self.assertRaises(exception.VolumeAttached,
self.volume.delete_volume,
self.context,
volume_id, volume=volume)
def test_run_attach_detach_not_multiattach_volume_for_hosts(self):
"""Make sure volume can't be attached to more than one host."""
mountpoint = "/dev/sdf"
volume = tests_utils.create_volume(
self.context,
admin_metadata={'readonly': 'False'},
multiattach=False,
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id, volume=volume)
attachment = self.volume.attach_volume(self.context, volume_id, None,
'fake_host', mountpoint, 'rw')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertFalse(vol['multiattach'])
self.assertEqual('attached', attachment['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertIsNone(attachment['instance_uuid'])
# sanitized, conforms to RFC-952 and RFC-1123 specs.
self.assertEqual('fake-host', attachment['attached_host'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
expected = dict(readonly='False', attached_mode='rw')
ret = {}
for item in admin_metadata:
ret.update({item['key']: item['value']})
self.assertDictMatch(expected, ret)
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
conn_info = self.volume.initialize_connection(self.context,
volume_id, connector)
self.assertEqual('rw', conn_info['data']['access_mode'])
mountpoint2 = "/dev/sdx"
self.assertRaises(exception.InvalidVolume,
self.volume.attach_volume,
self.context,
volume_id,
None,
'fake_host2',
mountpoint2,
'rw')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertEqual('attached', attachment['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertIsNone(attachment['instance_uuid'])
# sanitized, conforms to RFC-952 and RFC-1123 specs.
self.assertEqual('fake-host', attachment['attached_host'])
self.assertRaises(exception.VolumeAttached,
self.volume.delete_volume,
self.context,
volume_id,
volume=volume)
self.volume.detach_volume(self.context, volume_id, attachment['id'])
vol = db.volume_get(self.context, volume_id)
self.assertEqual('available', vol['status'])
self.volume.delete_volume(self.context, volume_id, volume=volume)
self.assertRaises(exception.VolumeNotFound,
db.volume_get,
self.context,
volume_id)
def test_run_attach_detach_volume_with_attach_mode(self):
instance_uuid = '12345678-1234-5678-1234-567812345678'
mountpoint = "/dev/sdf"
volume = tests_utils.create_volume(self.context,
admin_metadata={'readonly': 'True'},
**self.volume_params)
volume_id = volume['id']
db.volume_update(self.context, volume_id, {'status': 'available', })
self.volume.attach_volume(self.context, volume_id, instance_uuid,
None, mountpoint, 'ro')
vol = db.volume_get(context.get_admin_context(), volume_id)
attachment = vol['volume_attachment'][0]
self.assertEqual('in-use', vol['status'])
self.assertEqual('attached', vol['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertEqual(instance_uuid, attachment['instance_uuid'])
self.assertIsNone(attachment['attached_host'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
expected = dict(readonly='True', attached_mode='ro')
ret = {}
for item in admin_metadata:
ins_code, atom_name, alt_conf_internal=""):
res_info = residue_info(imol, chain_id, resno, "")
if (not res_info):
return False
else:
ret = get_atom_from_residue(atom_name, res_info, alt_conf_internal)
return ret
#
def residue_info_dialog_displayed_qm():
if (residue_info_dialog_is_displayed == 1):
return True
else:
return False
# multi_read pdb reads all the files matching
# @code{@emph{glob_pattern}} in
# directory @code{@emph{dir}}. Typical usage of this might be:
# @code{multi_read_pdb("a*.pdb",".")}
# BL says: in windows dir needs the 'C:/' pattern, '/c/' won't work
#
def multi_read_pdb(glob_pattern, dir):
import glob, os
patt = os.path.normpath(os.path.join(dir, glob_pattern))
all_files = glob.glob(patt)
for file in all_files:
print "BL INFO:: reading ", file
read_pdb(file)
# read_pdb_all reads all the "*.pdb" files in the current directory.
#
def read_pdb_all():
import glob, os
recentre_status = recentre_on_read_pdb()
set_recentre_on_read_pdb(0)
patt = os.path.normpath(os.path.abspath(".")+'/*.pdb')
all_files = glob.glob(patt)
for file in all_files:
print "BL INFO:: reading ", file
read_pdb(file)
set_recentre_on_read_pdb(recentre_status)
# return False if dir_name is a file or we can't do the mkdir
def coot_mkdir(dir_name):
import os
if (os.path.isfile(dir_name)):
return False
else:
if (os.path.isdir(dir_name)):
return True
else:
os.mkdir(dir_name)
return True
# return the view matrix (useful for molscript, perhaps).
# BL says: like all matrices is a python list [...]
#
def view_matrix():
return [get_view_matrix_element(row_number,column_number) for row_number in range(3) for column_number in range(3)]
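# e.g. view_matrix() -> [m00, m01, m02, m10, m11, m12, m20, m21, m22]
# where m_rc = get_view_matrix_element(r, c); the flattening is row-major
# (outer loop over rows, inner loop over columns).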
# return the transposed view matrix (useful for molscript, perhaps).
# BL says: like all matrices is a python list [...]
#
def view_matrix_transp():
return [get_view_matrix_element(column_number,row_number) for row_number in range(3) for column_number in range(3)]
# return the view quaternion
#
def view_quaternion():
ret = map(get_view_quaternion_internal,[0,1,2,3])
return ret
# Return the view number
#
def add_view(position, quaternion, zoom, view_name):
args = position + quaternion
args.append(zoom)
args.append(view_name)
ret = add_view_raw(*args)
return ret
# Convert a view matrix to a view quaternion to set Coot view internals.
#
def matrix2quaternion(m00, m10, m20, m01, m11, m21, m02, m12, m22):
import math
# From an idea by "Christian" at euclidianspace.com. The
# rotation matrix is special orthogonal, so (1 + trace) > 0. So
# we can simply do a sqrt on the 4 trace variants. Most of the
# code here is simply recovering the sign.
# return x with the sign of y
def convert_sign(x, y):
if (x > 0 and y > 0):
return x
elif (x < 0 < y):
return -x
elif (x > 0 > y):
return -x
else:
return x
pw = 1 + m00 + m11 + m22
px = 1 + m00 - m11 - m22
py = 1 - m00 + m11 - m22
pz = 1 - m00 - m11 + m22
pr = []
for v in [pw, px, py, pz]:
if v < 0:
v1 = 0
else:
v1 = v
pr.append(math.sqrt(v1) / 2)
ls = map(convert_sign, pr[1:], [m21 - m12, m02 - m20, m10 - m01])
ls.append(pr[0])
return ls
# e.g
# matrix2quaternion(0.0347695872187614, 0.773433089256287, 0.632923781871796,
# 0.774806916713715, 0.379149734973907, -0.505885183811188,
# -0.631241261959076, 0.507983148097992, -0.586078405380249)
# ->
# [-0.55715757608, -0.694704711, -7.549694273e-4, 0.45492890477] or similar
# Set the view matrix using matrix->quaternion.
#
# Useful for using a view matrix from another program, perhaps.
#
def set_view_matrix(m00, m10, m20, m01, m11, m21, m02, m12, m22):
set_view_quaternion(matrix2quaternion(m00, m10, m20,
m01, m11, m21,
m02, m12, m22))
# Miguel's molecular orientation axes
#
def miguels_axes():
set_axis_orientation_matrix(*view_matrix())
set_axis_orientation_matrix_usage(1)
# Return the molecule centre as a list of 3 numbers.
#
# Note: mol_cen could contain values less than -9999.
#
def molecule_centre(imol):
return [coot.molecule_centre_internal(imol,0),
coot.molecule_centre_internal(imol,1),
coot.molecule_centre_internal(imol,2)]
# Move the centre of molecule number imol to the current screen centre
#
def move_molecule_to_screen_centre(imol):
if valid_model_molecule_qm(imol):
rotate_centre = rotation_centre()
coot.translate_molecule_by(imol,(rotate_centre[0]-molecule_centre(imol)[0]),
(rotate_centre[1]-molecule_centre(imol)[1]),
(rotate_centre[2]-molecule_centre(imol)[2]))
# This is a short name for the above.
# deftexi move_molecule_here
move_molecule_here = move_molecule_to_screen_centre
# Return a nine-membered list of numbers.
#
def identity_matrix():
return [1,0,0,0,1,0,0,0,1]
# e.g. translation('x',2)
# -> [2, 0, 0]
# Return: False on error
#
def translation(axis,length):
import operator
# BL says: we dont check if axis is string, yet at least not directly
if (operator.isNumberType(length)):
if (axis=="x"):
return [length,0,0]
elif (axis=="y"):
return [0,length,0]
elif (axis=="z"):
return [0,0,length]
else:
print "symbol axis: ", axis, " incomprehensible"
return False
else:
print "incomprehensible length argument: ",length
return False
# Rotate degrees about screen axis, where axis is either 'x', 'y' or 'z'.
#
def rotate_about_screen_axis(axis,degrees):
import math, operator
def deg_to_rad(degs):
return (degs * 3.1415926 /180.0)
def simple_rotation_x(alpha):
cos_alpha = math.cos(alpha)
sin_alpha = math.sin(alpha)
return [1,0,0,0,cos_alpha,-sin_alpha,0,sin_alpha,cos_alpha]
def simple_rotation_y(alpha):
cos_alpha = math.cos(alpha)
sin_alpha = math.sin(alpha)
return [cos_alpha,0,sin_alpha,0,1,0,-sin_alpha,0,cos_alpha]
def simple_rotation_z(alpha):
cos_alpha = math.cos(alpha)
sin_alpha = math.sin(alpha)
return [cos_alpha,-sin_alpha,0,sin_alpha,cos_alpha,0,0,0,1]
# BL says: I dont know what the next 2 defines are for...
# looks not used and/or useless to me
# seems that only 2nd matrix is used and not view_matrix!
def vm():view_matrix()
def mult(mat1,mat2):mat2
# end of uselessness...
if (operator.isNumberType(degrees)):
if (axis=="x"):
mult(view_matrix(),simple_rotation_x(deg_to_rad(degrees)))
elif (axis=="y"):
mult(view_matrix(),simple_rotation_y(deg_to_rad(degrees)))
elif (axis=="z"):
mult(view_matrix(),simple_rotation_z(deg_to_rad(degrees)))
else:
print "symbol axis: ", axis, " incomprehensible"
else:
print "incomprehensible length argument: ", degrees
# Support for the old toggle functions (consider using the direct
# set_displayed functions instead of these wrappers).
#
def toggle_display_map(imol, idummy):
if (map_is_displayed(imol) == 0):
set_map_displayed(imol, 1)
else:
set_map_displayed(imol, 0)
# toggle the display of imol
#
def toggle_display_mol(imol):
if (mol_is_displayed(imol) == 0):
set_mol_displayed(imol, 1)
else:
set_mol_displayed(imol, 0)
# toggle the active state (clickability) of imol
#
def toggle_active_mol(imol):
if (mol_is_active(imol) == 0):
set_mol_active(imol, 1)
else:
set_mol_active(imol, 0)
# return a python (list) representation of molecule imol, or False if we can't
# do it (imol is a map, say)
#
def python_representation(imol):
if (not valid_model_molecule_qm(imol)):
return False
else:
ls = []
def r_info(imol, chain_id, n):
res_name = resname_from_serial_number(imol, chain_id, n)
res_no = seqnum_from_serial_number(imol, chain_id, n)
ins_code = insertion_code_from_serial_number(imol, chain_id, n)
return [res_no, ins_code, res_name, residue_info(imol, chain_id, res_no, ins_code)]
ls = [map(lambda chain_id: [chain_id, map(lambda serial_number: r_info(imol, chain_id, serial_number), range(chain_n_residues(chain_id, imol)))], chain_ids(imol))]
return ls
# reorder chains
#
def reorder_chains(imol):
# reorder elements of chain_list: e.g.
#
# chain_list: [["C", [xx]], ["A", [xx]], ["B", [xx]]]
#
def reorder_chains_in_model(chain_list):
map(lambda model: model.sort(), chain_list)
p_rep = python_representation(imol)
if (type(p_rep) is ListType):
reorder_chains_in_model(p_rep)
clear_and_update_molecule(imol, p_rep)
# transform a coordinates molecule by a coot-rtop (which is a Python
# expression of a clipper::RTop), i.e. a list of a 9-element list and
# a 3 element list, e.g. [[1, 0, 0, 0, 1, 0, 0, 0, 1], [4.5, 0.4, 1.2]]
#
def transform_coords_molecule(imol, rtop):
ls = []
for i in rtop:
for j in i:
ls.append(j)
transform_molecule_by(imol, *ls)
# @code{transform_map(imol, mat, trans, about_pt, radius, space_group, cell)}
#
# where space_group is a HM-symbol and cell is a list of 6
# parameters, where the cell angles are in degrees.
#
# or @code{transform_map(imol, trans, about_pt, radius)} for a simple translation
#
# or @code{transform_map(imol, trans, radius)} when using the default
# rotation-centre as the about-pt
#
# returns new map mol number or None if no map could be transformed/created
#
def transform_map(*args):
ret = None
def tf(imol, mat, trans, about_pt, radius, space_group, cell):
return transform_map_raw(imol,
mat[0], mat[1], mat[2],
mat[3], mat[4], mat[5],
mat[6], mat[7], mat[8],
trans[0], trans[1], trans[2],
about_pt[0], about_pt[1], about_pt[2],
radius,
space_group,
cell[0], cell[1], cell[2],
cell[3], cell[4], cell[5])
# main line
if (len(args)==7):
ret = tf(args[0], args[1], args[2], args[3], args[4], args[5], args[6])
# imol_map mat trans about_pt radius:
elif (len(args)==5):
imol = args[0]
ret = tf(imol, args[1], args[2], args[3], args[4],
space_group(imol), cell(imol))
# no matrix specified:
elif (len(args)==4):
imol = args[0]
ret = tf(imol, identity_matrix(), args[1], args[2], args[3],
space_group(imol), cell(imol))
# no matrix or about point specified:
elif (len(args)==3):
imol = args[0]
ret = tf(args[0], identity_matrix(), args[1], rotation_centre(),
args[2], space_group(imol), cell(imol))
else:
print "arguments to transform-map incomprehensible: args: ",args
return ret
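# e.g. (illustrative calls, assuming imol 1 is a valid map molecule):
# transform_map(1, [0, 0, 10], 15.0)                 # simple translation about the rotation centre
# transform_map(1, [0, 0, 10], [20, 30, 40], 15.0)   # same translation about an explicit about-pt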
# return the NCS master of the first molecule that has ncs.
#
# return "" on fail to find an ncs chain
#
def get_first_ncs_master_chain():
r = ""
for mols in model_molecule_list():
ncs_masters = ncs_master_chains(mols)
if ncs_masters:
return ncs_masters[0]
return r
# Define a map transformation function that obeys Lapthorn's Law of
# NCS Handling Programs
#
# typical usage: transform_map_using_lsq_matrix(1, "A", 10, 30, 0, "A", 10, 30, 2, rotation_centre(), 6)
#
# Remember that the about-pt is now the "to" point, i.e. the maps are brought from
# somewhere else and generated about the about-pt.
#
def transform_map_using_lsq_matrix(imol_ref, ref_chain, ref_resno_start, ref_resno_end,
imol_mov, mov_chain, mov_resno_start, mov_resno_end,
imol_map, about_pt, radius):
clear_lsq_matches()
add_lsq_match(ref_resno_start, ref_resno_end, ref_chain,
mov_resno_start, mov_resno_end, mov_chain, 1)
space_group = symmetry_operators_to_xHM(symmetry_operators(imol_ref))
cell_params = cell(imol_ref)
if not (space_group and cell_params):
message = "Bad cell or symmetry for molecule " + str(cell_params) + " " + str(space_group) + " " + str(imol_ref)
print "Bad cell or symmetry for molecule", message
ret = -1 # invalid mol! or return message!?
else:
rtop = apply_lsq_matches(imol_ref, imol_mov)
ret = transform_map(imol_map, rtop[0], rtop[1], about_pt, radius,
| |
be calculated.
:type dliq: float or None
:arg bool chkvals: If True (default False) and all values are given,
this function will calculate the disequilibrium and raise a
warning if the results are not within a given tolerance.
:arg float chktol: Tolerance to use when checking values (default
_CHKTOL).
:arg dliq0: Initial guess for the liquid water density in kg/m3. If
None (default) then `_dliq_default` is used. A string specifier
of the method (e.g. 'crit') can also be passed. See _LIQMETHODS
in flu3a for valid specifiers.
:type dliq0: float or str or None
:arg bool chkbnd: If True then warnings are raised when the given
values are valid but outside the recommended bounds (default
False).
:arg bool useext: If False (default) then the salt contribution is
calculated from _GSCOEFFS; if True, from _GSCOEFFS_EXT.
:arg mathargs: Keyword arguments to the root-finder
:func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None
(default) then no arguments are passed and default parameters
will be used.
:returns: Contraction coefficient in kg/kg.
:raises RuntimeWarning: If a string is passed for `dliq0` that does
not match an available method. The default is used instead.
:raises UserWarning: If a string is passed for `dliq0` that
specifies a function intended for water vapour.
:raises RuntimeWarning: If the value of dliq is more consistent with
water vapour in the subcritical region.
:raises RuntimeWarning: If the relative disequilibrium is more than
chktol, if chkvals is True and all values are given.
:Examples:
>>> contraction_t(0.035,300.,1e5)
0.732910044599
"""
dliq = _eq_tp_liq(temp,pres,dliq=dliq,chkvals=chkvals,chktol=chktol,
dliq0=dliq0,mathargs=mathargs)
_chkflubnds(temp,dliq,chkbnd=chkbnd)
_chksalbnds(salt,temp,pres,chkbnd=chkbnd)
g_p = sea_g(0,0,1,salt,temp,pres,dliq=dliq,useext=useext)
g_sp = sea_g(1,0,1,salt,temp,pres,dliq=dliq,useext=useext)
beta = -g_sp / g_p
return beta
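# (beta above is the isothermal haline contraction coefficient,
#  beta = -(1/v)*(dv/dS)_{T,p} = -g_SP/g_P with specific volume v = g_P,
#  which is exactly the expression returned.)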
def cp(salt,temp,pres,dliq=None,chkvals=False,chktol=_CHKTOL,dliq0=None,
chkbnd=False,useext=False,mathargs=None):
"""Calculate seawater isobaric heat capacity.
Calculate the isobaric (constant pressure) heat capacity of seawater
at salinity, temperature, and pressure.
:arg float salt: Salinity in kg/kg.
:arg float temp: Temperature in K.
:arg float pres: Pressure in Pa.
:arg dliq: Liquid water density in kg/m3. If unknown, pass None
(default) and it will be calculated.
:type dliq: float or None
:arg bool chkvals: If True (default False) and all values are given,
this function will calculate the disequilibrium and raise a
warning if the results are not within a given tolerance.
:arg float chktol: Tolerance to use when checking values (default
_CHKTOL).
:arg dliq0: Initial guess for the liquid water density in kg/m3. If
None (default) then `_dliq_default` is used. A string specifier
of the method (e.g. 'crit') can also be passed. See _LIQMETHODS
in flu3a for valid specifiers.
:type dliq0: float or str or None
:arg bool chkbnd: If True then warnings are raised when the given
values are valid but outside the recommended bounds (default
False).
:arg bool useext: If False (default) then the salt contribution is
calculated from _GSCOEFFS; if True, from _GSCOEFFS_EXT.
:arg mathargs: Keyword arguments to the root-finder
:func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None
(default) then no arguments are passed and default parameters
will be used.
:returns: Heat capacity in J/kg/K.
:raises RuntimeWarning: If a string is passed for `dliq0` that does
not match an available method. The default is used instead.
:raises UserWarning: If a string is passed for `dliq0` that
specifies a function intended for water vapour.
:raises RuntimeWarning: If the value of dliq is more consistent with
water vapour in the subcritical region.
:raises RuntimeWarning: If the relative disequilibrium is more than
chktol, if chkvals is True and all values are given.
:Examples:
>>> cp(0.035,300.,1e5)
4000.74973964
"""
dliq = _eq_tp_liq(temp,pres,dliq=dliq,chkvals=chkvals,chktol=chktol,
dliq0=dliq0,mathargs=mathargs)
_chkflubnds(temp,dliq,chkbnd=chkbnd)
_chksalbnds(salt,temp,pres,chkbnd=chkbnd)
g_tt = sea_g(0,2,0,salt,temp,pres,dliq=dliq,useext=useext)
cp = -temp * g_tt
return cp
def density(salt,temp,pres,dliq=None,chkvals=False,chktol=_CHKTOL,
dliq0=None,chkbnd=False,useext=False,mathargs=None):
"""Calculate seawater density.
Calculate the density of seawater at salinity, temperature, and
pressure.
:arg float salt: Salinity in kg/kg.
:arg float temp: Temperature in K.
:arg float pres: Pressure in Pa.
:arg dliq: Liquid water density in kg/m3. If unknown, pass None
(default) and it will be calculated.
:type dliq: float or None
:arg bool chkvals: If True (default False) and all values are given,
this function will calculate the disequilibrium and raise a
warning if the results are not within a given tolerance.
:arg float chktol: Tolerance to use when checking values (default
_CHKTOL).
:arg dliq0: Initial guess for the liquid water density in kg/m3. If
None (default) then `_dliq_default` is used. A string specifier
of the method (e.g. 'crit') can also be passed. See _LIQMETHODS
in flu3a for valid specifiers.
:type dliq0: float or str or None
:arg bool chkbnd: If True then warnings are raised when the given
values are valid but outside the recommended bounds (default
False).
:arg bool useext: If False (default) then the salt contribution is
calculated from _GSCOEFFS; if True, from _GSCOEFFS_EXT.
:arg mathargs: Keyword arguments to the root-finder
:func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None
(default) then no arguments are passed and default parameters
will be used.
:returns: Density in kg/m3.
:raises RuntimeWarning: If a string is passed for `dliq0` that does
not match an available method. The default is used instead.
:raises UserWarning: If a string is passed for `dliq0` that
specifies a function intended for water vapour.
:raises RuntimeWarning: If the value of dliq is more consistent with
water vapour in the subcritical region.
:raises RuntimeWarning: If the relative disequilibrium is more than
chktol, if chkvals is True and all values are given.
:Examples:
>>> density(0.035,300.,1e5)
1022.64272613
"""
dliq = _eq_tp_liq(temp,pres,dliq=dliq,chkvals=chkvals,chktol=chktol,
dliq0=dliq0,mathargs=mathargs)
_chkflubnds(temp,dliq,chkbnd=chkbnd)
_chksalbnds(salt,temp,pres,chkbnd=chkbnd)
g_p = sea_g(0,0,1,salt,temp,pres,dliq=dliq,useext=useext)
dsea = g_p**(-1)
return dsea
def enthalpy(salt,temp,pres,dliq=None,chkvals=False,chktol=_CHKTOL,
dliq0=None,chkbnd=False,useext=False,mathargs=None):
"""Calculate seawater enthalpy.
Calculate the specific enthalpy of seawater at salinity,
temperature, and pressure.
:arg float salt: Salinity in kg/kg.
:arg float temp: Temperature in K.
:arg float pres: Pressure in Pa.
:arg dliq: Liquid water density in kg/m3. If unknown, pass None
(default) and it will be calculated.
:type dliq: float or None
:arg bool chkvals: If True (default False) and all values are given,
this function will calculate the disequilibrium and raise a
warning if the results are not within a given tolerance.
:arg float chktol: Tolerance to use when checking values (default
_CHKTOL).
:arg dliq0: Initial guess for the liquid water density in kg/m3. If
None (default) then `_dliq_default` is used. A string specifier
of the method (e.g. 'crit') can also be passed. See _LIQMETHODS
in flu3a for valid specifiers.
:type dliq0: float or str or None
:arg bool chkbnd: If True then warnings are raised when the given
values are valid but outside the recommended bounds (default
False).
:arg bool useext: If False (default) then the salt contribution is
calculated from _GSCOEFFS; if True, from _GSCOEFFS_EXT.
:arg mathargs: Keyword arguments to the root-finder
:func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None
(default) then no arguments are passed and default parameters
will be used.
:returns: Enthalpy in J/kg.
:raises RuntimeWarning: If a string is passed for `dliq0` that does
not match an available method. The default is used instead.
:raises UserWarning: If a string is passed for `dliq0` that
specifies a function intended for water vapour.
:raises RuntimeWarning: If the value of dliq is more consistent with
water vapour in the subcritical region.
:raises RuntimeWarning: If the relative disequilibrium is more than
chktol, if chkvals is True and all values are given.
:Examples:
>>> enthalpy(0.035,300.,1e5)
107220.675963
"""
dliq = _eq_tp_liq(temp,pres,dliq=dliq,chkvals=chkvals,chktol=chktol,
dliq0=dliq0,mathargs=mathargs)
_chkflubnds(temp,dliq,chkbnd=chkbnd)
_chksalbnds(salt,temp,pres,chkbnd=chkbnd)
g = sea_g(0,0,0,salt,temp,pres,dliq=dliq,useext=useext)
g_t = sea_g(0,1,0,salt,temp,pres,dliq=dliq,useext=useext)
h = g - temp*g_t
return h
def entropy(salt,temp,pres,dliq=None,chkvals=False,chktol=_CHKTOL,
dliq0=None,chkbnd=False,useext=False,mathargs=None):
"""Calculate seawater entropy.
Calculate the specific entropy of seawater at salinity, temperature,
and pressure.
:arg float salt: Salinity in kg/kg.
:arg float temp: Temperature in K.
:arg float pres: Pressure in Pa.
:arg dliq: Liquid water density in kg/m3. If unknown, pass None
(default) and it will be calculated.
:type dliq: float or None
:arg bool chkvals: If True (default False) and all values are given,
this function will calculate the disequilibrium and raise a
warning if the results are not within a given tolerance.
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Xception V1 model for Keras.
On ImageNet, this model gets to a top-1 validation accuracy of 0.790
and a top-5 validation accuracy of 0.945.
Do note that the input image format for this model is different than for
the VGG16 and ResNet models (299x299 instead of 224x224),
and that the input preprocessing function
is also different (same as Inception V3).
Also do note that this model is only available for the TensorFlow backend,
due to its reliance on `SeparableConvolution` layers.
# Reference
- [Xception: Deep Learning with Depthwise Separable
Convolutions](https://arxiv.org/abs/1610.02357)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.python.keras._impl.keras import backend as K
from tensorflow.python.keras._impl.keras import layers
from tensorflow.python.keras._impl.keras.preprocessing import image
from tensorflow.python.keras._impl.keras.applications import imagenet_utils
from tensorflow.python.keras._impl.keras.applications.imagenet_utils import _obtain_input_shape
from tensorflow.python.keras._impl.keras.applications.imagenet_utils import decode_predictions # pylint: disable=unused-import
from tensorflow.python.keras._impl.keras.engine.topology import get_source_inputs
from tensorflow.python.keras._impl.keras.layers import Activation
from tensorflow.python.keras._impl.keras.layers import BatchNormalization
from tensorflow.python.keras._impl.keras.layers import Conv2D
from tensorflow.python.keras._impl.keras.layers import Dense
from tensorflow.python.keras._impl.keras.layers import GlobalAveragePooling2D
from tensorflow.python.keras._impl.keras.layers import GlobalMaxPooling2D
from tensorflow.python.keras._impl.keras.layers import Input
from tensorflow.python.keras._impl.keras.layers import MaxPooling2D
from tensorflow.python.keras._impl.keras.layers import SeparableConv2D
from tensorflow.python.keras._impl.keras.models import Model
from tensorflow.python.keras._impl.keras.utils.data_utils import get_file
from tensorflow.python.platform import tf_logging as logging
TF_WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.4/xception_weights_tf_dim_ordering_tf_kernels.h5'
TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.4/xception_weights_tf_dim_ordering_tf_kernels_notop.h5'
def Xception(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000):
"""Instantiates the Xception architecture.
Optionally loads weights pre-trained
on ImageNet. This model is available for TensorFlow only,
and can only be used with inputs following the TensorFlow
data format `(width, height, channels)`.
You should set `image_data_format="channels_last"` in your Keras config
located at ~/.keras/keras.json.
Note that the default input image size for this model is 299x299.
Arguments:
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(299, 299, 3)`).
It should have exactly 3 input channels,
and width and height should be no smaller than 71.
E.g. `(150, 150, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
Returns:
A Keras model instance.
Raises:
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
RuntimeError: If attempting to run this model with a
backend that does not support separable convolutions.
"""
if not (weights in {'imagenet', None} or os.path.exists(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded.')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as imagenet with `include_top`'
' as true, `classes` should be 1000')
if K.image_data_format() != 'channels_last':
logging.warning(
'The Xception model is only available for the '
'input data format "channels_last" '
'(width, height, channels). '
'However your settings specify the default '
'data format "channels_first" (channels, width, height). '
'You should set `image_data_format="channels_last"` in your Keras '
'config located at ~/.keras/keras.json. '
'The model being returned right now will expect inputs '
'to follow the "channels_last" data format.')
K.set_image_data_format('channels_last')
old_data_format = 'channels_first'
else:
old_data_format = None
# Determine proper input shape
input_shape = _obtain_input_shape(
input_shape,
default_size=299,
min_size=71,
data_format=K.image_data_format(),
require_flatten=False,
weights=weights)
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
img_input = Input(tensor=input_tensor, shape=input_shape)
x = Conv2D(
32, (3, 3), strides=(2, 2), use_bias=False,
name='block1_conv1')(img_input)
x = BatchNormalization(name='block1_conv1_bn')(x)
x = Activation('relu', name='block1_conv1_act')(x)
x = Conv2D(64, (3, 3), use_bias=False, name='block1_conv2')(x)
x = BatchNormalization(name='block1_conv2_bn')(x)
x = Activation('relu', name='block1_conv2_act')(x)
residual = Conv2D(
128, (1, 1), strides=(2, 2), padding='same', use_bias=False)(x)
residual = BatchNormalization()(residual)
x = SeparableConv2D(
128, (3, 3), padding='same', use_bias=False, name='block2_sepconv1')(x)
x = BatchNormalization(name='block2_sepconv1_bn')(x)
x = Activation('relu', name='block2_sepconv2_act')(x)
x = SeparableConv2D(
128, (3, 3), padding='same', use_bias=False, name='block2_sepconv2')(x)
x = BatchNormalization(name='block2_sepconv2_bn')(x)
x = MaxPooling2D(
(3, 3), strides=(2, 2), padding='same', name='block2_pool')(x)
x = layers.add([x, residual])
residual = Conv2D(
256, (1, 1), strides=(2, 2), padding='same', use_bias=False)(x)
residual = BatchNormalization()(residual)
x = Activation('relu', name='block3_sepconv1_act')(x)
x = SeparableConv2D(
256, (3, 3), padding='same', use_bias=False, name='block3_sepconv1')(x)
x = BatchNormalization(name='block3_sepconv1_bn')(x)
x = Activation('relu', name='block3_sepconv2_act')(x)
x = SeparableConv2D(
256, (3, 3), padding='same', use_bias=False, name='block3_sepconv2')(x)
x = BatchNormalization(name='block3_sepconv2_bn')(x)
x = MaxPooling2D(
(3, 3), strides=(2, 2), padding='same', name='block3_pool')(x)
x = layers.add([x, residual])
residual = Conv2D(
728, (1, 1), strides=(2, 2), padding='same', use_bias=False)(x)
residual = BatchNormalization()(residual)
x = Activation('relu', name='block4_sepconv1_act')(x)
x = SeparableConv2D(
728, (3, 3), padding='same', use_bias=False, name='block4_sepconv1')(x)
x = BatchNormalization(name='block4_sepconv1_bn')(x)
x = Activation('relu', name='block4_sepconv2_act')(x)
x = SeparableConv2D(
728, (3, 3), padding='same', use_bias=False, name='block4_sepconv2')(x)
x = BatchNormalization(name='block4_sepconv2_bn')(x)
x = MaxPooling2D(
(3, 3), strides=(2, 2), padding='same', name='block4_pool')(x)
x = layers.add([x, residual])
for i in range(8):
residual = x
prefix = 'block' + str(i + 5)
x = Activation('relu', name=prefix + '_sepconv1_act')(x)
x = SeparableConv2D(
728, (3, 3), padding='same', use_bias=False,
name=prefix + '_sepconv1')(x)
x = BatchNormalization(name=prefix + '_sepconv1_bn')(x)
x = Activation('relu', name=prefix + '_sepconv2_act')(x)
x = SeparableConv2D(
728, (3, 3), padding='same', use_bias=False,
name=prefix + '_sepconv2')(x)
x = BatchNormalization(name=prefix + '_sepconv2_bn')(x)
x = Activation('relu', name=prefix + '_sepconv3_act')(x)
x = SeparableConv2D(
728, (3, 3), padding='same', use_bias=False,
name=prefix + '_sepconv3')(x)
x = BatchNormalization(name=prefix + '_sepconv3_bn')(x)
x = layers.add([x, residual])
residual = Conv2D(
1024, (1, 1), strides=(2, 2), padding='same', use_bias=False)(x)
residual = BatchNormalization()(residual)
x = Activation('relu', name='block13_sepconv1_act')(x)
x = SeparableConv2D(
728, (3, 3), padding='same', use_bias=False, name='block13_sepconv1')(x)
x = BatchNormalization(name='block13_sepconv1_bn')(x)
x = Activation('relu', name='block13_sepconv2_act')(x)
x = SeparableConv2D(
1024, (3, 3), padding='same', use_bias=False, name='block13_sepconv2')(x)
x = BatchNormalization(name='block13_sepconv2_bn')(x)
x = MaxPooling2D(
(3, 3), strides=(2, 2), padding='same', name='block13_pool')(x)
x = layers.add([x, residual])
x = SeparableConv2D(
1536, (3, 3), padding='same', use_bias=False, name='block14_sepconv1')(x)
x = BatchNormalization(name='block14_sepconv1_bn')(x)
x = Activation('relu', name='block14_sepconv1_act')(x)
x = SeparableConv2D(
2048, (3, 3), padding='same', use_bias=False, name='block14_sepconv2')(x)
x = BatchNormalization(name='block14_sepconv2_bn')(x)
x = Activation('relu', name='block14_sepconv2_act')(x)
if include_top:
x = GlobalAveragePooling2D(name='avg_pool')(x)
x = Dense(classes, activation='softmax', name='predictions')(x)
else:
if pooling == 'avg':
x = GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = Model(inputs, x, name='xception')
# load weights
if weights == 'imagenet':
if include_top:
weights_path = get_file(
'xception_weights_tf_dim_ordering_tf_kernels.h5',
TF_WEIGHTS_PATH,
cache_subdir='models',
file_hash='0a58e3b7378bc2990ea3b43d5981f1f6')
else:
weights_path = get_file(
'xception_weights_tf_dim_ordering_tf_kernels_notop.h5',
TF_WEIGHTS_PATH_NO_TOP,
cache_subdir='models',
file_hash='b0042744bf5b25fce3cb969f33bebb97')
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
if old_data_format:
K.set_image_data_format(old_data_format)
return model
def preprocess_input(x):
"""Preprocesses a numpy array encoding a batch of images.
Arguments:
x: a 4D numpy array consists of RGB values within [0, 255].
Returns:
Preprocessed array.
"""
return imagenet_utils.preprocess_input(x, mode='tf')
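# (mode='tf' preprocessing scales pixel values from [0, 255] to [-1, 1],
# i.e. x/127.5 - 1.)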
# if __name__ == '__main__':
# # image size = 299 * 299
# model = Xception(include_top=True, weights='./imagenet_xception.h5')
# img_path = 'images/000010.jpg'
# img = image.load_img(img_path, target_size=(299, 299))
# x = image.img_to_array(img)
# x = np.expand_dims(x, axis=0)
# x = preprocess_input(x)
# print('Input image shape:', x.shape)
# preds = model.predict(x)
# print(np.argmax(preds))
# print('Predicted:', decode_predictions(preds, 1))
if __name__ == '__main__':
# image size = 299 * 299
model = Xception(include_top=True, weights='./imagenet_xception.h5')
preds = model.predict(np.ones((1,299,299,3)) * 0.5)
print(preds)
print(np.argmax(preds))
#print('Predicted:', decode_predictions(preds, 1))
# img_path = 'elephant.jpg'
# img = image.load_img(img_path, target_size=(299, 299))
import asyncio
import logging
import aiopg
import psycopg2
from psycopg2.extras import RealDictCursor
LATEST_BLOCK_NUM = """
SELECT max(block_num) FROM blocks
"""
LOGGER = logging.getLogger(__name__)
class Database(object):
"""Manages connection to the postgres database and makes async queries
"""
def __init__(self, host, port, name, user, password, loop):
self._dsn = 'dbname={} user={} password={} host={} port={}'.format(
name, user, password, host, port)
self._loop = loop
self._conn = None
async def connect(self, retries=5, initial_delay=1, backoff=2):
"""Initializes a connection to the database
Args:
retries (int): Number of times to retry the connection
initial_delay (int): Number of seconds to wait between reconnects
backoff (int): Multiplies the delay after each retry
"""
LOGGER.info('Connecting to database')
delay = initial_delay
for attempt in range(retries):
try:
self._conn = await aiopg.connect(
dsn=self._dsn, loop=self._loop, echo=True)
LOGGER.info('Successfully connected to database')
return
except psycopg2.OperationalError:
LOGGER.debug(
'Connection failed.'
' Retrying connection (%s retries remaining)',
retries - attempt)
await asyncio.sleep(delay)
delay *= backoff
self._conn = await aiopg.connect(
dsn=self._dsn, loop=self._loop, echo=True)
LOGGER.info('Successfully connected to database')
def disconnect(self):
"""Closes connection to the database
"""
self._conn.close()
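# A minimal usage sketch (the connection parameters below are placeholders,
# not values defined in this module):
#
#   loop = asyncio.get_event_loop()
#   database = Database('localhost', 5432, 'votes', 'postgres', 'secret', loop)
#   loop.run_until_complete(database.connect(retries=5, initial_delay=1, backoff=2))
#   ... run queries ...
#   database.disconnect()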
async def fetch_current_elections_resources(self, voter_id, timestamp):
fetch_elections = """
SELECT e.*,v.name AS "admin_name",(SELECT vote_id FROM votes WHERE voter_id='{0}'
AND election_id=e.election_id LIMIT 1)
IS NOT NULL AS "voted"
FROM elections e JOIN voters v ON e.admin_id = v.voter_id
AND election_id IN (SELECT election_id FROM poll_registrations WHERE voter_id='{0}' AND status='1'
AND ({2}) >= start_block_num AND ({2}) < end_block_num)
AND start_timestamp <= {1}
AND end_timestamp >= {1}
AND e.status = '1'
AND ({2}) >= e.start_block_num
AND ({2}) < e.end_block_num
ORDER BY start_timestamp DESC;
""".format(voter_id, timestamp, LATEST_BLOCK_NUM)
async with self._conn.cursor(cursor_factory=RealDictCursor) as cursor:
await cursor.execute(fetch_elections)
return await cursor.fetchall()
async def fetch_past_elections_resources(self, voter_id, timestamp):
fetch_elections = """
SELECT e.*,v.name AS "admin_name",(SELECT vote_id FROM votes WHERE voter_id='{0}'
AND election_id=e.election_id LIMIT 1)
IS NOT NULL AS "voted"
FROM elections e JOIN voters v ON e.admin_id = v.voter_id
AND election_id IN (SELECT election_id FROM poll_registrations WHERE voter_id='{0}' AND status='1'
AND ({2}) >= start_block_num AND ({2}) < end_block_num)
AND end_timestamp < {1}
AND e.status = '1'
AND ({2}) >= e.start_block_num
AND ({2}) < e.end_block_num
ORDER BY start_timestamp DESC;
""".format(voter_id, timestamp, LATEST_BLOCK_NUM)
async with self._conn.cursor(cursor_factory=RealDictCursor) as cursor:
await cursor.execute(fetch_elections)
return await cursor.fetchall()
async def fetch_public_elections_resources(self, timestamp):
fetch_elections = """
SELECT *
FROM elections
WHERE start_timestamp <= {0}
AND end_timestamp >= {0}
AND status = '1'
AND results_permission = 'PUBLIC'
AND ({1}) >= start_block_num
AND ({1}) < end_block_num
ORDER BY start_timestamp DESC;
""".format(timestamp, LATEST_BLOCK_NUM)
async with self._conn.cursor(cursor_factory=RealDictCursor) as cursor:
await cursor.execute(fetch_elections)
return await cursor.fetchall()
async def fetch_public_past_elections_resources(self, voter_id, timestamp):
fetch_elections = """
SELECT e.*,v.name AS "admin_name",(SELECT vote_id FROM votes WHERE voter_id='{0}'
AND election_id=e.election_id LIMIT 1)
IS NOT NULL AS "voted"
FROM elections e JOIN voters v ON e.admin_id = v.voter_id
WHERE e.results_permission = 'PUBLIC'
AND e.status = '1'
AND e.end_timestamp < {1}
AND ({2}) >= e.start_block_num
AND ({2}) < e.end_block_num
ORDER BY start_timestamp DESC;
""".format(voter_id, timestamp, LATEST_BLOCK_NUM)
async with self._conn.cursor(cursor_factory=RealDictCursor) as cursor:
await cursor.execute(fetch_elections)
return await cursor.fetchall()
async def fetch_admin_elections_resources(self, admin_id):
fetch_elections = """
SELECT *
FROM elections
WHERE admin_id = '{0}'
AND ({1}) >= start_block_num
AND ({1}) < end_block_num
ORDER BY start_timestamp DESC;
""".format(admin_id, LATEST_BLOCK_NUM)
async with self._conn.cursor(cursor_factory=RealDictCursor) as cursor:
await cursor.execute(fetch_elections)
return await cursor.fetchall()
async def fetch_admins_resources(self):
fetch = """
SELECT voter_id, name, type
FROM voters
WHERE ({0}) >= start_block_num
AND ({0}) < end_block_num
AND (type = 'ADMIN' OR type = 'SUPERADMIN')
ORDER BY type DESC;
""".format(LATEST_BLOCK_NUM)
async with self._conn.cursor(cursor_factory=RealDictCursor) as cursor:
await cursor.execute(fetch)
return await cursor.fetchall()
async def fetch_voters_resources(self, voter_id=None):
fetch = """
SELECT voter_id
FROM voters
WHERE type = 'VOTER'
AND voter_id LIKE '%{0}%'
AND ({1}) >= start_block_num
AND ({1}) < end_block_num
ORDER BY type DESC;
""".format(voter_id, LATEST_BLOCK_NUM)
async with self._conn.cursor(cursor_factory=RealDictCursor) as cursor:
await cursor.execute(fetch)
return await cursor.fetchall()
async def insert_voting_option_num_vote_resource(self,
voting_option_id,
name,
election_id):
num_votes = 0
insert = """
INSERT INTO count_votes (
voting_option_id,
name,
election_id,
num_votes)
VALUES ('{}', '{}', '{}', '{}')
""".format(
voting_option_id,
name,
election_id,
num_votes)
async with self._conn.cursor() as cursor:
await cursor.execute(insert)
self._conn.commit()
async def update_voting_option_num_vote_resource(self,
voting_option_id,
num_votes):
update = """
UPDATE count_votes
SET num_votes = '{1}'
WHERE voting_option_id = '{0}'
""".format(
voting_option_id,
num_votes)
async with self._conn.cursor() as cursor:
await cursor.execute(update)
self._conn.commit()
async def fetch_auth_resource(self, public_key=None):
fetch = """
SELECT * FROM auth WHERE public_key='{}'
""".format(public_key)
async with self._conn.cursor(cursor_factory=RealDictCursor) as cursor:
await cursor.execute(fetch)
return await cursor.fetchone()
async def fetch_voter_resource(self, voter_id=None, public_key=None):
fetch = """
SELECT * FROM voters WHERE """ + ("""voter_id""" if voter_id else """public_key""") + """='{0}'
AND ({1}) >= start_block_num
AND ({1}) < end_block_num;
""".format(voter_id if voter_id else public_key, LATEST_BLOCK_NUM)
async with self._conn.cursor(cursor_factory=RealDictCursor) as cursor:
await cursor.execute(fetch)
return await cursor.fetchone()
async def is_voter_created(self, voter_id):
fetch = """
SELECT voter_id
FROM voters
WHERE voter_id = '{0}';
""".format(voter_id)
async with self._conn.cursor(cursor_factory=RealDictCursor) as cursor:
await cursor.execute(fetch)
return await cursor.fetchone()
async def is_superadmin_created(self):
fetch = """
SELECT voter_id
FROM voters
WHERE type='SUPERADMIN'
"""
async with self._conn.cursor(cursor_factory=RealDictCursor) as cursor:
await cursor.execute(fetch)
return await cursor.fetchone()
async def fetch_election_with_can_vote_resource(self, voter_id=None, election_id=None):
fetch = """
SELECT e.*, v.name AS "admin_name", (SELECT voter_id FROM poll_registrations WHERE voter_id='{0}'
AND election_id='{1}'
AND status='1' LIMIT 1)
IS NOT NULL AS "can_vote", (SELECT vote_id FROM votes WHERE voter_id='{0}'
AND election_id='{1}' LIMIT 1)
IS NOT NULL AS "voted"
FROM elections e JOIN voters v ON e.admin_id = v.voter_id
WHERE election_id='{1}'
AND e.status = '1'
AND ({2}) >= e.start_block_num
AND ({2}) < e.end_block_num;
""".format(voter_id, election_id, LATEST_BLOCK_NUM)
async with self._conn.cursor(cursor_factory=RealDictCursor) as cursor:
await cursor.execute(fetch)
return await cursor.fetchone()
async def fetch_election_resource(self, election_id=None):
fetch = """
SELECT e.*, v.name AS "admin_name"
FROM elections e JOIN voters v ON e.admin_id = v.voter_id
WHERE election_id='{0}'
AND ({1}) >= e.start_block_num
AND ({1}) < e.end_block_num;
""".format(election_id, LATEST_BLOCK_NUM)
async with self._conn.cursor(cursor_factory=RealDictCursor) as cursor:
await cursor.execute(fetch)
return await cursor.fetchone()
async def fetch_election_with_can_vote_resource_admin(self, voter_id=None, election_id=None):
fetch = """
SELECT e.*, v.name AS "admin_name", (SELECT voter_id FROM poll_registrations WHERE voter_id='{0}'
AND election_id='{1}'
AND status='1' LIMIT 1)
IS NOT NULL AS "can_vote", (SELECT vote_id FROM votes WHERE voter_id='{0}'
AND election_id='{1}' LIMIT 1)
IS NOT NULL AS "voted"
FROM elections e JOIN voters v ON e.admin_id = v.voter_id
WHERE election_id='{1}'
AND ({2}) >= e.start_block_num
AND ({2}) < e.end_block_num;
""".format(voter_id, election_id, LATEST_BLOCK_NUM)
async with self._conn.cursor(cursor_factory=RealDictCursor) as cursor:
await cursor.execute(fetch)
return await cursor.fetchone()
async def fetch_number_of_votes(self, election_id=None):
fetch = """
SELECT * FROM count_votes
WHERE election_id='{0}';
""".format(election_id)
async with self._conn.cursor(cursor_factory=RealDictCursor) as cursor:
await cursor.execute(fetch)
return await cursor.fetchall()
async def fetch_poll_book(self, election_id=None):
fetch = """
SELECT * FROM poll_registrations
WHERE election_id='{0}'
AND status='1'
AND ({1}) >= start_block_num
AND ({1}) < end_block_num;
""".format(election_id, LATEST_BLOCK_NUM)
async with self._conn.cursor(cursor_factory=RealDictCursor) as cursor:
await cursor.execute(fetch)
return await cursor.fetchall()
async def fetch_poll_book_registration(self, election_id=None, voter_id=None):
fetch = """
SELECT * FROM poll_registrations
WHERE election_id='{0}'
AND voter_id='{1}'
AND status='1'
AND ({2}) >= start_block_num
AND ({2}) < end_block_num;
""".format(election_id, voter_id, LATEST_BLOCK_NUM)
async with self._conn.cursor(cursor_factory=RealDictCursor) as cursor:
await cursor.execute(fetch)
return await cursor.fetchone()
async def count_poll_book(self, election_id=None):
fetch = """
SELECT COUNT(*)
FROM poll_registrations
WHERE election_id='{0}'
AND status='1'
AND ({1}) >= start_block_num
AND ({1}) < end_block_num;
""".format(election_id, LATEST_BLOCK_NUM)
async with self._conn.cursor(cursor_factory=RealDictCursor) as cursor:
await cursor.execute(fetch)
return await cursor.fetchone()
async def fetch_voting_option_resource(self, voting_option_id=None):
fetch = """
SELECT * FROM voting_options
WHERE voting_option_id='{0}'
AND status='1'
AND ({1}) >= start_block_num
AND ({1}) < end_block_num;
""".format(voting_option_id, LATEST_BLOCK_NUM)
async with self._conn.cursor(cursor_factory=RealDictCursor) as cursor:
await cursor.execute(fetch)
return await cursor.fetchone()
async def fetch_voting_option_num_vote_resource(self, voting_option_id=None):
fetch = """
SELECT * FROM count_votes
WHERE voting_option_id='{0}';
""".format(voting_option_id)
async with self._conn.cursor(cursor_factory=RealDictCursor) as cursor:
await cursor.execute(fetch)
return await cursor.fetchone()
async def fetch_election_voting_options_resource(self, election_id=None):
fetch = """
SELECT * FROM voting_options
WHERE election_id='{0}'
AND status='1'
AND ({1}) >= start_block_num
AND ({1}) < end_block_num;
""".format(election_id, LATEST_BLOCK_NUM)
async with self._conn.cursor(cursor_factory=RealDictCursor) as cursor:
await cursor.execute(fetch)
return await cursor.fetchall()
async def fetch_vote_resource(self, vote_id=None):
fetch = """
SELECT * FROM votes WHERE timestamp=(SELECT MAX(timestamp) FROM votes WHERE vote_id='{0}')
AND ({1}) >= start_block_num
AND ({1}) < end_block_num;
""".format(vote_id, LATEST_BLOCK_NUM)
async with self._conn.cursor(cursor_factory=RealDictCursor) as cursor:
await cursor.execute(fetch)
return await cursor.fetchone()
async def fetch_my_vote__election_resource(self, voter_id=None, election_id=None):
fetch = """
SELECT * FROM votes WHERE timestamp=(SELECT MAX(timestamp) FROM votes
WHERE voter_id='{0}' AND election_id='{1}')
AND ({2}) >= start_block_num
AND ({2}) < end_block_num;
""".format(voter_id, election_id, LATEST_BLOCK_NUM)
async with self._conn.cursor(cursor_factory=RealDictCursor) as cursor:
@property
@pulumi.getter
def ip(self) -> str:
return pulumi.get(self, "ip")
@property
@pulumi.getter(name="ipRange")
def ip_range(self) -> str:
return pulumi.get(self, "ip_range")
@property
@pulumi.getter
def port(self) -> int:
return pulumi.get(self, "port")
@property
@pulumi.getter(name="portRange")
def port_range(self) -> str:
return pulumi.get(self, "port_range")
@property
@pulumi.getter
def protocol(self) -> str:
return pulumi.get(self, "protocol")
@pulumi.output_type
class GetInstanceServerPrivateNetworkResult(dict):
def __init__(__self__, *,
mac_address: str,
pn_id: str,
status: str,
zone: str):
"""
:param str zone: `zone`) The zone in which the server exists.
"""
pulumi.set(__self__, "mac_address", mac_address)
pulumi.set(__self__, "pn_id", pn_id)
pulumi.set(__self__, "status", status)
pulumi.set(__self__, "zone", zone)
@property
@pulumi.getter(name="macAddress")
def mac_address(self) -> str:
return pulumi.get(self, "mac_address")
@property
@pulumi.getter(name="pnId")
def pn_id(self) -> str:
return pulumi.get(self, "pn_id")
@property
@pulumi.getter
def status(self) -> str:
return pulumi.get(self, "status")
@property
@pulumi.getter
def zone(self) -> str:
"""
`zone`) The zone in which the server exists.
"""
return pulumi.get(self, "zone")
@pulumi.output_type
class GetInstanceServerRootVolumeResult(dict):
def __init__(__self__, *,
boot: bool,
delete_on_termination: bool,
size_in_gb: int,
volume_id: str):
"""
:param bool delete_on_termination: Forces deletion of the root volume on instance termination.
:param int size_in_gb: Size of the root volume in gigabytes.
:param str volume_id: The volume ID of the root volume of the server.
"""
pulumi.set(__self__, "boot", boot)
pulumi.set(__self__, "delete_on_termination", delete_on_termination)
pulumi.set(__self__, "size_in_gb", size_in_gb)
pulumi.set(__self__, "volume_id", volume_id)
@property
@pulumi.getter
def boot(self) -> bool:
return pulumi.get(self, "boot")
@property
@pulumi.getter(name="deleteOnTermination")
def delete_on_termination(self) -> bool:
"""
Forces deletion of the root volume on instance termination.
"""
return pulumi.get(self, "delete_on_termination")
@property
@pulumi.getter(name="sizeInGb")
def size_in_gb(self) -> int:
"""
Size of the root volume in gigabytes.
"""
return pulumi.get(self, "size_in_gb")
@property
@pulumi.getter(name="volumeId")
def volume_id(self) -> str:
"""
The volume ID of the root volume of the server.
"""
return pulumi.get(self, "volume_id")
@pulumi.output_type
class GetKubernetesClusterAutoUpgradeResult(dict):
def __init__(__self__, *,
enable: bool,
maintenance_window_day: str,
maintenance_window_start_hour: int):
"""
:param bool enable: True if Kubernetes patch version auto upgrades is enabled.
:param str maintenance_window_day: The day of the auto upgrade maintenance window (`monday` to `sunday`, or `any`).
:param int maintenance_window_start_hour: The start hour (UTC) of the 2-hour auto upgrade maintenance window (0 to 23).
"""
pulumi.set(__self__, "enable", enable)
pulumi.set(__self__, "maintenance_window_day", maintenance_window_day)
pulumi.set(__self__, "maintenance_window_start_hour", maintenance_window_start_hour)
@property
@pulumi.getter
def enable(self) -> bool:
"""
True if Kubernetes patch version auto upgrades is enabled.
"""
return pulumi.get(self, "enable")
@property
@pulumi.getter(name="maintenanceWindowDay")
def maintenance_window_day(self) -> str:
"""
The day of the auto upgrade maintenance window (`monday` to `sunday`, or `any`).
"""
return pulumi.get(self, "maintenance_window_day")
@property
@pulumi.getter(name="maintenanceWindowStartHour")
def maintenance_window_start_hour(self) -> int:
"""
The start hour (UTC) of the 2-hour auto upgrade maintenance window (0 to 23).
"""
return pulumi.get(self, "maintenance_window_start_hour")
@pulumi.output_type
class GetKubernetesClusterAutoscalerConfigResult(dict):
def __init__(__self__, *,
balance_similar_node_groups: bool,
disable_scale_down: bool,
estimator: str,
expander: str,
expendable_pods_priority_cutoff: int,
ignore_daemonsets_utilization: bool,
max_graceful_termination_sec: int,
scale_down_delay_after_add: str,
scale_down_unneeded_time: str,
scale_down_utilization_threshold: float):
"""
:param bool balance_similar_node_groups: True if detecting similar node groups and balancing the number of nodes between them is enabled.
:param bool disable_scale_down: True if the scale down feature of the autoscaler is disabled.
:param str estimator: The type of resource estimator used in scale up.
:param str expander: The type of node group expander to be used in scale up.
:param int expendable_pods_priority_cutoff: Pods with priority below cutoff will be expendable. They can be killed without any consideration during scale down and they don't cause scale up. Pods with null priority (PodPriority disabled) are non expendable.
:param bool ignore_daemonsets_utilization: True if ignoring DaemonSet pods when calculating resource utilization for scaling down is enabled.
:param str scale_down_delay_after_add: The duration after scale up that scale down evaluation resumes.
:param str scale_down_unneeded_time: The duration a node should be unneeded before it is eligible for scale down.
"""
pulumi.set(__self__, "balance_similar_node_groups", balance_similar_node_groups)
pulumi.set(__self__, "disable_scale_down", disable_scale_down)
pulumi.set(__self__, "estimator", estimator)
pulumi.set(__self__, "expander", expander)
pulumi.set(__self__, "expendable_pods_priority_cutoff", expendable_pods_priority_cutoff)
pulumi.set(__self__, "ignore_daemonsets_utilization", ignore_daemonsets_utilization)
pulumi.set(__self__, "max_graceful_termination_sec", max_graceful_termination_sec)
pulumi.set(__self__, "scale_down_delay_after_add", scale_down_delay_after_add)
pulumi.set(__self__, "scale_down_unneeded_time", scale_down_unneeded_time)
pulumi.set(__self__, "scale_down_utilization_threshold", scale_down_utilization_threshold)
@property
@pulumi.getter(name="balanceSimilarNodeGroups")
def balance_similar_node_groups(self) -> bool:
"""
True if detecting similar node groups and balancing the number of nodes between them is enabled.
"""
return pulumi.get(self, "balance_similar_node_groups")
@property
@pulumi.getter(name="disableScaleDown")
def disable_scale_down(self) -> bool:
"""
True if the scale down feature of the autoscaler is disabled.
"""
return pulumi.get(self, "disable_scale_down")
@property
@pulumi.getter
def estimator(self) -> str:
"""
The type of resource estimator used in scale up.
"""
return pulumi.get(self, "estimator")
@property
@pulumi.getter
def expander(self) -> str:
"""
The type of node group expander to be used in scale up.
"""
return pulumi.get(self, "expander")
@property
@pulumi.getter(name="expendablePodsPriorityCutoff")
def expendable_pods_priority_cutoff(self) -> int:
"""
Pods with priority below cutoff will be expendable. They can be killed without any consideration during scale down and they don't cause scale up. Pods with null priority (PodPriority disabled) are non expendable.
"""
return pulumi.get(self, "expendable_pods_priority_cutoff")
@property
@pulumi.getter(name="ignoreDaemonsetsUtilization")
def ignore_daemonsets_utilization(self) -> bool:
"""
True if ignoring DaemonSet pods when calculating resource utilization for scaling down is enabled.
"""
return pulumi.get(self, "ignore_daemonsets_utilization")
@property
@pulumi.getter(name="maxGracefulTerminationSec")
def max_graceful_termination_sec(self) -> int:
return pulumi.get(self, "max_graceful_termination_sec")
@property
@pulumi.getter(name="scaleDownDelayAfterAdd")
def scale_down_delay_after_add(self) -> str:
"""
The duration after scale up that scale down evaluation resumes.
"""
return pulumi.get(self, "scale_down_delay_after_add")
@property
@pulumi.getter(name="scaleDownUnneededTime")
def scale_down_unneeded_time(self) -> str:
"""
The duration a node should be unneeded before it is eligible for scale down.
"""
return pulumi.get(self, "scale_down_unneeded_time")
@property
@pulumi.getter(name="scaleDownUtilizationThreshold")
def scale_down_utilization_threshold(self) -> float:
return pulumi.get(self, "scale_down_utilization_threshold")
@pulumi.output_type
class GetKubernetesClusterKubeconfigResult(dict):
def __init__(__self__, *,
cluster_ca_certificate: str,
config_file: str,
host: str,
token: str):
"""
:param str cluster_ca_certificate: The CA certificate of the Kubernetes API server.
:param str config_file: The raw kubeconfig file.
:param str host: The URL of the Kubernetes API server.
:param str token: The token to connect to the Kubernetes API server.
"""
pulumi.set(__self__, "cluster_ca_certificate", cluster_ca_certificate)
pulumi.set(__self__, "config_file", config_file)
pulumi.set(__self__, "host", host)
pulumi.set(__self__, "token", token)
@property
@pulumi.getter(name="clusterCaCertificate")
def cluster_ca_certificate(self) -> str:
"""
The CA certificate of the Kubernetes API server.
"""
return pulumi.get(self, "cluster_ca_certificate")
@property
@pulumi.getter(name="configFile")
def config_file(self) -> str:
"""
The raw kubeconfig file.
"""
return pulumi.get(self, "config_file")
@property
@pulumi.getter
def host(self) -> str:
"""
The URL of the Kubernetes API server.
"""
return pulumi.get(self, "host")
@property
@pulumi.getter
def token(self) -> str:
"""
The token to connect to the Kubernetes API server.
"""
return pulumi.get(self, "token")
@pulumi.output_type
class GetKubernetesClusterOpenIdConnectConfigResult(dict):
def __init__(__self__, *,
client_id: str,
groups_claims: Sequence[str],
groups_prefix: str,
issuer_url: str,
required_claims: Sequence[str],
username_claim: str,
username_prefix: str):
pulumi.set(__self__, "client_id", client_id)
pulumi.set(__self__, "groups_claims", groups_claims)
pulumi.set(__self__, "groups_prefix", groups_prefix)
pulumi.set(__self__, "issuer_url", issuer_url)
pulumi.set(__self__, "required_claims", required_claims)
pulumi.set(__self__, "username_claim", username_claim)
pulumi.set(__self__, "username_prefix", username_prefix)
@property
@pulumi.getter(name="clientId")
def client_id(self) -> str:
return pulumi.get(self, "client_id")
@property
@pulumi.getter(name="groupsClaims")
def groups_claims(self) -> Sequence[str]:
return pulumi.get(self, "groups_claims")
@property
@pulumi.getter(name="groupsPrefix")
def groups_prefix(self) -> str:
return pulumi.get(self, "groups_prefix")
@property
@pulumi.getter(name="issuerUrl")
def issuer_url(self) -> str:
return pulumi.get(self, "issuer_url")
@property
@pulumi.getter(name="requiredClaims")
def required_claims(self) -> Sequence[str]:
return pulumi.get(self, "required_claims")
@property
@pulumi.getter(name="usernameClaim")
def username_claim(self) -> str:
return pulumi.get(self, "username_claim")
@property
@pulumi.getter(name="usernamePrefix")
def username_prefix(self) -> str:
return pulumi.get(self, "username_prefix")
@pulumi.output_type
class GetKubernetesNodePoolNodeResult(dict):
def __init__(__self__, *,
name: str,
public_ip: str,
public_ip_v6: str,
status: str):
"""
        :param str name: The name of the node.
:param str public_ip: The public IPv4.
:param str public_ip_v6: The public IPv6.
:param str status: The status of the node.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "public_ip", public_ip)
pulumi.set(__self__, "public_ip_v6", public_ip_v6)
pulumi.set(__self__, "status", status)
@property
@pulumi.getter
def name(self) -> str:
"""
        The name of the node.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="publicIp")
def public_ip(self) -> str:
"""
The public IPv4.
"""
return pulumi.get(self, "public_ip")
@property
@pulumi.getter(name="publicIpV6")
def public_ip_v6(self) -> str:
"""
The public IPv6.
"""
return pulumi.get(self, "public_ip_v6")
@property
@pulumi.getter
def status(self) -> str:
"""
The status of the node.
"""
return pulumi.get(self, "status")
@pulumi.output_type
class GetKubernetesNodePoolUpgradePolicyResult(dict):
def __init__(__self__, *,
max_surge: int,
max_unavailable: int):
pulumi.set(__self__, "max_surge", max_surge)
pulumi.set(__self__, "max_unavailable", max_unavailable)
@property
@pulumi.getter(name="maxSurge")
def max_surge(self) -> int:
return pulumi.get(self, "max_surge")
@property
@pulumi.getter(name="maxUnavailable")
def max_unavailable(self) -> int:
return pulumi.get(self, "max_unavailable")
@pulumi.output_type
class GetLoadbalancerPrivateNetworkResult(dict):
def __init__(__self__, *,
dhcp_config: bool,
private_network_id: str,
static_configs: Sequence[str],
status: str,
zone: str):
"""
        :param str zone: (Defaults to provider `region`) The region in which the load balancer exists.
"""
pulumi.set(__self__, "dhcp_config", dhcp_config)
pulumi.set(__self__, "private_network_id", private_network_id)
pulumi.set(__self__, "static_configs", static_configs)
pulumi.set(__self__, "status", status)
pulumi.set(__self__, "zone", zone)
@property
@pulumi.getter(name="dhcpConfig")
def dhcp_config(self) -> bool:
return pulumi.get(self, "dhcp_config")
@property
@pulumi.getter(name="privateNetworkId")
    def private_network_id(self) -> str:
        return pulumi.get(self, "private_network_id")
stack:
# Note... these tests are in reverse, since the exception
# winds its way up the stack.
stack.Add(_mk_kls())
stack.Add(_mk_kls(ValueError, suppress=True))
stack.Add(_mk_kls(IndexError, exception_kls=ValueError))
stack.Add(_mk_kls(IndexError))
stack.Add(_mk_kls(exception_kls=IndexError))
stack.Add(_mk_kls())
self.assertEqual(invoked, list(reversed(range(6))))
class TestManifestCheckout(cros_test_lib.TempDirTestCase):
"""Tests for ManifestCheckout functionality."""
def setUp(self):
self.manifest_dir = os.path.join(self.tempdir, '.repo', 'manifests')
    # Initialize a repo instance here.
# TODO(vapier, ferringb): mangle this so it inits from a local
# checkout if one is available, same for the git-repo fetch.
cmd = ['repo', 'init', '-u', constants.MANIFEST_URL]
cros_build_lib.RunCommand(cmd, cwd=self.tempdir, input='',
capture_output=True)
self.active_manifest = os.path.realpath(
os.path.join(self.tempdir, '.repo', 'manifest.xml'))
def testManifestInheritance(self):
osutils.WriteFile(self.active_manifest, """
<manifest>
<include name="include-target.xml" />
<include name="empty.xml" />
<project name="monkeys" path="baz" remote="foon" revision="master" />
</manifest>""")
# First, verify it properly explodes if the include can't be found.
self.assertRaises(EnvironmentError,
git.ManifestCheckout, self.tempdir)
# Next, verify it can read an empty manifest; this is to ensure
# that we can point Manifest at the empty manifest without exploding,
# same for ManifestCheckout; this sort of thing is primarily useful
# to ensure no step of an include assumes everything is yet assembled.
empty_path = os.path.join(self.manifest_dir, 'empty.xml')
osutils.WriteFile(empty_path, '<manifest/>')
git.Manifest(empty_path)
git.ManifestCheckout(self.tempdir, manifest_path=empty_path)
# Next, verify include works.
osutils.WriteFile(os.path.join(self.manifest_dir, 'include-target.xml'),
"""
<manifest>
<remote name="foon" fetch="http://localhost" />
</manifest>""")
manifest = git.ManifestCheckout(self.tempdir)
self.assertEqual(list(manifest.checkouts_by_name), ['monkeys'])
self.assertEqual(list(manifest.remotes), ['foon'])
# pylint: disable=E1101
def testGetManifestsBranch(self):
func = git.ManifestCheckout._GetManifestsBranch
manifest = self.manifest_dir
repo_root = self.tempdir
# pylint: disable=W0613
def reconfig(merge='master', origin='origin'):
if merge is not None:
merge = 'refs/heads/%s' % merge
for key in ('merge', 'origin'):
val = locals()[key]
key = 'branch.default.%s' % key
if val is None:
git.RunGit(manifest, ['config', '--unset', key], error_code_ok=True)
else:
git.RunGit(manifest, ['config', key, val])
# First, verify our assumptions about a fresh repo init are correct.
self.assertEqual('default', git.GetCurrentBranch(manifest))
self.assertEqual('master', func(repo_root))
# Ensure we can handle a missing origin; this can occur jumping between
# branches, and can be worked around.
reconfig(origin=None)
self.assertEqual('default', git.GetCurrentBranch(manifest))
self.assertEqual('master', func(repo_root))
# TODO(ferringb): convert this over to assertRaises2
def assertExcept(message, **kwargs):
reconfig(**kwargs)
try:
func(repo_root)
assert "Testing for %s, an exception wasn't thrown." % (message,)
except OSError as e:
self.assertEqual(e.errno, errno.ENOENT)
self.assertTrue(message in str(e),
msg="Couldn't find string %r in error message %r"
% (message, str(e)))
# No merge target means the configuration isn't usable, period.
assertExcept("git tracking configuration for that branch is broken",
merge=None)
# Ensure we detect if we're on the wrong branch, even if it has
# tracking setup.
git.RunGit(manifest, ['checkout', '-t', 'origin/master', '-b', 'test'])
assertExcept("It should be checked out to 'default'")
# Ensure we handle detached HEAD w/ an appropriate exception.
git.RunGit(manifest, ['checkout', '--detach', 'test'])
assertExcept("It should be checked out to 'default'")
    # Finally, ensure that if the default branch is non-existent, we still throw
# a usable exception.
git.RunGit(manifest, ['branch', '-d', 'default'])
assertExcept("It should be checked out to 'default'")
def testGitMatchBranchName(self):
git_repo = os.path.join(self.tempdir, '.repo', 'manifests')
branches = git.MatchBranchName(git_repo, 'default', namespace='')
self.assertEqual(branches, ['refs/heads/default'])
branches = git.MatchBranchName(git_repo, 'default', namespace='refs/heads/')
self.assertEqual(branches, ['default'])
branches = git.MatchBranchName(git_repo, 'origin/f.*link',
namespace='refs/remotes/')
self.assertTrue('firmware-link-' in branches[0])
branches = git.MatchBranchName(git_repo, 'r23')
self.assertEqual(branches, ['refs/remotes/origin/release-R23-2913.B'])
class Test_iflatten_instance(cros_test_lib.TestCase):
"""Test iflatten_instance function."""
def test_it(self):
f = lambda *a: list(cros_build_lib.iflatten_instance(*a))
self.assertEqual([1, 2], f([1, 2]))
self.assertEqual([1, '2a'], f([1, '2a']))
self.assertEqual([1, 2, 'b'], f([1, [2, 'b']]))
self.assertEqual([1, 2, 'f', 'd', 'a', 's'], f([1, 2, ('fdas',)], int))
self.assertEqual([''], f(''))
class TestKeyValueFiles(cros_test_lib.TempDirTestCase):
"""Tests handling of key/value files."""
def setUp(self):
self.contents = """# A comment !@
A = 1
AA= 2
AAA =3
AAAA\t=\t4
AAAAA\t \t=\t 5
AAAAAA = 6 \t\t# Another comment
\t
\t# Aerith lives!
C = 'D'
CC= 'D'
CCC ='D'
\x20
\t# monsters go boom #
E \t= "Fxxxxx" # Blargl
EE= "Faaa\taaaa"\x20
EEE ="Fk \t kkkk"\t
Q = "'q"
\tQQ ="q'"\x20
QQQ='"q"'\t
R = "r
"
RR = "rr
rrr"
RRR = 'rrr
RRRR
rrr
'
SSS=" ss
'ssss'
ss"
T="
ttt"
"""
self.expected = {
'A': '1',
'AA': '2',
'AAA': '3',
'AAAA': '4',
'AAAAA': '5',
'AAAAAA': '6',
'C': 'D',
'CC': 'D',
'CCC': 'D',
'E': 'Fxxxxx',
'EE': 'Faaa\taaaa',
'EEE': 'Fk \t kkkk',
'Q': "'q",
'QQ': "q'",
'QQQ': '"q"',
'R': 'r\n',
'RR': 'rr\nrrr',
'RRR': 'rrr\n RRRR\n rrr\n',
'SSS': ' ss\n\'ssss\'\nss',
'T': '\nttt'
}
self.conf_file = os.path.join(self.tempdir, 'file.conf')
osutils.WriteFile(self.conf_file, self.contents)
def _RunAndCompare(self, test_input, multiline):
result = cros_build_lib.LoadKeyValueFile(test_input, multiline=multiline)
self.assertEqual(self.expected, result)
def testLoadFilePath(self):
"""Verify reading a simple file works"""
self._RunAndCompare(self.conf_file, True)
def testLoadStringIO(self):
"""Verify passing in StringIO object works."""
self._RunAndCompare(StringIO.StringIO(self.contents), True)
def testLoadFileObject(self):
"""Verify passing in open file object works."""
with open(self.conf_file) as f:
self._RunAndCompare(f, True)
def testNoMultlineValues(self):
"""Verify exception is thrown when multiline is disabled."""
self.assertRaises(ValueError, self._RunAndCompare, self.conf_file, False)
class SafeRunTest(cros_test_lib.TestCase):
"""Tests SafeRunTest functionality."""
def _raise_exception(self, e):
raise e
def testRunsSafely(self):
"""Verify that we are robust to exceptions."""
def append_val(value):
call_list.append(value)
call_list = []
f_list = [functools.partial(append_val, 1),
functools.partial(self._raise_exception,
Exception('testRunsSafely exception.')),
functools.partial(append_val, 2)]
self.assertRaises(Exception, cros_build_lib.SafeRun, f_list)
self.assertEquals(call_list, [1, 2])
def testRaisesFirstException(self):
"""Verify we raise the first exception when multiple are encountered."""
class E1(Exception):
"""Simple exception class."""
pass
class E2(Exception):
"""Simple exception class."""
pass
f_list = [functools.partial(self._raise_exception, e) for e in [E1, E2]]
self.assertRaises(E1, cros_build_lib.SafeRun, f_list)
def testCombinedRaise(self):
"""Raises a RuntimeError with exceptions combined."""
f_list = [functools.partial(self._raise_exception, Exception())] * 3
self.assertRaises(RuntimeError, cros_build_lib.SafeRun, f_list,
combine_exceptions=True)
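# A minimal sketch (not chromite's actual implementation) of the SafeRun behaviour the
# tests above rely on: every functor runs even if earlier ones raise, and afterwards the
# first exception is re-raised, or all of them are wrapped in a RuntimeError when
# combine_exceptions is set.
def _SafeRunSketch(functors, combine_exceptions=False):
  """Run every functor, collecting exceptions, then re-raise."""
  errors = []
  for functor in functors:
    try:
      functor()
    except Exception as e:  # pylint: disable=W0703
      errors.append(e)
  if errors:
    if combine_exceptions:
      raise RuntimeError(errors)
    raise errors[0]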
class FrozenAttributesTest(cros_test_lib.TestCase):
"""Tests FrozenAttributesMixin functionality."""
class DummyClass(object):
"""Any class that does not override __setattr__."""
class SetattrClass(object):
"""Class that does override __setattr__."""
SETATTR_OFFSET = 10
def __setattr__(self, attr, value):
"""Adjust value here to later confirm that this code ran."""
object.__setattr__(self, attr, self.SETATTR_OFFSET + value)
def _TestBasics(self, cls):
# pylint: disable=W0201
def _Expected(val):
return getattr(cls, 'SETATTR_OFFSET', 0) + val
obj = cls()
obj.a = 1
obj.b = 2
self.assertEquals(_Expected(1), obj.a)
self.assertEquals(_Expected(2), obj.b)
obj.Freeze()
self.assertRaises(cros_build_lib.AttributeFrozenError, setattr, obj, 'a', 3)
self.assertEquals(_Expected(1), obj.a)
self.assertRaises(cros_build_lib.AttributeFrozenError, setattr, obj, 'c', 3)
self.assertFalse(hasattr(obj, 'c'))
def testFrozenByMetaclass(self):
"""Test attribute freezing with FrozenAttributesClass."""
class DummyByMeta(self.DummyClass):
"""Class that freezes DummyClass using metaclass construct."""
__metaclass__ = cros_build_lib.FrozenAttributesClass
self._TestBasics(DummyByMeta)
class SetattrByMeta(self.SetattrClass):
"""Class that freezes SetattrClass using metaclass construct."""
__metaclass__ = cros_build_lib.FrozenAttributesClass
self._TestBasics(SetattrByMeta)
def testFrozenByMixinFirst(self):
"""Test attribute freezing with FrozenAttributesMixin first in hierarchy."""
class Dummy(cros_build_lib.FrozenAttributesMixin, self.DummyClass):
"""Class that freezes DummyClass using mixin construct."""
self._TestBasics(Dummy)
class Setattr(cros_build_lib.FrozenAttributesMixin, self.SetattrClass):
"""Class that freezes SetattrClass using mixin construct."""
self._TestBasics(Setattr)
def testFrozenByMixinLast(self):
"""Test attribute freezing with FrozenAttributesMixin last in hierarchy."""
class Dummy(self.DummyClass, cros_build_lib.FrozenAttributesMixin):
"""Class that freezes DummyClass using mixin construct."""
self._TestBasics(Dummy)
class Setattr(self.SetattrClass, cros_build_lib.FrozenAttributesMixin):
"""Class that freezes SetattrClass using mixin construct."""
self._TestBasics(Setattr)
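# A minimal sketch (not chromite's actual implementation) of the freezing behaviour the
# tests above exercise: attribute assignment works normally until Freeze() is called,
# after which any further assignment raises AttributeFrozenError.
class _FrozenSketch(object):
  """Toy freezable object mirroring FrozenAttributesMixin semantics."""
  def __init__(self):
    object.__setattr__(self, '_frozen', False)
  def Freeze(self):
    object.__setattr__(self, '_frozen', True)
  def __setattr__(self, attr, value):
    if self._frozen:
      raise cros_build_lib.AttributeFrozenError(attr)
    object.__setattr__(self, attr, value)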
class TestGetIPv4Address(RunCommandTestCase):
"""Tests the GetIPv4Address function."""
IP_GLOBAL_OUTPUT = """
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
2: eth0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc pfifo_fast state \
DOWN qlen 1000
link/ether cc:cc:cc:cc:cc:cc brd ff:ff:ff:ff:ff:ff
3: eth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP \
qlen 1000
link/ether dd:dd:dd:dd:dd:dd brd ff:ff:ff:ff:ff:ff
inet 172.16.58.3/22 brd 172.16.58.3 scope global eth1
inet6 cdef:0:cdef:cdef:cdef:cdef:cdef:cdef/64 scope global dynamic
valid_lft 2592000sec preferred_lft 604800sec
"""
def testGetIPv4AddressParseResult(self):
"""Verifies we can parse the output and get correct IP address."""
self.rc.AddCmdResult(partial_mock.In('ip'), output=self.IP_GLOBAL_OUTPUT)
self.assertEqual(cros_build_lib.GetIPv4Address(), '172.16.58.3')
def testGetIPv4Address(self):
"""Tests that correct shell commmand is called."""
cros_build_lib.GetIPv4Address(global_ip=False, dev='eth0')
self.rc.assertCommandContains(
['ip', 'addr', 'show', 'scope', 'host', 'dev', 'eth0'])
cros_build_lib.GetIPv4Address(global_ip=True)
self.rc.assertCommandContains(['ip', 'addr', 'show', 'scope', 'global'])
class TestGetChrootVersion(cros_test_lib.MockTestCase):
"""Tests GetChrootVersion functionality."""
def testSimpleBuildroot(self):
"""Verify buildroot arg works"""
read_mock = self.PatchObject(osutils, 'ReadFile', return_value='12\n')
ret = cros_build_lib.GetChrootVersion(buildroot='/build/root')
self.assertEqual(ret, '12')
read_mock.assert_called_with('/build/root/chroot/etc/cros_chroot_version')
def testSimpleChroot(self):
"""Verify chroot arg works"""
read_mock = self.PatchObject(osutils, 'ReadFile', return_value='70')
ret = cros_build_lib.GetChrootVersion(chroot='/ch/root')
self.assertEqual(ret, '70')
read_mock.assert_called_with('/ch/root/etc/cros_chroot_version')
def testNoChroot(self):
"""Verify we don't blow up when there is no chroot yet"""
ret = cros_build_lib.GetChrootVersion(chroot='/.$om3/place/nowhere')
self.assertEqual(ret, None)
class TestChrootPathHelpers(cros_test_lib.TestCase):
"""Verify we correctly reinterpret paths to be used inside/outside chroot."""
@mock.patch('chromite.lib.cros_build_lib.IsInsideChroot', return_value=True)
def testToChrootPathInChroot(self, _inchroot_mock):
"""Test we return the original path to be used in chroot while in chroot."""
path = '/foo/woo/bar'
self.assertEqual(cros_build_lib.ToChrootPath(path), path)
@mock.patch('chromite.lib.cros_build_lib.IsInsideChroot', return_value=False)
def testToChrootPathOutChroot(self, _inchroot_mock):
"""Test we convert the path to be used in chroot while outside chroot."""
subpath = 'bar/haa/ooo'
path = os.path.join(constants.SOURCE_ROOT, subpath)
chroot_path = git.ReinterpretPathForChroot(path)
self.assertEqual(cros_build_lib.ToChrootPath(path), chroot_path)
@mock.patch('chromite.lib.cros_build_lib.IsInsideChroot', return_value=True)
def testFromChrootInChroot(self, _inchroot_mock):
"""Test we return the original chroot path while in chroot."""
path = '/foo/woo/bar'
self.assertEqual(cros_build_lib.FromChrootPath(path), path)
@mock.patch('chromite.lib.cros_build_lib.IsInsideChroot', return_value=False)
def testFromChrootOutChroot(self, _inchroot_mock):
"""Test we convert the chroot path to be used outside chroot."""
# Test that chroot source root has been replaced in the path.
subpath = 'foo/woo/bar'
chroot_path = os.path.join(constants.CHROOT_SOURCE_ROOT, subpath)
path = os.path.join(constants.SOURCE_ROOT, subpath)
self.assertEqual(cros_build_lib.FromChrootPath(chroot_path), path)
# Test that a chroot path has been converted.
chroot_path = '/foo/woo/bar'
path = os.path.join(constants.SOURCE_ROOT,
constants.DEFAULT_CHROOT_DIR,
chroot_path.strip(os.path.sep))
self.assertEqual(cros_build_lib.FromChrootPath(chroot_path), path)
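# Worked example of the path mapping these tests assert (paths illustrative): outside the
# chroot, ToChrootPath() rewrites a path under SOURCE_ROOT into its equivalent under
# CHROOT_SOURCE_ROOT, while FromChrootPath() does the inverse and prefixes any other
# chroot-absolute path with SOURCE_ROOT/DEFAULT_CHROOT_DIR:
#   SOURCE_ROOT/foo/woo/bar      <->  CHROOT_SOURCE_ROOT/foo/woo/bar
#   /foo/woo/bar (chroot view)    ->  SOURCE_ROOT/DEFAULT_CHROOT_DIR/foo/woo/bar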
class CollectionTest(cros_test_lib.TestCase):
"""Tests for Collection helper."""
def testDefaults(self):
"""Verify default values kick in."""
O = cros_build_lib.Collection('O', a=0, b='string', c={})
o = O()
| |
# Run from home directory with python -m pytest tests
import copy
import pytest
import random
import numpy as np
import torch.nn as nn
import torch
from nn_builder.pytorch.RNN import RNN
import torch.optim as optim
N = 250
X = torch.randn((N, 5, 15))
X[0:125, 0, 3] += 20.0
y = X[:, 0, 3] > 5.0
y = y.float()
def test_user_hidden_layers_input_rejections():
"""Tests whether network rejects invalid hidden_layers inputted from user"""
inputs_that_should_fail = [[["linearr", 33]], [["linear", 12, 33]], [["gru", 2, 33]], [["lstm", 2, 33]], [["lstmr", 33]],
[["gruu", 33]], [["gru", 33], ["xxx", 33]], [["linear", 33], ["gru", 12], ["gru", 33]] ]
for input in inputs_that_should_fail:
with pytest.raises(AssertionError):
RNN(input_dim=1, layers_info=input, hidden_activations="relu",
output_activation="relu")
def test_user_hidden_layers_input_acceptances():
"""Tests whether network rejects invalid hidden_layers inputted from user"""
inputs_that_should_work = [[["linear", 33]], [["linear", 12]], [["gru", 2]], [["lstm", 2]], [["lstm", 1]],
[["gru", 330]], [["gru", 33], ["linear", 2]] ]
for input in inputs_that_should_work:
assert RNN(input_dim=1, layers_info=input, hidden_activations="relu",
output_activation="relu")
def test_hidden_layers_created_correctly():
"""Tests that create_hidden_layers works correctly"""
layers = [["gru", 25], ["lstm", 23], ["linear", 5], ["linear", 10]]
rnn = RNN(input_dim=5, layers_info=layers, hidden_activations="relu",
output_activation="relu")
assert type(rnn.hidden_layers[0]) == nn.GRU
assert rnn.hidden_layers[0].input_size == 5
assert rnn.hidden_layers[0].hidden_size == 25
assert type(rnn.hidden_layers[1]) == nn.LSTM
assert rnn.hidden_layers[1].input_size == 25
assert rnn.hidden_layers[1].hidden_size == 23
assert type(rnn.hidden_layers[2]) == nn.Linear
assert rnn.hidden_layers[2].in_features == 23
assert rnn.hidden_layers[2].out_features == 5
assert type(rnn.output_layers[0]) == nn.Linear
assert rnn.output_layers[0].in_features == 5
assert rnn.output_layers[0].out_features == 10
def test_output_layers_created_correctly():
"""Tests that create_output_layers works correctly"""
layers = [["gru", 25], ["lstm", 23], ["linear", 5], ["linear", 10]]
rnn = RNN(input_dim=5, layers_info=layers, hidden_activations="relu", output_activation="relu")
assert rnn.output_layers[0].in_features == 5
assert rnn.output_layers[0].out_features == 10
layers = [["gru", 25], ["lstm", 23], ["lstm", 10]]
rnn = RNN(input_dim=5, layers_info=layers, hidden_activations="relu",
output_activation="relu")
assert rnn.output_layers[0].input_size == 23
assert rnn.output_layers[0].hidden_size == 10
layers = [["gru", 25], ["lstm", 23], [["lstm", 10], ["linear", 15]]]
rnn = RNN(input_dim=5, layers_info=layers, hidden_activations="relu",
output_activation=["relu", "softmax"])
assert rnn.output_layers[0].input_size == 23
assert rnn.output_layers[0].hidden_size == 10
assert rnn.output_layers[1].in_features == 23
assert rnn.output_layers[1].out_features == 15
def test_output_dim_user_input():
"""Tests whether network rejects an invalid output_dim input from user"""
inputs_that_should_fail = [-1, "aa", ["dd"], [2], 0, 2.5, {2}]
for input_value in inputs_that_should_fail:
with pytest.raises(AssertionError):
RNN(input_dim=3, layers_info=[2, input_value], hidden_activations="relu", output_activation="relu")
with pytest.raises(AssertionError):
RNN(input_dim=6, layers_info=input_value, hidden_activations="relu", output_activation="relu")
def test_activations_user_input():
"""Tests whether network rejects an invalid hidden_activations or output_activation from user"""
inputs_that_should_fail = [-1, "aa", ["dd"], [2], 0, 2.5, {2}, "Xavier_"]
for input_value in inputs_that_should_fail:
        with pytest.raises(AssertionError):
            RNN(input_dim=4, layers_info=[["linear", 2]], hidden_activations=input_value,
                output_activation="relu")
        with pytest.raises(AssertionError):
            RNN(input_dim=4, layers_info=[["linear", 2]], hidden_activations="relu",
                output_activation=input_value)
def test_initialiser_user_input():
"""Tests whether network rejects an invalid initialiser from user"""
inputs_that_should_fail = [-1, "aa", ["dd"], [2], 0, 2.5, {2}, "Xavier_"]
for input_value in inputs_that_should_fail:
with pytest.raises(AssertionError):
RNN(input_dim=4, layers_info=[["linear", 2]], hidden_activations="relu",
output_activation="relu", initialiser=input_value)
RNN(layers_info=[["linear", 2], ["linear", 2]], hidden_activations="relu",
output_activation="relu", initialiser="xavier", input_dim=4)
def test_batch_norm_layers():
"""Tests whether batch_norm_layers method works correctly"""
layers = [["gru", 20], ["lstm", 3], ["linear", 4], ["linear", 10]]
rnn = RNN(layers_info=layers, hidden_activations="relu", input_dim=5,
output_activation="relu", initialiser="xavier", batch_norm=True)
assert len(rnn.batch_norm_layers) == 3
assert rnn.batch_norm_layers[0].num_features == 20
assert rnn.batch_norm_layers[1].num_features == 3
assert rnn.batch_norm_layers[2].num_features == 4
def test_linear_layers_only_come_at_end():
"""Tests that it throws an error if user tries to provide list of hidden layers that include linear layers where they
don't only come at the end"""
layers = [["gru", 20], ["linear", 4], ["lstm", 3], ["linear", 10]]
with pytest.raises(AssertionError):
rnn = RNN(layers_info=layers, hidden_activations="relu", input_dim=4,
output_activation="relu", initialiser="xavier", batch_norm=True)
layers = [["gru", 20], ["lstm", 3], ["linear", 4], ["linear", 10]]
assert RNN(layers_info=layers, hidden_activations="relu", input_dim=4,
output_activation="relu", initialiser="xavier", batch_norm=True)
def test_output_activation():
"""Tests whether network outputs data that has gone through correct activation function"""
RANDOM_ITERATIONS = 20
input_dim = 100
for _ in range(RANDOM_ITERATIONS):
data = torch.randn((25, 10, 100))
RNN_instance = RNN(layers_info=[["lstm", 20], ["gru", 5], ["linear", 10], ["linear", 3]],
hidden_activations="relu", input_dim=input_dim,
output_activation="relu", initialiser="xavier", batch_norm=True)
out = RNN_instance.forward(data)
assert all(out.reshape(1, -1).squeeze() >= 0)
RNN_instance = RNN(layers_info=[["lstm", 20], ["gru", 5]],
hidden_activations="relu", input_dim=input_dim,
output_activation="relu", initialiser="xavier")
out = RNN_instance.forward(data)
assert all(out.reshape(1, -1).squeeze() >= 0)
RNN_instance = RNN(layers_info=[["lstm", 20], ["gru", 5], ["linear", 10], ["linear", 3]],
hidden_activations="relu", input_dim=input_dim,
output_activation="relu", initialiser="xavier")
out = RNN_instance.forward(data)
assert all(out.reshape(1, -1).squeeze() >= 0)
RNN_instance = RNN(layers_info=[["lstm", 20], ["gru", 5], ["linear", 10], ["linear", 3]],
hidden_activations="relu", input_dim=input_dim,
output_activation="sigmoid", initialiser="xavier")
out = RNN_instance.forward(data)
assert all(out.reshape(1, -1).squeeze() >= 0)
assert all(out.reshape(1, -1).squeeze() <= 1)
summed_result = torch.sum(out, dim=1)
assert all(summed_result.reshape(1, -1).squeeze() != 1.0)
RNN_instance = RNN(layers_info=[["lstm", 20], ["gru", 5], ["linear", 10], ["linear", 3]],
hidden_activations="relu", input_dim=input_dim,
output_activation="softmax", initialiser="xavier")
out = RNN_instance.forward(data)
assert all(out.reshape(1, -1).squeeze() >= 0)
assert all(out.reshape(1, -1).squeeze() <= 1)
summed_result = torch.sum(out, dim=1)
summed_result = summed_result.reshape(1, -1).squeeze()
        summed_result = torch.round(summed_result * 10 ** 5) / (10 ** 5)
assert all( summed_result == 1.0)
RNN_instance = RNN(layers_info=[["lstm", 20], ["gru", 5], ["lstm", 25]],
hidden_activations="relu", input_dim=input_dim,
output_activation="softmax", initialiser="xavier")
out = RNN_instance.forward(data)
assert all(out.reshape(1, -1).squeeze() >= 0)
assert all(out.reshape(1, -1).squeeze() <= 1)
summed_result = torch.sum(out, dim=1)
summed_result = summed_result.reshape(1, -1).squeeze()
        summed_result = torch.round(summed_result * 10 ** 5) / (10 ** 5)
assert all( summed_result == 1.0)
RNN_instance = RNN(layers_info=[["lstm", 20], ["gru", 5], ["lstm", 25]],
hidden_activations="relu", input_dim=input_dim,
initialiser="xavier")
out = RNN_instance.forward(data)
assert not all(out.reshape(1, -1).squeeze() >= 0)
assert not all(out.reshape(1, -1).squeeze() <= 0)
summed_result = torch.sum(out, dim=1)
summed_result = summed_result.reshape(1, -1).squeeze()
        summed_result = torch.round(summed_result * 10 ** 5) / (10 ** 5)
assert not all(summed_result == 1.0)
RNN_instance = RNN(layers_info=[["lstm", 20], ["gru", 5], ["lstm", 25], ["linear", 8]],
hidden_activations="relu", input_dim=input_dim,
initialiser="xavier")
out = RNN_instance.forward(data)
assert not all(out.reshape(1, -1).squeeze() >= 0)
assert not all(out.reshape(1, -1).squeeze() <= 0)
summed_result = torch.sum(out, dim=1)
summed_result = summed_result.reshape(1, -1).squeeze()
        summed_result = torch.round(summed_result * 10 ** 5) / (10 ** 5)
assert not all( summed_result == 1.0)
def test_output_activation_return_return_final_seq_only_off():
"""Tests whether network outputs data that has gone through correct activation function"""
RANDOM_ITERATIONS = 20
input_dim = 100
for _ in range(RANDOM_ITERATIONS):
data = torch.randn((25, 10, 100))
RNN_instance = RNN(layers_info=[["lstm", 20], ["gru", 5], ["linear", 10], ["linear", 3]],
hidden_activations="relu", input_dim=input_dim, return_final_seq_only=False,
output_activation="relu", initialiser="xavier", batch_norm=True)
out = RNN_instance.forward(data)
assert all(out.reshape(1, -1).squeeze() >= 0)
RNN_instance = RNN(layers_info=[["lstm", 20], ["gru", 5]],
hidden_activations="relu", input_dim=input_dim, return_final_seq_only=False,
output_activation="relu", initialiser="xavier")
out = RNN_instance.forward(data)
assert all(out.reshape(1, -1).squeeze() >= 0)
RNN_instance = RNN(layers_info=[["lstm", 20], ["gru", 5], ["linear", 10], ["linear", 3]],
hidden_activations="relu", input_dim=input_dim, return_final_seq_only=False,
output_activation="relu", initialiser="xavier")
out = RNN_instance.forward(data)
assert all(out.reshape(1, -1).squeeze() >= 0)
RNN_instance = RNN(layers_info=[["lstm", 20], ["gru", 5], ["linear", 10], ["linear", 3]],
hidden_activations="relu", input_dim=input_dim, return_final_seq_only=False,
output_activation="sigmoid", initialiser="xavier")
out = RNN_instance.forward(data)
assert all(out.reshape(1, -1).squeeze() >= 0)
assert all(out.reshape(1, -1).squeeze() <= 1)
summed_result = torch.sum(out, dim=2)
assert all(summed_result.reshape(1, -1).squeeze() != 1.0)
RNN_instance = RNN(layers_info=[["lstm", 20], ["gru", 5], ["linear", 10], ["linear", 3]],
hidden_activations="relu", input_dim=input_dim, return_final_seq_only=False,
output_activation="softmax", initialiser="xavier")
out = RNN_instance.forward(data)
assert all(out.reshape(1, -1).squeeze() >= 0)
assert all(out.reshape(1, -1).squeeze() <= 1)
summed_result = torch.sum(out, dim=2)
summed_result = summed_result.reshape(1, -1).squeeze()
        summed_result = torch.round(summed_result * 10 ** 5) / (10 ** 5)
assert all( summed_result == 1.0)
RNN_instance = RNN(layers_info=[["lstm", 20], ["gru", 5], ["lstm", 25]],
hidden_activations="relu", input_dim=input_dim, return_final_seq_only=False,
output_activation="softmax", initialiser="xavier")
out = RNN_instance.forward(data)
assert all(out.reshape(1, -1).squeeze() >= 0)
assert all(out.reshape(1, -1).squeeze() <= 1)
summed_result = torch.sum(out, dim=2)
summed_result = summed_result.reshape(1, -1).squeeze()
        summed_result = torch.round(summed_result * 10 ** 5) / (10 ** 5)
assert all( summed_result == 1.0)
RNN_instance = RNN(layers_info=[["lstm", 20], ["gru", 5], ["lstm", 25]],
hidden_activations="relu", input_dim=input_dim, return_final_seq_only=False,
initialiser="xavier")
out = RNN_instance.forward(data)
assert not all(out.reshape(1, -1).squeeze() >= 0)
assert not all(out.reshape(1, -1).squeeze() <= 0)
summed_result = torch.sum(out, dim=2)
summed_result = summed_result.reshape(1, -1).squeeze()
        summed_result = torch.round(summed_result * 10 ** 5) / (10 ** 5)
assert not all( summed_result == 1.0)
RNN_instance = RNN(layers_info=[["lstm", 20], ["gru", 5], ["lstm", 25], ["linear", 8]],
hidden_activations="relu", input_dim=input_dim, return_final_seq_only=False,
initialiser="xavier")
out = RNN_instance.forward(data)
assert not all(out.reshape(1, -1).squeeze() >= 0)
assert not all(out.reshape(1, -1).squeeze() <= 0)
summed_result = torch.sum(out, dim=2)
summed_result = summed_result.reshape(1, -1).squeeze()
        summed_result = torch.round(summed_result * 10 ** 5) / (10 ** 5)
assert not all( summed_result == 1.0)
def test_y_range():
"""Tests whether setting a y range works correctly"""
for _ in range(100):
val1 = random.random() - 3.0*random.random()
val2 = random.random() + 2.0*random.random()
lower_bound = min(val1, val2)
upper_bound = max(val1, val2)
rnn = RNN(layers_info=[["lstm", 20], ["gru", 5], ["lstm", 25]],
hidden_activations="relu", y_range=(lower_bound, upper_bound),
initialiser="xavier", input_dim=22)
random_data = torch.randn((10, 11, 22))
out = rnn.forward(random_data)
out = out.reshape(1, -1).squeeze()
assert torch.sum(out > lower_bound).item() == 25*10, "lower {} vs. {} ".format(lower_bound, out)
assert torch.sum(out < upper_bound).item() == 25*10, "upper {} vs. {} ".format(upper_bound, out)
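# Minimal sketch of how a y_range can be enforced (an assumption about the mechanism, not
# necessarily nn_builder's exact implementation): squash the raw output with a sigmoid and
# rescale it into (lower_bound, upper_bound), which yields the bounded outputs asserted above.
def _apply_y_range_sketch(raw_output, y_range):
    """Rescale raw network output into the open interval given by y_range."""
    lower, upper = y_range
    return lower + (upper - lower) * torch.sigmoid(raw_output)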
def test_deals_with_None_activation():
"""Tests whether is able to handle user inputting None as output activation"""
assert RNN(layers_info=[["lstm", 20], ["gru", | |
# Repository: Elrine/discord-dK2-destin
from typing import List, Tuple, Union
import sqlalchemy
from sqlalchemy.orm.session import Session
from sqlalchemy.sql.elements import False_, True_
from sqlalchemy.sql.expression import insert, select, update
import discord_bot.model as model
from sqlalchemy import create_engine
class DBManager():
def __init__(self) -> None:
self.engine = create_engine(
"sqlite:///db.sqlite3", future=True, echo=True)
model.Base.metadata.create_all(self.engine)
self.initSkill()
self.initAsset()
def initSkill(self):
with Session(self.engine, future=True) as session:
session : Session
stmt = select(model.SkillModel)
result = session.execute(stmt).all()
if len(result) == 0:
session.add_all(
[
model.SkillModel(
name="Athlétisme",
characteristic_1="STR",
characteristic_2="DEX",
description="Utilisez cette compétence pour courir, sauter, faire des bonds et des cabrioles, escalaer des murs et des falaises, nager. C'est aussi la compétence des sports et de presque toutes les activités physiques athlétiques. Cette compétence peut aussi réduire les dommages encaissés lors d'une chute.",
clutter_malus=True
),
model.SkillModel(
name="Bluff",
characteristic_1="INT",
characteristic_2="CHA",
description="Vous permet de mentir ou de baratiner. On peut résister à l'utilisation de cette compétance grâce à un jet de *Psychologie* ou de *bluff*.",
clutter_malus=False
),
model.SkillModel(
name="Concentration",
characteristic_1="CON",
characteristic_2="WIS",
description="Servez-vous de cette compétence pour vous concentrer sur une tâche ou pour méditer. Vous pouvez aussi regagner ou faire regagner des points d'énergie.",
clutter_malus=False
),
model.SkillModel(
name="Déguisement",
characteristic_1="DEX",
characteristic_2="CHA",
description="Cette compétence est utile pour se travestir ou se maquiller. On peut percer à jour un déguisement grâce à un jet de Perception en opposition. Il faut un minimum de 10 tours pour se déguiser, à condition d'avoir le matériel adéquat.",
clutter_malus=False
),
model.SkillModel(
name="Diplomatie",
characteristic_1="INT",
characteristic_2="CHA",
description="Utilisez cette compétence pour parler à un auditoire, chercher à convaincre un interlocuteur grâce à des arguments cohérents ou cal mer des personnes hostiles. C'est aussi la compétence qu'il vous faut si vous voulez savoir vous habiller correctement et vous tenir à table. On peut résister à l'utilisation de cette compétence grâce à un jet de *Psychologie* ou de *Diplomatic*.",
clutter_malus=False
),
model.SkillModel(
name="Discrétion",
characteristic_1="DEX",
characteristic_2="INT",
description="Elle vous servira pour vous déplacer en silence, vous dissimuler ou camoufler sur vous un petit objet. On peut repérer un personnage discret grâce à un jet de *Perception* en opposition.",
clutter_malus=True
),
model.SkillModel(
name="Équitation",
characteristic_1="DEX",
characteristic_2="WIS",
description="Les héros en font usage pour monter tous les animaux qui l'acceptent. Il est évidemment plus facile de chevaucher une créature entraînée qu'une bête sauvage. Dans certains univers, cette compétence peut être remplacée par *Conduite*.",
clutter_malus=False
),
model.SkillModel(
name="Érudition",
characteristic_1="INT",
characteristic_2="WIS",
description="Cette compétence vous permet de savoir tout un tas de trucs sur tout un tas de sujets très différents. Bien entendu, vous n'êtes pas vraiment un spécialiste, mais vous en savez assez pour savoir de quoi il s'agit.",
clutter_malus=False
),
model.SkillModel(
name="Foi",
characteristic_1="WIS",
characteristic_2="CHA",
description="Vous connaissez les dieux et leurs histoires de coucherie. Vous avez lu leur interview dans le dernier People magazine qui leur est consacré. Si ça se trouve, vous croyez même tout ce qu'ils racon tent et, en échange, vous savez provoquer des miracles et vous occuper des créatures que votre dieu déteste. Trop cool!",
clutter_malus=False
),
model.SkillModel(
name="Influence",
characteristic_1="CHA",
characteristic_2="INT",
description="Vous connaissez quelqu'un qui connait quelqu'un qui peut vous aider. Cette com pétence vous permet d'obtenir des appuis matériels et des services de la part de personnes ou d'organisa tions. Bien entendu, comme pour Renseignements, il vous faut du temps et de l'argent pour obtenir de tel les faveurs ponctuelles - en général, une bonne soirée et un ou plusieurs d6 DO (les Dragons d'Or, la mon naie du dK) pour débloquer les verrous. Vous pouvez aussi utiliser cette compétence comme une connais sance des différents réseaux sociaux de Tendroit où vous êtes. On peut résister à cette compétence grace à un jet de *Psychologie* ou d'*Influence*.",
clutter_malus=False
),
model.SkillModel(
name="Initiative",
characteristic_1="DEX",
characteristic_2="WIS",
description="Avez-vous de bons réflexes? Cette compétence sert au conteur à détermi ner qui agit le premier dans une situation conflictuelle comme un combat.",
clutter_malus=True
),
model.SkillModel(
name="Intimidation",
characteristic_1="STR",
characteristic_2="CHA",
description="Utilisez cette compé tence pour impressionner un adversaire, pour donner des ordres ou pour mener des interrogatoires un peu musclés. On peut résister à cette compétence grâce à un jet de *Psychologie* ou d'*Intimidation*.",
clutter_malus=False
),
model.SkillModel(
name="Linguistique",
characteristic_1="INT",
characteristic_2="CHA",
description="Vous savez parler vore langue maternelle. Vous pouvez aussi apprendre un nombre de langues égal à votre Intelligence (peut-être les connaissez-vous au début de vos aventures ou les apprendrez-vous au cours de celle-ci). Dans tous le autres cas, *Linguistique* vous permet de vous faire comprendre et d'échanger avec ceux que vous rencontre grâce à un sabir déroutant et à beaucoup de gestes. Lorsque vous ne parlez pas la même langue que vos interlocuteurs, vos compétences sociales sont limitées pa celle-ci : utilisez la moins bonne des deux.",
clutter_malus=False
),
model.SkillModel(
name="Magie",
characteristic_1="INT",
characteristic_2="WIS",
description="Avec cette compétence, vous voilà prêt à utiliser des pouvoirs cosmiques phénoménaux (dans un mouchoir de poche) ! En tout ca vous savez de quoi il retourne quand on vous parle sortilège, rituel, focus, gestuelle et composantes..",
clutter_malus=False
),
model.SkillModel(
name="Métier",
characteristic_1="INT",
characteristic_2="DEX",
description="Vous êtes à l'aise avec les outils et le travail manuel. Vous avez appris des techniques dans des domaines très différents. Sans être un pe cialiste d'aucun, vous vous débrouillez assez bien.",
clutter_malus=False
),
model.SkillModel(
name="Perception",
characteristic_1="INT",
characteristic_2="WIS",
description="C'est la compétence reine pour voir, entendre, goûter, sentir votre environnement. Vous pouvez aussi fouiller une pièce, repérer une embuscade, examiner un lieu, etc...",
clutter_malus=False
),
model.SkillModel(
name="Psychologie",
characteristic_1="INT",
characteristic_2="WIS",
description="Utilisez cette compétence résister à toutes les tentatives de manoeuvre sociale et d'influence. Vous pouvez aussi l'utiliser pour comprendre les motivations et les émotions des gens que vous côtoyez ou dont vous avez témoignage.",
clutter_malus=False
),
model.SkillModel(
name="Renseignements",
characteristic_1="WIS",
characteristic_2="CHA",
description="Cette compétence vous servira à trouver des renseignements en traînant dans des endroits publics et en posant des questions. Il faut l'équivalent d'une demi-journée pour obtenir des informations. En général, vous dépensez 1d6 DO en verres payés à vos informateurs. Vous pouvez aussi utiliser cette compétence pour vous y retrouver dans les méandres d'une bureaucratie tentaculaire.",
clutter_malus=False
),
model.SkillModel(
name="Représentation",
characteristic_1="CHA",
characteristic_2="WIS",
description="Cette compétence est celle des artistes, saltimbanques, musiciens, acteurs et autres pratiquants des arts vivants.",
clutter_malus=False
),
model.SkillModel(
name="Reputation",
characteristic_1="CHA",
characteristic_2="WIS",
description="Vous cherchez vraiment à être connu partout parce que, quand même, c'est bon la gloire parfois. En jouant cette compétence contre une difficulté qui dépend de la nature des personnes à qui vous vous adressez (de 15 pour des gens de votre pays à 40 et plus pour de parfaits étrangers loin de chez vous), vous pouvez vous faire reconnaître et, peut-être, profiter de votre célébrité.",
clutter_malus=False
),
model.SkillModel(
name="Sécurité",
characteristic_1="DEX",
characteristic_2="INT",
description="Cette compétence est pratique pour crocheter des serrures et désamorcer (ou poser) des pièges.",
clutter_malus=False
),
model.SkillModel(
name="Subterfuge",
characteristic_1="DEX",
characteristic_2="INT",
description="Compétence des illusionnistes et des prestidigitateurs, elle est utile pour faire des tours de passe-passe, du vol à l'étalage ou à la tire, pour vous faufiler dans les passages étroits, vous contorsionner ou échapper à des liens. On peut résis ter à l'utilisation de cette compétence grâce à un jet de *Perception*.",
clutter_malus=True
),
model.SkillModel(
name="Survie",
characteristic_1="CON",
characteristic_2="WIS",
description="La compétence préférée de tous les asociaux qui s'en servent pour chasser, monter un camp, suivre des traces, trouver leur chemin et de manière générale survivre en milieu hostile.",
clutter_malus=False
)
]
)
session.commit()
def initAsset(self):
with Session(self.engine, future=True) as session:
session : Session
stmt = select(model.AssetModel)
result = session.execute(stmt).all()
if len(result) == 0:
session.add_all(
[
]
)
session.commit()
def addAsset(self, _name, _description) -> None:
with Session(self.engine, future=True) as session:
session : Session
session.add(
model.AssetModel(name=_name, description=_description)
)
session.commit()
def addUser(self, _user_name, _user_id) -> None:
with Session(self.engine, future=True) as session:
session : Session
session.add(
model.UserModel(user_name=_user_name, user_discord_id=_user_id)
)
session.commit()
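    # Illustrative usage sketch (commented out; the names and ids below are placeholders,
    # not values from this repository):
    #
    #   manager = DBManager()
    #   manager.addUser("Alice", "123456789012345678")
    #   manager.addAsset("Atout d'exemple", "Description d'exemple.")
    #   character = manager.createCharacter("Kara", user_discord_id="123456789012345678")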
def createCharacter(self, _character_name, **kwargs) -> Union[model.CharacterModel, None]:
with Session(self.engine, future=True) as session:
session : Session
user_id = None
if "user_id" in kwargs:
user_id = kwargs['user_id']
elif 'user_discord_id' in kwargs:
stmt = select(model.UserModel.id).where(model.UserModel.user_discord_id == kwargs['user_discord_id'])
user_id = session.execute(stmt).scalars().first()
if user_id != None:
character = model.CharacterModel(name=_character_name, user_id=user_id)
session.add(
character
)
session.flush()
stmt = select(model.SkillModel.id)
for row | |
import random
import operator
class locale(object):
"""Holds the characteristics of a state or district ("locale")
All margins are assumed to be Democratic % - Republican %
I chose this semi-arbitrarily, on the basis that Biden was leading in most polls, so using positive
Democratic margins would reduce the likelihood of an error caused by a missing "-"
abbr is the abbreviation for the locale
pollQuality is a subjective assessment of the quality of polls available, if any
polls is the current average margin in the available polls MINUS the average national polling margin
prior is the margin in 2016
evs is the value in electoral votes
demMargin is the predicted margin of victory for a given simulation
demWins is the percentage of simulations in which Biden won the locale
    demAvg is the average margin of victory across all simulations for a locale
tip is the likelihood that the locale would be the tipping point in the Electoral College
sm is the likelihood that the state would have the smallest margin of victory (districts excluded)
actual_dem_margin was added in 2022 when this code was uploaded and represents the actual margin of victory
"""
def __init__(self, abbr, pollQuality, polls, prior, evs, demMargin, demWins, demAvg, tip, sm):
self.abbr = abbr
self.pollQuality = pollQuality
self.polls = polls
self.prior = prior
self.evs = evs
self.demMargin = demMargin
self.demWins = demWins
self.demAvg = demAvg
self.tip = tip
self.sm = sm
# Added in 2022
self.actual_dem_margin = 0
# The locales are separated into those for which polling data is available and those without it
# "polledEVs" are the electoral votes for each state; polledPriors were the margins of victory in 2016
polledAbbrs = ["NV","AZ","TX","IA","MN","WI","MI","OH","GA","FL","NC","VA","PA","NH","NM","SC","ME"]
polledEVs = [6,11,38,6,10,10,16,18,16,29,15,13,20,4,5,9,2]
polledPriors = [2.42,-3.55,-8.99,-9.41,1.52,-0.77,-0.23,-8.13,-5.13,-1.2,-3.66,5.32,-0.72,0.37,8.21,-14.27,2.96]
# These are the polling margins for the states which have polling data available, taken from RealClearPolitics.
# Note that they are relative to the national polling margin, not absolute
# Also note that these were last updated sometime in the early fall of 2020, a month or two prior to the
# election; thus, they represent my prediction of the results as of that time
polledPolls = [
# Nevada:
2.3,
# Arizona:
-4.8,
# Texas:
-10.2,
# Iowa:
-8.6,
# Minnesota:
-1.2,
# Wisconsin:
-1.7,
# Michigan:
-0.5,
# Ohio:
-6.3,
# Georgia:
-9.6,
# Florida:
-3.1,
# North Carolina:
-7,
# Virginia:
4.9,
# Pennsylvania:
-0.4,
# New Hampshire:
1.8,
# New Mexico:
4.1,
# South Carolina:
-13.1,
# Maine:
3.2]
# These are subjective assessments of the quality and quantity of the polls in each state
polledPollsQuality = [
# Nevada:
"Low",
# Arizona:
"High",
# Texas:
"High",
# Iowa:
"High",
# Minnesota:
"High",
# Wisconsin:
"High",
# Michigan:
"High",
# Ohio:
"High",
# Georgia:
"High",
# Florida:
"High",
# North Carolina:
"High",
# Virginia:
"High",
# Pennsylvania:
"High",
# New Hampshire:
"High",
# New Mexico:
"Low",
# South Carolina:
"Low",
# Maine:
"High"]
# "trailingDemMargin" is the average margin in recent national polls, taken from RealClearPolitics. As with the
# state polling data, this was last updated several months before the election
# "natMarginEDSTD" is the expected standard deviation of the difference between the national polling
# average at the point in time in which the simulation was run and the actual national polling margin on
# Election Day. It was estimated from past recent presidential elections and is semi-subjective
# "stateMarginEDModifierSTD" is the expected standard deviation of the difference between the current polls
# for a locale and the polls on Election Day, after accounting for the change in the national average. Like
# "natMarginEDSTD" it was estimated from past elections and is semi-subjective
trailingDemMargin = 6.33
natMarginEDSTD = 2.5
stateMarginEDModifierSTD = 4
nonPolledAbbrs = ["NE2","ME2","CO","OR","DE","AK","MS","UT","MO","IN","CT","NJ","ME1","RI","WA","IL","NY","VT",
"MD","MA","CA","HI","LA","MT","KS","NE1","TN","AR","AL","SD","KY","ID","ND","OK","WV","WY",
"NE3","DC","NE"]
nonPolledPriors = [-2.24,-10.29,4.91,10.98,11.37,-14.73,-17.83,-18.08,-18.64,-19.17,13.64,14.1,14.81,15.51,15.71,
17.06,22.49,26.41,26.42,27.2,30.11,32.18,-19.64,-20.42,-20.6,-20.72,-26.01,-26.92,-27.73,
-29.79,-29.84,-31.77,-35.73,-37.08,-42.07,-46.3,-54.19,86.78,0]
nonPolledEVs = [1,1,9,7,3,3,6,6,10,11,7,14,1,4,12,20,29,3,10,11,55,4,8,3,6,1,11,6,9,3,8,4,3,7,5,3,1,3,2]
allStates = []
# Initializing the locales and adding them to the working list allStates
for x in range(len(polledAbbrs)):
allStates.append(locale(polledAbbrs[x],polledPollsQuality[x],polledPolls[x],polledPriors[x],polledEVs[x],
0,0,0,0,0))
for x in range(len(nonPolledAbbrs)):
allStates.append(locale(nonPolledAbbrs[x],"None",0,nonPolledPriors[x],nonPolledEVs[x],0,0,0,0,0))
# These are the average number of Democratic and Republican electoral votes across all the simulations
avgDemEVs = 0
avgGOPEVs = 0
# This is the percentage of simulations won by Biden, Trump, or tied
demWins = 0
GOPWins = 0
ties = 0
# This tracks the likelihood that the Electoral College margin of victory will fall into a given bin. The bins
# are taken from a PredictIt market
outcomes = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
# This tracks the likelihood that the popular vote winner will lose the Electoral College
PVWinnerLoses = 0
# This code runs 100,000 simulations of Election Day. I chose not to parameterize the number of simulations,
# although in retrospect it would probably have saved time during testing
for y in range(100000):
# "natMarginED" represents a randomly generated polling margin on Election Day
# "avgPolledPollError" represents a randomly generated average error in the state polls. 3.17 is the standard
# devation of the value from recent previous elections. Positive values represent Biden being underestimated,
# negative values represent Biden being overestimated
# "avgNationalPollError" represents a randomly generated average margin of error in the national polls, also
# estimated using values from previous elections
natMarginED = trailingDemMargin + random.gauss(0, natMarginEDSTD)
avgPolledPollError = random.gauss(0,3.17)
avgNationalPollError = avgPolledPollError + random.gauss(0,2.22)
# demEVs and GOPEVs are the number of electoral votes obtained by each party in this iteration of the
# simulation
demEVs = 0
GOPEVs = 0
# Maine and Nebraska had to be treated separately from the others due to their unique methods of allocating
# electoral votes
# "maineMargin" is the simulated margin of victory in Maine across the entire state and will be used to
# calculate the margins of victory in its two congressional districts later in the program
maineMargin = 0
# Resetting the margins in each state for the simulation
for x in range(len(allStates)):
allStates[x].demMargin = 0
for x in range(len(allStates)):
# Locales are divided into those with high poll quality, those with low quality, and those with none
if allStates[x].pollQuality == "High":
# "stateMarginEDModifier" is the difference between the expected polling margin on Election Day
# and the actual polling margin on Election Day in this locale for this simulation
stateMarginEDModifier = random.gauss(0,stateMarginEDModifierSTD)
# "stateMarginED" is the actual polling margin on Election Day in this locale for this simulation
stateMarginED = natMarginED + allStates[x].polls + stateMarginEDModifier
# "demMargin" is the actual simulated margin in the locale. It is calculated by applying an average
# polling error ("avgPolledPollError") to the locale polling margin on Election Day ("stateMarginED")
# and then a further locale-specific polling error (the random Gaussian element). The standard
# deviation for this locale-specific error is estimated based on previous elections
demMargin = stateMarginED + avgPolledPollError * random.gauss(0,1)
allStates[x].demMargin = demMargin
elif allStates[x].pollQuality == "Low":
stateMarginEDModifier = random.gauss(0,stateMarginEDModifierSTD)
# The Election Day poll margin for locales with low quality polls is simulated differently than for
# those with high quality. The margin in the locale in the 2016 election and the current polls are
# averaged. Note that the former is adjusted to cancel out the nationwide Democratic margin in 2016
# by subtracting 2.1
            stateMarginED = (natMarginED + 0.5 * allStates[x].polls + 0.5 * (allStates[x].prior - 2.1)
                             + stateMarginEDModifier)
demMargin = stateMarginED + avgPolledPollError * random.gauss(0,1)
allStates[x].demMargin = demMargin
elif allStates[x].pollQuality == "None" and allStates[x].abbr != "NE" and allStates[x].abbr != "ME1" \
and allStates[x].abbr != "ME2":
# Since there is no polling data for these locales, their results are simulated solely from 2016
# results and national polls. 5.59 is the standard deviation of the change in each state's margin
# between 2012 and 2016, after adjusting for the change in | |
32.12*m.x892 - 32.12*m.x901 - 32.12*m.x910 - 32.12*m.x926
- 55.08*m.x938 - 55.08*m.x948 - 55.08*m.x963 - 54.66*m.x975 - 54.66*m.x983 - 54.66*m.x993
- 54.66*m.x1002 - 33.09*m.x1017 - 33.09*m.x1025 - 33.09*m.x1034 - 33.09*m.x1044 - 40.24*m.x1082
+ 14.55*m.x1094 + 16.17*m.x1124 - 21.13*m.x1164 + 4.59*m.x1176 + 3.18*m.x1186 - 38.83*m.x1200
- 55.08*m.x1226 - 54.66*m.x1234 <= 0)
m.c50 = Constraint(expr= - 19*m.x88 - 19*m.x96 - 19*m.x103 - 19*m.x112 - 19*m.x119 - 19*m.x128 - 70.92*m.x140
- 70.92*m.x150 - 70.92*m.x159 - 70.92*m.x168 - 33.7*m.x182 - 33.7*m.x190 - 33.7*m.x200
- 33.7*m.x207 - 33.7*m.x214 - 33.7*m.x229 - 61.82*m.x241 - 61.82*m.x251 - 61.82*m.x258
- 61.82*m.x265 - 61.82*m.x273 - 61.82*m.x289 - 51.53*m.x301 - 51.53*m.x309 - 51.53*m.x319
- 51.53*m.x326 - 51.53*m.x335 - 17.74*m.x343 - 17.74*m.x351 - 17.74*m.x358 - 17.74*m.x367
- 17.74*m.x375 - 17.74*m.x391 - 69.62*m.x403 - 69.62*m.x411 - 69.62*m.x421 - 69.62*m.x428
- 69.62*m.x435 - 69.62*m.x445 - 18.44*m.x457 - 18.44*m.x465 - 18.44*m.x475 - 18.44*m.x482
- 18.44*m.x491 - 18.44*m.x500 - 18.44*m.x508 - 18.44*m.x518 - 36.23*m.x530 - 36.23*m.x540
- 36.23*m.x547 - 36.23*m.x554 - 36.23*m.x563 - 36.23*m.x579 - 14.98*m.x591 - 14.98*m.x601
- 14.98*m.x608 - 14.98*m.x617 - 14.98*m.x624 - 14.98*m.x633 - 14.98*m.x641 - 14.98*m.x651
- 63.99*m.x663 - 63.99*m.x671 - 63.99*m.x680 - 63.99*m.x687 - 63.99*m.x697 - 82.24*m.x709
- 82.24*m.x716 - 82.24*m.x725 - 82.24*m.x732 - 82.24*m.x741 - 15.6*m.x749 - 15.6*m.x757
- 15.6*m.x767 - 15.6*m.x774 - 15.6*m.x783 - 43.01*m.x791 - 43.01*m.x799 - 43.01*m.x809
- 43.01*m.x816 - 43.01*m.x825 - 43.01*m.x835 - 26.2*m.x847 - 3.91*m.x859 - 3.91*m.x867
- 3.91*m.x877 - 81.72*m.x892 - 81.72*m.x901 - 81.72*m.x910 - 81.72*m.x926 - 54.44*m.x938
- 54.44*m.x948 - 54.44*m.x963 - 19.69*m.x975 - 19.69*m.x983 - 19.69*m.x993 - 19.69*m.x1002
- 24.15*m.x1017 - 24.15*m.x1025 - 24.15*m.x1034 - 24.15*m.x1044 - 33.7*m.x1082 - 61.82*m.x1094
- 69.62*m.x1124 - 63.99*m.x1164 - 82.24*m.x1176 - 15.6*m.x1186 - 26.2*m.x1200 - 54.44*m.x1226
- 19.69*m.x1234 <= 0)
m.c51 = Constraint(expr= - 34.52*m.x88 - 34.52*m.x96 - 34.52*m.x103 - 34.52*m.x112 - 34.52*m.x119 - 34.52*m.x128
+ 0.329999999999998*m.x140 + 0.329999999999998*m.x150 + 0.329999999999998*m.x159
+ 0.329999999999998*m.x168 + 12.67*m.x182 + 12.67*m.x190 + 12.67*m.x200 + 12.67*m.x207
+ 12.67*m.x214 + 12.67*m.x229 + 7.25*m.x241 + 7.25*m.x251 + 7.25*m.x258 + 7.25*m.x265
+ 7.25*m.x273 + 7.25*m.x289 - 16.69*m.x301 - 16.69*m.x309 - 16.69*m.x319 - 16.69*m.x326
- 16.69*m.x335 - 55.46*m.x343 - 55.46*m.x351 - 55.46*m.x358 - 55.46*m.x367 - 55.46*m.x375
- 55.46*m.x391 + 5.98999999999999*m.x403 + 5.98999999999999*m.x411 + 5.98999999999999*m.x421
+ 5.98999999999999*m.x428 + 5.98999999999999*m.x435 + 5.98999999999999*m.x445 - 11.29*m.x457
- 11.29*m.x465 - 11.29*m.x475 - 11.29*m.x482 - 11.29*m.x491 - 11.29*m.x500 - 11.29*m.x508
- 11.29*m.x518 + 4.34*m.x530 + 4.34*m.x540 + 4.34*m.x547 + 4.34*m.x554 + 4.34*m.x563
+ 4.34*m.x579 - 42.1*m.x591 - 42.1*m.x601 - 42.1*m.x608 - 42.1*m.x617 - 42.1*m.x624
- 42.1*m.x633 - 42.1*m.x641 - 42.1*m.x651 - 35.7*m.x663 - 35.7*m.x671 - 35.7*m.x680
- 35.7*m.x687 - 35.7*m.x697 - 11.52*m.x709 - 11.52*m.x716 - 11.52*m.x725 - 11.52*m.x732
- 11.52*m.x741 - 36.02*m.x749 - 36.02*m.x757 - 36.02*m.x767 - 36.02*m.x774 - 36.02*m.x783
- 36.46*m.x791 - 36.46*m.x799 - 36.46*m.x809 - 36.46*m.x816 - 36.46*m.x825 - 36.46*m.x835
+ 9.84*m.x847 - 55.85*m.x859 - 55.85*m.x867 - 55.85*m.x877 - 3.72000000000001*m.x892
- 3.72000000000001*m.x901 - 3.72000000000001*m.x910 - 3.72000000000001*m.x926 - 23.68*m.x938
- 23.68*m.x948 - 23.68*m.x963 + 16.97*m.x975 + 16.97*m.x983 + 16.97*m.x993 + 16.97*m.x1002
+ 6.52*m.x1017 + 6.52*m.x1025 + 6.52*m.x1034 + 6.52*m.x1044 + 12.67*m.x1082 + 7.25*m.x1094
+ 5.98999999999999*m.x1124 - 35.7*m.x1164 - 11.52*m.x1176 - 36.02*m.x1186 + 9.84*m.x1200
- 23.68*m.x1226 + 16.97*m.x1234 <= 0)
m.c52 = Constraint(expr= - 79.99*m.x88 - 79.99*m.x96 - 79.99*m.x103 - 79.99*m.x112 - 79.99*m.x119 - 79.99*m.x128
- 23.86*m.x140 - 23.86*m.x150 - 23.86*m.x159 - 23.86*m.x168 - 75.22*m.x182 - 75.22*m.x190
- 75.22*m.x200 - 75.22*m.x207 - 75.22*m.x214 - 75.22*m.x229 - 4.92*m.x241 - 4.92*m.x251
- 4.92*m.x258 - 4.92*m.x265 - 4.92*m.x273 - 4.92*m.x289 - 24.77*m.x301 - 24.77*m.x309
- 24.77*m.x319 - 24.77*m.x326 - 24.77*m.x335 - 7.41*m.x343 - 7.41*m.x351 - 7.41*m.x358
- 7.41*m.x367 - 7.41*m.x375 - 7.41*m.x391 - 20.65*m.x403 - 20.65*m.x411 - 20.65*m.x421
- 20.65*m.x428 - 20.65*m.x435 - 20.65*m.x445 - 66.57*m.x457 - 66.57*m.x465 - 66.57*m.x475
- 66.57*m.x482 - 66.57*m.x491 - 66.57*m.x500 - 66.57*m.x508 - 66.57*m.x518 - 43.35*m.x530
- 43.35*m.x540 - 43.35*m.x547 - 43.35*m.x554 - 43.35*m.x563 - 43.35*m.x579 - 77.45*m.x591
- 77.45*m.x601 - 77.45*m.x608 - 77.45*m.x617 - 77.45*m.x624 - 77.45*m.x633 - 77.45*m.x641
- 77.45*m.x651 - 23.3*m.x663 - 23.3*m.x671 - 23.3*m.x680 - 23.3*m.x687 - 23.3*m.x697
- 27.91*m.x709 - 27.91*m.x716 - 27.91*m.x725 - 27.91*m.x732 - 27.91*m.x741 - 18.69*m.x749
- 18.69*m.x757 - 18.69*m.x767 - 18.69*m.x774 - 18.69*m.x783 - 18.18*m.x791 - 18.18*m.x799
- 18.18*m.x809 - 18.18*m.x816 - 18.18*m.x825 - 18.18*m.x835 - 61.52*m.x847 - 27.94*m.x859
- 27.94*m.x867 - 27.94*m.x877 - 10.03*m.x892 - 10.03*m.x901 - 10.03*m.x910 - 10.03*m.x926
- 25.06*m.x938 - 25.06*m.x948 - 25.06*m.x963 - 45.53*m.x975 - 45.53*m.x983 - 45.53*m.x993
- 45.53*m.x1002 - 75.5*m.x1017 - 75.5*m.x1025 - 75.5*m.x1034 - 75.5*m.x1044 - 75.22*m.x1082
- 4.92*m.x1094 - 20.65*m.x1124 - 23.3*m.x1164 - 27.91*m.x1176 - 18.69*m.x1186 - 61.52*m.x1200
- 25.06*m.x1226 - 45.53*m.x1234 <= 0)
m.c53 = Constraint(expr= - 27.68*m.x88 - 27.68*m.x96 - 27.68*m.x103 - 27.68*m.x112 - 27.68*m.x119 - 27.68*m.x128
- 8.47000000000001*m.x140 - 8.47000000000001*m.x150 - 8.47000000000001*m.x159
- 8.47000000000001*m.x168 - 16.53*m.x182 - 16.53*m.x190 - 16.53*m.x200 - 16.53*m.x207
- 16.53*m.x214 - 16.53*m.x229 + 10.33*m.x241 + 10.33*m.x251 + 10.33*m.x258 + 10.33*m.x265
+ 10.33*m.x273 + 10.33*m.x289 - 15.34*m.x301 - 15.34*m.x309 - 15.34*m.x319 - 15.34*m.x326
- 15.34*m.x335 + 11.45*m.x343 + 11.45*m.x351 + 11.45*m.x358 + 11.45*m.x367 + 11.45*m.x375
+ 11.45*m.x391 - 18.5*m.x403 - 18.5*m.x411 - 18.5*m.x421 - 18.5*m.x428 - 18.5*m.x435
- 18.5*m.x445 - 32.63*m.x457 - 32.63*m.x465 - 32.63*m.x475 - 32.63*m.x482 - 32.63*m.x491
- 32.63*m.x500 - 32.63*m.x508 - 32.63*m.x518 - 32.82*m.x530 - 32.82*m.x540 - 32.82*m.x547
- 32.82*m.x554 - 32.82*m.x563 - 32.82*m.x579 - 11.35*m.x591 - 11.35*m.x601 - 11.35*m.x608
- 11.35*m.x617 - 11.35*m.x624 - 11.35*m.x633 - 11.35*m.x641 - 11.35*m.x651 - 16.14*m.x663
- 16.14*m.x671 - 16.14*m.x680 - 16.14*m.x687 - 16.14*m.x697 - 20.65*m.x709 - 20.65*m.x716
- 20.65*m.x725 - 20.65*m.x732 - 20.65*m.x741 + 32*m.x749 + 32*m.x757 + 32*m.x767 + 32*m.x774
+ 32*m.x783 - 18.41*m.x791 - 18.41*m.x799 - 18.41*m.x809 - 18.41*m.x816 - 18.41*m.x825
- 18.41*m.x835 + 18.77*m.x847 - 37.4*m.x859 - 37.4*m.x867 - 37.4*m.x877 - 24.19*m.x892
- 24.19*m.x901 - 24.19*m.x910 - 24.19*m.x926 + 35.79*m.x938 + 35.79*m.x948 + 35.79*m.x963
+ 33.09*m.x975 + 33.09*m.x983 + 33.09*m.x993 + 33.09*m.x1002 + 2.52999999999999*m.x1017
+ 2.52999999999999*m.x1025 + 2.52999999999999*m.x1034 + 2.52999999999999*m.x1044
- 16.53*m.x1082 + 10.33*m.x1094 - 18.5*m.x1124 - 16.14*m.x1164 - 20.65*m.x1176 + 32*m.x1186
+ 18.77*m.x1200 + 35.79*m.x1226 + 33.09*m.x1234 <= 0)
m.c54 = Constraint(expr= - 0.329999999999998*m.x88 - 0.329999999999998*m.x96 - 0.329999999999998*m.x103
- 0.329999999999998*m.x112 - 0.329999999999998*m.x119 - 0.329999999999998*m.x128 - 3.38*m.x140
- 3.38*m.x150 - 3.38*m.x159 - 3.38*m.x168 + 43.28*m.x182 + 43.28*m.x190 + 43.28*m.x200
+ 43.28*m.x207 + 43.28*m.x214 + 43.28*m.x229 - 5.43*m.x241 - 5.43*m.x251 - 5.43*m.x258
- 5.43*m.x265 - 5.43*m.x273 - 5.43*m.x289 + 46.24*m.x301 + 46.24*m.x309 + 46.24*m.x319
+ 46.24*m.x326 + 46.24*m.x335 + 8.83*m.x343 + 8.83*m.x351 + 8.83*m.x358 + 8.83*m.x367
+ 8.83*m.x375 + 8.83*m.x391 - 1.56*m.x403 - 1.56*m.x411 - 1.56*m.x421 - 1.56*m.x428
- 1.56*m.x435 - 1.56*m.x445 - 12.51*m.x457 - 12.51*m.x465 - 12.51*m.x475 - 12.51*m.x482
- 12.51*m.x491 - 12.51*m.x500 - 12.51*m.x508 - 12.51*m.x518 + 36.8*m.x530 + 36.8*m.x540
+ 36.8*m.x547 + 36.8*m.x554 + 36.8*m.x563 + 36.8*m.x579 + 8.55*m.x591 + 8.55*m.x601
+ 8.55*m.x608 + 8.55*m.x617 + 8.55*m.x624 + 8.55*m.x633 + 8.55*m.x641 + 8.55*m.x651
- 20.88*m.x663 - 20.88*m.x671 - 20.88*m.x680 - 20.88*m.x687 - 20.88*m.x697 + 7.83*m.x709
+ 7.83*m.x716 + 7.83*m.x725 + 7.83*m.x732 + 7.83*m.x741 - 18.88*m.x749 - 18.88*m.x757
- 18.88*m.x767 - 18.88*m.x774 - 18.88*m.x783 + 45.36*m.x791 + 45.36*m.x799 + 45.36*m.x809
+ 45.36*m.x816 + 45.36*m.x825 + 45.36*m.x835 + 41.83*m.x847 + 35.43*m.x859 + 35.43*m.x867
+ 35.43*m.x877 - 10.99*m.x892 - 10.99*m.x901 - 10.99*m.x910 - 10.99*m.x926 - 4.91*m.x938
- 4.91*m.x948 - 4.91*m.x963 + 29.81*m.x975 + 29.81*m.x983 + 29.81*m.x993 + 29.81*m.x1002
+ 23.85*m.x1017 + 23.85*m.x1025 + 23.85*m.x1034 + 23.85*m.x1044 + 43.28*m.x1082 - 5.43*m.x1094
- 1.56*m.x1124 - 20.88*m.x1164 + 7.83*m.x1176 - 18.88*m.x1186 + 41.83*m.x1200 - 4.91*m.x1226
+ 29.81*m.x1234 <= 0)
m.c55 = Constraint(expr= - 65.99*m.x88 - 65.99*m.x96 - 65.99*m.x103 - 65.99*m.x112 - 65.99*m.x119 - 65.99*m.x128
- 70.82*m.x140 - 70.82*m.x150 - 70.82*m.x159 - 70.82*m.x168 - 13.86*m.x182 - 13.86*m.x190
- 13.86*m.x200 - 13.86*m.x207 - 13.86*m.x214 - 13.86*m.x229 - 17.7*m.x241 - 17.7*m.x251
- 17.7*m.x258 - 17.7*m.x265 - 17.7*m.x273 - 17.7*m.x289 - 70*m.x301 - 70*m.x309 - 70*m.x319
- 70*m.x326 - 70*m.x335 - 27.74*m.x343 - 27.74*m.x351 - 27.74*m.x358 - 27.74*m.x367
- 27.74*m.x375 - 27.74*m.x391 - 9.02*m.x403 - 9.02*m.x411 - 9.02*m.x421 - 9.02*m.x428
- 9.02*m.x435 - 9.02*m.x445 - 44.87*m.x457 - 44.87*m.x465 - 44.87*m.x475 - 44.87*m.x482
- 44.87*m.x491 - 44.87*m.x500 - 44.87*m.x508 - 44.87*m.x518 - 52.25*m.x530 - 52.25*m.x540
- 52.25*m.x547 - 52.25*m.x554 - 52.25*m.x563 - 52.25*m.x579 - 76.33*m.x591 - | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""CARLA Enhance dataset and datamodule.
"""
from __future__ import annotations
import glob
import os
import random
from typing import Callable
from typing import Optional
import matplotlib.pyplot as plt
from torch.utils.data import random_split
from onevision.cv import show_images
from onevision.cv import VisionBackend
from onevision.data import DataModule
from onevision.data import ImageEnhancementDataset
from onevision.data import ImageInfo
from onevision.data import VisionData
from onevision.data import VisionDataHandler
from onevision.factory import DATAMODULES
from onevision.factory import DATASETS
from onevision.nn import Phase
from onevision.type import Augment_
from onevision.type import Int3T
from onevision.type import Tasks
from onevision.utils import console
from onevision.utils import datasets_dir
from onevision.utils import progress_bar
__all__ = [
"CARLAEnhance",
"CARLAEnhanceDataModule"
]
# MARK: - CARLAEnhance
@DATASETS.register(name="carlaenhance")
class CARLAEnhance(ImageEnhancementDataset):
"""CARLA Enhance dataset consists of multiple datasets related to rain removal
task.
Args:
weather (Tasks, optional):
Weather to use. One of the values in `self.weathers`.
Can also be a list to include multiple subsets. When `all`, `*`, or
`None`, all subsets will be included. Default: `*`.
time (Tasks, optional):
Time of day to use. One of the values in `self.times`.
Can also be a list to include multiple subsets. When `all`, `*`, or
`None`, all subsets will be included. Default: `*`.
"""
weathers = ["clear", "cloudy", "soft_rainy", "mid_rainy", "hard_rainy", "wet",
"wet_cloudy"]
times = ["noon", "sunset", "night"]
# MARK: Magic Functions
def __init__(
self,
root : str,
weather : Optional[Tasks] = "*",
time : Optional[Tasks] = "*",
split : str = "train",
shape : Int3T = (720, 1280, 3),
caching_labels : bool = False,
caching_images : bool = False,
write_labels : bool = False,
fast_dev_run : bool = False,
load_augment : Optional[dict] = None,
augment : Optional[Augment_] = None,
vision_backend : Optional[VisionBackend] = None,
transforms : Optional[Callable] = None,
transform : Optional[Callable] = None,
target_transform: Optional[Callable] = None,
*args, **kwargs
):
self.weather = weather
self.time = time
super().__init__(
root = root,
split = split,
shape = shape,
caching_labels = caching_labels,
caching_images = caching_images,
write_labels = write_labels,
fast_dev_run = fast_dev_run,
load_augment = load_augment,
augment = augment,
transforms = transforms,
transform = transform,
target_transform = target_transform,
vision_backend = vision_backend,
*args, **kwargs
)
# MARK: Properties
@property
def weather(self) -> list[str]:
return self._weather
@weather.setter
def weather(self, weather: Optional[Tasks]):
weather = [weather] if isinstance(weather, str) else weather
if weather is None or "all" in weather or "*" in weather:
weather = self.weathers
self._weather = weather
@property
def time(self) -> list[str]:
return self._time
@time.setter
def time(self, time: Optional[Tasks]):
time = [time] if isinstance(time, str) else time
if time is None or "all" in time or "*" in time:
time = self.times
self._time = time
# MARK: List Files
def list_files(self):
"""List image and label files."""
# NOTE: List all files
if "clear" in self.weather:
self.list_weather_files("clear")
if "cloudy" in self.weather:
self.list_weather_files("cloudy")
if "soft_rainy" in self.weather:
self.list_weather_files("soft_rainy")
if "mid_rainy" in self.weather:
self.list_weather_files("mid_rainy")
if "hard_rainy" in self.weather:
self.list_weather_files("hard_rainy")
if "wet" in self.weather:
self.list_weather_files("wet")
if "wet_cloudy" in self.weather:
self.list_weather_files("wet_cloudy")
# NOTE: fast_dev_run, select only a subset of images
if self.fast_dev_run:
indices = [random.randint(0, len(self.image_paths) - 1)
for _ in range(self.batch_size)]
self.image_paths = [self.image_paths[i] for i in indices]
self.eimage_paths = [self.eimage_paths[i] for i in indices]
# self.label_paths = [self.label_paths[i] for i in indices]
self.custom_label_paths = [self.custom_label_paths[i] for i in indices]
# NOTE: Assertion
if (
len(self.image_paths) <= 0
or len(self.image_paths) != len(self.eimage_paths)
):
raise ValueError(
f"Number of images != Number of enhanced images: "
f"{len(self.image_paths)} != {len(self.eimage_paths)}."
)
console.log(f"Number of images: {len(self.image_paths)}.")
def list_weather_files(self, weather: str):
"""List all files."""
if weather not in self.weathers:
return
with progress_bar() as pbar:
eimage_pattern = os.path.join(
self.root, "enhance", "*", "*_default.png"
)
for eimage_path in pbar.track(
glob.glob(eimage_pattern),
description=f"[bright_yellow]Listing {weather} images"
):
for t in self.time:
postfix = f"{weather}_{t}"
image_path = eimage_path.replace("default", postfix)
custom_label_path = eimage_path.replace("default", "annotations_custom")
custom_label_path = custom_label_path.replace(".png", ".json")
self.image_paths.append(image_path)
self.eimage_paths.append(eimage_path)
# self.label_paths.append(label_path)
self.custom_label_paths.append(custom_label_path)
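# Hypothetical example of the substitution above: with weather "clear" and time
# "noon", an enhanced image ".../enhance/Town01/0001_default.png" maps to the
# input image ".../enhance/Town01/0001_clear_noon.png" and the custom label
# ".../enhance/Town01/0001_annotations_custom.json".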
# MARK: Load Data
def load_label(
self,
image_path : str,
enhance_path : str,
label_path : Optional[str] = None,
custom_label_path: Optional[str] = None
) -> VisionData:
"""Load all labels from a raw label `file`.
Args:
image_path (str):
Image file.
enhance_path (str):
Target image file.
label_path (str, optional):
Label file. Default: `None`.
custom_label_path (str, optional):
Custom label file. Default: `None`.
Returns:
data (VisionData):
`VisionData` object.
"""
# NOTE: If we have custom labels
if custom_label_path and os.path.isfile(custom_label_path):
return VisionDataHandler().load_from_file(
image_path = image_path,
label_path = custom_label_path,
eimage_path = enhance_path
)
# NOTE: Parse info
image_info = ImageInfo.from_file(image_path=image_path)
eimage_info = ImageInfo.from_file(image_path=enhance_path)
return VisionData(image_info=image_info, eimage_info=eimage_info)
def load_class_labels(self):
"""Load ClassLabels."""
pass
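# A minimal usage sketch (root path and subset choices below are illustrative only):
# dataset = CARLAEnhance(
#     root    = os.path.join(datasets_dir, "carla"),
#     weather = ["soft_rainy", "hard_rainy"],
#     time    = ["noon", "night"],
#     split   = "train",
#     shape   = (512, 512, 3),
# )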
# MARK: - CARLAEnhanceDataModule
@DATAMODULES.register(name="carlaenhance")
class CARLAEnhanceDataModule(DataModule):
"""CARLA Enhance DataModule."""
# MARK: Magic Functions
def __init__(
self,
dataset_dir: str = os.path.join(datasets_dir, "carla"),
name : str = "carlaenhance",
*args, **kwargs
):
super().__init__(dataset_dir=dataset_dir, name=name, *args, **kwargs)
self.dataset_kwargs = kwargs
# MARK: Prepare Data
def prepare_data(self, *args, **kwargs):
"""Use this method to do things that might write to disk or that need
to be done only from a single GPU in distributed settings.
- Download.
- Tokenize.
"""
if self.class_labels is None:
self.load_class_labels()
def setup(self, phase: Optional[Phase] = None):
"""There are also data operations you might want to perform on every
GPU.
Todos:
- Count number of classes.
- Build class_labels vocabulary.
- Perform train/val/test splits.
- Apply transforms (defined explicitly in your datamodule or
assigned in init).
- Define collate_fn for your custom dataset.
Args:
phase (Phase, optional):
Phase to use: [None, Phase.TRAINING, Phase.TESTING].
Set to "None" to setup all train, val, and test data.
Default: `None`.
"""
console.log(f"Setup [red]CARLA Enhance[/red] datasets.")
# NOTE: Assign train/val datasets for use in dataloaders
full_dataset = CARLAEnhance(
root=self.dataset_dir, split="train", **self.dataset_kwargs
)
train_size = int(0.8 * len(full_dataset))
val_size = int((len(full_dataset) - train_size) / 2)
test_size = len(full_dataset) - train_size - val_size
self.train, self.val, self.test = random_split(
full_dataset, [train_size, val_size, test_size]
)
if phase in [None, Phase.TRAINING]:
self.class_labels = getattr(self.train, "class_labels", None)
self.collate_fn = getattr(self.train, "collate_fn", None)
# NOTE: Assign test datasets for use in dataloader(s)
if phase in [None, Phase.TESTING]:
self.class_labels = getattr(self.test, "class_labels", None)
self.collate_fn = getattr(self.test, "collate_fn", None)
if self.class_labels is None:
self.load_class_labels()
self.summarize()
def load_class_labels(self):
"""Load ClassLabels."""
pass
# MARK: - Main
if __name__ == "__main__":
# NOTE: Get DataModule
cfgs = {
"name": "carlaenhance",
# Dataset's name.
"weather": ["*"],
# Weather to use. One of: ["clear", "cloudy", "soft_rainy", "mid_rainy",
# "hard_rainy", "wet", "wet_cloudy"]. Can also be a list to include multiple
# subsets. When `all`, `*`, or `None`, all subsets will be included.
# Default: `*`.
"time": ["*"],
# Time of day to use. One of: ["noon", "sunset", "night"]. Can also be a
# list to include multiple subsets. When `all`, `*`, or `None`, all subsets
# will be included. Default: `*`.
"shape": [512, 512, 3],
# Image shape as [H, W, C]. This is compatible with OpenCV format.
"batch_size": 4,
# Number of samples in one forward & backward pass.
"caching_labels": True,
# Should overwrite the existing cached labels? Default: `False`.
"caching_images": False,
# Cache images into memory for faster training. Default: `False`.
"write_labels": False,
# After loading images and labels for the first time, we will convert it
# to our custom data format and write to files. If `True`, we will
# overwrite these files. Default: `False`.
"fast_dev_run": False,
# Take a small subset of the data for fast debug (i.e, like unit testing).
# Default: `False`.
"shuffle": True,
# Set to `True` to have the data reshuffled at every training epoch.
# Default: `True`.
"load_augment": {
"mosaic": 0.0,
"mixup" : 0.5,
},
# Augmented loading policy.
"augment": {
"name": "paired_images_auto_augment",
# Name of the augmentation policy.
"policy": "enhancement",
# Augmentation policy. One of: [`enhancement`]. Default: `enhancement`.
"fill": None,
# Pixel fill value for the area outside the transformed image.
# If given a number, the value is used for all bands respectively.
"to_tensor": True,
# Convert a PIL Image or numpy.ndarray [H, W, C] in the range [0, 255]
# to a torch.FloatTensor of shape [C, H, W] in the range [0.0, 1.0].
# Default: `True`.
},
# Augmentation policy.
"vision_backend": VisionBackend.PIL,
# Vision backend option.
}
dm = CARLAEnhanceDataModule(**cfgs)
dm.setup()
# NOTE: Visualize labels
if dm.class_labels:
dm.class_labels.print()
# NOTE: Visualize one | |
= client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# test that the credentials from file are saved and used as the credentials.
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel"
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
file_creds = ga_credentials.AnonymousCredentials()
load_creds.return_value = (file_creds, None)
adc.return_value = (creds, None)
client = client_class(client_options=options, transport=transport_name)
create_channel.assert_called_with(
"dialogflow.googleapis.com:443",
credentials=file_creds,
credentials_file=None,
quota_project_id=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
scopes=None,
default_host="dialogflow.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"request_type", [transition_route_group.ListTransitionRouteGroupsRequest, dict,]
)
def test_list_transition_route_groups(request_type, transport: str = "grpc"):
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_transition_route_groups), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = transition_route_group.ListTransitionRouteGroupsResponse(
next_page_token="next_page_token_value",
)
response = client.list_transition_route_groups(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == transition_route_group.ListTransitionRouteGroupsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListTransitionRouteGroupsPager)
assert response.next_page_token == "<PASSWORD>"
def test_list_transition_route_groups_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_transition_route_groups), "__call__"
) as call:
client.list_transition_route_groups()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == transition_route_group.ListTransitionRouteGroupsRequest()
@pytest.mark.asyncio
async def test_list_transition_route_groups_async(
transport: str = "grpc_asyncio",
request_type=transition_route_group.ListTransitionRouteGroupsRequest,
):
client = TransitionRouteGroupsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_transition_route_groups), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
transition_route_group.ListTransitionRouteGroupsResponse(
next_page_token="<PASSWORD>",
)
)
response = await client.list_transition_route_groups(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == transition_route_group.ListTransitionRouteGroupsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListTransitionRouteGroupsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_transition_route_groups_async_from_dict():
await test_list_transition_route_groups_async(request_type=dict)
def test_list_transition_route_groups_field_headers():
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = transition_route_group.ListTransitionRouteGroupsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_transition_route_groups), "__call__"
) as call:
call.return_value = transition_route_group.ListTransitionRouteGroupsResponse()
client.list_transition_route_groups(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_transition_route_groups_field_headers_async():
client = TransitionRouteGroupsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = transition_route_group.ListTransitionRouteGroupsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_transition_route_groups), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
transition_route_group.ListTransitionRouteGroupsResponse()
)
await client.list_transition_route_groups(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_transition_route_groups_flattened():
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_transition_route_groups), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = transition_route_group.ListTransitionRouteGroupsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_transition_route_groups(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_list_transition_route_groups_flattened_error():
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_transition_route_groups(
transition_route_group.ListTransitionRouteGroupsRequest(),
parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_transition_route_groups_flattened_async():
client = TransitionRouteGroupsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_transition_route_groups), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = transition_route_group.ListTransitionRouteGroupsResponse()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
transition_route_group.ListTransitionRouteGroupsResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_transition_route_groups(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_transition_route_groups_flattened_error_async():
client = TransitionRouteGroupsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_transition_route_groups(
transition_route_group.ListTransitionRouteGroupsRequest(),
parent="parent_value",
)
def test_list_transition_route_groups_pager(transport_name: str = "grpc"):
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_transition_route_groups), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
transition_route_group.ListTransitionRouteGroupsResponse(
transition_route_groups=[
transition_route_group.TransitionRouteGroup(),
transition_route_group.TransitionRouteGroup(),
transition_route_group.TransitionRouteGroup(),
],
next_page_token="abc",
),
transition_route_group.ListTransitionRouteGroupsResponse(
transition_route_groups=[], next_page_token="def",
),
transition_route_group.ListTransitionRouteGroupsResponse(
transition_route_groups=[
transition_route_group.TransitionRouteGroup(),
],
next_page_token="ghi",
),
transition_route_group.ListTransitionRouteGroupsResponse(
transition_route_groups=[
transition_route_group.TransitionRouteGroup(),
transition_route_group.TransitionRouteGroup(),
],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_transition_route_groups(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(
isinstance(i, transition_route_group.TransitionRouteGroup) for i in results
)
def test_list_transition_route_groups_pages(transport_name: str = "grpc"):
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_transition_route_groups), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
transition_route_group.ListTransitionRouteGroupsResponse(
transition_route_groups=[
transition_route_group.TransitionRouteGroup(),
transition_route_group.TransitionRouteGroup(),
transition_route_group.TransitionRouteGroup(),
],
next_page_token="abc",
),
transition_route_group.ListTransitionRouteGroupsResponse(
transition_route_groups=[], next_page_token="def",
),
transition_route_group.ListTransitionRouteGroupsResponse(
transition_route_groups=[
transition_route_group.TransitionRouteGroup(),
],
next_page_token="ghi",
),
transition_route_group.ListTransitionRouteGroupsResponse(
transition_route_groups=[
transition_route_group.TransitionRouteGroup(),
transition_route_group.TransitionRouteGroup(),
],
),
RuntimeError,
)
pages = list(client.list_transition_route_groups(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_transition_route_groups_async_pager():
client = TransitionRouteGroupsAsyncClient(
credentials=ga_credentials.AnonymousCredentials,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_transition_route_groups),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
transition_route_group.ListTransitionRouteGroupsResponse(
transition_route_groups=[
transition_route_group.TransitionRouteGroup(),
transition_route_group.TransitionRouteGroup(),
transition_route_group.TransitionRouteGroup(),
],
next_page_token="abc",
),
transition_route_group.ListTransitionRouteGroupsResponse(
transition_route_groups=[], next_page_token="def",
),
transition_route_group.ListTransitionRouteGroupsResponse(
transition_route_groups=[
transition_route_group.TransitionRouteGroup(),
],
next_page_token="ghi",
),
transition_route_group.ListTransitionRouteGroupsResponse(
transition_route_groups=[
transition_route_group.TransitionRouteGroup(),
transition_route_group.TransitionRouteGroup(),
],
),
RuntimeError,
)
async_pager = await client.list_transition_route_groups(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(
isinstance(i, transition_route_group.TransitionRouteGroup)
for i in responses
)
@pytest.mark.asyncio
async def test_list_transition_route_groups_async_pages():
client = TransitionRouteGroupsAsyncClient(
credentials=ga_credentials.AnonymousCredentials,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_transition_route_groups),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
transition_route_group.ListTransitionRouteGroupsResponse(
transition_route_groups=[
transition_route_group.TransitionRouteGroup(),
transition_route_group.TransitionRouteGroup(),
transition_route_group.TransitionRouteGroup(),
],
next_page_token="abc",
),
transition_route_group.ListTransitionRouteGroupsResponse(
transition_route_groups=[], next_page_token="def",
),
transition_route_group.ListTransitionRouteGroupsResponse(
transition_route_groups=[
transition_route_group.TransitionRouteGroup(),
],
next_page_token="ghi",
),
transition_route_group.ListTransitionRouteGroupsResponse(
transition_route_groups=[
transition_route_group.TransitionRouteGroup(),
transition_route_group.TransitionRouteGroup(),
],
),
RuntimeError,
)
pages = []
async for page_ in (
await client.list_transition_route_groups(request={})
).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize(
"request_type", [transition_route_group.GetTransitionRouteGroupRequest, dict,]
)
def test_get_transition_route_group(request_type, transport: str = "grpc"):
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual | |
def addFeatures(self, features, gdbVersion='', rollbackOnFailure=True):
"""add new features to feature service layer
features -- esri JSON representation of features
ex:
adds = [{"geometry":
{"x":-10350208.415443439,
"y":5663994.806146532,
"spatialReference":
{"wkid":102100}},
"attributes":
{"Utility_Type":2,"Five_Yr_Plan":"No","Rating":None,"Inspection_Date":1429885595000}}]
"""
add_url = self.url + '/addFeatures'
if isinstance(features, (list, tuple)):
features = json.dumps(features, ensure_ascii=False)
params = {FEATURES: features,
GDB_VERSION: gdbVersion,
ROLLBACK_ON_FAILURE: rollbackOnFailure,
F: PJSON}
# add features
return self.__edit_handler(self.request(add_url, params))
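# Usage sketch ("fl" is an illustrative FeatureLayer instance; values are made up):
# adds = [{"geometry": {"x": -10350208.4, "y": 5663994.8,
#                       "spatialReference": {"wkid": 102100}},
#          "attributes": {"Utility_Type": 2, "Five_Yr_Plan": "No"}}]
# result = fl.addFeatures(adds)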
def updateFeatures(self, features, gdbVersion='', rollbackOnFailure=True):
"""update features in feature service layer
Required:
features -- features to be updated (JSON)
Optional:
gdbVersion -- geodatabase version to apply edits
rollbackOnFailure -- specify if the edits should be applied only if all submitted edits succeed
# example syntax
updates = [{"geometry":
{"x":-10350208.415443439,
"y":5663994.806146532,
"spatialReference":
{"wkid":102100}},
"attributes":
{"Five_Yr_Plan":"Yes","Rating":90,"OBJECTID":1}}] #only fields that were changed!
"""
if isinstance(features, (list, tuple)):
features = json.dumps(features, ensure_ascii=False)
update_url = self.url + '/updateFeatures'
params = {FEATURES: features,
GDB_VERSION: gdbVersion,
ROLLBACK_ON_FAILURE: rollbackOnFailure,
F: JSON}
# update features
return self.__edit_handler(self.request(update_url, params))
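# Usage sketch ("fl" is illustrative); only changed attributes plus the OBJECTID are sent:
# updates = [{"attributes": {"OBJECTID": 1, "Rating": 90, "Five_Yr_Plan": "Yes"}}]
# result = fl.updateFeatures(updates)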
def deleteFeatures(self, oids='', where='', geometry='', geometryType='',
spatialRel='', inSR='', gdbVersion='', rollbackOnFailure=True):
"""deletes features based on list of OIDs
Optional:
oids -- list of oids or comma separated values
where -- where clause for features to be deleted. All selected features will be deleted
geometry -- geometry JSON object used to delete features.
geometryType -- type of geometry
spatialRel -- spatial relationship. Default is "esriSpatialRelationshipIntersects"
inSR -- input spatial reference for geometry
gdbVersion -- geodatabase version to apply edits
rollbackOnFailure -- specify if the edits should be applied only if all submitted edits succeed
oids format example:
oids = [1, 2, 3] # list
oids = "1, 2, 4" # as string
"""
if not geometryType:
geometryType = ESRI_ENVELOPE
if not spatialRel:
spatialRel = ESRI_INTERSECT
del_url = self.url + '/deleteFeatures'
if isinstance(oids, (list, tuple)):
oids = ', '.join(map(str, oids))
params = {OBJECT_IDS: oids,
WHERE: where,
GEOMETRY: geometry,
GEOMETRY_TYPE: geometryType,
SPATIAL_REL: spatialRel,
GDB_VERSION: gdbVersion,
ROLLBACK_ON_FAILURE: rollbackOnFailure,
F: JSON}
# delete features
return self.__edit_handler(self.request(del_url, params))
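# Usage sketch ("fl" is illustrative); delete by OID list or by where clause:
# fl.deleteFeatures(oids=[1, 2, 3])
# fl.deleteFeatures(where="Rating < 50")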
def applyEdits(self, adds=None, updates=None, deletes=None, attachments=None, gdbVersion=None, rollbackOnFailure=TRUE, useGlobalIds=False, **kwargs):
"""apply edits on a feature service layer
Optional:
adds -- features to add (JSON)
updates -- features to be updated (JSON)
deletes -- oids to be deleted (list, tuple, or comma separated string)
attachments -- attachments to be added, updated or deleted (added at version 10.4). Attachments
in this instance must use global IDs and the layer's "supportsApplyEditsWithGlobalIds" must
be true.
gdbVersion -- geodatabase version to apply edits
rollbackOnFailure -- specify if the edits should be applied only if all submitted edits succeed
useGlobalIds -- (added at 10.4) Optional parameter which is false by default. Requires
the layer's supportsApplyEditsWithGlobalIds property to be true. When set to true, the
features and attachments in the adds, updates, deletes, and attachments parameters are
identified by their globalIds. When true, the service adds the new features and attachments
while preserving the globalIds submitted in the payload. If the globalId of a feature
(or an attachment) collides with a pre-existing feature (or an attachment), that feature
and/or attachment add fails. Other adds, updates, or deletes are attempted if rollbackOnFailure
is false. If rollbackOnFailure is true, the whole operation fails and rolls back on any failure
including a globalId collision.
When useGlobalIds is true, updates and deletes are identified by each feature or attachment
globalId rather than their objectId or attachmentId.
kwargs -- any additional keyword arguments supported by the applyEdits method of the REST API, see
http://resources.arcgis.com/en/help/arcgis-rest-api/index.html#/Apply_Edits_Feature_Service_Layer/02r3000000r6000000/
attachments example (supported only in 10.4 and above):
{
"adds": [{
"globalId": "{55E85F98-FBDD-4129-9F0B-848DD40BD911}",
"parentGlobalId": "{02041AEF-4174-4d81-8A98-D7AC5B9F4C2F}",
"contentType": "image/pjpeg",
"name": "Pothole.jpg",
"uploadId": "{DD1D0A30-CD6E-4ad7-A516-C2468FD95E5E}"
},
{
"globalId": "{3373EE9A-4619-41B7-918B-DB54575465BB}",
"parentGlobalId": "{6FA4AA68-76D8-4856-971D-B91468FCF7B7}",
"contentType": "image/pjpeg",
"name": "Debree.jpg",
"data": "<base 64 encoded data>"
}
],
"updates": [{
"globalId": "{8FDD9AEF-E05E-440A-9426-1D7F301E1EBA}",
"contentType": "image/pjpeg",
"name": "IllegalParking.jpg",
"uploadId": "{57860BE4-3B85-44DD-A0E7-BE252AC79061}"
}],
"deletes": [
"{95059311-741C-4596-88EF-C437C50F7C00}",
" {18F43B1C-2754-4D05-BCB0-C4643C331C29}"
]
}
"""
edits_url = self.url + '/applyEdits'
if isinstance(adds, FeatureSet):
adds = json.dumps(adds.features, ensure_ascii=False, cls=RestapiEncoder)
elif isinstance(adds, (list, tuple)):
adds = json.dumps(adds, ensure_ascii=False, cls=RestapiEncoder)
if isinstance(updates, FeatureSet):
updates = json.dumps(updates.features, ensure_ascii=False, cls=RestapiEncoder)
elif isinstance(updates, (list, tuple)):
updates = json.dumps(updates, ensure_ascii=False, cls=RestapiEncoder)
if isinstance(deletes, (list, tuple)):
deletes = ', '.join(map(str, deletes))
params = {ADDS: adds,
UPDATES: updates,
DELETES: deletes,
GDB_VERSION: gdbVersion,
ROLLBACK_ON_FAILURE: rollbackOnFailure,
USE_GLOBALIDS: useGlobalIds
}
# handle attachment edits (added at version 10.4) cannot get this to work :(
## if self.canApplyEditsWithAttachments and isinstance(attachments, dict):
## for edit_type in (ADDS, UPDATES):
## if edit_type in attachments:
## for att in attachments[edit_type]:
## if att.get(DATA) and os.path.isfile(att.get(DATA)):
## # multipart encoded files
## ct = self.guess_content_type(att.get(DATA))
## if CONTENT_TYPE not in att:
## att[CONTENT_TYPE] = ct
## if NAME not in att:
## att[NAME] = os.path.basename(att.get(DATA))
## with open(att.get(DATA), 'rb') as f:
## att[DATA] = 'data:{};base64,'.format(ct) + base64.b64encode(f.read())
## print(att[DATA][:50])
## if GLOBALID_CAMEL not in att:
## att[GLOBALID_CAMEL] = 'f5e0f368-17a1-4062-b848-48eee2dee1d5'
## temp = {k:v for k,v in att.iteritems() if k != 'data'}
## temp[DATA] = att['data'][:50]
## print(json.dumps(temp, indent=2))
## params[ATTACHMENTS] = attachments
## if any([params[ATTACHMENTS].get(k) for k in (ADDS, UPDATES, DELETES)]):
## params[USE_GLOBALIDS] = TRUE
# add other keyword arguments
for k,v in kwargs.iteritems():
params[k] = v
return self.__edit_handler(self.request(edits_url, params))
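# Usage sketch ("fl" and all values below are illustrative only); applyEdits bundles
# adds, updates, and deletes into a single request:
# result = fl.applyEdits(
#     updates=[{"attributes": {"OBJECTID": 7, "Rating": 75}}],
#     deletes=[12, 13],
#     rollbackOnFailure=True,
# )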
def addAttachment(self, oid, attachment, content_type='', gdbVersion=''):
"""add an attachment to a feature service layer
Required:
oid -- OBJECT ID of feature in which to add attachment
attachment -- path to attachment
Optional:
content_type -- html media type for "content_type" header. If nothing provided,
will use a best guess based on file extension (using mimetypes)
gdbVersion -- geodatabase version for attachment
valid content types can be found here @:
http://en.wikipedia.org/wiki/Internet_media_type
"""
if self.hasAttachments:
content_type = self.guess_content_type(attachment, content_type)
# make post request
att_url = '{}/{}/addAttachment'.format(self.url, oid)
files = {ATTACHMENT: (os.path.basename(attachment), open(attachment, 'rb'), content_type)}
params = {F: JSON}
if isinstance(self.token, Token) and self.token.isAGOL:
params[TOKEN] = str(self.token)
if gdbVersion:
params[GDB_VERSION] = gdbVersion
return self.__edit_handler(requests.post(att_url, params, files=files, cookies=self._cookie, verify=False).json(), oid)
else:
raise NotImplementedError('FeatureLayer "{}" does not support attachments!'.format(self.name))
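# Usage sketch ("fl", the OID, and the path are illustrative); the content type is
# guessed from the file extension when not supplied:
# fl.addAttachment(17, r'C:\temp\Pothole.jpg')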
def deleteAttachments(self, oid, attachmentIds, gdbVersion='', **kwargs):
"""deletes attachments in a feature layer
Required:
oid -- OBJECT ID of feature in which to add attachment
attachmentIds -- IDs of attachments to be deleted. If attachmentIds param is set to "All", all
attachments for this feature will be deleted.
Optional:
kwargs -- additional keyword arguments supported by deleteAttachments method
"""
if self.hasAttachments:
att_url = '{}/{}/deleteAttachments'.format(self.url, oid)
if isinstance(attachmentIds, (list, tuple)):
attachmentIds = ','.join(map(str, attachmentIds))
elif isinstance(attachmentIds, basestring) and attachmentIds.title() == 'All':
attachmentIds = ','.join(map(str, [getattr(att, ID) for att in self.attachments(oid)]))
if not attachmentIds:
return
params = {F: JSON, ATTACHMENT_IDS: attachmentIds}
if isinstance(self.token, Token) and self.token.isAGOL:
params[TOKEN] = str(self.token)
if gdbVersion:
params[GDB_VERSION] = gdbVersion
for k,v in kwargs.iteritems():
params[k] = v
return self.__edit_handler(requests.post(att_url, params, cookies=self._cookie, verify=False).json(), oid)
else:
raise NotImplementedError('FeatureLayer "{}" does not support attachments!'.format(self.name))
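# Usage sketch ("fl" and the IDs are illustrative); pass specific attachment IDs or
# "All" to remove every attachment on the feature:
# fl.deleteAttachments(17, [2, 3])
# fl.deleteAttachments(17, "All")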
def updateAttachment(self, oid, attachmentId, attachment, content_type='', gdbVersion='', validate=False):
"""add an attachment to a feature service layer
Required:
oid -- OBJECT ID of feature in which to add attachment
attachmentId -- ID of feature attachment
attachment -- path to attachment
Optional:
content_type -- html media type for "content_type" header. If nothing provided,
will use a best guess based on file extension (using mimetypes)
gdbVersion -- geodatabase version for attachment
validate -- option to check if attachment ID exists within feature first before
attempting an update, this adds a small amount of overhead to method because
a request to fetch attachments is made prior to updating. Default is False.
valid content types can be found here @:
http://en.wikipedia.org/wiki/Internet_media_type
"""
if self.hasAttachments:
content_type = self.guess_content_type(attachment, content_type)
# make post request
att_url = '{}/{}/updateAttachment'.format(self.url, oid)
if validate:
if attachmentId not in [getattr(att, ID) for att in self.attachments(oid)]:
raise ValueError('Attachment with ID "{}" not found in Feature with OID "{}"'.format(attachmentId, oid))
files = {ATTACHMENT: (os.path.basename(attachment), open(attachment, 'rb'), content_type)}
params = {F: JSON, ATTACHMENT_ID: attachmentId}
if isinstance(self.token, Token) and self.token.isAGOL:
params[TOKEN] = str(self.token)
if gdbVersion:
params[GDB_VERSION] = gdbVersion
return self.__edit_handler(requests.post(att_url, params, files=files, cookies=self._cookie, verify=False).json(), oid)
else:
raise NotImplementedError('FeatureLayer "{}" does not support attachments!'.format(self.name))
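# Usage sketch ("fl", the IDs, and the path are illustrative); validate=True first
# checks that the attachment ID exists on the feature before updating:
# fl.updateAttachment(17, 2, r'C:\temp\Pothole_fixed.jpg', validate=True)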
def calculate(self, exp, where='1=1', sqlFormat='standard'):
"""calculate a field in a Feature Layer
Required:
exp -- expression as JSON [{"field": "Street", "value": "Main St"},..]
Optional:
where -- where clause for field calculator
sqlFormat -- SQL |